From: Peter Zijlstra <a.p.zijlstra@chello.nl>
To: linux-mm@kvack.org, linux-kernel@vger.kernel.org
Cc: Andrew Morton <akpm@osdl.org>,
    Peter Zijlstra <a.p.zijlstra@chello.nl>,
    Christoph Lameter <christoph@lameter.com>,
    Wu Fengguang <wfg@mail.ustc.edu.cn>,
    Nick Piggin <npiggin@suse.de>, Marijn Meijles <marijn@bitpit.net>,
    Rik van Riel <riel@redhat.com>,
    Marcelo Tosatti <marcelo.tosatti@cyclades.com>
Subject: [PATCH 7/9] clockpro-remove-old.patch
Date: Fri, 30 Dec 2005 23:43:44 +0100
Message-ID: <20051230224322.765.51438.sendpatchset@twins.localnet>
In-Reply-To: <20051230223952.765.21096.sendpatchset@twins.localnet>

From: Peter Zijlstra <a.p.zijlstra@chello.nl>

Remove the old page replacement code, which is now unused.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>

 mm/page_replace.c | 298 ------------------------------------------------------
 1 file changed, 298 deletions(-)
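
For reviewers who do not want to wade through the whole removal: the file
implemented the classic active/inactive LRU with the swappiness heuristic
from refill_inactive_zone().  That heuristic boils down to the following
standalone sketch (not part of this patch; the function name and parameter
list are made up for illustration):

	/* simplified restatement of the removed reclaim_mapped decision */
	static int should_reclaim_mapped(long nr_mapped, long total_memory,
					 int prev_priority, int swappiness)
	{
		long distress = 100 >> prev_priority;	/* 0 = fine, 100 = trouble */
		long mapped_ratio = nr_mapped * 100 / total_memory;
		long swap_tendency = mapped_ratio / 2 + distress + swappiness;

		return swap_tendency >= 100;
	}

With the default vm_swappiness of 60 and no reclaim distress (prev_priority
of 7 or more makes distress 0), mapped pages only become eligible once
roughly 80% of memory is mapped (80/2 + 0 + 60 = 100).
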
Index: linux-2.6-git/mm/page_replace.c
===================================================================
--- linux-2.6-git.orig/mm/page_replace.c
+++ /dev/null
@@ -1,298 +0,0 @@
-#include <linux/mm_page_replace.h>
-#include <linux/swap.h>
-#include <linux/pagevec.h>
-#include <linux/init.h>
-#include <linux/rmap.h>
-#include <linux/buffer_head.h> /* for try_to_release_page(),
- buffer_heads_over_limit */
-
-/*
- * From 0 .. 100. Higher means more swappy.
- */
-int vm_swappiness = 60;
-static long total_memory;
-
-static void refill_inactive_zone(struct zone *, int);
-
-static int __init page_replace_init(void)
-{
- total_memory = nr_free_pagecache_pages();
- return 0;
-}
-
-module_init(page_replace_init)
-
-void __init page_replace_init_zone(struct zone *zone)
-{
- INIT_LIST_HEAD(&zone->active_list);
- INIT_LIST_HEAD(&zone->inactive_list);
- zone->nr_active = 0;
- zone->nr_inactive = 0;
- zone->nr_scan_active = 0;
-}
-
-static inline void
-add_page_to_inactive_list(struct zone *zone, struct page *page)
-{
- list_add(&page->lru, &zone->inactive_list);
- zone->nr_inactive++;
-}
-
-void __page_replace_insert(struct zone *zone, struct page *page)
-{
- if (PageActive(page))
- add_page_to_active_list(zone, page);
- else
- add_page_to_inactive_list(zone, page);
-}
-
-/*
- * zone->lru_lock is heavily contended. Some of the functions that
- * shrink the lists perform better by taking out a batch of pages
- * and working on them outside the LRU lock.
- *
- * For pagecache intensive workloads, this function is the hottest
- * spot in the kernel (apart from copy_*_user functions).
- *
- * Appropriate locks must be held before calling this function.
- *
- * @nr_to_scan: The number of pages to look through on the list.
- * @src: The LRU list to pull pages off.
- * @dst: The temp list to put pages on to.
- * @scanned: The number of pages that were scanned.
- *
- * returns how many pages were moved onto *@dst.
- */
-static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
- struct list_head *dst, int *scanned)
-{
- int nr_taken = 0;
- struct page *page;
- int scan = 0;
-
- while (scan++ < nr_to_scan && !list_empty(src)) {
- page = lru_to_page(src);
- prefetchw_prev_lru_page(page, src, flags);
-
- if (!TestClearPageLRU(page))
- BUG();
- list_del(&page->lru);
- if (get_page_testone(page)) {
- /*
- * It is being freed elsewhere
- */
- __put_page(page);
- SetPageLRU(page);
- list_add(&page->lru, src);
- continue;
- } else {
- list_add(&page->lru, dst);
- nr_taken++;
- }
- }
-
- *scanned = scan;
- return nr_taken;
-}
-
-void page_replace_candidates(struct zone *zone, int nr_to_scan, struct list_head *page_list)
-{
- int nr_taken;
- int nr_scan;
- unsigned long long nr_scan_active;
-
- spin_lock_irq(&zone->lru_lock);
- nr_taken = isolate_lru_pages(nr_to_scan, &zone->inactive_list,
- page_list, &nr_scan);
- zone->nr_inactive -= nr_taken;
- zone->pages_scanned += nr_scan;
- spin_unlock_irq(&zone->lru_lock);
-
- if (current_is_kswapd())
- mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
- else
- mod_page_state_zone(zone, pgscan_direct, nr_scan);
-
- /*
- * Add one to `nr_to_scan' just to make sure that the kernel will
- * slowly sift through the active list.
- */
- nr_scan_active = (nr_scan + 1ULL) * zone->nr_active * 1024ULL;
- do_div(nr_scan_active, zone->nr_inactive + nr_taken + 1UL);
- zone->nr_scan_active += nr_scan_active;
- while (zone->nr_scan_active >= SWAP_CLUSTER_MAX * 1024UL) {
- zone->nr_scan_active -= SWAP_CLUSTER_MAX * 1024UL;
- refill_inactive_zone(zone, SWAP_CLUSTER_MAX);
- }
-}
-
-/*
- * Put back any unfreeable pages.
- */
-void page_replace_reinsert(struct zone *zone, struct list_head *page_list)
-{
- struct pagevec pvec;
-
- pagevec_init(&pvec, 1);
- spin_lock_irq(&zone->lru_lock);
- while (!list_empty(page_list)) {
- struct page *page = lru_to_page(page_list);
- BUG_ON(PageLRU(page));
- SetPageLRU(page);
- list_del(&page->lru);
- if (PageActive(page))
- add_page_to_active_list(zone, page);
- else
- add_page_to_inactive_list(zone, page);
- if (!pagevec_add(&pvec, page)) {
- spin_unlock_irq(&zone->lru_lock);
- __pagevec_release(&pvec);
- spin_lock_irq(&zone->lru_lock);
- }
- }
- spin_unlock_irq(&zone->lru_lock);
- pagevec_release(&pvec);
-}
-
-/*
- * This moves pages from the active list to the inactive list.
- *
- * We move them the other way if the page is referenced by one or more
- * processes, from rmap.
- *
- * If the pages are mostly unmapped, the processing is fast and it is
- * appropriate to hold zone->lru_lock across the whole operation. But if
- * the pages are mapped, the processing is slow (page_referenced()) so we
- * should drop zone->lru_lock around each page. It's impossible to balance
- * this, so instead we remove the pages from the LRU while processing them.
- * It is safe to rely on PG_active against the non-LRU pages in here because
- * nobody will play with that bit on a non-LRU page.
- *
- * The downside is that we have to touch page->_count against each page.
- * But we had to alter page->flags anyway.
- */
-static void refill_inactive_zone(struct zone *zone, int nr_pages)
-{
- int pgmoved;
- int pgdeactivate = 0;
- int pgscanned;
- LIST_HEAD(l_hold); /* The pages which were snipped off */
- LIST_HEAD(l_inactive); /* Pages to go onto the inactive_list */
- LIST_HEAD(l_active); /* Pages to go onto the active_list */
- struct page *page;
- struct pagevec pvec;
- int reclaim_mapped = 0;
- long mapped_ratio;
- long distress;
- long swap_tendency;
-
- lru_add_drain();
- spin_lock_irq(&zone->lru_lock);
- pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
- &l_hold, &pgscanned);
- zone->pages_scanned += pgscanned;
- zone->nr_active -= pgmoved;
- spin_unlock_irq(&zone->lru_lock);
-
- /*
- * `distress' is a measure of how much trouble we're having reclaiming
- * pages. 0 -> no problems. 100 -> great trouble.
- */
- distress = 100 >> zone->prev_priority;
-
- /*
- * The point of this algorithm is to decide when to start reclaiming
- * mapped memory instead of just pagecache. Work out how much memory
- * is mapped.
- */
- mapped_ratio = (read_page_state(nr_mapped) * 100) / total_memory;
-
- /*
- * Now decide how much we really want to unmap some pages. The mapped
- * ratio is downgraded - just because there's a lot of mapped memory
- * doesn't necessarily mean that page reclaim isn't succeeding.
- *
- * The distress ratio is important - we don't want to start going oom.
- *
- * A 100% value of vm_swappiness overrides this algorithm altogether.
- */
- swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;
-
- /*
- * Now use this metric to decide whether to start moving mapped memory
- * onto the inactive list.
- */
- if (swap_tendency >= 100)
- reclaim_mapped = 1;
-
- while (!list_empty(&l_hold)) {
- cond_resched();
- page = lru_to_page(&l_hold);
- list_del(&page->lru);
- if (page_mapped(page)) {
- if (!reclaim_mapped ||
- (total_swap_pages == 0 && PageAnon(page)) ||
- page_referenced(page, 0, 0)) {
- list_add(&page->lru, &l_active);
- continue;
- }
- }
- list_add(&page->lru, &l_inactive);
- }
-
- pagevec_init(&pvec, 1);
- pgmoved = 0;
- spin_lock_irq(&zone->lru_lock);
- while (!list_empty(&l_inactive)) {
- page = lru_to_page(&l_inactive);
- prefetchw_prev_lru_page(page, &l_inactive, flags);
- if (TestSetPageLRU(page))
- BUG();
- if (!TestClearPageActive(page))
- BUG();
- list_move(&page->lru, &zone->inactive_list);
- pgmoved++;
- if (!pagevec_add(&pvec, page)) {
- zone->nr_inactive += pgmoved;
- spin_unlock_irq(&zone->lru_lock);
- pgdeactivate += pgmoved;
- pgmoved = 0;
- if (buffer_heads_over_limit)
- pagevec_strip(&pvec);
- __pagevec_release(&pvec);
- spin_lock_irq(&zone->lru_lock);
- }
- }
- zone->nr_inactive += pgmoved;
- pgdeactivate += pgmoved;
- if (buffer_heads_over_limit) {
- spin_unlock_irq(&zone->lru_lock);
- pagevec_strip(&pvec);
- spin_lock_irq(&zone->lru_lock);
- }
-
- pgmoved = 0;
- while (!list_empty(&l_active)) {
- page = lru_to_page(&l_active);
- prefetchw_prev_lru_page(page, &l_active, flags);
- if (TestSetPageLRU(page))
- BUG();
- BUG_ON(!PageActive(page));
- list_move(&page->lru, &zone->active_list);
- pgmoved++;
- if (!pagevec_add(&pvec, page)) {
- zone->nr_active += pgmoved;
- pgmoved = 0;
- spin_unlock_irq(&zone->lru_lock);
- __pagevec_release(&pvec);
- spin_lock_irq(&zone->lru_lock);
- }
- }
- zone->nr_active += pgmoved;
- spin_unlock_irq(&zone->lru_lock);
- pagevec_release(&pvec);
-
- mod_page_state_zone(zone, pgrefill, pgscanned);
- mod_page_state(pgdeactivate, pgdeactivate);
-}
-
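
Also for reference, the other piece of arithmetic going away here is the
proportional active-list scanning in page_replace_candidates(), which keeps
its scan credit in 1/1024ths of a page so the fractional part is not lost
between calls.  A standalone sketch under the same caveats (names are made
up, the refill callback is a stub):

	#define SWAP_CLUSTER_MAX 32

	struct lru_sketch {
		unsigned long nr_active;
		unsigned long nr_inactive;
		unsigned long long scan_credit;		/* in 1/1024 pages */
	};

	static void refill_batch(struct lru_sketch *l, int nr)
	{
		/* stand-in for refill_inactive_zone() */
	}

	static void account_inactive_scan(struct lru_sketch *l,
					  unsigned long nr_scanned,
					  unsigned long nr_taken)
	{
		/* scan the active list in proportion to its share of the LRU */
		unsigned long long credit =
			(nr_scanned + 1ULL) * l->nr_active * 1024ULL;

		credit /= l->nr_inactive + nr_taken + 1UL;	/* +1 avoids div by 0 */
		l->scan_credit += credit;

		while (l->scan_credit >= SWAP_CLUSTER_MAX * 1024ULL) {
			l->scan_credit -= SWAP_CLUSTER_MAX * 1024ULL;
			refill_batch(l, SWAP_CLUSTER_MAX);
		}
	}

The +1 terms and the accumulate-then-drain loop are taken directly from the
removed code; everything else (the struct and the names) is illustrative only.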