From: Christoph Lameter <clameter@sgi.com>
To: akpm@osdl.org
Cc: Mike Kravetz <kravetz@us.ibm.com>,
linux-kernel@vger.kernel.org, linux-mm@kvack.org,
Christoph Lameter <clameter@sgi.com>,
Magnus Damm <magnus.damm@gmail.com>,
Marcelo Tosatti <marcelo.tosatti@cyclades.com>
Subject: [PATCH 2/4] Swap migration V3: Page Eviction
Date: Thu, 20 Oct 2005 15:59:45 -0700 (PDT) [thread overview]
Message-ID: <20051020225945.19761.15772.sendpatchset@schroedinger.engr.sgi.com> (raw)
In-Reply-To: <20051020225935.19761.57434.sendpatchset@schroedinger.engr.sgi.com>
Page eviction support in vmscan.c
This patch adds functions that allow the eviction of pages to swap space.
Page eviction may be useful to migrate pages, to suspend programs or for
unmapping single pages (useful for faulty pages or pages with soft ECC
failures).
The process is as follows:
The function wanting to evict pages must first build a list of pages to be evicted
and take them off the lru lists. This is done using the isolate_lru_page function.
isolate_lru_page determines whether a page is freeable based on its LRU bit being set,
and adds the page to the specified list if it is indeed freeable.
isolate_lru_page will return 0 for a page that is not freeable.
Then the actual swapout can happen by calling swapout_pages().
swapout_pages does its best to swap out the pages, making multiple passes over the list.
However, swapout_pages may not be able to evict all pages for a variety of reasons.
The remaining pages may be returned to the LRU lists using putback_lru_pages().
V3:
- Extract common code from shrink_list() and swapout_pages()
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Index: linux-2.6.14-rc4-mm1/include/linux/swap.h
===================================================================
--- linux-2.6.14-rc4-mm1.orig/include/linux/swap.h 2005-10-20 13:13:24.000000000 -0700
+++ linux-2.6.14-rc4-mm1/include/linux/swap.h 2005-10-20 13:20:53.000000000 -0700
@@ -179,6 +179,8 @@ extern int vm_swappiness;
extern int isolate_lru_page(struct page *p, struct list_head *l);
extern int putback_lru_pages(struct list_head *l);
+extern int swapout_pages(struct list_head *l);
+
#ifdef CONFIG_MMU
/* linux/mm/shmem.c */
extern int shmem_unuse(swp_entry_t entry, struct page *page);
Index: linux-2.6.14-rc4-mm1/mm/vmscan.c
===================================================================
--- linux-2.6.14-rc4-mm1.orig/mm/vmscan.c 2005-10-20 13:18:05.000000000 -0700
+++ linux-2.6.14-rc4-mm1/mm/vmscan.c 2005-10-20 13:22:33.000000000 -0700
@@ -370,6 +370,42 @@ static pageout_t pageout(struct page *pa
return PAGE_CLEAN;
}
+static inline int remove_mapping(struct address_space *mapping,
+ struct page *page)
+{
+ if (!mapping)
+ return 0; /* truncate got there first */
+
+ write_lock_irq(&mapping->tree_lock);
+
+ /*
+ * The non-racy check for busy page. It is critical to check
+ * PageDirty _after_ making sure that the page is freeable and
+ * not in use by anybody. (pagecache + us == 2)
+ */
+ if (page_count(page) != 2 || PageDirty(page)) {
+ write_unlock_irq(&mapping->tree_lock);
+ return 0;
+ }
+
+#ifdef CONFIG_SWAP
+ if (PageSwapCache(page)) {
+ swp_entry_t swap = { .val = page->private };
+ add_to_swapped_list(swap.val);
+ __delete_from_swap_cache(page);
+ write_unlock_irq(&mapping->tree_lock);
+ swap_free(swap);
+ __put_page(page); /* The pagecache ref */
+ return 1;
+ }
+#endif /* CONFIG_SWAP */
+
+ __remove_from_page_cache(page);
+ write_unlock_irq(&mapping->tree_lock);
+ __put_page(page);
+ return 1;
+}
+
/*
* shrink_list adds the number of reclaimed pages to sc->nr_reclaimed
*/
@@ -508,36 +544,8 @@ static int shrink_list(struct list_head
goto free_it;
}
- if (!mapping)
- goto keep_locked; /* truncate got there first */
-
- write_lock_irq(&mapping->tree_lock);
-
- /*
- * The non-racy check for busy page. It is critical to check
- * PageDirty _after_ making sure that the page is freeable and
- * not in use by anybody. (pagecache + us == 2)
- */
- if (page_count(page) != 2 || PageDirty(page)) {
- write_unlock_irq(&mapping->tree_lock);
+ if (!remove_mapping(mapping, page))
goto keep_locked;
- }
-
-#ifdef CONFIG_SWAP
- if (PageSwapCache(page)) {
- swp_entry_t swap = { .val = page->private };
- add_to_swapped_list(swap.val);
- __delete_from_swap_cache(page);
- write_unlock_irq(&mapping->tree_lock);
- swap_free(swap);
- __put_page(page); /* The pagecache ref */
- goto free_it;
- }
-#endif /* CONFIG_SWAP */
-
- __remove_from_page_cache(page);
- write_unlock_irq(&mapping->tree_lock);
- __put_page(page);
free_it:
unlock_page(page);
@@ -564,6 +572,122 @@ keep:
}
/*
+ * Swapout evicts the pages on the list to swap space.
+ * This is essentially a dumbed down version of shrink_list
+ *
+ * returns the number of pages that were not evictable
+ *
+ * Multiple passes are performed over the list. The first
+ * pass avoids waiting on locks and triggers writeout
+ * actions. Later passes begin to wait on locks in order
+ * to have a better chance of acquiring the lock.
+ */
+int swapout_pages(struct list_head *l)
+{
+ int retry;
+ int failed;
+ int pass = 0;
+ struct page *page;
+ struct page *page2;
+
+ current->flags |= PF_KSWAPD;
+
+redo:
+ retry = 0;
+ failed = 0;
+
+ list_for_each_entry_safe(page, page2, l, lru) {
+ struct address_space *mapping;
+
+ cond_resched();
+
+ /*
+ * Skip locked pages during the first two passes to give the
+ * functions holding the lock time to release the page. Later we use
+ * lock_page to have a higher chance of acquiring the lock.
+ */
+ if (pass > 2)
+ lock_page(page);
+ else
+ if (TestSetPageLocked(page))
+ goto retry_later;
+
+ /*
+ * Only wait on writeback if we have already done a pass where
+ * we we may have triggered writeouts for lots of pages.
+ */
+ if (pass > 0)
+ wait_on_page_writeback(page);
+ else
+ if (PageWriteback(page))
+ goto retry_later_locked;
+
+#ifdef CONFIG_SWAP
+ if (PageAnon(page) && !PageSwapCache(page)) {
+ if (!add_to_swap(page))
+ goto failed;
+ }
+#endif /* CONFIG_SWAP */
+
+ mapping = page_mapping(page);
+ if (page_mapped(page) && mapping)
+ if (try_to_unmap(page) != SWAP_SUCCESS)
+ goto retry_later_locked;
+
+ if (PageDirty(page)) {
+ /* Page is dirty, try to write it out here */
+ switch(pageout(page, mapping)) {
+ case PAGE_KEEP:
+ case PAGE_ACTIVATE:
+ goto retry_later_locked;
+ case PAGE_SUCCESS:
+ goto retry_later;
+ case PAGE_CLEAN:
+ ; /* try to free the page below */
+ }
+ }
+
+ if (PagePrivate(page)) {
+ if (!try_to_release_page(page, GFP_KERNEL))
+ goto retry_later_locked;
+ if (!mapping && page_count(page) == 1)
+ goto free_it;
+ }
+
+ if (!remove_mapping(mapping, page))
+ goto retry_later_locked; /* truncate got there first */
+
+free_it:
+ /*
+ * We may free pages that were taken off the active list
+ * by isolate_lru_page. However, free_hot_cold_page will check
+ * if the active bit is set. So clear it.
+ */
+ ClearPageActive(page);
+
+ list_del(&page->lru);
+ unlock_page(page);
+ put_page(page);
+ continue;
+
+failed:
+ failed++;
+ unlock_page(page);
+ continue;
+
+retry_later_locked:
+ unlock_page(page);
+retry_later:
+ retry++;
+ }
+ if (retry && pass++ < 10)
+ goto redo;
+
+ current->flags &= ~PF_KSWAPD;
+ return failed + retry;
+}
+
+/*
* zone->lru_lock is heavily contended. Some of the functions that
* shrink the lists perform better by taking out a batch of pages
* and working on them outside the LRU lock.
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
next prev parent reply other threads:[~2005-10-20 22:59 UTC|newest]
Thread overview: 55+ messages / expand[flat|nested] mbox.gz Atom feed top
2005-10-20 22:59 [PATCH 0/4] Swap migration V3: Overview Christoph Lameter
2005-10-20 22:59 ` [PATCH 1/4] Swap migration V3: LRU operations Christoph Lameter
2005-10-21 6:06 ` Dave Hansen
2005-10-21 6:27 ` Magnus Damm
2005-10-21 6:56 ` Dave Hansen
2005-10-21 7:25 ` Magnus Damm
2005-10-21 15:42 ` Christoph Lameter
2005-10-21 11:49 ` Nikita Danilov
2005-10-20 22:59 ` Christoph Lameter [this message]
2005-10-22 1:06 ` [PATCH 2/4] Swap migration V3: Page Eviction Marcelo Tosatti
2005-10-20 22:59 ` [PATCH 3/4] Swap migration V3: MPOL_MF_MOVE interface Christoph Lameter
2005-10-20 22:59 ` [PATCH 4/4] Swap migration V3: sys_migrate_pages interface Christoph Lameter
2005-10-21 2:55 ` KAMEZAWA Hiroyuki
2005-10-21 7:07 ` Simon Derr
2005-10-21 7:20 ` KAMEZAWA Hiroyuki
2005-10-21 7:39 ` Simon Derr
2005-10-21 7:46 ` KAMEZAWA Hiroyuki
2005-10-21 15:22 ` Paul Jackson
2005-10-21 15:15 ` Paul Jackson
2005-10-21 15:21 ` Kamezawa Hiroyuki
2005-10-21 18:10 ` Paul Jackson
2005-10-21 18:26 ` Christoph Lameter
2005-10-21 18:57 ` Paul Jackson
2005-10-21 15:47 ` Christoph Lameter
2005-10-21 16:18 ` Ray Bryant
2005-10-21 16:33 ` Christoph Lameter
2005-10-21 15:18 ` Paul Jackson
2005-10-21 16:27 ` Christoph Lameter
2005-10-21 16:59 ` Kamezawa Hiroyuki
2005-10-21 17:03 ` Paul Jackson
2005-10-21 17:06 ` Christoph Lameter
2005-10-21 18:17 ` Paul Jackson
2005-10-20 23:06 ` [PATCH 0/4] Swap migration V3: Overview Andrew Morton
2005-10-20 23:46 ` mike kravetz
2005-10-21 3:22 ` KAMEZAWA Hiroyuki
2005-10-21 3:32 ` mike kravetz
2005-10-21 3:56 ` KAMEZAWA Hiroyuki
2005-10-21 4:22 ` mike kravetz
2005-10-21 5:13 ` KAMEZAWA Hiroyuki
2005-10-21 15:28 ` Paul Jackson
2005-10-21 16:00 ` mike kravetz
2005-10-21 5:59 ` KAMEZAWA Hiroyuki
2005-10-22 1:16 ` Marcelo Tosatti
2005-10-21 15:54 ` Christoph Lameter
2005-10-21 1:57 ` Magnus Damm
2005-10-22 0:50 ` Marcelo Tosatti
2005-10-23 12:50 ` Magnus Damm
2005-10-24 7:44 ` Marcelo Tosatti
2005-10-25 11:37 ` Magnus Damm
2005-10-25 14:37 ` Marcelo Tosatti
2005-10-26 7:04 ` Magnus Damm
2005-10-27 15:01 ` Marcelo Tosatti
2005-10-27 20:43 ` Andrew Morton
2005-10-27 21:35 ` Marcelo Tosatti
2005-10-28 3:07 ` Andrew Morton
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20051020225945.19761.15772.sendpatchset@schroedinger.engr.sgi.com \
--to=clameter@sgi.com \
--cc=akpm@osdl.org \
--cc=kravetz@us.ibm.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=magnus.damm@gmail.com \
--cc=marcelo.tosatti@cyclades.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox