From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mail137.messagelabs.com (mail137.messagelabs.com [216.82.249.19]) by kanga.kvack.org (Postfix) with ESMTP id 6AC559000C2 for ; Mon, 4 Jul 2011 10:05:42 -0400 (EDT) Received: by mail-iw0-f169.google.com with SMTP id 8so6166032iwn.14 for ; Mon, 04 Jul 2011 07:05:41 -0700 (PDT) From: Minchan Kim Subject: [PATCH v4 07/10] compaction: make compaction use in-order putback Date: Mon, 4 Jul 2011 23:04:40 +0900 Message-Id: <97da258e9d0cb9a1a4f7745fe9b84962b3ce8bdf.1309787991.git.minchan.kim@gmail.com> In-Reply-To: References: In-Reply-To: References: Sender: owner-linux-mm@kvack.org List-ID: To: Andrew Morton Cc: linux-mm , LKML , Johannes Weiner , KAMEZAWA Hiroyuki , KOSAKI Motohiro , Mel Gorman , Rik van Riel , Michal Hocko , Andrea Arcangeli , Minchan Kim Compaction is a good solution for getting contiguous pages, but it causes LRU churning, which is not good. Moreover, LRU order is important when the VM is under memory pressure and needs to select the right victim pages. This patch makes the compaction code use in-order putback, so that after compaction completes, migrated pages keep their LRU ordering. 
Cc: Johannes Weiner Cc: KAMEZAWA Hiroyuki Cc: KOSAKI Motohiro Cc: Mel Gorman Cc: Rik van Riel Cc: Andrea Arcangeli Signed-off-by: Minchan Kim --- mm/compaction.c | 25 +++++++++++++------------ 1 files changed, 13 insertions(+), 12 deletions(-) diff --git a/mm/compaction.c b/mm/compaction.c index a0e4202..7bc784a 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -28,7 +28,7 @@ */ struct compact_control { struct list_head freepages; /* List of free pages to migrate to */ - struct list_head migratepages; /* List of pages being migrated */ + struct inorder_lru migratepages;/* List of pages being migrated */ unsigned long nr_freepages; /* Number of isolated free pages */ unsigned long nr_migratepages; /* Number of pages to migrate */ unsigned long free_pfn; /* isolate_freepages search base */ @@ -221,7 +221,7 @@ static void acct_isolated(struct zone *zone, struct compact_control *cc) struct page *page; unsigned int count[2] = { 0, }; - list_for_each_entry(page, &cc->migratepages, lru) + list_for_each_ilru_entry(page, &cc->migratepages, ilru) count[!!page_is_file_cache(page)]++; __mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]); @@ -260,7 +260,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone, unsigned long low_pfn, end_pfn; unsigned long last_pageblock_nr = 0, pageblock_nr; unsigned long nr_scanned = 0, nr_isolated = 0; - struct list_head *migratelist = &cc->migratepages; + struct inorder_lru *migratelist = &cc->migratepages; isolate_mode_t mode = ISOLATE_ACTIVE|ISOLATE_INACTIVE; /* Do not scan outside zone boundaries */ @@ -295,7 +295,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone, cond_resched(); spin_lock_irq(&zone->lru_lock); for (; low_pfn < end_pfn; low_pfn++) { - struct page *page; + struct page *page, *prev_page; bool locked = true; /* give a chance to irqs before checking need_resched() */ @@ -353,14 +353,14 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone, mode |= ISOLATE_CLEAN; /* Try 
isolate the page */ - if (__isolate_lru_page(page, mode, 0) != 0) + if (isolate_ilru_page(page, mode, 0, &prev_page) != 0) continue; VM_BUG_ON(PageTransCompound(page)); /* Successfully isolated */ del_page_from_lru_list(zone, page, page_lru(page)); - list_add(&page->lru, migratelist); + ilru_list_add(page, prev_page, migratelist); cc->nr_migratepages++; nr_isolated++; @@ -416,7 +416,7 @@ static void update_nr_listpages(struct compact_control *cc) int nr_freepages = 0; struct page *page; - list_for_each_entry(page, &cc->migratepages, lru) + list_for_each_ilru_entry(page, &cc->migratepages, ilru) nr_migratepages++; list_for_each_entry(page, &cc->freepages, lru) nr_freepages++; @@ -553,7 +553,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) } nr_migrate = cc->nr_migratepages; - err = migrate_pages(&cc->migratepages, compaction_alloc, + err = migrate_ilru_pages(&cc->migratepages, + compaction_alloc, (unsigned long)cc, false, cc->sync); update_nr_listpages(cc); @@ -568,7 +569,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) /* Release LRU pages not migrated */ if (err) { - putback_lru_pages(&cc->migratepages); + putback_ilru_pages(&cc->migratepages); cc->nr_migratepages = 0; } @@ -595,7 +596,7 @@ unsigned long compact_zone_order(struct zone *zone, .sync = sync, }; INIT_LIST_HEAD(&cc.freepages); - INIT_LIST_HEAD(&cc.migratepages); + INIT_ILRU_LIST(&cc.migratepages); return compact_zone(zone, &cc); } @@ -677,12 +678,12 @@ static int compact_node(int nid) cc.zone = zone; INIT_LIST_HEAD(&cc.freepages); - INIT_LIST_HEAD(&cc.migratepages); + INIT_ILRU_LIST(&cc.migratepages); compact_zone(zone, &cc); VM_BUG_ON(!list_empty(&cc.freepages)); - VM_BUG_ON(!list_empty(&cc.migratepages)); + VM_BUG_ON(!ilru_list_empty(&cc.migratepages)); } return 0; -- 1.7.4.1 -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@kvack.org. For more info on Linux MM, see: http://www.linux-mm.org/ . 
Fight unfair telecom internet charges in Canada: sign http://stopthemeter.ca/ Don't email: email@kvack.org