* [patch 1/2] shrink_all_memory(): use sc.nr_reclaimed
From: Johannes Weiner @ 2009-02-12 11:36 UTC
To: Andrew Morton
Cc: KOSAKI Motohiro, MinChan Kim, Rafael J. Wysocki, Rik van Riel,
linux-mm, linux-kernel
[-- Attachment #1: shrink_all_memory-use-sc.nr_reclaimed.patch --]
[-- Type: text/plain, Size: 4254 bytes --]
Impact: cleanup
Commit a79311c14eae4bb946a97af25f3e1b17d625985d "vmscan: bail out of
direct reclaim after swap_cluster_max pages" moved the nr_reclaimed
counter into the scan control to accumulate the number of all
reclaimed pages in a reclaim invocation.
shrink_all_memory() can use the same mechanism, which increases code
consistency.
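(Illustration only, not part of the patch: a minimal userspace sketch of the
pattern this cleanup moves to. The helpers fake_shrink_list() and
fake_shrink_all_zones() are made up for the example; the point is that the
reclaim total is accumulated in the scan control instead of being threaded
through return values.)

	#include <stdio.h>

	/* Simplified stand-in for the kernel's scan control; only the
	 * field relevant to this cleanup is modelled. */
	struct scan_control {
		unsigned long nr_reclaimed;	/* total for the whole invocation */
	};

	/* Hypothetical helper standing in for shrink_list(): pretends to
	 * reclaim half of the pages it was asked to scan. */
	static unsigned long fake_shrink_list(unsigned long nr_to_scan)
	{
		return nr_to_scan / 2;
	}

	/* Instead of summing into a local 'ret' and returning it, each
	 * level adds into sc->nr_reclaimed; the caller reads the total
	 * from the scan control. */
	static void fake_shrink_all_zones(unsigned long nr_pages,
					  struct scan_control *sc)
	{
		int zone;

		for (zone = 0; zone < 4; zone++) {
			sc->nr_reclaimed += fake_shrink_list(nr_pages / 4);
			if (sc->nr_reclaimed >= nr_pages)
				return;	/* goal met, no return value needed */
		}
	}

	int main(void)
	{
		struct scan_control sc = { .nr_reclaimed = 0 };

		fake_shrink_all_zones(1000, &sc);
		printf("reclaimed %lu pages\n", sc.nr_reclaimed);
		return 0;
	}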
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: MinChan Kim <minchan.kim@gmail.com>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Rik van Riel <riel@redhat.com>
---
mm/vmscan.c | 49 +++++++++++++++++++++++--------------------------
1 file changed, 23 insertions(+), 26 deletions(-)
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2048,16 +2048,14 @@ unsigned long global_lru_pages(void)
#ifdef CONFIG_PM
/*
* Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages
- * from LRU lists system-wide, for given pass and priority, and returns the
- * number of reclaimed pages
+ * from LRU lists system-wide, for given pass and priority.
*
* For pass > 3 we also try to shrink the LRU lists that contain a few pages
*/
-static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
- int pass, struct scan_control *sc)
+static void shrink_all_zones(unsigned long nr_pages, int prio,
+ int pass, struct scan_control *sc)
{
struct zone *zone;
- unsigned long ret = 0;
for_each_zone(zone) {
enum lru_list l;
@@ -2082,14 +2080,13 @@ static unsigned long shrink_all_zones(un
zone->lru[l].nr_scan = 0;
nr_to_scan = min(nr_pages, lru_pages);
- ret += shrink_list(l, nr_to_scan, zone,
- sc, prio);
- if (ret >= nr_pages)
- return ret;
+ sc->nr_reclaimed += shrink_list(l, nr_to_scan,
+ zone, sc, prio);
+ if (sc->nr_reclaimed >= nr_pages)
+ return;
}
}
}
- return ret;
}
/*
@@ -2103,7 +2100,6 @@ static unsigned long shrink_all_zones(un
unsigned long shrink_all_memory(unsigned long nr_pages)
{
unsigned long lru_pages, nr_slab;
- unsigned long ret = 0;
int pass;
struct reclaim_state reclaim_state;
struct scan_control sc = {
@@ -2125,8 +2121,8 @@ unsigned long shrink_all_memory(unsigned
if (!reclaim_state.reclaimed_slab)
break;
- ret += reclaim_state.reclaimed_slab;
- if (ret >= nr_pages)
+ sc.nr_reclaimed += reclaim_state.reclaimed_slab;
+ if (sc.nr_reclaimed >= nr_pages)
goto out;
nr_slab -= reclaim_state.reclaimed_slab;
@@ -2148,18 +2144,18 @@ unsigned long shrink_all_memory(unsigned
sc.may_unmap = 1;
for (prio = DEF_PRIORITY; prio >= 0; prio--) {
- unsigned long nr_to_scan = nr_pages - ret;
+ unsigned long nr_to_scan = nr_pages - sc.nr_reclaimed;
sc.nr_scanned = 0;
- ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
- if (ret >= nr_pages)
+ shrink_all_zones(nr_to_scan, prio, pass, &sc);
+ if (sc.nr_reclaimed >= nr_pages)
goto out;
reclaim_state.reclaimed_slab = 0;
shrink_slab(sc.nr_scanned, sc.gfp_mask,
global_lru_pages());
- ret += reclaim_state.reclaimed_slab;
- if (ret >= nr_pages)
+ sc.nr_reclaimed += reclaim_state.reclaimed_slab;
+ if (sc.nr_reclaimed >= nr_pages)
goto out;
if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
@@ -2167,22 +2163,23 @@ unsigned long shrink_all_memory(unsigned
}
}
- /*
- * If ret = 0, we could not shrink LRUs, but there may be something
- * in slab caches
- */
- if (!ret) {
+ if (!sc.nr_reclaimed) {
+ /*
+ * We could not shrink LRUs, but there may be something
+ * in slab caches.
+ */
do {
reclaim_state.reclaimed_slab = 0;
shrink_slab(nr_pages, sc.gfp_mask, global_lru_pages());
- ret += reclaim_state.reclaimed_slab;
- } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
+ sc.nr_reclaimed += reclaim_state.reclaimed_slab;
+ } while (sc.nr_reclaimed < nr_pages &&
+ reclaim_state.reclaimed_slab > 0);
}
out:
current->reclaim_state = NULL;
- return ret;
+ return sc.nr_reclaimed;
}
#endif
--
* [patch 2/2] vmscan: clip swap_cluster_max in shrink_all_memory()
From: Johannes Weiner @ 2009-02-12 11:36 UTC
To: Andrew Morton
Cc: KOSAKI Motohiro, MinChan Kim, Rafael J. Wysocki,
Nigel Cunningham, Rik van Riel, linux-mm, linux-kernel
[-- Attachment #1: vmscan-clip-swap_cluster_max-in-shrink_all_memory.patch --]
[-- Type: text/plain, Size: 2928 bytes --]
shrink_inactive_list() scans in sc->swap_cluster_max chunks until it
hits the scan limit it was passed.
shrink_inactive_list()
{
	do {
		isolate_pages(swap_cluster_max)
		shrink_page_list()
	} while (nr_scanned < max_scan);
}
This assumes that swap_cluster_max is not bigger than the scan limit
because the latter is checked only after at least one iteration.
In shrink_all_memory(), sc->swap_cluster_max is initialized to the
overall reclaim goal at the beginning but is not decreased as reclaim
makes progress, which leads to subsequent calls to
shrink_inactive_list() reclaiming way too much in the one iteration
that is done unconditionally.
Always set sc->swap_cluster_max to the proper goal before going into
  shrink_all_zones()
    shrink_list()
      shrink_inactive_list().
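(Illustration only, not kernel code: a toy userspace model of the do-while
loop quoted above. toy_shrink_inactive_list() is a made-up stand-in that
"reclaims" exactly what it scans; it shows how an unclipped chunk overshoots
a nearly-met goal while a clipped one does not.)

	#include <stdio.h>

	/* Scans in 'chunk'-sized batches and checks the scan limit only
	 * after each full batch, so at least one batch always runs. */
	static unsigned long toy_shrink_inactive_list(unsigned long max_scan,
						      unsigned long chunk)
	{
		unsigned long nr_scanned = 0;

		do {
			nr_scanned += chunk;	/* isolate + shrink one batch */
		} while (nr_scanned < max_scan);

		return nr_scanned;	/* pretend everything scanned was reclaimed */
	}

	int main(void)
	{
		unsigned long goal = 10000, reclaimed;

		/* Unpatched: the chunk stays at the overall goal, so even
		 * with only 100 pages still wanted, one unconditional
		 * 10000-page batch runs and overshoots. */
		reclaimed = 9900;
		reclaimed += toy_shrink_inactive_list(goal - reclaimed, goal);
		printf("unpatched: wanted=%lu reclaimed=%lu\n", goal, reclaimed);

		/* Patched: the chunk is clipped to what is still wanted. */
		reclaimed = 9900;
		reclaimed += toy_shrink_inactive_list(goal - reclaimed,
						      goal - reclaimed);
		printf("patched:   wanted=%lu reclaimed=%lu\n", goal, reclaimed);

		return 0;
	}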
While the current shrink_all_memory() happily reclaims more than
actually requested, this patch fixes it to never exceed the goal:
unpatched
wanted=10000 reclaimed=13356
wanted=10000 reclaimed=19711
wanted=10000 reclaimed=10289
wanted=10000 reclaimed=17306
wanted=10000 reclaimed=10700
wanted=10000 reclaimed=10004
wanted=10000 reclaimed=13301
wanted=10000 reclaimed=10976
wanted=10000 reclaimed=10605
wanted=10000 reclaimed=10088
wanted=10000 reclaimed=15000
patched
wanted=10000 reclaimed=10000
wanted=10000 reclaimed=9599
wanted=10000 reclaimed=8476
wanted=10000 reclaimed=8326
wanted=10000 reclaimed=10000
wanted=10000 reclaimed=10000
wanted=10000 reclaimed=9919
wanted=10000 reclaimed=10000
wanted=10000 reclaimed=10000
wanted=10000 reclaimed=10000
wanted=10000 reclaimed=10000
wanted=10000 reclaimed=9624
wanted=10000 reclaimed=10000
wanted=10000 reclaimed=10000
wanted=8500 reclaimed=8092
wanted=316 reclaimed=316
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: MinChan Kim <minchan.kim@gmail.com>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Nigel Cunningham <ncunningham-lkml@crca.org.au>
Cc: Rik van Riel <riel@redhat.com>
---
mm/vmscan.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2105,7 +2105,6 @@ unsigned long shrink_all_memory(unsigned
struct scan_control sc = {
.gfp_mask = GFP_KERNEL,
.may_unmap = 0,
- .swap_cluster_max = nr_pages,
.may_writepage = 1,
.isolate_pages = isolate_pages_global,
};
@@ -2147,6 +2146,7 @@ unsigned long shrink_all_memory(unsigned
unsigned long nr_to_scan = nr_pages - sc.nr_reclaimed;
sc.nr_scanned = 0;
+ sc.swap_cluster_max = nr_to_scan;
shrink_all_zones(nr_to_scan, prio, pass, &sc);
if (sc.nr_reclaimed >= nr_pages)
goto out;
--