Use the actual difference in the number of pages when trying to keep the inactive list 1/2 the size of the active list (1/3 the size of all pages), instead of a meaningless ratio.

 linux-2.6-npiggin/mm/vmscan.c |   37 +++++++++++++++++++------------------
 1 files changed, 19 insertions(+), 18 deletions(-)

diff -puN mm/vmscan.c~vm-fix-shrink-zone mm/vmscan.c
--- linux-2.6/mm/vmscan.c~vm-fix-shrink-zone	2004-01-22 14:47:25.000000000 +1100
+++ linux-2.6-npiggin/mm/vmscan.c	2004-01-22 16:25:04.000000000 +1100
@@ -745,38 +745,39 @@ static int
 shrink_zone(struct zone *zone, int max_scan, unsigned int gfp_mask,
 	const int nr_pages, int *nr_mapped, struct page_state *ps, int priority)
 {
-	unsigned long ratio;
+	unsigned long imbalance;
+	unsigned long nr_refill_inact;
 
 	/*
 	 * Try to keep the active list 2/3 of the size of the cache.  And
 	 * make sure that refill_inactive is given a decent number of pages.
 	 *
-	 * The "ratio+1" here is important.  With pagecache-intensive workloads
-	 * the inactive list is huge, and `ratio' evaluates to zero all the
-	 * time.  Which pins the active list memory.  So we add one to `ratio'
-	 * just to make sure that the kernel will slowly sift through the
-	 * active list.
+	 * Keeping imbalance > 0 is important.  With pagecache-intensive loads
+	 * the inactive list is huge, and imbalance evaluates to zero all the
+	 * time which would pin the active list memory.
	 */
-	ratio = (unsigned long)nr_pages * zone->nr_active /
-				((zone->nr_inactive | 1) * 2);
-	atomic_add(ratio+1, &zone->refill_counter);
-	if (atomic_read(&zone->refill_counter) > SWAP_CLUSTER_MAX) {
-		int count;
-
+	imbalance = zone->nr_active - (zone->nr_inactive*2);
+	if (imbalance <= 0)
+		imbalance = 1;
+	else {
 		/*
 		 * Don't try to bring down too many pages in one attempt.
 		 * If this fails, the caller will increase `priority' and
 		 * we'll try again, with an increased chance of reclaiming
 		 * mapped memory.
 		 */
-		count = atomic_read(&zone->refill_counter);
-		if (count > SWAP_CLUSTER_MAX * 4)
-			count = SWAP_CLUSTER_MAX * 4;
+
+		imbalance >>= priority;
+	}
+
+	atomic_add(imbalance, &zone->refill_counter);
+	nr_refill_inact = atomic_read(&zone->refill_counter);
+	if (nr_refill_inact > SWAP_CLUSTER_MAX) {
 		atomic_set(&zone->refill_counter, 0);
-		refill_inactive_zone(zone, count, ps, priority);
+		refill_inactive_zone(zone, nr_refill_inact, ps, priority);
 	}
-	return shrink_cache(nr_pages, zone, gfp_mask,
-				max_scan, nr_mapped);
+
+	return shrink_cache(nr_pages, zone, gfp_mask, max_scan, nr_mapped);
 }
 
 /*
_
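
For illustration only, below is a minimal userspace sketch of the accounting the patch introduces: compute the active/inactive imbalance against the 2:1 target, scale it back by `priority', accumulate it, and hand the accumulated quota to the refill pass once it exceeds SWAP_CLUSTER_MAX.  Everything here is invented for the example -- the zone_counts struct, refill_quota(), the sample page counts, and the driver loop -- and SWAP_CLUSTER_MAX is assumed to be 32.  Signed longs are used so the imbalance <= 0 test is meaningful in the standalone code; this is a simplification, not the kernel implementation.

	#include <stdio.h>

	#define SWAP_CLUSTER_MAX 32	/* assumed value for this sketch */

	/* Simplified stand-in for the per-zone counters (hypothetical). */
	struct zone_counts {
		long nr_active;
		long nr_inactive;
		long refill_counter;
	};

	/*
	 * Return how many pages a refill_inactive_zone()-style pass would be
	 * asked to move, or 0 if not enough imbalance has accumulated yet.
	 */
	static long refill_quota(struct zone_counts *z, int priority)
	{
		/* Target: active = 2 * inactive, i.e. inactive is 1/3 of all pages. */
		long imbalance = z->nr_active - 2 * z->nr_inactive;

		if (imbalance <= 0)
			imbalance = 1;		/* always sift the active list a little */
		else
			imbalance >>= priority;	/* be gentle on early, low-pressure passes */

		z->refill_counter += imbalance;
		if (z->refill_counter > SWAP_CLUSTER_MAX) {
			long quota = z->refill_counter;
			z->refill_counter = 0;
			return quota;		/* move this many active -> inactive pages */
		}
		return 0;
	}

	int main(void)
	{
		/* Example zone: active list three times the inactive list. */
		struct zone_counts z = { .nr_active = 3000, .nr_inactive = 1000 };
		int priority;

		/* Show how the quota grows as priority drops from 12 toward 0. */
		for (priority = 12; priority >= 0; priority--)
			printf("priority %2d -> refill quota %ld\n",
			       priority, refill_quota(&z, priority));
		return 0;
	}

Running the sketch shows the intended behaviour: at high priority the shifted imbalance contributes little and the counter builds up slowly, while at low priority the same imbalance translates into a large quota handed to the refill pass in one go.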