---

 linux-2.6-npiggin/include/linux/mm_inline.h  |    1 
 linux-2.6-npiggin/include/linux/page-flags.h |    8 +++
 linux-2.6-npiggin/mm/swap.c                  |    2 
 linux-2.6-npiggin/mm/vmscan.c                |   62 ++++++++++++++++-----------
 mm/page_alloc.c                              |    0 
 5 files changed, 49 insertions(+), 24 deletions(-)

diff -puN mm/vmscan.c~vm-page-skipped mm/vmscan.c
--- linux-2.6/mm/vmscan.c~vm-page-skipped	2005-02-09 20:48:24.000000000 +1100
+++ linux-2.6-npiggin/mm/vmscan.c	2005-02-11 20:56:44.000000000 +1100
@@ -361,8 +361,20 @@ static int shrink_list(struct list_head
 		if (PageWriteback(page))
 			goto keep_locked;
 
-		if (page_referenced(page, 1, sc->priority <= 0))
-			goto activate_locked;
+		if (page_referenced(page, 1, sc->priority <= 0)) {
+			/*
+			 * Has been referenced. Activate used twice or
+			 * mapped pages, otherwise give it another chance
+			 * on the inactive list
+			 */
+			if (TestSetPageUsedOnce(page) || mapped)
+				goto activate_locked;
+			if (page_test_clear_pte_dirty(page, 1))
+				set_page_dirty(page);
+			if (PageDirty(page))
+				sc->nr_dirty_inactive++;
+			goto keep_locked;
+		}
 
 #ifdef CONFIG_SWAP
 		/*
@@ -581,9 +593,10 @@ static void shrink_cache(struct zone *zo
 		if (TestSetPageLRU(page))
 			BUG();
 		list_del(&page->lru);
-		if (PageActive(page))
+		if (PageActive(page)) {
+			ClearPageUsedOnce(page);
 			add_page_to_active_list(zone, page);
-		else
+		} else
 			add_page_to_inactive_list(zone, page);
 		if (!pagevec_add(&pvec, page)) {
 			spin_unlock_irq(&zone->lru_lock);
@@ -617,7 +630,7 @@ done:
 static void
 refill_inactive_zone(struct zone *zone, struct scan_control *sc)
 {
-	int pgmoved, pgmoved_dirty;
+	int pgmoved;
 	int pgdeactivate = 0;
 	int pgscanned = 0;
 	int nr_pages = sc->nr_to_scan;
@@ -633,7 +646,6 @@ refill_inactive_zone(struct zone *zone,
 
 	lru_add_drain();
 	pgmoved = 0;
-	pgmoved_dirty = 0;
 	spin_lock_irq(&zone->lru_lock);
 	while (pgscanned < nr_pages && !list_empty(&zone->active_list)) {
@@ -717,24 +729,6 @@ refill_inactive_zone(struct zone *zone,
 		list_add(&page->lru, &l_inactive);
 	}
 
-	/*
-	 * Try to write back as many pages as the number of dirty ones
-	 * we're adding to the inactive list. This tends to cause slow
-	 * streaming writers to write data to the disk smoothly, at the
-	 * dirtying rate, which is nice. But that's undesirable in
-	 * laptop mode, where we *want* lumpy writeout. So in laptop
-	 * mode, write out the whole world.
-	 */
-	zone->nr_dirty_inactive += pgmoved_dirty;
-	pgmoved_dirty = zone->nr_dirty_inactive;
-	if (pgmoved_dirty > zone->nr_inactive / 2
-			|| (!(laptop_mode && !sc->may_writepage)
-			&& pgmoved_dirty > SWAP_CLUSTER_MAX)) {
-		zone->nr_dirty_inactive = 0;
-		wakeup_bdflush(laptop_mode ? 0 : pgmoved_dirty*2);
-		sc->may_writepage = 1;
-	}
-
 	pagevec_init(&pvec, 1);
 	pgmoved = 0;
 	spin_lock_irq(&zone->lru_lock);
@@ -799,6 +793,7 @@ shrink_zone(struct zone *zone, struct sc
 {
 	unsigned long nr_active;
 	unsigned long nr_inactive;
+	unsigned long count;
 
 	/*
	 * Add one to `nr_to_scan' just to make sure that the kernel will
@@ -819,6 +814,7 @@ shrink_zone(struct zone *zone, struct sc
 	nr_inactive = 0;
 
 	sc->nr_to_reclaim = SWAP_CLUSTER_MAX;
+	sc->nr_dirty_inactive = 0;
 
 	while (nr_active || nr_inactive) {
 		if (nr_active) {
@@ -837,6 +833,24 @@ shrink_zone(struct zone *zone, struct sc
 			break;
 		}
 	}
+
+	/*
+	 * Try to write back as many pages as the number of dirty ones
+	 * we're adding to the inactive list. This tends to cause slow
+	 * streaming writers to write data to the disk smoothly, at the
+	 * dirtying rate, which is nice. But that's undesirable in
+	 * laptop mode, where we *want* lumpy writeout. So in laptop
+	 * mode, write out the whole world.
+	 */
+	zone->nr_dirty_inactive += sc->nr_dirty_inactive;
+	count = zone->nr_dirty_inactive;
+	if (count > zone->nr_inactive / 2
+			|| (!(laptop_mode && !sc->may_writepage)
+			&& count > SWAP_CLUSTER_MAX)) {
+		zone->nr_dirty_inactive = 0;
+		wakeup_bdflush(laptop_mode ? 0 : count*2);
+		sc->may_writepage = 1;
+	}
 }
 
 /*
diff -puN include/linux/page-flags.h~vm-page-skipped include/linux/page-flags.h
--- linux-2.6/include/linux/page-flags.h~vm-page-skipped	2005-02-09 20:48:24.000000000 +1100
+++ linux-2.6-npiggin/include/linux/page-flags.h	2005-02-11 20:56:43.000000000 +1100
@@ -76,6 +76,8 @@
 #define PG_reclaim		18	/* To be reclaimed asap */
 #define PG_nosave_free		19	/* Free, should not be written */
 
+#define PG_usedonce		20	/* LRU page has been touched once */
+
 /*
  * Global page accounting.  One instance per CPU.  Only unsigned longs are
@@ -293,6 +295,12 @@ extern void __mod_page_state(unsigned of
 #define SetPageCompound(page)	set_bit(PG_compound, &(page)->flags)
 #define ClearPageCompound(page)	clear_bit(PG_compound, &(page)->flags)
 
+#define PageUsedOnce(page)	test_bit(PG_usedonce, &(page)->flags)
+#define SetPageUsedOnce(page)	set_bit(PG_usedonce, &(page)->flags)
+#define TestSetPageUsedOnce(page) test_and_set_bit(PG_usedonce, &(page)->flags)
+#define ClearPageUsedOnce(page)	clear_bit(PG_usedonce, &(page)->flags)
+#define TestClearPageUsedOnce(page) test_and_clear_bit(PG_usedonce, &(page)->flags)
+
 #ifdef CONFIG_SWAP
 #define PageSwapCache(page)	test_bit(PG_swapcache, &(page)->flags)
 #define SetPageSwapCache(page)	set_bit(PG_swapcache, &(page)->flags)
diff -puN mm/truncate.c~vm-page-skipped mm/truncate.c
diff -puN mm/swap.c~vm-page-skipped mm/swap.c
--- linux-2.6/mm/swap.c~vm-page-skipped	2005-02-09 20:48:24.000000000 +1100
+++ linux-2.6-npiggin/mm/swap.c	2005-02-11 20:56:43.000000000 +1100
@@ -267,6 +267,7 @@ void __pagevec_lru_add(struct pagevec *p
 		}
 		if (TestSetPageLRU(page))
 			BUG();
+		ClearPageUsedOnce(page);
 		add_page_to_inactive_list(zone, page);
 	}
 	if (zone)
@@ -296,6 +297,7 @@ void __pagevec_lru_add_active(struct pag
 			BUG();
 		if (TestSetPageActive(page))
 			BUG();
+		ClearPageUsedOnce(page);
 		add_page_to_active_list(zone, page);
 	}
 	if (zone)
diff -puN include/linux/swap.h~vm-page-skipped include/linux/swap.h
diff -puN include/linux/mm_inline.h~vm-page-skipped include/linux/mm_inline.h
--- linux-2.6/include/linux/mm_inline.h~vm-page-skipped	2005-02-09 20:48:24.000000000 +1100
+++ linux-2.6-npiggin/include/linux/mm_inline.h	2005-02-11 20:56:43.000000000 +1100
@@ -35,6 +35,7 @@ del_page_from_lru(struct zone *zone, str
 		ClearPageActive(page);
 		zone->nr_active--;
 	} else {
+		ClearPageUsedOnce(page);
 		zone->nr_inactive--;
 	}
 }
diff -puN mm/page_alloc.c~vm-page-skipped mm/page_alloc.c
_
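For readers following the policy change rather than the diff mechanics, below is a
minimal user-space model of the used-once heuristic that the shrink_list() hunk
implements. It is an illustrative sketch only: struct page_model, on_referenced()
and their fields are hypothetical stand-ins for the kernel's page flags,
page_referenced() result and scan_control accounting, not kernel API.

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for struct page: only the state this policy
 * reads. The real kernel keeps these as atomic bits in page->flags.
 */
struct page_model {
	bool used_once;		/* models PG_usedonce */
	bool mapped;		/* page is mapped into a process */
	bool dirty;		/* models PG_dirty */
};

enum verdict { ACTIVATE, KEEP_INACTIVE };

/*
 * Models the referenced-page policy in shrink_list() above: activate a
 * referenced page only if it is mapped or its used-once bit was already
 * set (the test-and-set below stands in for TestSetPageUsedOnce());
 * otherwise leave it on the inactive list for a second pass, counting
 * dirty pages so the caller can decide when to wake the flusher, as
 * shrink_zone() does in the patch.
 */
static enum verdict on_referenced(struct page_model *page,
				  unsigned long *nr_dirty_inactive)
{
	bool was_set = page->used_once;

	page->used_once = true;
	if (was_set || page->mapped)
		return ACTIVATE;
	if (page->dirty)
		(*nr_dirty_inactive)++;
	return KEEP_INACTIVE;
}

int main(void)
{
	unsigned long nr_dirty_inactive = 0;
	struct page_model p = { false, false, true };

	/* First reference: kept inactive, dirty count goes up. */
	printf("first ref:  %s\n",
	       on_referenced(&p, &nr_dirty_inactive) == ACTIVATE ?
	       "activate" : "keep inactive");

	/* Second reference: the used-once bit is set, so it activates. */
	printf("second ref: %s\n",
	       on_referenced(&p, &nr_dirty_inactive) == ACTIVATE ?
	       "activate" : "keep inactive");

	printf("dirty inactive pages counted: %lu\n", nr_dirty_inactive);
	return 0;
}

The design point the sketch tries to capture: a single reference no longer
promotes a page to the active list. Unmapped pages touched once (e.g. by
streaming I/O) get one more trip around the inactive list and stay cheap to
reclaim, while the dirty count feeds the wakeup_bdflush() trigger that the
patch moves from refill_inactive_zone() into shrink_zone().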