Changes mark_page_accessed to only set the PageReferenced bit, and not move
pages around the LRUs. This means we don't have to take the lru_lock, and it
also makes page ageing and scanning more consistent, with all of it handled
in mm/vmscan.c. (A toy sketch of the old versus new behaviour follows the
patch.)

---

 linux-2.6-npiggin/include/linux/swap.h |    8 +++++--
 linux-2.6-npiggin/mm/filemap.c         |   16 ++------------
 linux-2.6-npiggin/mm/shmem.c           |    6 -----
 linux-2.6-npiggin/mm/swap.c            |   36 ---------------------------------
 linux-2.6-npiggin/mm/swapfile.c        |    6 ++---
 5 files changed, 13 insertions(+), 59 deletions(-)

diff -puN mm/filemap.c~vm-mark_page_accessed mm/filemap.c
--- linux-2.6/mm/filemap.c~vm-mark_page_accessed	2005-02-09 20:47:41.000000000 +1100
+++ linux-2.6-npiggin/mm/filemap.c	2005-02-09 21:02:53.000000000 +1100
@@ -438,10 +438,8 @@ EXPORT_SYMBOL(unlock_page);
  */
 void end_page_writeback(struct page *page)
 {
-	if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) {
-		if (!test_clear_page_writeback(page))
-			BUG();
-	}
+	if (!test_clear_page_writeback(page))
+		BUG();
 	smp_mb__after_clear_bit();
 	wake_up_page(page, PG_writeback);
 }
@@ -693,7 +691,6 @@ void do_generic_mapping_read(struct addr
 	unsigned long offset;
 	unsigned long req_size;
 	unsigned long next_index;
-	unsigned long prev_index;
 	loff_t isize;
 	struct page *cached_page;
 	int error;
@@ -702,7 +699,6 @@ void do_generic_mapping_read(struct addr
 	cached_page = NULL;
 	index = *ppos >> PAGE_CACHE_SHIFT;
 	next_index = index;
-	prev_index = ra.prev_page;
 	req_size = (desc->count + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 	offset = *ppos & ~PAGE_CACHE_MASK;
 
@@ -752,13 +748,7 @@ page_ok:
 		if (mapping_writably_mapped(mapping))
 			flush_dcache_page(page);
 
-		/*
-		 * When (part of) the same page is read multiple times
-		 * in succession, only mark it as accessed the first time.
-		 */
-		if (prev_index != index)
-			mark_page_accessed(page);
-		prev_index = index;
+		mark_page_accessed(page);
 
 		/*
 		 * Ok, we have the page, and it's up-to-date, so
diff -puN mm/swap.c~vm-mark_page_accessed mm/swap.c
--- linux-2.6/mm/swap.c~vm-mark_page_accessed	2005-02-09 20:47:41.000000000 +1100
+++ linux-2.6-npiggin/mm/swap.c	2005-02-11 20:56:44.000000000 +1100
@@ -96,42 +96,6 @@ int rotate_reclaimable_page(struct page
 	return 0;
 }
 
-/*
- * FIXME: speed this up?
- */
-void fastcall activate_page(struct page *page)
-{
-	struct zone *zone = page_zone(page);
-
-	spin_lock_irq(&zone->lru_lock);
-	if (PageLRU(page) && !PageActive(page)) {
-		del_page_from_inactive_list(zone, page);
-		SetPageActive(page);
-		add_page_to_active_list(zone, page);
-		inc_page_state(pgactivate);
-	}
-	spin_unlock_irq(&zone->lru_lock);
-}
-
-/*
- * Mark a page as having seen activity.
- *
- * inactive,unreferenced	->	inactive,referenced
- * inactive,referenced		->	active,unreferenced
- * active,unreferenced		->	active,referenced
- */
-void fastcall mark_page_accessed(struct page *page)
-{
-	if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
-		activate_page(page);
-		ClearPageReferenced(page);
-	} else if (!PageReferenced(page)) {
-		SetPageReferenced(page);
-	}
-}
-
-EXPORT_SYMBOL(mark_page_accessed);
-
 /**
  * lru_cache_add: add a page to the page lists
  * @page: the page to add
diff -puN include/linux/swap.h~vm-mark_page_accessed include/linux/swap.h
--- linux-2.6/include/linux/swap.h~vm-mark_page_accessed	2005-02-09 20:47:41.000000000 +1100
+++ linux-2.6-npiggin/include/linux/swap.h	2005-02-11 20:56:43.000000000 +1100
@@ -165,12 +165,16 @@ extern unsigned int nr_free_pagecache_pa
 /* linux/mm/swap.c */
 extern void FASTCALL(lru_cache_add(struct page *));
 extern void FASTCALL(lru_cache_add_active(struct page *));
-extern void FASTCALL(activate_page(struct page *));
-extern void FASTCALL(mark_page_accessed(struct page *));
 extern void lru_add_drain(void);
 extern int rotate_reclaimable_page(struct page *page);
 extern void swap_setup(void);
 
+/* Mark a page as having seen activity. */
+#define mark_page_accessed(page)	\
+do {					\
+	SetPageReferenced(page);	\
+} while (0)
+
 /* linux/mm/vmscan.c */
 extern int try_to_free_pages(struct zone **, unsigned int, unsigned int);
 extern int shrink_all_memory(int);
diff -puN include/linux/mm_inline.h~vm-mark_page_accessed include/linux/mm_inline.h
diff -puN mm/memory.c~vm-mark_page_accessed mm/memory.c
diff -puN mm/shmem.c~vm-mark_page_accessed mm/shmem.c
--- linux-2.6/mm/shmem.c~vm-mark_page_accessed	2005-02-09 20:47:42.000000000 +1100
+++ linux-2.6-npiggin/mm/shmem.c	2005-02-09 20:47:42.000000000 +1100
@@ -1523,11 +1523,7 @@ static void do_shmem_file_read(struct fi
 			 */
 			if (mapping_writably_mapped(mapping))
 				flush_dcache_page(page);
-			/*
-			 * Mark the page accessed if we read the beginning.
-			 */
-			if (!offset)
-				mark_page_accessed(page);
+			mark_page_accessed(page);
 		} else
 			page = ZERO_PAGE(0);
 
diff -puN include/linux/buffer_head.h~vm-mark_page_accessed include/linux/buffer_head.h
diff -puN mm/swapfile.c~vm-mark_page_accessed mm/swapfile.c
--- linux-2.6/mm/swapfile.c~vm-mark_page_accessed	2005-02-09 20:47:42.000000000 +1100
+++ linux-2.6-npiggin/mm/swapfile.c	2005-02-09 20:47:42.000000000 +1100
@@ -467,10 +467,10 @@ static unsigned long unuse_pmd(struct vm
 		pte_unmap(pte);
 
 		/*
-		 * Move the page to the active list so it is not
-		 * immediately swapped out again after swapon.
+		 * Touch the page so it is not immediately swapped
+		 * out again after swapon.
 		 */
-		activate_page(page);
+		mark_page_accessed(page);
 
 		/* add 1 since address may be 0 */
 		return 1 + address;
_
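
For reference, here is a toy userspace sketch of the behavioural change, not
kernel code: the old mark_page_accessed() had to take the per-zone lru_lock
from the read path whenever it promoted a page from the inactive to the
active list, while the new one is a single flag set, with promotion deferred
to the scanner. The struct, mutex and flags below are hypothetical stand-ins
for struct page, zone->lru_lock, PG_active and PG_referenced; the PageLRU
check and per-CPU details are omitted for brevity.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_page {
	bool active;		/* stand-in for PG_active */
	bool referenced;	/* stand-in for PG_referenced */
};

static pthread_mutex_t toy_lru_lock = PTHREAD_MUTEX_INITIALIZER;

/* Old behaviour: a second touch promotes the page, which means taking
 * the lru_lock (as activate_page() did) from the read path. */
static void old_mark_page_accessed(struct toy_page *page)
{
	if (!page->active && page->referenced) {
		pthread_mutex_lock(&toy_lru_lock);
		page->active = true;	/* move inactive -> active list */
		pthread_mutex_unlock(&toy_lru_lock);
		page->referenced = false;
	} else if (!page->referenced) {
		page->referenced = true;
	}
}

/* New behaviour: just record the reference; no lock, no list movement. */
static void new_mark_page_accessed(struct toy_page *page)
{
	page->referenced = true;
}

/* Promotion now happens only in the scanner (mm/vmscan.c in the real
 * kernel), which already holds the lru_lock while walking the lists. */
static void toy_scan(struct toy_page *page)
{
	pthread_mutex_lock(&toy_lru_lock);
	if (!page->active && page->referenced) {
		page->active = true;
		page->referenced = false;
	}
	pthread_mutex_unlock(&toy_lru_lock);
}

int main(void)
{
	struct toy_page p_old = { false, false };
	struct toy_page p_new = { false, false };

	old_mark_page_accessed(&p_old);	/* first touch: sets referenced */
	old_mark_page_accessed(&p_old);	/* second touch: locks, promotes */

	new_mark_page_accessed(&p_new);	/* any touch: one flag, no lock */
	toy_scan(&p_new);		/* ageing decided at scan time */

	printf("old: active=%d, new: active=%d\n",
			p_old.active, p_new.active);
	return 0;
}

Both pages end up active, but in the new scheme the hot read path never
touches the lock: every ageing and promotion decision is made in one place,
under the lock the scanner already holds.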