--- linux/mm/page_alloc.c.orig	Mon Oct 16 23:54:03 2000
+++ linux/mm/page_alloc.c	Tue Oct 17 01:16:13 2000
@@ -264,7 +264,8 @@ static struct page * __alloc_pages_limit
 	if (z->free_pages + z->inactive_clean_pages >= water_mark) {
 		struct page *page = NULL;
 		/* If possible, reclaim a page directly. */
-		if (direct_reclaim && z->free_pages < z->pages_min + 8)
+		/* Riel: the magical "+ 8" please explain */
+		if (direct_reclaim && z->free_pages < water_mark + 8)
 			page = reclaim_page(z);
 		/* If that fails, fall back to rmqueue. */
 		if (!page)
@@ -340,6 +341,8 @@ try_again:
 		if (!z->size)
 			BUG();
 
+		/* Riel: what about using z->pages_min instead of low when
+		 * !direct_reclaim or are they too common? */
 		if (z->free_pages >= z->pages_low) {
 			page = rmqueue(z, order);
 			if (page)
@@ -382,7 +385,7 @@ try_again:
 	 * resolve this situation before memory gets tight.
 	 *
 	 * We also yield the CPU, because that:
-	 * - gives kswapd a chance to do something
+	 * - gives kswapd/kreclaimd/bdflush a chance to do something
 	 * - slows down allocations, in particular the
 	 *   allocations from the fast allocator that's
 	 *   causing the problems ...
@@ -666,14 +669,15 @@ void show_free_areas_core(pg_data_t *pgd
 		nr_free_pages() << (PAGE_SHIFT-10),
 		nr_free_highpages() << (PAGE_SHIFT-10));
 
-	printk("( Active: %d, inactive_dirty: %d, inactive_clean: %d, free: %d (%d %d %d) )\n",
+	printk("( Active: %d, inactive_dirty: %d, inactive_clean: %d, free: %d (%d %d %d) inactive_target: %d)\n",
		nr_active_pages,
		nr_inactive_dirty_pages,
		nr_inactive_clean_pages(),
		nr_free_pages(),
		freepages.min,
		freepages.low,
-		freepages.high);
+		freepages.high,
+		inactive_target);
 
 	for (type = 0; type < MAX_NR_ZONES; type++) {
 		struct list_head *head, *curr;
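
For reference, below is a minimal standalone sketch of the allocation-limit
logic the first hunk touches. The zone fields and the reclaim_page()/rmqueue()
names mirror 2.4-era page_alloc.c, but the struct layout and both helpers are
simplified stand-ins for illustration, not the kernel implementations; it only
demonstrates the shape of the watermark check and the direct-reclaim fallback,
including the "+ 8" slack the review comment asks about.

#include <stdio.h>

struct page { int dummy; };

/* Simplified stand-in for the kernel's zone_t; only the fields used
 * by the check above are kept. */
struct zone {
	unsigned long free_pages;
	unsigned long inactive_clean_pages;
	unsigned long pages_min;
	unsigned long pages_low;
};

/* Stub: in the kernel, reclaim_page() takes a page directly off the
 * zone's inactive_clean list. */
static struct page *reclaim_page(struct zone *z)
{
	static struct page p;
	if (z->inactive_clean_pages == 0)
		return NULL;
	z->inactive_clean_pages--;
	return &p;
}

/* Stub: in the kernel, rmqueue() pulls a page from the buddy free
 * lists; order is ignored in this sketch. */
static struct page *rmqueue(struct zone *z)
{
	static struct page p;
	if (z->free_pages == 0)
		return NULL;
	z->free_pages--;
	return &p;
}

/* Hypothetical helper showing the shape of the patched check: free
 * plus clean pages must cover the watermark, a direct reclaim is
 * tried while free pages sit near the watermark, and rmqueue() is
 * the fallback. */
static struct page *alloc_limit_sketch(struct zone *z,
				       unsigned long water_mark,
				       int direct_reclaim)
{
	if (z->free_pages + z->inactive_clean_pages >= water_mark) {
		struct page *page = NULL;
		/* The "+ 8" slack is the constant the review questions. */
		if (direct_reclaim && z->free_pages < water_mark + 8)
			page = reclaim_page(z);
		if (!page)
			page = rmqueue(z);
		return page;
	}
	return NULL;	/* below the watermark: caller must reclaim/wait */
}

int main(void)
{
	struct zone z = { .free_pages = 10, .inactive_clean_pages = 20,
			  .pages_min = 16, .pages_low = 32 };
	struct page *p = alloc_limit_sketch(&z, z.pages_min, 1);
	printf("allocated=%s free=%lu clean=%lu\n",
	       p ? "yes" : "no", z.free_pages, z.inactive_clean_pages);
	return 0;
}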