* [PATCH](1/2) rmap14 for ac  (was: Re: 2.5.34-mm4)
@ 2002-09-16  2:47 Rik van Riel
From: Rik van Riel @ 2002-09-16  2:47 UTC
  To: Alan Cox; +Cc: linux-mm, Kernel Mailing List

On 16 Sep 2002, Alan Cox wrote:

> So send me rmap-14a patches by all means

And here is the patch that takes you from rmap14 to
rmap14a + small bugfixes.  Don't be fooled by the
BitKeeper changelog; there was a one-line (whitespace)
reject I had to fix ;)

please apply,

Rik
-- 
Bravely reimplemented by the knights who say "NIH".
http://www.surriel.com/		http://distro.conectiva.com/
Spamtraps of the month:  september@surriel.com trac@trac.org


# This is a BitKeeper generated patch for the following project:
# Project Name: Linux kernel tree
# This patch format is intended for GNU patch command version 2.5 or higher.
# This patch includes the following deltas:
#	           mm/rmap.c	1.7     -> 1.8
#	include/linux/mmzone.h	1.12    -> 1.13
#	     mm/page_alloc.c	1.51    -> 1.53
#	         mm/vmscan.c	1.72    -> 1.77
#	        mm/filemap.c	1.71    -> 1.72
#	         mm/memory.c	1.55    -> 1.56
#
# The following is the BitKeeper ChangeSet Log
# --------------------------------------------
# 02/08/15	riel@imladris.surriel.com	1.677
# rmap 14 release
# --------------------------------------------
# 02/08/16	riel@imladris.surriel.com	1.678
# low latency zap_page_range also without preempt
# --------------------------------------------
# 02/08/16	riel@imladris.surriel.com	1.679
# remove unneeded pte_chain_unlock/lock pair from vmscan.c
# --------------------------------------------
# 02/08/16	riel@imladris.surriel.com	1.680
# semicolon day, fix typo in rmap.c w/ DEBUG_RMAP
# --------------------------------------------
# 02/08/17	riel@imladris.surriel.com	1.681
# fix smp lock in page_launder_zone (Arjan van de Ven)
# --------------------------------------------
# 02/08/17	riel@imladris.surriel.com	1.682
# Ingo Molnar's per-cpu pages
# --------------------------------------------
# 02/08/18	riel@imladris.surriel.com	1.683
# another (minor) smp fix for page_launder
# --------------------------------------------
# 02/08/18	riel@imladris.surriel.com	1.684
# - throughput tuning for page_launder
# - don't allocate swap space for pages we're not writing
# --------------------------------------------
# 02/08/18	riel@imladris.surriel.com	1.685
# rmap 14a
# --------------------------------------------
# 02/08/19	riel@imladris.surriel.com	1.686
# page_launder bug found by Andrew Morton
# --------------------------------------------
# 02/08/19	riel@imladris.surriel.com	1.687
# clean up mark_page_accessed
# --------------------------------------------
# 02/08/30	riel@imladris.surriel.com	1.688
# Alpha NUMA fix for Ingo's per-cpu pages
# --------------------------------------------
#
--- linux-2.4.20-pre5-ac6-rmap14/mm/filemap.c.orig	2002-09-15 23:39:25.000000000 -0300
+++ linux-2.4.20-pre5-ac6-rmap14/mm/filemap.c	2002-09-15 23:43:42.000000000 -0300
@@ -1383,20 +1383,17 @@
  */
 void mark_page_accessed(struct page *page)
 {
-	if (PageInactiveClean(page)) {
+	/* Mark the page referenced, AFTER checking for previous usage.. */
+	SetPageReferenced(page);
+
+	if (unlikely(PageInactiveClean(page))) {
 		struct zone_struct *zone = page_zone(page);
 		int free = zone->free_pages + zone->inactive_clean_pages;

 		activate_page(page);
 		if (free < zone->pages_low)
 			wakeup_kswapd(GFP_NOIO);
-		if (zone->free_pages < zone->pages_min)
-			fixup_freespace(zone, 1);
-		return;
 	}
-
-	/* Mark the page referenced, AFTER checking for previous usage.. */
-	SetPageReferenced(page);
 }

 /*
--- linux-2.4.20-pre5-ac6-rmap14/mm/memory.c.orig	2002-09-15 23:39:25.000000000 -0300
+++ linux-2.4.20-pre5-ac6-rmap14/mm/memory.c	2002-09-15 23:43:05.000000000 -0300
@@ -436,6 +436,9 @@

 		spin_unlock(&mm->page_table_lock);

+		if (current->need_resched)
+			schedule();
+
 		address += block;
 		size -= block;
 	}
--- linux-2.4.20-pre5-ac6-rmap14/mm/page_alloc.c.orig	2002-09-15 23:38:38.000000000 -0300
+++ linux-2.4.20-pre5-ac6-rmap14/mm/page_alloc.c	2002-09-15 23:43:05.000000000 -0300
@@ -10,6 +10,7 @@
  *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
  *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
+ *  Per-CPU page pool, Ingo Molnar, Red Hat, 2001, 2002
  */

 #include <linux/config.h>
@@ -22,6 +23,7 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/mm_inline.h>
+#include <linux/smp.h>

 int nr_swap_pages;
 int nr_active_pages;
@@ -85,6 +87,7 @@
 	unsigned long index, page_idx, mask, flags;
 	free_area_t *area;
 	struct page *base;
+	per_cpu_t *per_cpu;
 	zone_t *zone;

 	/* Yes, think what happens when other parts of the kernel take
@@ -93,6 +96,13 @@
 	if (PageLRU(page))
 		lru_cache_del(page);

+	/*
+	 * This late check is safe because reserved pages do not
+	 * have a valid page->count. This trick avoids overhead
+	 * in __free_pages().
+	 */
+	if (PageReserved(page))
+		return;
 	if (page->buffers)
 		BUG();
 	if (page->mapping) {
@@ -129,7 +139,18 @@

 	area = zone->free_area + order;

-	spin_lock_irqsave(&zone->lock, flags);
+	per_cpu = zone->cpu_pages + smp_processor_id();
+
+	__save_flags(flags);
+	__cli();
+	if (!order && (per_cpu->nr_pages < per_cpu->max_nr_pages) && (free_high(zone) <= 0)) {
+		list_add(&page->list, &per_cpu->head);
+		per_cpu->nr_pages++;
+		__restore_flags(flags);
+		return;
+	}
+
+	spin_lock(&zone->lock);

 	zone->free_pages -= mask;

@@ -193,13 +214,32 @@
 static FASTCALL(struct page * rmqueue(zone_t *zone, unsigned int order));
 static struct page * rmqueue(zone_t *zone, unsigned int order)
 {
+	per_cpu_t *per_cpu = zone->cpu_pages + smp_processor_id();
 	free_area_t * area = zone->free_area + order;
 	unsigned int curr_order = order;
 	struct list_head *head, *curr;
 	unsigned long flags;
 	struct page *page;
+	int threshold = 0;
+
+	if (!(current->flags & PF_MEMALLOC))
+		 threshold = (per_cpu->max_nr_pages / 8);
+	__save_flags(flags);
+	__cli();

-	spin_lock_irqsave(&zone->lock, flags);
+	if (!order && (per_cpu->nr_pages>threshold)) {
+		if (unlikely(list_empty(&per_cpu->head)))
+			BUG();
+		page = list_entry(per_cpu->head.next, struct page, list);
+		list_del(&page->list);
+		per_cpu->nr_pages--;
+		__restore_flags(flags);
+
+		set_page_count(page, 1);
+		return page;
+	}
+
+ 	spin_lock(&zone->lock);
 	do {
 		head = &area->free_list;
 		curr = head->next;
@@ -596,7 +636,7 @@

 void __free_pages(struct page *page, unsigned int order)
 {
-	if (!PageReserved(page) && put_page_testzero(page))
+	if (put_page_testzero(page))
 		__free_pages_ok(page, order);
 }

@@ -879,6 +919,7 @@

 	offset = lmem_map - mem_map;
 	for (j = 0; j < MAX_NR_ZONES; j++) {
+		int k;
 		zone_t *zone = pgdat->node_zones + j;
 		unsigned long mask, extrafree = 0;
 		unsigned long size, realsize;
@@ -891,6 +932,18 @@
 		printk("zone(%lu): %lu pages.\n", j, size);
 		zone->size = size;
 		zone->name = zone_names[j];
+
+		for (k = 0; k < NR_CPUS; k++) {
+			per_cpu_t *per_cpu = zone->cpu_pages + k;
+
+			INIT_LIST_HEAD(&per_cpu->head);
+			per_cpu->nr_pages = 0;
+			per_cpu->max_nr_pages = realsize / smp_num_cpus / 128;
+			if (per_cpu->max_nr_pages > MAX_PER_CPU_PAGES)
+				per_cpu->max_nr_pages = MAX_PER_CPU_PAGES;
+			else if (!per_cpu->max_nr_pages)
+				per_cpu->max_nr_pages = 1;
+		}
 		zone->lock = SPIN_LOCK_UNLOCKED;
 		zone->zone_pgdat = pgdat;
 		zone->free_pages = 0;
--- linux-2.4.20-pre5-ac6-rmap14/mm/rmap.c.orig	2002-09-15 23:38:38.000000000 -0300
+++ linux-2.4.20-pre5-ac6-rmap14/mm/rmap.c	2002-09-15 23:43:05.000000000 -0300
@@ -95,7 +95,7 @@
 		BUG();
 	if (!pte_present(*ptep))
 		BUG();
-	if (!ptep_to_mm(ptep));
+	if (!ptep_to_mm(ptep))
 		BUG();
 #endif

--- linux-2.4.20-pre5-ac6-rmap14/mm/vmscan.c.orig	2002-09-15 23:39:25.000000000 -0300
+++ linux-2.4.20-pre5-ac6-rmap14/mm/vmscan.c	2002-09-15 23:43:05.000000000 -0300
@@ -214,7 +214,7 @@
 	int maxscan, cleaned_pages, target, maxlaunder, iopages;
 	struct list_head * entry, * next;

-	target = free_plenty(zone);
+	target = max(free_plenty(zone), zone->pages_min);
 	cleaned_pages = iopages = 0;

 	/* If we can get away with it, only flush 2 MB worth of dirty pages */
@@ -222,12 +222,12 @@
 		maxlaunder = 1000000;
 	else {
 		maxlaunder = min_t(int, 512, zone->inactive_dirty_pages / 4);
-		maxlaunder = max(maxlaunder, free_plenty(zone));
+		maxlaunder = max(maxlaunder, free_plenty(zone) * 4);
 	}

 	/* The main launder loop. */
-rescan:
 	spin_lock(&pagemap_lru_lock);
+rescan:
 	maxscan = zone->inactive_dirty_pages;
 	entry = zone->inactive_dirty_list.prev;
 	next = entry->prev;
@@ -295,7 +295,6 @@
 			UnlockPage(page);
 			continue;
 		}
-		pte_chain_unlock(page);

 		/*
 		 * Anonymous process memory without backing store. Try to
@@ -303,8 +302,15 @@
 		 *
 		 * XXX: implement swap clustering ?
 		 */
-		pte_chain_lock(page);
 		if (page->pte_chain && !page->mapping && !page->buffers) {
+			/* Don't bother if we can't swap it out now. */
+			if (maxlaunder < 0) {
+				pte_chain_unlock(page);
+				UnlockPage(page);
+				list_del(entry);
+				list_add(entry, &zone->inactive_dirty_list);
+				continue;
+			}
 			page_cache_get(page);
 			pte_chain_unlock(page);
 			spin_unlock(&pagemap_lru_lock);
@@ -324,7 +330,7 @@
 		 * The page is mapped into the page tables of one or more
 		 * processes. Try to unmap it here.
 		 */
-		if (page->pte_chain) {
+		if (page->pte_chain && page->mapping) {
 			switch (try_to_unmap(page)) {
 				case SWAP_ERROR:
 				case SWAP_FAIL:
@@ -377,11 +383,11 @@
 		 * the page as well.
 		 */
 		if (page->buffers) {
-			spin_unlock(&pagemap_lru_lock);
-
 			/* To avoid freeing our page before we're done. */
 			page_cache_get(page);

+			spin_unlock(&pagemap_lru_lock);
+
 			if (try_to_release_page(page, gfp_mask)) {
 				if (!page->mapping) {
 					/*
@@ -405,10 +411,19 @@
 					 * slept; undo the stuff we did before
 					 * try_to_release_page and fall through
 					 * to the next step.
+					 * But only if the page is still on the inact. dirty
+					 * list.
 					 */
-					page_cache_release(page);

 					spin_lock(&pagemap_lru_lock);
+					/* Check if the page was removed from the list
+					 * while we looked the other way.
+					 */
+					if (!PageInactiveDirty(page)) {
+						page_cache_release(page);
+						continue;
+					}
+					page_cache_release(page);
 				}
 			} else {
 				/* failed to drop the buffers so stop here */
@@ -478,8 +493,10 @@

 	/* Clean up the remaining zones with a serious shortage, if any. */
 	for_each_zone(zone)
-		if (free_min(zone) >= 0)
-			freed += page_launder_zone(zone, gfp_mask, 1);
+		if (free_low(zone) >= 0) {
+			int fullflush = free_min(zone) > 0;
+			freed += page_launder_zone(zone, gfp_mask, fullflush);
+		}

 	return freed;
 }
--- linux-2.4.20-pre5-ac6-rmap14/include/linux/mmzone.h.orig	2002-09-15 23:39:25.000000000 -0300
+++ linux-2.4.20-pre5-ac6-rmap14/include/linux/mmzone.h	2002-09-15 23:43:04.000000000 -0300
@@ -27,6 +27,12 @@
 struct pglist_data;
 struct pte_chain;

+#define MAX_PER_CPU_PAGES 512
+typedef struct per_cpu_pages_s {
+	int			nr_pages, max_nr_pages;
+	struct list_head	head;
+} __attribute__((aligned(L1_CACHE_BYTES))) per_cpu_t;
+
 /*
  * On machines where it is needed (eg PCs) we divide physical memory
  * into multiple physical zones. On a PC we have 3 zones:
@@ -39,6 +45,7 @@
 	/*
 	 * Commonly accessed fields:
 	 */
+	per_cpu_t		cpu_pages[NR_CPUS];
 	spinlock_t		lock;
 	unsigned long		free_pages;
 	unsigned long		active_pages;

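As a rough illustration of the per-CPU page pool this patch introduces, here is a minimal user-space sketch of the fast path.  pcp_alloc(), pcp_free() and buddy_alloc() are invented stand-ins for illustration only; the real code is the page_alloc.c hunk above, which works on struct page lists under __cli() and falls back to the buddy allocator under zone->lock.

/*
 * Sketch only: invented names, user-space memory instead of struct page.
 * Each "CPU" keeps a small private stash of order-0 pages so that the
 * common alloc/free case never touches the shared (locked) allocator.
 */
#include <stdio.h>
#include <stdlib.h>

#define NCPUS         4
#define MAX_PCP_PAGES 8                 /* analogue of per_cpu->max_nr_pages */

struct pcp {
        int   nr_pages;
        void *pages[MAX_PCP_PAGES];     /* stand-in for the per-CPU page list */
};

static struct pcp cpu_pool[NCPUS];

/* Slow path: pretend these take the contended zone->lock. */
static void *buddy_alloc(void)    { return malloc(4096); }
static void  buddy_free(void *p)  { free(p); }

/* Free fast path: stash the page locally while below the cap. */
static void pcp_free(int cpu, void *page)
{
        struct pcp *pcp = &cpu_pool[cpu];

        if (pcp->nr_pages < MAX_PCP_PAGES) {
                pcp->pages[pcp->nr_pages++] = page;     /* no global lock taken */
                return;
        }
        buddy_free(page);                               /* pool full: slow path */
}

/* Alloc fast path: reuse a locally cached page if one is available. */
static void *pcp_alloc(int cpu)
{
        struct pcp *pcp = &cpu_pool[cpu];

        if (pcp->nr_pages > 0)
                return pcp->pages[--pcp->nr_pages];     /* no global lock taken */
        return buddy_alloc();                           /* pool empty: slow path */
}

int main(void)
{
        void *p = pcp_alloc(0);

        pcp_free(0, p);
        printf("cpu0 pool now holds %d page(s)\n", cpu_pool[0].nr_pages);
        return 0;
}

The point is simply that order-0 allocations and frees usually touch only the local pool, so zone->lock is taken far less often; the kernel version additionally keeps a small reserve in the pool for non-PF_MEMALLOC allocations (the "threshold" test in rmqueue above).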


* [PATCH](1/2) rmap14 for ac  (was: Re: 2.5.34-mm4)
  2002-09-16  1:33 2.5.34-mm4 Alan Cox
@ 2002-09-16  2:32 ` Rik van Riel
From: Rik van Riel @ 2002-09-16  2:32 UTC
  To: Alan Cox; +Cc: linux-mm, lkml

On 16 Sep 2002, Alan Cox wrote:

> So send me rmap-14a patches by all means

Here they come.  This first patch updates 2.4.20-pre5-ac6 to
rmap14. An incremental patch to rmap14a + misc bugfixes will
be in your mailbox in a few minutes...

Rik
-- 
Bravely reimplemented by the knights who say "NIH".
http://www.surriel.com/		http://distro.conectiva.com/
Spamtraps of the month:  september@surriel.com trac@trac.org


--- linux-2.4.19-pre2-ac3/mm/filemap.c.rmap13b	2002-08-15 23:53:06.000000000 -0300
+++ linux-2.4.19-pre2-ac3/mm/filemap.c	2002-08-15 23:56:37.000000000 -0300
@@ -237,12 +237,11 @@

 static void truncate_complete_page(struct page *page)
 {
-	/* Page has already been removed from processes, by vmtruncate()  */
-	if (page->pte_chain)
-		BUG();
-
-	/* Leave it on the LRU if it gets converted into anonymous buffers */
-	if (!page->buffers || do_flushpage(page, 0))
+	/*
+	 * Leave it on the LRU if it gets converted into anonymous buffers
+	 * or anonymous process memory.
+	 */
+	if ((!page->buffers || do_flushpage(page, 0)) && !page->pte_chain)
 		lru_cache_del(page);

 	/*
--- linux-2.4.19-pre2-ac3/mm/memory.c.rmap13b	2002-08-15 23:53:14.000000000 -0300
+++ linux-2.4.19-pre2-ac3/mm/memory.c	2002-08-15 23:59:04.000000000 -0300
@@ -380,49 +380,65 @@
 	return freed;
 }

-/*
- * remove user pages in a given range.
+#define ZAP_BLOCK_SIZE	(256 * PAGE_SIZE)
+
+/**
+ * zap_page_range - remove user pages in a given range
+ * @mm: mm_struct containing the applicable pages
+ * @address: starting address of pages to zap
+ * @size: number of bytes to zap
  */
 void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size)
 {
 	mmu_gather_t *tlb;
 	pgd_t * dir;
-	unsigned long start = address, end = address + size;
-	int freed = 0;
-
-	dir = pgd_offset(mm, address);
-
+	unsigned long start, end, addr, block;
+	int freed;
+
 	/*
-	 * This is a long-lived spinlock. That's fine.
-	 * There's no contention, because the page table
-	 * lock only protects against kswapd anyway, and
-	 * even if kswapd happened to be looking at this
-	 * process we _want_ it to get stuck.
+	 * Break the work up into blocks of ZAP_BLOCK_SIZE pages:
+	 * this decreases lock-hold time for the page_table_lock
+	 * dramatically, which could otherwise be held for a very
+	 * long time.  This decreases lock contention and increases
+	 * periods of preemptibility.
 	 */
-	if (address >= end)
-		BUG();
-	spin_lock(&mm->page_table_lock);
-	flush_cache_range(mm, address, end);
-	tlb = tlb_gather_mmu(mm);
+	while (size) {
+		if (size > ZAP_BLOCK_SIZE)
+			block = ZAP_BLOCK_SIZE;
+		else
+			block = size;
+
+		freed = 0;
+		start = addr = address;
+		end = address + block;
+		dir = pgd_offset(mm, address);

-	do {
-		freed += zap_pmd_range(tlb, dir, address, end - address);
-		address = (address + PGDIR_SIZE) & PGDIR_MASK;
-		dir++;
-	} while (address && (address < end));
+		BUG_ON(address >= end);

-	/* this will flush any remaining tlb entries */
-	tlb_finish_mmu(tlb, start, end);
+		spin_lock(&mm->page_table_lock);
+		flush_cache_range(mm, start, end);
+		tlb = tlb_gather_mmu(mm);

-	/*
-	 * Update rss for the mm_struct (not necessarily current->mm)
-	 * Notice that rss is an unsigned long.
-	 */
-	if (mm->rss > freed)
-		mm->rss -= freed;
-	else
-		mm->rss = 0;
-	spin_unlock(&mm->page_table_lock);
+		do {
+			freed += zap_pmd_range(tlb, dir, addr, end - addr);
+			addr = (addr + PGDIR_SIZE) & PGDIR_MASK;
+			dir++;
+		} while (addr && (addr < end));
+
+		/* this will flush any remaining tlb entries */
+		tlb_finish_mmu(tlb, start, end);
+
+		/* Update rss for the mm_struct (need not be current->mm) */
+		if (mm->rss > freed)
+			mm->rss -= freed;
+		else
+			mm->rss = 0;
+
+		spin_unlock(&mm->page_table_lock);
+
+		address += block;
+		size -= block;
+	}
 }

 /*
@@ -873,18 +889,19 @@
 static inline int remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
 	unsigned long phys_addr, pgprot_t prot)
 {
-	unsigned long end;
+	unsigned long base, end;

+	base = address & PGDIR_MASK;
 	address &= ~PGDIR_MASK;
 	end = address + size;
 	if (end > PGDIR_SIZE)
 		end = PGDIR_SIZE;
 	phys_addr -= address;
 	do {
-		pte_t * pte = pte_alloc(mm, pmd, address);
+		pte_t * pte = pte_alloc(mm, pmd, address + base);
 		if (!pte)
 			return -ENOMEM;
-		remap_pte_range(pte, address, end - address, address + phys_addr, prot);
+		remap_pte_range(pte, base + address, end - address, address + phys_addr, prot);
 		address = (address + PMD_SIZE) & PMD_MASK;
 		pmd++;
 	} while (address && (address < end));
--- linux-2.4.19-pre2-ac3/mm/vmscan.c.rmap13b	2002-08-15 23:53:26.000000000 -0300
+++ linux-2.4.19-pre2-ac3/mm/vmscan.c	2002-08-15 23:59:04.000000000 -0300
@@ -195,6 +195,7 @@
  * page_launder_zone - clean dirty inactive pages, move to inactive_clean list
  * @zone: zone to free pages in
  * @gfp_mask: what operations we are allowed to do
+ * @full_flush: full-out page flushing, if we couldn't get enough clean pages
  *
  * This function is called when we are low on free / inactive_clean
  * pages, its purpose is to refill the free/clean list as efficiently
@@ -208,19 +209,30 @@
  * This code is heavily inspired by the FreeBSD source code. Thanks
  * go out to Matthew Dillon.
  */
-#define	CAN_DO_FS	((gfp_mask & __GFP_FS) && should_write)
-int page_launder_zone(zone_t * zone, int gfp_mask, int priority)
+int page_launder_zone(zone_t * zone, int gfp_mask, int full_flush)
 {
-	int maxscan, cleaned_pages, target;
-	struct list_head * entry;
+	int maxscan, cleaned_pages, target, maxlaunder, iopages;
+	struct list_head * entry, * next;

 	target = free_plenty(zone);
-	cleaned_pages = 0;
+	cleaned_pages = iopages = 0;
+
+	/* If we can get away with it, only flush 2 MB worth of dirty pages */
+	if (full_flush)
+		maxlaunder = 1000000;
+	else {
+		maxlaunder = min_t(int, 512, zone->inactive_dirty_pages / 4);
+		maxlaunder = max(maxlaunder, free_plenty(zone));
+	}

 	/* The main launder loop. */
+rescan:
 	spin_lock(&pagemap_lru_lock);
-	maxscan = zone->inactive_dirty_pages >> priority;
-	while (maxscan-- && !list_empty(&zone->inactive_dirty_list)) {
+	maxscan = zone->inactive_dirty_pages;
+	entry = zone->inactive_dirty_list.prev;
+	next = entry->prev;
+	while (maxscan-- && !list_empty(&zone->inactive_dirty_list) &&
+			next != &zone->inactive_dirty_list) {
 		struct page * page;

 		/* Low latency reschedule point */
@@ -231,14 +243,20 @@
 			continue;
 		}

-		entry = zone->inactive_dirty_list.prev;
+		entry = next;
+		next = entry->prev;
 		page = list_entry(entry, struct page, lru);

+		/* This page was removed while we looked the other way. */
+		if (!PageInactiveDirty(page))
+			goto rescan;
+
 		if (cleaned_pages > target)
 			break;

-		list_del(entry);
-		list_add(entry, &zone->inactive_dirty_list);
+		/* Stop doing IO if we've laundered too many pages already. */
+		if (maxlaunder < 0)
+			gfp_mask &= ~(__GFP_IO|__GFP_FS);

 		/* Wrong page on list?! (list corruption, should not happen) */
 		if (!PageInactiveDirty(page)) {
@@ -257,7 +275,6 @@

 		/*
 		 * The page is locked. IO in progress?
-		 * Move it to the back of the list.
 		 * Acquire PG_locked early in order to safely
 		 * access page->mapping.
 		 */
@@ -341,10 +358,16 @@
 				spin_unlock(&pagemap_lru_lock);

 				writepage(page);
+				maxlaunder--;
 				page_cache_release(page);

 				spin_lock(&pagemap_lru_lock);
 				continue;
+			} else {
+				UnlockPage(page);
+				list_del(entry);
+				list_add(entry, &zone->inactive_dirty_list);
+				continue;
 			}
 		}

@@ -391,6 +414,7 @@
 				/* failed to drop the buffers so stop here */
 				UnlockPage(page);
 				page_cache_release(page);
+				maxlaunder--;

 				spin_lock(&pagemap_lru_lock);
 				continue;
@@ -443,21 +467,19 @@
  */
 int page_launder(int gfp_mask)
 {
-	int maxtry = 1 << DEF_PRIORITY;
 	struct zone_struct * zone;
 	int freed = 0;

 	/* Global balancing while we have a global shortage. */
-	while (maxtry-- && free_high(ALL_ZONES) >= 0) {
+	if (free_high(ALL_ZONES) >= 0)
 		for_each_zone(zone)
 			if (free_plenty(zone) >= 0)
-				freed += page_launder_zone(zone, gfp_mask, 6);
-	}
+				freed += page_launder_zone(zone, gfp_mask, 0);

 	/* Clean up the remaining zones with a serious shortage, if any. */
 	for_each_zone(zone)
 		if (free_min(zone) >= 0)
-			freed += page_launder_zone(zone, gfp_mask, 0);
+			freed += page_launder_zone(zone, gfp_mask, 1);

 	return freed;
 }
@@ -814,6 +836,7 @@
 	set_current_state(TASK_UNINTERRUPTIBLE);
 	schedule_timeout(HZ / 4);
 	kswapd_overloaded = 0;
+	wmb();
 	return;
 }

--- linux-2.4.19-pre2-ac3/include/linux/mm.h.rmap13b	2002-08-15 23:52:54.000000000 -0300
+++ linux-2.4.19-pre2-ac3/include/linux/mm.h	2002-08-16 00:01:31.000000000 -0300
@@ -344,15 +344,19 @@
 	 * busywait with less bus contention for a good time to
 	 * attempt to acquire the lock bit.
 	 */
+#ifdef CONFIG_SMP
 	while (test_and_set_bit(PG_chainlock, &page->flags)) {
 		while (test_bit(PG_chainlock, &page->flags))
 			cpu_relax();
 	}
+#endif
 }

 static inline void pte_chain_unlock(struct page *page)
 {
+#ifdef CONFIG_SMP
 	clear_bit(PG_chainlock, &page->flags);
+#endif
 }

 /*
--- linux-2.4.19-pre2-ac3/include/linux/mmzone.h.rmap13b	2002-08-15 23:53:00.000000000 -0300
+++ linux-2.4.19-pre2-ac3/include/linux/mmzone.h	2002-08-16 00:01:31.000000000 -0300
@@ -27,8 +27,6 @@
 struct pglist_data;
 struct pte_chain;

-#define MAX_CHUNKS_PER_NODE 8
-
 /*
  * On machines where it is needed (eg PCs) we divide physical memory
  * into multiple physical zones. On a PC we have 3 zones:

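As a rough illustration of the low-latency zap_page_range rework in this patch, here is a minimal user-space sketch of the block-at-a-time pattern.  zap_block() and range_lock are invented stand-ins for zap_pmd_range() and mm->page_table_lock; the real code is the memory.c hunk above.

/*
 * Sketch only: invented names, a pthread mutex instead of the kernel's
 * page_table_lock.  A large range is torn down in fixed-size blocks and
 * the lock is dropped between blocks, so it is never held for the whole
 * range at once.
 */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define PAGE_SIZE      4096UL
#define ZAP_BLOCK_SIZE (256 * PAGE_SIZE)        /* same block size as the patch */

static pthread_mutex_t range_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for zap_pmd_range(): tear down 'bytes' starting at 'address'. */
static void zap_block(unsigned long address, unsigned long bytes)
{
        (void)address;
        (void)bytes;
}

static void zap_range(unsigned long address, unsigned long size)
{
        while (size) {
                unsigned long block = size > ZAP_BLOCK_SIZE ? ZAP_BLOCK_SIZE : size;

                pthread_mutex_lock(&range_lock);        /* held per block only */
                zap_block(address, block);
                pthread_mutex_unlock(&range_lock);

                sched_yield();  /* ~ the need_resched/schedule() point rmap14a adds */

                address += block;
                size -= block;
        }
}

int main(void)
{
        zap_range(0x10000000UL, 8UL << 20);     /* 8 MB handled as 1 MB blocks */
        printf("done\n");
        return 0;
}

Bounding how long the lock is held per block is what keeps both lock contention and scheduling latency down; the explicit reschedule point corresponds to the current->need_resched check that the rmap14a increment earlier in this thread adds between blocks.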

