Subject: pte_chain_mempool-2.5.27-1
From: William Lee Irwin III
Date: 2002-07-21  3:55 UTC
To: linux-kernel
Cc: linux-mm, riel, anton

This patch converts the pte_chain freelist to a mempool backed by the slab
allocator, in order to make pte_chain allocation more reliable and efficient.
The statistics this obsoletes are dropped (the slab allocator now maintains
the equivalent information), and /proc/meminfo instead reports a ReverseMaps:
count of the reverse mappings currently in place.
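
For anyone unfamiliar with the pattern, the conversion follows the usual
mempool-over-slab setup. Below is a minimal sketch using hypothetical names
(struct foo, foo_cache, foo_pool) and an illustrative reserve size; the real
pte_chain version is in the patch further down.

	#include <linux/slab.h>
	#include <linux/mempool.h>
	#include <linux/init.h>

	struct foo {				/* hypothetical pooled object */
		struct foo *next;
	};

	static kmem_cache_t *foo_cache;		/* slab cache backing the pool */
	static mempool_t *foo_pool;		/* reserve of preallocated objects */

	/* mempool callbacks: allocate/free one object via the slab cache */
	static void *foo_pool_alloc(int gfp_mask, void *pool_data)
	{
		return kmem_cache_alloc(foo_cache, gfp_mask);
	}

	static void foo_pool_free(void *element, void *pool_data)
	{
		kmem_cache_free(foo_cache, element);
	}

	void __init foo_init(void)
	{
		foo_cache = kmem_cache_create("foo", sizeof(struct foo),
						0, 0, NULL, NULL);
		if (!foo_cache)
			panic("foo: slab cache creation failed\n");

		/* keep (say) 256 objects in reserve for atomic-context callers */
		foo_pool = mempool_create(256, foo_pool_alloc, foo_pool_free, NULL);
		if (!foo_pool)
			panic("foo: mempool creation failed\n");
	}

	/* callers then allocate and free through the pool:		*/
	/*	struct foo *f = mempool_alloc(foo_pool, GFP_ATOMIC);	*/
	/*	mempool_free(f, foo_pool);				*/

The point of the pool is that mempool_alloc() falls back to the reserve when
the backing slab allocation fails, which is what makes GFP_ATOMIC callers in
the fault path reliable.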

$ diffstat ~/patches/pte_chain_mempool-1 
 fs/proc/proc_misc.c        |    6 --
 include/linux/page-flags.h |    3 -
 init/main.c                |    4 -
 mm/page_alloc.c            |    3 -
 mm/rmap.c                  |   99 ++++++++++++++++-----------------------------
 5 files changed, 43 insertions(+), 72 deletions(-)


Cheers,
Bill


===== fs/proc/proc_misc.c 1.31 vs edited =====
--- 1.31/fs/proc/proc_misc.c	Tue Jul 16 14:46:30 2002
+++ edited/fs/proc/proc_misc.c	Sat Jul 20 18:42:07 2002
@@ -161,8 +161,7 @@
 		"Dirty:        %8lu kB\n"
 		"Writeback:    %8lu kB\n"
 		"PageTables:   %8lu kB\n"
-		"PteChainTot:  %8lu kB\n"
-		"PteChainUsed: %8lu kB\n",
+		"ReverseMaps:  %8lu\n",
 		K(i.totalram),
 		K(i.freeram),
 		K(i.sharedram),
@@ -179,8 +178,7 @@
 		K(ps.nr_dirty),
 		K(ps.nr_writeback),
 		K(ps.nr_page_table_pages),
-		K(ps.nr_pte_chain_pages),
-		ps.used_pte_chains_bytes >> 10
+		ps.nr_reverse_maps
 		);
 
 	return proc_calc_metrics(page, start, off, count, eof, len);
===== include/linux/page-flags.h 1.12 vs edited =====
--- 1.12/include/linux/page-flags.h	Tue Jul 16 14:46:30 2002
+++ edited/include/linux/page-flags.h	Sat Jul 20 18:39:12 2002
@@ -79,8 +79,7 @@
 	unsigned long nr_active;	/* on active_list LRU */
 	unsigned long nr_inactive;	/* on inactive_list LRU */
 	unsigned long nr_page_table_pages;
-	unsigned long nr_pte_chain_pages;
-	unsigned long used_pte_chains_bytes;
+	unsigned long nr_reverse_maps;
 } ____cacheline_aligned_in_smp page_states[NR_CPUS];
 
 extern void get_page_state(struct page_state *ret);
===== init/main.c 1.51 vs edited =====
--- 1.51/init/main.c	Fri Jul 19 16:00:55 2002
+++ edited/init/main.c	Sat Jul 20 16:03:02 2002
@@ -70,7 +70,7 @@
 extern void sysctl_init(void);
 extern void signals_init(void);
 extern void buffer_init(void);
-
+extern void pte_chain_init(void);
 extern void radix_tree_init(void);
 extern void free_initmem(void);
 
@@ -386,7 +386,7 @@
 	mem_init();
 	kmem_cache_sizes_init();
 	pgtable_cache_init();
-
+	pte_chain_init();
 	mempages = num_physpages;
 
 	fork_init(mempages);
===== mm/page_alloc.c 1.82 vs edited =====
--- 1.82/mm/page_alloc.c	Tue Jul 16 14:46:36 2002
+++ edited/mm/page_alloc.c	Sat Jul 20 18:39:37 2002
@@ -566,8 +566,7 @@
 		ret->nr_active += ps->nr_active;
 		ret->nr_inactive += ps->nr_inactive;
 		ret->nr_page_table_pages += ps->nr_page_table_pages;
-		ret->nr_pte_chain_pages += ps->nr_pte_chain_pages;
-		ret->used_pte_chains_bytes += ps->used_pte_chains_bytes;
+		ret->nr_reverse_maps += ps->nr_reverse_maps;
 	}
 }
 
===== mm/rmap.c 1.3 vs edited =====
--- 1.3/mm/rmap.c	Tue Jul 16 14:46:30 2002
+++ edited/mm/rmap.c	Sat Jul 20 18:41:35 2002
@@ -23,6 +23,9 @@
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/swapops.h>
+#include <linux/mempool.h>
+#include <linux/slab.h>
+#include <linux/init.h>
 
 #include <asm/pgalloc.h>
 #include <asm/rmap.h>
@@ -50,10 +53,12 @@
 	pte_t * ptep;
 };
 
+
+static kmem_cache_t	*pte_chain_cache;
+static mempool_t	*pte_chain_pool;
 static inline struct pte_chain * pte_chain_alloc(void);
 static inline void pte_chain_free(struct pte_chain *, struct pte_chain *,
 		struct page *);
-static void alloc_new_pte_chains(void);
 
 /**
  * page_referenced - test if the page was referenced
@@ -148,6 +153,7 @@
 	}
 
 	pte_chain_unlock(page);
+	inc_page_state(nr_reverse_maps);
 }
 
 /**
@@ -208,9 +214,9 @@
 #endif
 
 out:
+	dec_page_state(nr_reverse_maps);
 	pte_chain_unlock(page);
 	return;
-			
 }
 
 /**
@@ -355,27 +361,6 @@
  ** functions.
  **/
 
-struct pte_chain * pte_chain_freelist;
-spinlock_t pte_chain_freelist_lock = SPIN_LOCK_UNLOCKED;
-
-/* Maybe we should have standard ops for singly linked lists ... - Rik */
-static inline void pte_chain_push(struct pte_chain * pte_chain)
-{
-	pte_chain->ptep = NULL;
-	pte_chain->next = pte_chain_freelist;
-	pte_chain_freelist = pte_chain;
-}
-
-static inline struct pte_chain * pte_chain_pop(void)
-{
-	struct pte_chain *pte_chain;
-
-	pte_chain = pte_chain_freelist;
-	pte_chain_freelist = pte_chain->next;
-	pte_chain->next = NULL;
-
-	return pte_chain;
-}
 
 /**
  * pte_chain_free - free pte_chain structure
@@ -391,15 +376,12 @@
 static inline void pte_chain_free(struct pte_chain * pte_chain,
 		struct pte_chain * prev_pte_chain, struct page * page)
 {
-	mod_page_state(used_pte_chains_bytes, -sizeof(struct pte_chain));
 	if (prev_pte_chain)
 		prev_pte_chain->next = pte_chain->next;
 	else if (page)
 		page->pte.chain = pte_chain->next;
 
-	spin_lock(&pte_chain_freelist_lock);
-	pte_chain_push(pte_chain);
-	spin_unlock(&pte_chain_freelist_lock);
+	mempool_free(pte_chain, pte_chain_pool);
 }
 
 /**
@@ -411,45 +393,38 @@
  */
 static inline struct pte_chain * pte_chain_alloc()
 {
-	struct pte_chain * pte_chain;
-
-	spin_lock(&pte_chain_freelist_lock);
-
-	/* Allocate new pte_chain structs as needed. */
-	if (!pte_chain_freelist)
-		alloc_new_pte_chains();
-
-	/* Grab the first pte_chain from the freelist. */
-	pte_chain = pte_chain_pop();
+	return (struct pte_chain *)mempool_alloc(pte_chain_pool, GFP_ATOMIC);
+}
 
-	spin_unlock(&pte_chain_freelist_lock);
+static void *pte_chain_pool_alloc(int gfp_mask, void *ignored)
+{
+	(void)gfp_mask;
+	(void)ignored;
+	return kmem_cache_alloc(pte_chain_cache, GFP_ATOMIC);
+}
 
-	mod_page_state(used_pte_chains_bytes, sizeof(struct pte_chain));
-	return pte_chain;
+static void pte_chain_pool_free(void *pte_chain, void *ignored)
+{
+	kmem_cache_free(pte_chain_cache, pte_chain);
 }
 
-/**
- * alloc_new_pte_chains - convert a free page to pte_chain structures
- *
- * Grabs a free page and converts it to pte_chain structures. We really
- * should pre-allocate these earlier in the pagefault path or come up
- * with some other trick.
- *
- * Note that we cannot use the slab cache because the pte_chain structure
- * is way smaller than the minimum size of a slab cache allocation.
- * Caller needs to hold the pte_chain_freelist_lock
- */
-static void alloc_new_pte_chains()
+void __init pte_chain_init(void)
 {
-	struct pte_chain * pte_chain = (void *) get_zeroed_page(GFP_ATOMIC);
-	int i = PAGE_SIZE / sizeof(struct pte_chain);
+	pte_chain_cache = kmem_cache_create(	"pte_chain",
+						sizeof(struct pte_chain),
+						0,
+						0,
+						NULL,
+						NULL);
 
-	if (pte_chain) {
-		inc_page_state(nr_pte_chain_pages);
-		for (; i-- > 0; pte_chain++)
-			pte_chain_push(pte_chain);
-	} else {
-		/* Yeah yeah, I'll fix the pte_chain allocation ... */
-		panic("Fix pte_chain allocation, you lazy bastard!\n");
-	}
+	if (!pte_chain_cache)
+		panic("failed to create pte_chain cache!\n");
+
+	pte_chain_pool = mempool_create(16*1024,
+					pte_chain_pool_alloc,
+					pte_chain_pool_free,
+					NULL);
+
+	if (!pte_chain_pool)
+		panic("Failed to create pte_chain mempool!\n");
 }