From: Matthew Dobson <colpatch@us.ibm.com>
To: linux-kernel@vger.kernel.org
Cc: sri@us.ibm.com, andrea@suse.de, pavel@suse.cz, linux-mm@kvack.org
Subject: [patch 9/9] slab - Implement single mempool backing for slab allocator
Date: Wed, 25 Jan 2006 11:40:24 -0800
Message-ID: <1138218024.2092.9.camel@localhost.localdomain>
In-Reply-To: <20060125161321.647368000@localhost.localdomain>
plain text document attachment (critical_mempools)
Support for using a single mempool as a critical pool for all slab allocations.

This patch completes the implementation of that functionality. We take the
mempool_t pointer, which is now passed into the slab allocator by all of the
externally callable functions (thanks to the previous patch), and pass it all
the way down through the slab allocator code. If the slab allocator needs to
allocate memory to satisfy a slab request, which only happens in
kmem_getpages(), it allocates that memory via the mempool's allocator rather
than calling alloc_pages_node() directly. This lets a single mempool back ALL
slab allocations for a subsystem, rather than having to back each and every
kmem_cache_alloc()/kmalloc() call that subsystem makes with its own mempool.
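As a usage illustration (not part of this patch): a subsystem creates one
critical pool of pages and hands it to every slab allocation it makes.
kmem_cache_alloc_mempool() is added by patch 8/9; mempool_alloc_pages() and
mempool_free_pages() are assumed from the page allocator added in patch 1/9,
and all my_* names are hypothetical:

	#include <linux/mempool.h>
	#include <linux/slab.h>

	#define MY_MIN_PAGES 16	/* pages reserved for the critical pool */

	struct my_obj { int data; };

	static kmem_cache_t *my_cache;
	static mempool_t *my_pool;

	static int __init my_init(void)
	{
		my_cache = kmem_cache_create("my_cache", sizeof(struct my_obj),
					     0, 0, NULL, NULL);
		if (!my_cache)
			return -ENOMEM;
		/*
		 * A single order-0 page pool backs the whole cache; pool_data
		 * holds the page order, matching the check in kmem_getpages()
		 * below.
		 */
		my_pool = mempool_create(MY_MIN_PAGES, mempool_alloc_pages,
					 mempool_free_pages, (void *)0);
		if (!my_pool) {
			kmem_cache_destroy(my_cache);
			return -ENOMEM;
		}
		return 0;
	}

	static struct my_obj *my_alloc(gfp_t flags)
	{
		/* Every allocation from my_cache can fall back to my_pool. */
		return kmem_cache_alloc_mempool(my_cache, flags, my_pool);
	}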
Signed-off-by: Matthew Dobson <colpatch@us.ibm.com>
slab.c | 60 +++++++++++++++++++++++++++++++++++++++---------------------
1 files changed, 39 insertions(+), 21 deletions(-)
Index: linux-2.6.16-rc1+critical_mempools/mm/slab.c
===================================================================
--- linux-2.6.16-rc1+critical_mempools.orig/mm/slab.c
+++ linux-2.6.16-rc1+critical_mempools/mm/slab.c
@@ -1209,15 +1209,26 @@ __initcall(cpucache_init);
* If we requested dmaable memory, we will get it. Even if we
* did not request dmaable memory, we might get it, but that
* would be relatively rare and ignorable.
+ *
+ * For now, we only support order-0 allocations with mempools.
*/
-static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid,
+ mempool_t *pool)
{
struct page *page;
void *addr;
int i;
flags |= cachep->gfpflags;
- page = alloc_pages_node(nodeid, flags, cachep->gfporder);
+ /*
+ * If this allocation request isn't backed by a memory pool, or if that
+ * memory pool's gfporder is not the same as the cache's gfporder, fall
+ * back to alloc_pages_node().
+ */
+ if (!pool || cachep->gfporder != (int)pool->pool_data)
+ page = alloc_pages_node(nodeid, flags, cachep->gfporder);
+ else
+ page = mempool_alloc_node(pool, flags, nodeid);
if (!page)
return NULL;
addr = page_address(page);
@@ -2084,13 +2095,15 @@ EXPORT_SYMBOL(kmem_cache_destroy);
/* Get the memory for a slab management obj. */
static struct slab *alloc_slabmgmt(kmem_cache_t *cachep, void *objp,
- int colour_off, gfp_t local_flags)
+ int colour_off, gfp_t local_flags,
+ mempool_t *pool)
{
struct slab *slabp;
if (OFF_SLAB(cachep)) {
/* Slab management obj is off-slab. */
- slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags);
+ slabp = kmem_cache_alloc_mempool(cachep->slabp_cache,
+ local_flags, pool);
if (!slabp)
return NULL;
} else {
@@ -2188,7 +2201,8 @@ static void set_slab_attr(kmem_cache_t *
* Grow (by 1) the number of slabs within a cache. This is called by
* kmem_cache_alloc() when there are no active objs left in a cache.
*/
-static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid,
+ mempool_t *pool)
{
struct slab *slabp;
void *objp;
@@ -2242,11 +2256,11 @@ static int cache_grow(kmem_cache_t *cach
/* Get mem for the objs.
* Attempt to allocate a physical page from 'nodeid',
*/
- if (!(objp = kmem_getpages(cachep, flags, nodeid)))
+ if (!(objp = kmem_getpages(cachep, flags, nodeid, pool)))
goto failed;
/* Get slab management. */
- if (!(slabp = alloc_slabmgmt(cachep, objp, offset, local_flags)))
+ if (!(slabp = alloc_slabmgmt(cachep, objp, offset, local_flags, pool)))
goto opps1;
slabp->nodeid = nodeid;
@@ -2406,7 +2420,8 @@ static void check_slabp(kmem_cache_t *ca
#define check_slabp(x,y) do { } while(0)
#endif
-static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
+static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags,
+ mempool_t *pool)
{
int batchcount;
struct kmem_list3 *l3;
@@ -2492,7 +2507,7 @@ static void *cache_alloc_refill(kmem_cac
if (unlikely(!ac->avail)) {
int x;
- x = cache_grow(cachep, flags, numa_node_id());
+ x = cache_grow(cachep, flags, numa_node_id(), pool);
// cache_grow can reenable interrupts, then ac could change.
ac = ac_data(cachep);
@@ -2565,7 +2580,8 @@ static void *cache_alloc_debugcheck_afte
#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
#endif
-static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
+static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags,
+ mempool_t *pool)
{
void *objp;
struct array_cache *ac;
@@ -2578,12 +2594,13 @@ static inline void *____cache_alloc(kmem
objp = ac->entry[--ac->avail];
} else {
STATS_INC_ALLOCMISS(cachep);
- objp = cache_alloc_refill(cachep, flags);
+ objp = cache_alloc_refill(cachep, flags, pool);
}
return objp;
}
-static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
+static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags,
+ mempool_t *pool)
{
unsigned long save_flags;
void *objp;
@@ -2591,7 +2608,7 @@ static inline void *__cache_alloc(kmem_c
cache_alloc_debugcheck_before(cachep, flags);
local_irq_save(save_flags);
- objp = ____cache_alloc(cachep, flags);
+ objp = ____cache_alloc(cachep, flags, pool);
local_irq_restore(save_flags);
objp = cache_alloc_debugcheck_after(cachep, flags, objp,
__builtin_return_address(0));
@@ -2603,7 +2620,8 @@ static inline void *__cache_alloc(kmem_c
/*
* A interface to enable slab creation on nodeid
*/
-static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid,
+ mempool_t *pool)
{
struct list_head *entry;
struct slab *slabp;
@@ -2659,7 +2677,7 @@ static void *__cache_alloc_node(kmem_cac
must_grow:
spin_unlock(&l3->list_lock);
- x = cache_grow(cachep, flags, nodeid);
+ x = cache_grow(cachep, flags, nodeid, pool);
if (!x)
return NULL;
@@ -2848,7 +2866,7 @@ static inline void __cache_free(kmem_cac
void *kmem_cache_alloc_mempool(kmem_cache_t *cachep, gfp_t flags,
mempool_t *pool)
{
- return __cache_alloc(cachep, flags);
+ return __cache_alloc(cachep, flags, pool);
}
EXPORT_SYMBOL(kmem_cache_alloc_mempool);
@@ -2921,22 +2939,22 @@ void *kmem_cache_alloc_node_mempool(kmem
void *ptr;
if (nodeid == -1)
- return __cache_alloc(cachep, flags);
+ return __cache_alloc(cachep, flags, pool);
if (unlikely(!cachep->nodelists[nodeid])) {
/* Fall back to __cache_alloc if we run into trouble */
printk(KERN_WARNING
"slab: not allocating in inactive node %d for cache %s\n",
nodeid, cachep->name);
- return __cache_alloc(cachep, flags);
+ return __cache_alloc(cachep, flags, pool);
}
cache_alloc_debugcheck_before(cachep, flags);
local_irq_save(save_flags);
if (nodeid == numa_node_id())
- ptr = ____cache_alloc(cachep, flags);
+ ptr = ____cache_alloc(cachep, flags, pool);
else
- ptr = __cache_alloc_node(cachep, flags, nodeid);
+ ptr = __cache_alloc_node(cachep, flags, nodeid, pool);
local_irq_restore(save_flags);
ptr =
cache_alloc_debugcheck_after(cachep, flags, ptr,
@@ -3004,7 +3022,7 @@ void *__kmalloc(size_t size, gfp_t flags
cachep = __find_general_cachep(size, flags);
if (unlikely(cachep == NULL))
return NULL;
- return __cache_alloc(cachep, flags);
+ return __cache_alloc(cachep, flags, pool);
}
EXPORT_SYMBOL(__kmalloc);
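
For reference, the gfporder check in kmem_getpages() above relies on the
convention that a page-backed mempool stores its allocation order in
pool->pool_data. A sketch of the allocator callbacks this assumes, per my
reading of patch 1/9:

	static void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
	{
		int order = (int)(long)pool_data;

		/* Returns a struct page *, as kmem_getpages() expects. */
		return alloc_pages(gfp_mask, order);
	}

	static void mempool_free_pages(void *element, void *pool_data)
	{
		int order = (int)(long)pool_data;

		__free_pages(element, order);
	}

mempool_alloc_node(), called from kmem_getpages(), is the NUMA-aware entry
point from patch 3/9, which presumably passes the node hint through to the
pool's alloc callback.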
--