From: Pekka J Enberg <penberg@cs.Helsinki.FI>
To: manfred@colorfullife.com, christoph@lameter.com, pj@sgi.com
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org
Subject: [RFC/PATCH] slab: clean up allocation
Date: Fri, 29 Sep 2006 13:56:07 +0300 (EEST)
Message-ID: <Pine.LNX.4.58.0609291353060.30021@sbz-30.cs.Helsinki.FI>
Hi!
This patch cleans up the slab allocation path by making the UMA case look like
a NUMA build that always allocates from the current node. In addition, I merged
the two NUMA allocation paths (kmem_cache_alloc_node and __cache_alloc) into a
single function so that we always use alternate_node_alloc() when PF_SPREAD_SLAB
or PF_MEMPOLICY is set; the merged control flow is sketched below.
Note: this increases kernel text on NUMA by 70 bytes on x86 due to the
inlining of cache_alloc at multiple call sites.
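For reference, the merged control flow in outline. This is a minimal userspace
model with stubbed helpers (numa_node_id() and both allocation paths are fakes
here), and the alternate_node_alloc() branch is reduced to a comment; see the
actual diff below for the real thing.

#include <stdio.h>
#include <stdlib.h>

#define SLAB_CURRENT_NODE (-1)
#define MAX_NUMNODES 4

struct kmem_cache {
	void *nodelists[MAX_NUMNODES];
};

/* Stubs standing in for the kernel helpers. */
static int numa_node_id(void) { return 0; }

static void *cache_alloc_local(struct kmem_cache *cachep)
{
	return malloc(32);	/* fast path: per-CPU array cache */
}

static void *cache_alloc_on_node(struct kmem_cache *cachep, int nodeid)
{
	return malloc(32);	/* slow path: per-node slab lists */
}

/* Models the merged NUMA __cache_alloc() from the patch below. */
static void *model_cache_alloc(struct kmem_cache *cachep, int nodeid)
{
	void *objp = NULL;

	if (nodeid == SLAB_CURRENT_NODE || nodeid == numa_node_id() ||
	    !cachep->nodelists[nodeid]) {
		/* alternate_node_alloc() for PF_SPREAD_SLAB/PF_MEMPOLICY
		 * would be tried first here. */
		objp = cache_alloc_local(cachep);
		if (!objp)	/* local node exhausted, try other nodes */
			objp = cache_alloc_on_node(cachep, numa_node_id());
	} else {
		objp = cache_alloc_on_node(cachep, nodeid);
	}
	return objp;
}

int main(void)
{
	struct kmem_cache cache = { { NULL } };
	void *obj = model_cache_alloc(&cache, SLAB_CURRENT_NODE);

	printf("object at %p\n", obj);
	free(obj);
	return 0;
}

Note that as a side effect, kmem_cache_alloc_node() called with nodeid == -1
now also goes through alternate_node_alloc() and the out-of-memory fallback,
which the old open-coded version of that path did not do.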
Cc: Manfred Spraul <manfred@colorfullife.com>
Cc: Christoph Lameter <christoph@lameter.com>
Cc: Paul Jackson <pj@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
---
mm/slab.c | 116 +++++++++++++++++++++++++++++++++++---------------------------
1 file changed, 66 insertions(+), 50 deletions(-)
Index: 2.6/mm/slab.c
===================================================================
--- 2.6.orig/mm/slab.c
+++ 2.6/mm/slab.c
@@ -210,6 +210,11 @@ typedef unsigned int kmem_bufctl_t;
#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3)
/*
+ * Pseudo node ID: allocate from the current node.
+ */
+#define SLAB_CURRENT_NODE (-1)
+
+/*
* struct slab
*
* Manages the objs in a slab. Placed either at the beginning of mem allocated
@@ -3041,7 +3046,7 @@ static void *cache_alloc_debugcheck_afte
#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
#endif
-static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+static inline void *cache_alloc_local(struct kmem_cache *cachep, gfp_t flags)
{
void *objp;
struct array_cache *ac;
@@ -3059,35 +3064,6 @@ static inline void *____cache_alloc(stru
return objp;
}
-static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
- gfp_t flags, void *caller)
-{
- unsigned long save_flags;
- void *objp = NULL;
-
- cache_alloc_debugcheck_before(cachep, flags);
-
- local_irq_save(save_flags);
-
- if (unlikely(NUMA_BUILD &&
- current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY)))
- objp = alternate_node_alloc(cachep, flags);
-
- if (!objp)
- objp = ____cache_alloc(cachep, flags);
- /*
- * We may just have run out of memory on the local node.
- * __cache_alloc_node() knows how to locate memory on other nodes
- */
- if (NUMA_BUILD && !objp)
- objp = __cache_alloc_node(cachep, flags, numa_node_id());
- local_irq_restore(save_flags);
- objp = cache_alloc_debugcheck_after(cachep, flags, objp,
- caller);
- prefetchw(objp);
- return objp;
-}
-
#ifdef CONFIG_NUMA
/*
* Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
@@ -3198,8 +3174,62 @@ must_grow:
done:
return obj;
}
+
+static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
+ gfp_t flags, int nodeid)
+{
+ void *objp = NULL;
+
+ if (nodeid == SLAB_CURRENT_NODE || nodeid == numa_node_id() ||
+ !cachep->nodelists[nodeid]) {
+ if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY)))
+ objp = alternate_node_alloc(cachep, flags);
+
+ if (!objp)
+ objp = cache_alloc_local(cachep, flags);
+ /*
+ * We may just have run out of memory on the local node.
+ * __cache_alloc_node() knows how to locate memory on other nodes
+ */
+ if (!objp)
+ objp = __cache_alloc_node(cachep, flags, numa_node_id());
+ } else
+ objp = __cache_alloc_node(cachep, flags, nodeid);
+
+ return objp;
+}
+
+#else
+/*
+ * On UMA, we always allocate from the local node.
+ */
+static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
+ gfp_t flags, int nodeid)
+{
+ return cache_alloc_local(cachep, flags);
+}
#endif
+static __always_inline void *cache_alloc(struct kmem_cache *cachep,
+ gfp_t flags, int nodeid,
+ void *caller)
+{
+ unsigned long save_flags;
+ void *objp;
+
+ cache_alloc_debugcheck_before(cachep, flags);
+
+ local_irq_save(save_flags);
+ objp = __cache_alloc(cachep, flags, nodeid);
+ local_irq_restore(save_flags);
+
+ objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
+ prefetchw(objp);
+
+ return objp;
+}
+
+
/*
* Caller needs to acquire correct kmem_list's list_lock
*/
@@ -3333,7 +3363,8 @@ static inline void __cache_free(struct k
*/
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
- return __cache_alloc(cachep, flags, __builtin_return_address(0));
+ return cache_alloc(cachep, flags, SLAB_CURRENT_NODE,
+ __builtin_return_address(0));
}
EXPORT_SYMBOL(kmem_cache_alloc);
@@ -3347,7 +3378,8 @@ EXPORT_SYMBOL(kmem_cache_alloc);
*/
void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags)
{
- void *ret = __cache_alloc(cache, flags, __builtin_return_address(0));
+ void *ret = cache_alloc(cache, flags, SLAB_CURRENT_NODE,
+ __builtin_return_address(0));
if (ret)
memset(ret, 0, obj_size(cache));
return ret;
@@ -3411,23 +3443,7 @@ out:
*/
void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
- unsigned long save_flags;
- void *ptr;
-
- cache_alloc_debugcheck_before(cachep, flags);
- local_irq_save(save_flags);
-
- if (nodeid == -1 || nodeid == numa_node_id() ||
- !cachep->nodelists[nodeid])
- ptr = ____cache_alloc(cachep, flags);
- else
- ptr = __cache_alloc_node(cachep, flags, nodeid);
- local_irq_restore(save_flags);
-
- ptr = cache_alloc_debugcheck_after(cachep, flags, ptr,
- __builtin_return_address(0));
-
- return ptr;
+ return cache_alloc(cachep, flags, nodeid, __builtin_return_address(0));
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
@@ -3462,7 +3478,7 @@ static __always_inline void *__do_kmallo
cachep = __find_general_cachep(size, flags);
if (unlikely(cachep == NULL))
return NULL;
- return __cache_alloc(cachep, flags, caller);
+ return cache_alloc(cachep, flags, SLAB_CURRENT_NODE, caller);
}
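From the caller's point of view the interface is unchanged; both entry points
now funnel into cache_alloc(). A usage sketch (struct foo and foo_cache are
hypothetical, assumed to have been set up with kmem_cache_create() elsewhere):

struct foo {
	int x;
};

static struct kmem_cache *foo_cache;	/* hypothetical cache */

static void foo_example(void)
{
	/* Goes through cache_alloc(..., SLAB_CURRENT_NODE, ...). */
	struct foo *a = kmem_cache_alloc(foo_cache, GFP_KERNEL);

	/* Goes through cache_alloc(..., 1, ...), the same unified path. */
	struct foo *b = kmem_cache_alloc_node(foo_cache, GFP_KERNEL, 1);

	if (a)
		kmem_cache_free(foo_cache, a);
	if (b)
		kmem_cache_free(foo_cache, b);
}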