linux-mm.kvack.org archive mirror
From: cl@linux-foundation.org
To: akpm@linux-foundation.org
Cc: linux-mm@kvack.org,
	Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>,
	Pekka Enberg <penberg@cs.helsinki.fi>, Tejun Heo <tj@kernel.org>,
	mingo@elte.hu, rusty@rustcorp.com.au, davem@davemloft.net
Subject: [this_cpu_xx V3 19/19] this_cpu: slub aggressive use of this_cpu operations in the hotpaths
Date: Thu, 01 Oct 2009 13:40:52 -0400
Message-ID: <20091001174123.171779521@gentwo.org>
In-Reply-To: <20091001174033.576397715@gentwo.org>

[-- Attachment #1: this_cpu_slub_aggressive_cpu_ops --]
[-- Type: text/plain, Size: 6095 bytes --]

Use this_cpu_* operations in the hotpath to avoid calculations of
kmem_cache_cpu pointer addresses.

On x86 there is a tradeoff: multiple uses of segment prefixes versus a
single address calculation and the register pressure of holding the
resulting pointer. Code size is reduced, so on balance it is an
advantage.
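
For illustration, the transformation in the fastpath amounts to the
following (a minimal sketch of the pattern applied in the diff below;
on x86 each __this_cpu_read()/__this_cpu_write() compiles down to a
single %gs-prefixed instruction):

	/* Before: materialize the per cpu pointer, then dereference it. */
	struct kmem_cache_cpu *c = __this_cpu_ptr(s->cpu_slab);
	object = c->freelist;
	c->freelist = get_freepointer(s, object);

	/*
	 * After: no pointer held in a register; each field access is a
	 * single segment prefixed instruction on x86.
	 */
	object = __this_cpu_read(s->cpu_slab->freelist);
	__this_cpu_write(s->cpu_slab->freelist, get_freepointer(s, object));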

The use of segment prefixes is necessary if we want to adopt
Mathieu's scheme for fastpaths that do not require disabling
interrupts.
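
A rough sketch of what such an irqless allocation fastpath could look
like. This assumes a this_cpu_cmpxchg() primitive (not provided by
this series) that compiles to a segment prefixed cmpxchg on x86, and
it glosses over preemption and ABA issues:

	for (;;) {
		object = this_cpu_read(s->cpu_slab->freelist);
		if (unlikely(!object || !node_match(s, node)))
			return __slab_alloc(s, gfpflags, node, addr);
		/* Publish the new freelist head; no irq disabling needed. */
		if (this_cpu_cmpxchg(s->cpu_slab->freelist, object,
				get_freepointer(s, object)) == object)
			break;
	}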

Cc: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>

---
 mm/slub.c |   80 ++++++++++++++++++++++++++++++--------------------------------
 1 file changed, 39 insertions(+), 41 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2009-09-30 15:58:20.000000000 -0500
+++ linux-2.6/mm/slub.c	2009-09-30 16:24:45.000000000 -0500
@@ -1512,10 +1512,10 @@ static void flush_all(struct kmem_cache 
  * Check if the objects in a per cpu structure fit numa
  * locality expectations.
  */
-static inline int node_match(struct kmem_cache_cpu *c, int node)
+static inline int node_match(struct kmem_cache *s, int node)
 {
 #ifdef CONFIG_NUMA
-	if (node != -1 && c->node != node)
+	if (node != -1 && __this_cpu_read(s->cpu_slab->node) != node)
 		return 0;
 #endif
 	return 1;
@@ -1603,46 +1603,46 @@ slab_out_of_memory(struct kmem_cache *s,
  * a call to the page allocator and the setup of a new slab.
  */
 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
-			  unsigned long addr, struct kmem_cache_cpu *c)
+			  unsigned long addr)
 {
 	void **object;
-	struct page *new;
+	struct page *page = __this_cpu_read(s->cpu_slab->page);
 
 	/* We handle __GFP_ZERO in the caller */
 	gfpflags &= ~__GFP_ZERO;
 
-	if (!c->page)
+	if (!page)
 		goto new_slab;
 
-	slab_lock(c->page);
-	if (unlikely(!node_match(c, node)))
+	slab_lock(page);
+	if (unlikely(!node_match(s, node)))
 		goto another_slab;
 
 	stat(s, ALLOC_REFILL);
 
 load_freelist:
-	object = c->page->freelist;
+	object = page->freelist;
 	if (unlikely(!object))
 		goto another_slab;
-	if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
+	if (unlikely(SLABDEBUG && PageSlubDebug(page)))
 		goto debug;
 
-	c->freelist = get_freepointer(s, object);
-	c->page->inuse = c->page->objects;
-	c->page->freelist = NULL;
-	c->node = page_to_nid(c->page);
+	__this_cpu_write(s->cpu_slab->freelist, get_freepointer(s, object));
+	page->inuse = page->objects;
+	page->freelist = NULL;
+	__this_cpu_write(s->cpu_slab->node, page_to_nid(page));
 unlock_out:
-	slab_unlock(c->page);
+	slab_unlock(page);
 	stat(s, ALLOC_SLOWPATH);
 	return object;
 
 another_slab:
-	deactivate_slab(s, c);
+	deactivate_slab(s, __this_cpu_ptr(s->cpu_slab));
 
 new_slab:
-	new = get_partial(s, gfpflags, node);
-	if (new) {
-		c->page = new;
+	page = get_partial(s, gfpflags, node);
+	if (page) {
+		__this_cpu_write(s->cpu_slab->page, page);
 		stat(s, ALLOC_FROM_PARTIAL);
 		goto load_freelist;
 	}
@@ -1650,31 +1650,30 @@ new_slab:
 	if (gfpflags & __GFP_WAIT)
 		local_irq_enable();
 
-	new = new_slab(s, gfpflags, node);
+	page = new_slab(s, gfpflags, node);
 
 	if (gfpflags & __GFP_WAIT)
 		local_irq_disable();
 
-	if (new) {
-		c = __this_cpu_ptr(s->cpu_slab);
+	if (page) {
 		stat(s, ALLOC_SLAB);
-		if (c->page)
-			flush_slab(s, c);
-		slab_lock(new);
-		__SetPageSlubFrozen(new);
-		c->page = new;
+		if (__this_cpu_read(s->cpu_slab->page))
+			flush_slab(s, __this_cpu_ptr(s->cpu_slab));
+		slab_lock(page);
+		__SetPageSlubFrozen(page);
+		__this_cpu_write(s->cpu_slab->page, page);
 		goto load_freelist;
 	}
 	if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
 		slab_out_of_memory(s, gfpflags, node);
 	return NULL;
 debug:
-	if (!alloc_debug_processing(s, c->page, object, addr))
+	if (!alloc_debug_processing(s, page, object, addr))
 		goto another_slab;
 
-	c->page->inuse++;
-	c->page->freelist = get_freepointer(s, object);
-	c->node = -1;
+	page->inuse++;
+	page->freelist = get_freepointer(s, object);
+	__this_cpu_write(s->cpu_slab->node, -1);
 	goto unlock_out;
 }
 
@@ -1692,7 +1691,6 @@ static __always_inline void *slab_alloc(
 		gfp_t gfpflags, int node, unsigned long addr)
 {
 	void **object;
-	struct kmem_cache_cpu *c;
 	unsigned long flags;
 
 	gfpflags &= gfp_allowed_mask;
@@ -1704,14 +1702,14 @@ static __always_inline void *slab_alloc(
 		return NULL;
 
 	local_irq_save(flags);
-	c = __this_cpu_ptr(s->cpu_slab);
-	object = c->freelist;
-	if (unlikely(!object || !node_match(c, node)))
+	object = __this_cpu_read(s->cpu_slab->freelist);
+	if (unlikely(!object || !node_match(s, node)))
 
-		object = __slab_alloc(s, gfpflags, node, addr, c);
+		object = __slab_alloc(s, gfpflags, node, addr);
 
 	else {
-		c->freelist = get_freepointer(s, object);
+		__this_cpu_write(s->cpu_slab->freelist,
+			get_freepointer(s, object));
 		stat(s, ALLOC_FASTPATH);
 	}
 	local_irq_restore(flags);
@@ -1847,19 +1845,19 @@ static __always_inline void slab_free(st
 			struct page *page, void *x, unsigned long addr)
 {
 	void **object = (void *)x;
-	struct kmem_cache_cpu *c;
 	unsigned long flags;
 
 	kmemleak_free_recursive(x, s->flags);
 	local_irq_save(flags);
-	c = __this_cpu_ptr(s->cpu_slab);
 	kmemcheck_slab_free(s, object, s->objsize);
 	debug_check_no_locks_freed(object, s->objsize);
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(object, s->objsize);
-	if (likely(page == c->page && c->node >= 0)) {
-		set_freepointer(s, object, c->freelist);
-		c->freelist = object;
+
+	if (likely(page == __this_cpu_read(s->cpu_slab->page) &&
+			__this_cpu_read(s->cpu_slab->node) >= 0)) {
+		set_freepointer(s, object, __this_cpu_read(s->cpu_slab->freelist));
+		__this_cpu_write(s->cpu_slab->freelist, object);
 		stat(s, FREE_FASTPATH);
 	} else
 		__slab_free(s, page, x, addr);

-- 


Thread overview: 24+ messages
2009-10-01 17:40 [this_cpu_xx V3 00/19] Introduce per cpu atomic operations and avoid per cpu address arithmetic cl
2009-10-01 17:40 ` [this_cpu_xx V3 01/19] Introduce this_cpu_ptr() and generic this_cpu_* operations cl
2009-10-01 17:40 ` [this_cpu_xx V3 02/19] this_cpu: X86 optimized this_cpu operations cl
2009-10-01 17:40 ` [this_cpu_xx V3 03/19] Use this_cpu operations for SNMP statistics cl
2009-10-01 17:40 ` [this_cpu_xx V3 04/19] Use this_cpu operations for NFS statistics cl
2009-10-01 17:40 ` [this_cpu_xx V3 05/19] use this_cpu ops for network statistics cl
2009-10-01 19:35   ` David Miller
2009-10-01 17:40 ` [this_cpu_xx V3 06/19] this_cpu_ptr: Straight transformations cl
2009-10-01 17:40 ` [this_cpu_xx V3 07/19] this_cpu_ptr: Eliminate get/put_cpu cl
2009-10-01 17:40 ` [this_cpu_xx V3 08/19] this_cpu_ptr: xfs_icsb_modify_counters does not need "cpu" variable cl
2009-10-01 17:40 ` [this_cpu_xx V3 09/19] Use this_cpu_ptr in crypto subsystem cl
2009-10-01 17:40 ` [this_cpu_xx V3 10/19] Use this_cpu ops for VM statistics cl
2009-10-01 17:40 ` [this_cpu_xx V3 11/19] RCU: Use this_cpu operations cl
2009-10-01 19:17   ` Ingo Molnar
2009-10-01 17:40 ` [this_cpu_xx V3 12/19] Move early initialization of pagesets out of zone_wait_table_init() cl
2009-10-01 17:40 ` [this_cpu_xx V3 13/19] this_cpu_ops: page allocator conversion cl
2009-10-01 17:40 ` [this_cpu_xx V3 14/19] this_cpu ops: Remove pageset_notifier cl
2009-10-01 17:40 ` [this_cpu_xx V3 15/19] Use this_cpu operations in slub cl
2009-10-01 17:40 ` [this_cpu_xx V3 16/19] SLUB: Get rid of dynamic DMA kmalloc cache allocation cl
2009-10-01 17:40 ` [this_cpu_xx V3 17/19] this_cpu: Remove slub kmem_cache fields cl
2009-10-01 17:40 ` [this_cpu_xx V3 18/19] Make slub statistics use this_cpu_inc cl
2009-10-01 17:40 ` [this_cpu_xx V3 19/19] this_cpu: slub aggressive use of this_cpu operations in the hotpaths cl [this message]
2009-10-02  9:29 ` [this_cpu_xx V3 00/19] Introduce per cpu atomic operations and avoid per cpu address arithmetic Ingo Molnar
2009-10-02  9:31   ` Ingo Molnar
