linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: "Shi, Alex" <alex.shi@intel.com>
To: Pekka Enberg <penberg@cs.helsinki.fi>,
	"Duyck, Alexander H" <alexander.h.duyck@intel.com>
Cc: "cl@linux.com" <cl@linux.com>,
	"linux-mm@kvack.org" <linux-mm@kvack.org>,
	Zhang Yanmin <yanmin_zhang@linux.intel.com>,
	"Chen, Tim C" <tim.c.chen@intel.com>
Subject: RE: [PATCH] slub: move kmem_cache_node into it's own cacheline
Date: Fri, 21 May 2010 22:41:45 +0800	[thread overview]
Message-ID: <6E3BC7F7C9A4BF4286DD4C043110F30B0B5969081B@shsmsx502.ccr.corp.intel.com> (raw)
In-Reply-To: <AANLkTilfJh65QAkb9FPaqI3UEtbgwLuuoqSdaTtIsXWZ@mail.gmail.com>

I have tested this patch based on the latest Linus kernel tree. It really works!
We saw about a 10% improvement for hackbench threads mode and an 8%~13% improvement for process mode on our 2-socket Westmere machine, and about a 7% hackbench improvement on a 2-socket NHM. 

Alex 
>-----Original Message-----
>From: penberg@gmail.com [mailto:penberg@gmail.com] On Behalf Of Pekka Enberg
>Sent: Friday, May 21, 2010 1:00 PM
>To: Duyck, Alexander H
>Cc: cl@linux.com; linux-mm@kvack.org; Shi, Alex; Zhang Yanmin
>Subject: Re: [PATCH] slub: move kmem_cache_node into it's own cacheline
>
>On Fri, May 21, 2010 at 2:47 AM, Alexander Duyck
><alexander.h.duyck@intel.com> wrote:
>> This patch is meant to improve the performance of SLUB by moving the local
>> kmem_cache_node lock into it's own cacheline separate from kmem_cache.
>> This is accomplished by simply removing the local_node when NUMA is enabled.
>>
>> On my system with 2 nodes I saw around a 5% performance increase w/
>> hackbench times dropping from 6.2 seconds to 5.9 seconds on average.  I
>> suspect the performance gain would increase as the number of nodes
>> increases, but I do not have the data to currently back that up.
>>
>> Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
>
>Yanmin, does this fix the hackbench regression for you?
>
>> ---
>>
>>  include/linux/slub_def.h |   11 ++++-------
>>  mm/slub.c                |   33 +++++++++++----------------------
>>  2 files changed, 15 insertions(+), 29 deletions(-)
>>
>> diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
>> index 0249d41..e6217bb 100644
>> --- a/include/linux/slub_def.h
>> +++ b/include/linux/slub_def.h
>> @@ -52,7 +52,7 @@ struct kmem_cache_node {
>>        atomic_long_t total_objects;
>>        struct list_head full;
>>  #endif
>> -};
>> +} ____cacheline_internodealigned_in_smp;
>>
>>  /*
>>  * Word size structure that can be atomically updated or read and that
>> @@ -75,12 +75,6 @@ struct kmem_cache {
>>        int offset;             /* Free pointer offset. */
>>        struct kmem_cache_order_objects oo;
>>
>> -       /*
>> -        * Avoid an extra cache line for UP, SMP and for the node local to
>> -        * struct kmem_cache.
>> -        */
>> -       struct kmem_cache_node local_node;
>> -
>>        /* Allocation and freeing of slabs */
>>        struct kmem_cache_order_objects max;
>>        struct kmem_cache_order_objects min;
>> @@ -102,6 +96,9 @@ struct kmem_cache {
>>         */
>>        int remote_node_defrag_ratio;
>>        struct kmem_cache_node *node[MAX_NUMNODES];
>> +#else
>> +       /* Avoid an extra cache line for UP */
>> +       struct kmem_cache_node local_node;
>>  #endif
>>  };
>>
>> diff --git a/mm/slub.c b/mm/slub.c
>> index 461314b..8af03de 100644
>> --- a/mm/slub.c
>> +++ b/mm/slub.c
>> @@ -2141,7 +2141,7 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
>>
>>        for_each_node_state(node, N_NORMAL_MEMORY) {
>>                struct kmem_cache_node *n = s->node[node];
>> -               if (n && n != &s->local_node)
>> +               if (n)
>>                        kmem_cache_free(kmalloc_caches, n);
>>                s->node[node] = NULL;
>>        }
>> @@ -2150,33 +2150,22 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
>>  static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
>>  {
>>        int node;
>> -       int local_node;
>> -
>> -       if (slab_state >= UP && (s < kmalloc_caches ||
>> -                       s >= kmalloc_caches + KMALLOC_CACHES))
>> -               local_node = page_to_nid(virt_to_page(s));
>> -       else
>> -               local_node = 0;
>>
>>        for_each_node_state(node, N_NORMAL_MEMORY) {
>>                struct kmem_cache_node *n;
>>
>> -               if (local_node == node)
>> -                       n = &s->local_node;
>> -               else {
>> -                       if (slab_state == DOWN) {
>> -                               early_kmem_cache_node_alloc(gfpflags, node);
>> -                               continue;
>> -                       }
>> -                       n = kmem_cache_alloc_node(kmalloc_caches,
>> -                                                       gfpflags, node);
>> -
>> -                       if (!n) {
>> -                               free_kmem_cache_nodes(s);
>> -                               return 0;
>> -                       }
>> +               if (slab_state == DOWN) {
>> +                       early_kmem_cache_node_alloc(gfpflags, node);
>> +                       continue;
>> +               }
>> +               n = kmem_cache_alloc_node(kmalloc_caches,
>> +                                               gfpflags, node);
>>
>> +               if (!n) {
>> +                       free_kmem_cache_nodes(s);
>> +                       return 0;
>>                }
>> +
>>                s->node[node] = n;
>>                init_kmem_cache_node(n, s);
>>        }
>>
>> --
>> To unsubscribe, send a message with 'unsubscribe linux-mm' in
>> the body to majordomo@kvack.org.  For more info on Linux MM,
>> see: http://www.linux-mm.org/ .
>> Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
>>

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

  reply	other threads:[~2010-05-21 14:44 UTC|newest]

Thread overview: 12+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2010-05-20 23:47 Alexander Duyck
2010-05-21  4:59 ` Pekka Enberg
2010-05-21 14:41   ` Shi, Alex [this message]
2010-05-21 18:03     ` Christoph Lameter
2010-05-24 18:14     ` Pekka Enberg
2010-05-26  0:52       ` Shi, Alex
2010-05-21 18:06 ` Christoph Lameter
2010-05-21 18:17   ` Duyck, Alexander H
2010-05-21 18:24     ` Christoph Lameter
2010-05-21 18:33       ` Christoph Lameter
2010-05-21 20:23         ` Duyck, Alexander H
2010-05-21 20:41           ` Christoph Lameter

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=6E3BC7F7C9A4BF4286DD4C043110F30B0B5969081B@shsmsx502.ccr.corp.intel.com \
    --to=alex.shi@intel.com \
    --cc=alexander.h.duyck@intel.com \
    --cc=cl@linux.com \
    --cc=linux-mm@kvack.org \
    --cc=penberg@cs.helsinki.fi \
    --cc=tim.c.chen@intel.com \
    --cc=yanmin_zhang@linux.intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox