linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: Pekka Enberg <penberg@kernel.org>
To: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>,
	Andi Kleen <andi@firstfloor.org>,
	tj@kernel.org, Metathronius Galabant <m.galabant@googlemail.com>,
	Matt Mackall <mpm@selenic.com>,
	Eric Dumazet <eric.dumazet@gmail.com>,
	Adrian Drzewiecki <z@drze.net>, Shaohua Li <shaohua.li@intel.com>,
	Alex Shi <alex.shi@intel.com>,
	linux-mm@kvack.org
Subject: Re: [rfc 01/18] slub: Get rid of the node field
Date: Mon, 14 Nov 2011 23:42:38 +0200	[thread overview]
Message-ID: <CAOJsxLFM9W=NiGFwjt8-iwrTYrAZiJ2_Mw_EUYyXYE4TKPs9-A@mail.gmail.com> (raw)
In-Reply-To: <20111111200725.634567005@linux.com>

On Fri, Nov 11, 2011 at 10:07 PM, Christoph Lameter <cl@linux.com> wrote:
> The node field is always page_to_nid(c->page). So it's rather easy to
> replace. Note that there will be additional overhead in various hot paths
> due to the need to mask a set of bits in page->flags and shift the
> result.
>
> Signed-off-by: Christoph Lameter <cl@linux.com>

This is a nice cleanup even if we never go irqless in the slowpaths.
Is page_to_nid() really that slow?

>
> ---
>  include/linux/slub_def.h |    1 -
>  mm/slub.c                |   15 ++++++---------
>  2 files changed, 6 insertions(+), 10 deletions(-)
>
> Index: linux-2.6/mm/slub.c
> ===================================================================
> --- linux-2.6.orig/mm/slub.c    2011-11-08 09:53:04.043865616 -0600
> +++ linux-2.6/mm/slub.c 2011-11-09 11:10:46.111334466 -0600
> @@ -1551,7 +1551,6 @@ static void *get_partial_node(struct kme
>
>                if (!object) {
>                        c->page = page;
> -                       c->node = page_to_nid(page);
>                        stat(s, ALLOC_FROM_PARTIAL);
>                        object = t;
>                        available =  page->objects - page->inuse;
> @@ -2016,7 +2015,7 @@ static void flush_all(struct kmem_cache
>  static inline int node_match(struct kmem_cache_cpu *c, int node)
>  {
>  #ifdef CONFIG_NUMA
> -       if (node != NUMA_NO_NODE && c->node != node)
> +       if (node != NUMA_NO_NODE && page_to_nid(c->page) != node)
>                return 0;
>  #endif
>        return 1;
> @@ -2105,7 +2104,6 @@ static inline void *new_slab_objects(str
>                page->freelist = NULL;
>
>                stat(s, ALLOC_SLAB);
> -               c->node = page_to_nid(page);
>                c->page = page;
>                *pc = c;
>        } else
> @@ -2202,7 +2200,6 @@ new_slab:
>        if (c->partial) {
>                c->page = c->partial;
>                c->partial = c->page->next;
> -               c->node = page_to_nid(c->page);
>                stat(s, CPU_PARTIAL_ALLOC);
>                c->freelist = NULL;
>                goto redo;
> @@ -2233,7 +2230,6 @@ new_slab:
>
>        c->freelist = get_freepointer(s, object);
>        deactivate_slab(s, c);
> -       c->node = NUMA_NO_NODE;
>        local_irq_restore(flags);
>        return object;
>  }
> @@ -4437,9 +4433,10 @@ static ssize_t show_slab_objects(struct
>                        struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
>                        struct page *page;
>
> -                       if (!c || c->node < 0)
> +                       if (!c || !c->page)
>                                continue;
>
> +                       node = page_to_nid(c->page);
>                        if (c->page) {
>                                        if (flags & SO_TOTAL)
>                                                x = c->page->objects;
> @@ -4449,16 +4446,16 @@ static ssize_t show_slab_objects(struct
>                                        x = 1;
>
>                                total += x;
> -                               nodes[c->node] += x;
> +                               nodes[node] += x;
>                        }
>                        page = c->partial;
>
>                        if (page) {
>                                x = page->pobjects;
>                                 total += x;
> -                                nodes[c->node] += x;
> +                                nodes[node] += x;
>                        }
> -                       per_cpu[c->node]++;
> +                       per_cpu[node]++;
>                }
>        }
>
> Index: linux-2.6/include/linux/slub_def.h
> ===================================================================
> --- linux-2.6.orig/include/linux/slub_def.h     2011-11-08 09:53:03.979865196 -0600
> +++ linux-2.6/include/linux/slub_def.h  2011-11-09 11:10:46.121334523 -0600
> @@ -45,7 +45,6 @@ struct kmem_cache_cpu {
>        unsigned long tid;      /* Globally unique transaction id */
>        struct page *page;      /* The slab from which we are allocating */
>        struct page *partial;   /* Partially allocated frozen slabs */
> -       int node;               /* The node of the page (or -1 for debug) */
>  #ifdef CONFIG_SLUB_STATS
>        unsigned stat[NR_SLUB_STAT_ITEMS];
>  #endif
>
> --
> To unsubscribe, send a message with 'unsubscribe linux-mm' in
> the body to majordomo@kvack.org.  For more info on Linux MM,
> see: http://www.linux-mm.org/ .
> Fight unfair telecom internet charges in Canada: sign http://stopthemeter.ca/
> Don't email: <a href="mailto:dont@kvack.org"> email@kvack.org </a>
>

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Fight unfair telecom internet charges in Canada: sign http://stopthemeter.ca/
Don't email: <a href="mailto:dont@kvack.org"> email@kvack.org </a>

  reply	other threads:[~2011-11-14 21:42 UTC|newest]

Thread overview: 39+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2011-11-11 20:07 [rfc 00/18] slub: irqless/lockless slow allocation paths Christoph Lameter
2011-11-11 20:07 ` [rfc 01/18] slub: Get rid of the node field Christoph Lameter
2011-11-14 21:42   ` Pekka Enberg [this message]
2011-11-15 16:07     ` Christoph Lameter
2011-11-20 23:01   ` David Rientjes
2011-11-21 17:17     ` Christoph Lameter
2011-11-11 20:07 ` [rfc 02/18] slub: Separate out kmem_cache_cpu processing from deactivate_slab Christoph Lameter
2011-11-20 23:10   ` David Rientjes
2011-11-11 20:07 ` [rfc 03/18] slub: Extract get_freelist from __slab_alloc Christoph Lameter
2011-11-14 21:43   ` Pekka Enberg
2011-11-15 16:08     ` Christoph Lameter
2011-12-13 20:31       ` Pekka Enberg
2011-11-20 23:18   ` David Rientjes
2011-11-11 20:07 ` [rfc 04/18] slub: Use freelist instead of "object" in __slab_alloc Christoph Lameter
2011-11-14 21:44   ` Pekka Enberg
2011-11-20 23:22   ` David Rientjes
2011-11-11 20:07 ` [rfc 05/18] slub: Simplify control flow in __slab_alloc() Christoph Lameter
2011-11-14 21:45   ` Pekka Enberg
2011-11-20 23:24   ` David Rientjes
2011-11-11 20:07 ` [rfc 06/18] slub: Use page variable instead of c->page Christoph Lameter
2011-11-14 21:46   ` Pekka Enberg
2011-11-20 23:27   ` David Rientjes
2011-11-11 20:07 ` [rfc 07/18] slub: pass page to node_match() instead of kmem_cache_cpu structure Christoph Lameter
2011-11-20 23:28   ` David Rientjes
2011-11-11 20:07 ` [rfc 08/18] slub: enable use of deactivate_slab with interrupts on Christoph Lameter
2011-11-11 20:07 ` [rfc 09/18] slub: Run deactivate_slab with interrupts enabled Christoph Lameter
2011-11-11 20:07 ` [rfc 10/18] slub: Enable use of get_partial " Christoph Lameter
2011-11-11 20:07 ` [rfc 11/18] slub: Acquire_slab() avoid loop Christoph Lameter
2011-11-11 20:07 ` [rfc 12/18] slub: Remove kmem_cache_cpu dependency from acquire slab Christoph Lameter
2011-11-11 20:07 ` [rfc 13/18] slub: Add functions to manage per cpu freelists Christoph Lameter
2011-11-11 20:07 ` [rfc 14/18] slub: Decomplicate the get_pointer_safe call and fixup statistics Christoph Lameter
2011-11-11 20:07 ` [rfc 15/18] slub: new_slab_objects() can also get objects from partial list Christoph Lameter
2011-11-11 20:07 ` [rfc 16/18] slub: Drop page field from kmem_cache_cpu Christoph Lameter
2011-11-11 20:07 ` [rfc 17/18] slub: Move __slab_free() into slab_free() Christoph Lameter
2011-11-11 20:07 ` [rfc 18/18] slub: Move __slab_alloc() into slab_alloc() Christoph Lameter
2011-11-16 17:39 ` [rfc 00/18] slub: irqless/lockless slow allocation paths Eric Dumazet
2011-11-16 17:45   ` Eric Dumazet
2011-11-20 23:32     ` David Rientjes
2011-11-20 23:30 ` David Rientjes

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to='CAOJsxLFM9W=NiGFwjt8-iwrTYrAZiJ2_Mw_EUYyXYE4TKPs9-A@mail.gmail.com' \
    --to=penberg@kernel.org \
    --cc=alex.shi@intel.com \
    --cc=andi@firstfloor.org \
    --cc=cl@linux.com \
    --cc=eric.dumazet@gmail.com \
    --cc=linux-mm@kvack.org \
    --cc=m.galabant@googlemail.com \
    --cc=mpm@selenic.com \
    --cc=rientjes@google.com \
    --cc=shaohua.li@intel.com \
    --cc=tj@kernel.org \
    --cc=z@drze.net \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox