From: Hyeonggon Yoo <42.hyeyoo@gmail.com>
To: chengming.zhou@linux.dev
Cc: vbabka@suse.cz, cl@linux.com, penberg@kernel.org,
	rientjes@google.com,  iamjoonsoo.kim@lge.com,
	akpm@linux-foundation.org, roman.gushchin@linux.dev,
	 linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	 Chengming Zhou <zhouchengming@bytedance.com>
Subject: Re: [PATCH v5 2/9] slub: Change get_partial() interfaces to return slab
Date: Wed, 22 Nov 2023 10:09:04 +0900
Message-ID: <CAB=+i9RNC5vvVDNdzPB6nTuSE0xL7JVSQLO=O4BonW4S2DGcnw@mail.gmail.com>
In-Reply-To: <20231102032330.1036151-3-chengming.zhou@linux.dev>

On Thu, Nov 2, 2023 at 12:24 PM <chengming.zhou@linux.dev> wrote:
>
> From: Chengming Zhou <zhouchengming@bytedance.com>
>
> We need all get_partial() related interfaces to return a slab, instead
> of returning the freelist (or object).
>
> Use partial_context.object to pass the freelist (or object) back to
> the caller for now. This patch should not introduce any functional
> changes.
>
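For readers skimming the diff below: the change drops the double-pointer
out-parameter (struct slab **slab) from struct partial_context and makes
the get_partial() family return the slab directly, with the freelist (or
object) handed back through pc->object. A minimal stand-alone sketch of
the new calling convention (toy types; names such as pool and nr_free are
made up for illustration, this is not the kernel implementation):

#include <stdio.h>
#include <stddef.h>

/* Toy stand-ins for the kernel types. */
struct slab { int nr_free; };

struct partial_context {
	unsigned int flags;
	unsigned int orig_size;
	void *object;	/* freelist/object handed back to the caller */
};

/* After the patch: return the slab itself; the object travels in pc. */
static struct slab *get_partial(struct slab *pool, size_t n,
				struct partial_context *pc)
{
	for (size_t i = 0; i < n; i++) {
		if (pool[i].nr_free > 0) {
			pc->object = &pool[i];	/* stand-in for the first free object */
			return &pool[i];
		}
	}
	return NULL;
}

int main(void)
{
	struct slab pool[2] = { { 0 }, { 3 } };
	struct partial_context pc = { .flags = 0, .orig_size = 32 };
	struct slab *slab = get_partial(pool, 2, &pc);
	void *freelist = slab ? pc.object : NULL;

	printf("slab=%p freelist=%p\n", (void *)slab, freelist);
	return 0;
}

Returning the container and carrying the payload in the context keeps the
return value available for a plain NULL check at every call site and
removes one level of indirection from the context struct.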
> Suggested-by: Vlastimil Babka <vbabka@suse.cz>
> Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
> Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
> Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
> ---
>  mm/slub.c | 63 +++++++++++++++++++++++++++++--------------------------
>  1 file changed, 33 insertions(+), 30 deletions(-)
>
> diff --git a/mm/slub.c b/mm/slub.c
> index 0b0fdc8c189f..03384cd965c5 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -204,9 +204,9 @@ DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
>
>  /* Structure holding parameters for get_partial() call chain */
>  struct partial_context {
> -       struct slab **slab;
>         gfp_t flags;
>         unsigned int orig_size;
> +       void *object;
>  };
>
>  static inline bool kmem_cache_debug(struct kmem_cache *s)
> @@ -2269,10 +2269,11 @@ static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags);
>  /*
>   * Try to allocate a partial slab from a specific node.
>   */
> -static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
> -                             struct partial_context *pc)
> +static struct slab *get_partial_node(struct kmem_cache *s,
> +                                    struct kmem_cache_node *n,
> +                                    struct partial_context *pc)
>  {
> -       struct slab *slab, *slab2;
> +       struct slab *slab, *slab2, *partial = NULL;
>         void *object = NULL;
>         unsigned long flags;
>         unsigned int partial_slabs = 0;
> @@ -2288,27 +2289,28 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
>
>         spin_lock_irqsave(&n->list_lock, flags);
>         list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
> -               void *t;
> -
>                 if (!pfmemalloc_match(slab, pc->flags))
>                         continue;
>
>                 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
>                         object = alloc_single_from_partial(s, n, slab,
>                                                         pc->orig_size);
> -                       if (object)
> +                       if (object) {
> +                               partial = slab;
> +                               pc->object = object;
>                                 break;
> +                       }
>                         continue;
>                 }
>
> -               t = acquire_slab(s, n, slab, object == NULL);
> -               if (!t)
> +               object = acquire_slab(s, n, slab, object == NULL);
> +               if (!object)
>                         break;
>
> -               if (!object) {
> -                       *pc->slab = slab;
> +               if (!partial) {
> +                       partial = slab;
> +                       pc->object = object;
>                         stat(s, ALLOC_FROM_PARTIAL);
> -                       object = t;
>                 } else {
>                         put_cpu_partial(s, slab, 0);
>                         stat(s, CPU_PARTIAL_NODE);
> @@ -2324,20 +2326,21 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
>
>         }
>         spin_unlock_irqrestore(&n->list_lock, flags);
> -       return object;
> +       return partial;
>  }
>
>  /*
>   * Get a slab from somewhere. Search in increasing NUMA distances.
>   */
> -static void *get_any_partial(struct kmem_cache *s, struct partial_context *pc)
> +static struct slab *get_any_partial(struct kmem_cache *s,
> +                                   struct partial_context *pc)
>  {
>  #ifdef CONFIG_NUMA
>         struct zonelist *zonelist;
>         struct zoneref *z;
>         struct zone *zone;
>         enum zone_type highest_zoneidx = gfp_zone(pc->flags);
> -       void *object;
> +       struct slab *slab;
>         unsigned int cpuset_mems_cookie;
>
>         /*
> @@ -2372,8 +2375,8 @@ static void *get_any_partial(struct kmem_cache *s, struct partial_context *pc)
>
>                         if (n && cpuset_zone_allowed(zone, pc->flags) &&
>                                         n->nr_partial > s->min_partial) {
> -                               object = get_partial_node(s, n, pc);
> -                               if (object) {
> +                               slab = get_partial_node(s, n, pc);
> +                               if (slab) {
>                                         /*
>                                          * Don't check read_mems_allowed_retry()
>                                          * here - if mems_allowed was updated in
> @@ -2381,7 +2384,7 @@ static void *get_any_partial(struct kmem_cache *s, struct partial_context *pc)
>                                          * between allocation and the cpuset
>                                          * update
>                                          */
> -                                       return object;
> +                                       return slab;
>                                 }
>                         }
>                 }
> @@ -2393,17 +2396,18 @@ static void *get_any_partial(struct kmem_cache *s, struct partial_context *pc)
>  /*
>   * Get a partial slab, lock it and return it.
>   */
> -static void *get_partial(struct kmem_cache *s, int node, struct partial_context *pc)
> +static struct slab *get_partial(struct kmem_cache *s, int node,
> +                               struct partial_context *pc)
>  {
> -       void *object;
> +       struct slab *slab;
>         int searchnode = node;
>
>         if (node == NUMA_NO_NODE)
>                 searchnode = numa_mem_id();
>
> -       object = get_partial_node(s, get_node(s, searchnode), pc);
> -       if (object || node != NUMA_NO_NODE)
> -               return object;
> +       slab = get_partial_node(s, get_node(s, searchnode), pc);
> +       if (slab || node != NUMA_NO_NODE)
> +               return slab;
>
>         return get_any_partial(s, pc);
>  }
> @@ -3213,10 +3217,10 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
>  new_objects:
>
>         pc.flags = gfpflags;
> -       pc.slab = &slab;
>         pc.orig_size = orig_size;
> -       freelist = get_partial(s, node, &pc);
> -       if (freelist) {
> +       slab = get_partial(s, node, &pc);
> +       if (slab) {
> +               freelist = pc.object;
>                 if (kmem_cache_debug(s)) {
>                         /*
>                          * For debug caches here we had to go through
> @@ -3408,12 +3412,11 @@ static void *__slab_alloc_node(struct kmem_cache *s,
>         void *object;
>
>         pc.flags = gfpflags;
> -       pc.slab = &slab;
>         pc.orig_size = orig_size;
> -       object = get_partial(s, node, &pc);
> +       slab = get_partial(s, node, &pc);
>
> -       if (object)
> -               return object;
> +       if (slab)
> +               return pc.object;
>
>         slab = new_slab(s, gfpflags, node);
>         if (unlikely(!slab)) {

Looks good to me,
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>

