linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: Marco Elver <elver@google.com>
To: Vlastimil Babka <vbabka@suse.cz>
Cc: Matthew Wilcox <willy@infradead.org>,
	linux-mm@kvack.org,  Christoph Lameter <cl@linux.com>,
	David Rientjes <rientjes@google.com>,
	Joonsoo Kim <iamjoonsoo.kim@lge.com>,
	 Pekka Enberg <penberg@kernel.org>,
	Alexander Potapenko <glider@google.com>,
	 Dmitry Vyukov <dvyukov@google.com>,
	kasan-dev@googlegroups.com
Subject: Re: [RFC PATCH 30/32] mm/sl*b: Differentiate struct slab fields by sl*b implementations
Date: Wed, 17 Nov 2021 08:00:00 +0100	[thread overview]
Message-ID: <CANpmjNMjMZE1n-5v2sCZZOrLLs9hDkhVMKfij1GSwi+T0HY7sA@mail.gmail.com> (raw)
In-Reply-To: <20211116001628.24216-31-vbabka@suse.cz>

On Tue, 16 Nov 2021 at 01:16, Vlastimil Babka <vbabka@suse.cz> wrote:
> With a struct slab definition separate from struct page, we can go further and
> define only fields that the chosen sl*b implementation uses. This means
> everything between __page_flags and __page_refcount placeholders now depends on
> the chosen CONFIG_SL*B. Some fields exist in all implementations (slab_list)
> but can be part of a union in some, so it's simpler to repeat them than
> complicate the definition with ifdefs even more.
>
> The patch doesn't change physical offsets of the fields, although it could be
> done later - for example it's now clear that tighter packing in SLOB could be
> possible.
>
> This should also prevent accidental use of fields that don't exist in given
> implementation. Before this patch virt_to_cache() and cache_from_obj() were
> visible for SLOB (albeit not used), although it relies on the slab_cache field
> that isn't set by SLOB. With this patch it's now a compile error, so these
> functions are now hidden behind #ifndef CONFIG_SLOB.
>
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> Cc: Alexander Potapenko <glider@google.com> (maintainer:KFENCE)
> Cc: Marco Elver <elver@google.com> (maintainer:KFENCE)
> Cc: Dmitry Vyukov <dvyukov@google.com> (reviewer:KFENCE)
> Cc: <kasan-dev@googlegroups.com>

Ran kfence_test with both slab and slub, and all passes:

Tested-by: Marco Elver <elver@google.com>

> ---
>  mm/kfence/core.c |  9 +++++----
>  mm/slab.h        | 46 ++++++++++++++++++++++++++++++++++++----------
>  2 files changed, 41 insertions(+), 14 deletions(-)
>
> diff --git a/mm/kfence/core.c b/mm/kfence/core.c
> index 4eb60cf5ff8b..46103a7628a6 100644
> --- a/mm/kfence/core.c
> +++ b/mm/kfence/core.c
> @@ -427,10 +427,11 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
>         /* Set required slab fields. */
>         slab = virt_to_slab((void *)meta->addr);
>         slab->slab_cache = cache;
> -       if (IS_ENABLED(CONFIG_SLUB))
> -               slab->objects = 1;
> -       if (IS_ENABLED(CONFIG_SLAB))
> -               slab->s_mem = addr;
> +#if defined(CONFIG_SLUB)
> +       slab->objects = 1;
> +#elif defined (CONFIG_SLAB)
> +       slab->s_mem = addr;
> +#endif
>
>         /* Memory initialization. */
>         for_each_canary(meta, set_canary_byte);
> diff --git a/mm/slab.h b/mm/slab.h
> index 58b65e5e5d49..10a9ee195249 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -8,9 +8,24 @@
>  /* Reuses the bits in struct page */
>  struct slab {
>         unsigned long __page_flags;
> +
> +#if defined(CONFIG_SLAB)
> +
> +       union {
> +               struct list_head slab_list;
> +               struct rcu_head rcu_head;
> +       };
> +       struct kmem_cache *slab_cache;
> +       void *freelist; /* array of free object indexes */
> +       void * s_mem;   /* first object */
> +       unsigned int active;
> +
> +#elif defined(CONFIG_SLUB)
> +
>         union {
>                 struct list_head slab_list;
> -               struct {        /* Partial pages */
> +               struct rcu_head rcu_head;
> +               struct {
>                         struct slab *next;
>  #ifdef CONFIG_64BIT
>                         int slabs;      /* Nr of slabs left */
> @@ -18,25 +33,32 @@ struct slab {
>                         short int slabs;
>  #endif
>                 };
> -               struct rcu_head rcu_head;
>         };
> -       struct kmem_cache *slab_cache; /* not slob */
> +       struct kmem_cache *slab_cache;
>         /* Double-word boundary */
>         void *freelist;         /* first free object */
>         union {
> -               void *s_mem;    /* slab: first object */
> -               unsigned long counters;         /* SLUB */
> -               struct {                        /* SLUB */
> +               unsigned long counters;
> +               struct {
>                         unsigned inuse:16;
>                         unsigned objects:15;
>                         unsigned frozen:1;
>                 };
>         };
> +       unsigned int __unused;
> +
> +#elif defined(CONFIG_SLOB)
> +
> +       struct list_head slab_list;
> +       void * __unused_1;
> +       void *freelist;         /* first free block */
> +       void * __unused_2;
> +       int units;
> +
> +#else
> +#error "Unexpected slab allocator configured"
> +#endif
>
> -       union {
> -               unsigned int active;            /* SLAB */
> -               int units;                      /* SLOB */
> -       };
>         atomic_t __page_refcount;
>  #ifdef CONFIG_MEMCG
>         unsigned long memcg_data;
> @@ -47,7 +69,9 @@ struct slab {
>         static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
>  SLAB_MATCH(flags, __page_flags);
>  SLAB_MATCH(compound_head, slab_list);  /* Ensure bit 0 is clear */
> +#ifndef CONFIG_SLOB
>  SLAB_MATCH(rcu_head, rcu_head);
> +#endif
>  SLAB_MATCH(_refcount, __page_refcount);
>  #ifdef CONFIG_MEMCG
>  SLAB_MATCH(memcg_data, memcg_data);
> @@ -623,6 +647,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s,
>  }
>  #endif /* CONFIG_MEMCG_KMEM */
>
> +#ifndef CONFIG_SLOB
>  static inline struct kmem_cache *virt_to_cache(const void *obj)
>  {
>         struct slab *slab;
> @@ -669,6 +694,7 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
>                 print_tracking(cachep, x);
>         return cachep;
>  }
> +#endif /* CONFIG_SLOB */
>
>  static inline size_t slab_ksize(const struct kmem_cache *s)
>  {
> --
> 2.33.1
>


  reply	other threads:[~2021-11-17  7:01 UTC|newest]

Thread overview: 41+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-11-16  0:15 [RFC PATCH 00/32] Separate struct slab from struct page Vlastimil Babka
2021-11-16  0:15 ` [RFC PATCH 01/32] mm/slab: Dissolve slab_map_pages() in its caller Vlastimil Babka
2021-11-16  0:15 ` [RFC PATCH 02/32] mm/slub: Make object_err() static Vlastimil Babka
2021-11-16  0:15 ` [RFC PATCH 03/32] mm: Split slab into its own type Vlastimil Babka
2021-11-16  0:16 ` [RFC PATCH 04/32] mm: Add account_slab() and unaccount_slab() Vlastimil Babka
2021-11-16  0:16 ` [RFC PATCH 05/32] mm: Convert virt_to_cache() to use struct slab Vlastimil Babka
2021-11-16  0:16 ` [RFC PATCH 06/32] mm: Convert __ksize() to " Vlastimil Babka
2021-11-16  0:16 ` [RFC PATCH 07/32] mm: Use struct slab in kmem_obj_info() Vlastimil Babka
2021-11-16  0:16 ` [RFC PATCH 08/32] mm: Convert check_heap_object() to use struct slab Vlastimil Babka
2021-11-16  0:16 ` [RFC PATCH 09/32] mm/slub: Convert detached_freelist to use a " Vlastimil Babka
2021-11-16  0:16 ` [RFC PATCH 10/32] mm/slub: Convert kfree() " Vlastimil Babka
2021-11-16  0:16 ` [RFC PATCH 11/32] mm/slub: Convert __slab_lock() and __slab_unlock() to " Vlastimil Babka
2021-11-16  0:16 ` [RFC PATCH 12/32] mm/slub: Convert print_page_info() to print_slab_info() Vlastimil Babka
2021-11-16  0:16 ` [RFC PATCH 13/32] mm/slub: Convert alloc_slab_page() to return a struct slab Vlastimil Babka
2021-11-16  0:16 ` [RFC PATCH 14/32] mm/slub: Convert __free_slab() to use " Vlastimil Babka
2021-11-16  0:16 ` [RFC PATCH 15/32] mm/slub: Convert pfmemalloc_match() to take a " Vlastimil Babka
2021-11-16  0:16 ` [RFC PATCH 16/32] mm/slub: Convert most struct page to struct slab by spatch Vlastimil Babka
2021-11-16  0:16 ` [RFC PATCH 17/32] mm/slub: Finish struct page to struct slab conversion Vlastimil Babka
2021-11-16  0:16 ` [RFC PATCH 18/32] mm/slab: Convert kmem_getpages() and kmem_freepages() to struct slab Vlastimil Babka
2021-11-16  0:16 ` [RFC PATCH 19/32] mm/slab: Convert most struct page to struct slab by spatch Vlastimil Babka
2021-11-16  0:16 ` [RFC PATCH 20/32] mm/slab: Finish struct page to struct slab conversion Vlastimil Babka
2021-11-16  0:16 ` [RFC PATCH 21/32] mm: Convert struct page to struct slab in functions used by other subsystems Vlastimil Babka
2021-11-16 14:02   ` Andrey Konovalov
2021-11-16 16:32     ` Vlastimil Babka
2021-11-16 23:04       ` Andrey Konovalov
2021-11-16 23:37         ` Vlastimil Babka
2021-11-16  0:16 ` [RFC PATCH 22/32] mm/memcg: Convert slab objcgs from struct page to struct slab Vlastimil Babka
2021-11-16  0:16 ` [RFC PATCH 23/32] mm/slob: Convert SLOB to use " Vlastimil Babka
2021-11-16  0:16 ` [RFC PATCH 24/32] mm/kasan: Convert to " Vlastimil Babka
2021-11-16 13:58   ` Andrey Konovalov
2021-11-16 18:17   ` Matthew Wilcox
2021-11-16  0:16 ` [RFC PATCH 25/32] mm/kfence: Convert kfence_guarded_alloc() " Vlastimil Babka
2021-11-17  7:00   ` Marco Elver
2021-11-16  0:16 ` [RFC PATCH 26/32] zsmalloc: Stop using slab fields in struct page Vlastimil Babka
2021-11-16  0:16 ` [RFC PATCH 27/32] bootmem: Use page->index instead of page->freelist Vlastimil Babka
2021-11-16  0:16 ` [RFC PATCH 28/32] iommu: Use put_pages_list Vlastimil Babka
2021-11-16  0:16 ` [RFC PATCH 29/32] mm: Remove slab from struct page Vlastimil Babka
2021-11-16  0:16 ` [RFC PATCH 30/32] mm/sl*b: Differentiate struct slab fields by sl*b implementations Vlastimil Babka
2021-11-17  7:00   ` Marco Elver [this message]
2021-11-16  0:16 ` [RFC PATCH 31/32] mm/slub: Simplify struct slab slabs field definition Vlastimil Babka
2021-11-16  0:16 ` [RFC PATCH 32/32] mm/slub: Define struct slab fields for CONFIG_SLUB_CPU_PARTIAL only when enabled Vlastimil Babka

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=CANpmjNMjMZE1n-5v2sCZZOrLLs9hDkhVMKfij1GSwi+T0HY7sA@mail.gmail.com \
    --to=elver@google.com \
    --cc=cl@linux.com \
    --cc=dvyukov@google.com \
    --cc=glider@google.com \
    --cc=iamjoonsoo.kim@lge.com \
    --cc=kasan-dev@googlegroups.com \
    --cc=linux-mm@kvack.org \
    --cc=penberg@kernel.org \
    --cc=rientjes@google.com \
    --cc=vbabka@suse.cz \
    --cc=willy@infradead.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox