From: Andrey Konovalov <andreyknvl@gmail.com>
To: Vlastimil Babka <vbabka@suse.cz>
Cc: Matthew Wilcox <willy@infradead.org>,
	Christoph Lameter <cl@linux.com>,
	 David Rientjes <rientjes@google.com>,
	Joonsoo Kim <iamjoonsoo.kim@lge.com>,
	 Pekka Enberg <penberg@kernel.org>,
	Linux Memory Management List <linux-mm@kvack.org>,
	 Andrew Morton <akpm@linux-foundation.org>,
	patches@lists.linux.dev,  Julia Lawall <julia.lawall@inria.fr>,
	Luis Chamberlain <mcgrof@kernel.org>,
	 Andrey Ryabinin <ryabinin.a.a@gmail.com>,
	Alexander Potapenko <glider@google.com>,
	 Dmitry Vyukov <dvyukov@google.com>,
	Marco Elver <elver@google.com>,
	 Johannes Weiner <hannes@cmpxchg.org>,
	Michal Hocko <mhocko@kernel.org>,
	 Vladimir Davydov <vdavydov.dev@gmail.com>,
	kasan-dev <kasan-dev@googlegroups.com>,
	 cgroups@vger.kernel.org
Subject: Re: [PATCH v2 22/33] mm: Convert struct page to struct slab in functions used by other subsystems
Date: Thu, 2 Dec 2021 18:16:30 +0100	[thread overview]
Message-ID: <CA+fCnZfj4mngOf9roarq6RFQLgkcGhcM1aFMi7OjJek3T4sgYA@mail.gmail.com> (raw)
In-Reply-To: <20211201181510.18784-23-vbabka@suse.cz>

On Wed, Dec 1, 2021 at 7:15 PM Vlastimil Babka <vbabka@suse.cz> wrote:
>
> KASAN, KFENCE and memcg interact with SLAB or SLUB internals through the
> functions nearest_obj(), obj_to_index() and objs_per_slab(), which take
> struct page as a parameter. This patch converts them to take struct slab,
> updating all callers, via a coccinelle semantic patch.
>
> // Options: --include-headers --no-includes --smpl-spacing include/linux/slab_def.h include/linux/slub_def.h mm/slab.h mm/kasan/*.c mm/kfence/kfence_test.c mm/memcontrol.c mm/slab.c mm/slub.c
> // Note: needs coccinelle 1.1.1 to avoid breaking whitespace
>
> @@
> @@
>
> -objs_per_slab_page(
> +objs_per_slab(
>  ...
>  )
>  { ... }
>
> @@
> @@
>
> -objs_per_slab_page(
> +objs_per_slab(
>  ...
>  )
>
> @@
> identifier fn =~ "obj_to_index|objs_per_slab";
> @@
>
>  fn(...,
> -   const struct page *page
> +   const struct slab *slab
>     ,...)
>  {
> <...
> (
> - page_address(page)
> + slab_address(slab)
> |
> - page
> + slab
> )
> ...>
>  }
>
> @@
> identifier fn =~ "nearest_obj";
> @@
>
>  fn(...,
> -   struct page *page
> +   const struct slab *slab
>     ,...)
>  {
> <...
> (
> - page_address(page)
> + slab_address(slab)
> |
> - page
> + slab
> )
> ...>
>  }
>
> @@
> identifier fn =~ "nearest_obj|obj_to_index|objs_per_slab";
> expression E;
> @@
>
>  fn(...,
> (
> - slab_page(E)
> + E
> |
> - virt_to_page(E)
> + virt_to_slab(E)
> |
> - virt_to_head_page(E)
> + virt_to_slab(E)
> |
> - page
> + page_slab(page)
> )
>   ,...)
>
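
For callers outside the allocator, the net effect of the conversion above is
"resolve the slab once, then pass it to the helpers". Below is a minimal,
hypothetical C sketch of that pattern (not part of the patch), assuming a
mm/-internal file that includes mm/slab.h so virt_to_slab() and the converted
helpers are visible; example_lookup_object() and its pr_debug() line are
purely illustrative. (The semantic patch itself would presumably be applied
with coccinelle's spatch tool using the options listed in its header comment.)

/*
 * Hypothetical caller-side sketch, not from the patch: the pattern this
 * conversion establishes for KASAN/KFENCE/memcg-style users.
 */
#include "slab.h"	/* virt_to_slab(), nearest_obj(), obj_to_index(), objs_per_slab() */

static void *example_lookup_object(struct kmem_cache *cache, void *addr)
{
	/* Resolve the containing slab once; the helpers no longer take a page. */
	struct slab *slab = virt_to_slab(addr);
	/* Snap an interior pointer back to the start of its object. */
	void *object = nearest_obj(cache, slab, addr);
	/* Index of the object within the slab, e.g. for per-object metadata. */
	unsigned int idx = obj_to_index(cache, slab, object);

	pr_debug("object %px is index %u of %d per slab\n",
		 object, idx, objs_per_slab(cache, slab));
	return object;
}
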
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> Cc: Julia Lawall <julia.lawall@inria.fr>
> Cc: Luis Chamberlain <mcgrof@kernel.org>
> Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
> Cc: Alexander Potapenko <glider@google.com>
> Cc: Andrey Konovalov <andreyknvl@gmail.com>
> Cc: Dmitry Vyukov <dvyukov@google.com>
> Cc: Marco Elver <elver@google.com>
> Cc: Johannes Weiner <hannes@cmpxchg.org>
> Cc: Michal Hocko <mhocko@kernel.org>
> Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
> Cc: <kasan-dev@googlegroups.com>
> Cc: <cgroups@vger.kernel.org>
> ---
>  include/linux/slab_def.h | 16 ++++++++--------
>  include/linux/slub_def.h | 18 +++++++++---------
>  mm/kasan/common.c        |  4 ++--
>  mm/kasan/generic.c       |  2 +-
>  mm/kasan/report.c        |  2 +-
>  mm/kasan/report_tags.c   |  2 +-
>  mm/kfence/kfence_test.c  |  4 ++--
>  mm/memcontrol.c          |  4 ++--
>  mm/slab.c                | 10 +++++-----
>  mm/slab.h                |  4 ++--
>  mm/slub.c                |  2 +-
>  11 files changed, 34 insertions(+), 34 deletions(-)
>
> diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
> index 3aa5e1e73ab6..e24c9aff6fed 100644
> --- a/include/linux/slab_def.h
> +++ b/include/linux/slab_def.h
> @@ -87,11 +87,11 @@ struct kmem_cache {
>         struct kmem_cache_node *node[MAX_NUMNODES];
>  };
>
> -static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
> +static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
>                                 void *x)
>  {
> -       void *object = x - (x - page->s_mem) % cache->size;
> -       void *last_object = page->s_mem + (cache->num - 1) * cache->size;
> +       void *object = x - (x - slab->s_mem) % cache->size;
> +       void *last_object = slab->s_mem + (cache->num - 1) * cache->size;
>
>         if (unlikely(object > last_object))
>                 return last_object;
> @@ -106,16 +106,16 @@ static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
>   *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
>   */
>  static inline unsigned int obj_to_index(const struct kmem_cache *cache,
> -                                       const struct page *page, void *obj)
> +                                       const struct slab *slab, void *obj)
>  {
> -       u32 offset = (obj - page->s_mem);
> +       u32 offset = (obj - slab->s_mem);
>         return reciprocal_divide(offset, cache->reciprocal_buffer_size);
>  }
>
> -static inline int objs_per_slab_page(const struct kmem_cache *cache,
> -                                    const struct page *page)
> +static inline int objs_per_slab(const struct kmem_cache *cache,
> +                                    const struct slab *slab)
>  {
> -       if (is_kfence_address(page_address(page)))
> +       if (is_kfence_address(slab_address(slab)))
>                 return 1;
>         return cache->num;
>  }
> diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
> index 8a9c2876ca89..33c5c0e3bd8d 100644
> --- a/include/linux/slub_def.h
> +++ b/include/linux/slub_def.h
> @@ -158,11 +158,11 @@ static inline void sysfs_slab_release(struct kmem_cache *s)
>
>  void *fixup_red_left(struct kmem_cache *s, void *p);
>
> -static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
> +static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
>                                 void *x) {
> -       void *object = x - (x - page_address(page)) % cache->size;
> -       void *last_object = page_address(page) +
> -               (page->objects - 1) * cache->size;
> +       void *object = x - (x - slab_address(slab)) % cache->size;
> +       void *last_object = slab_address(slab) +
> +               (slab->objects - 1) * cache->size;
>         void *result = (unlikely(object > last_object)) ? last_object : object;
>
>         result = fixup_red_left(cache, result);
> @@ -178,16 +178,16 @@ static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
>  }
>
>  static inline unsigned int obj_to_index(const struct kmem_cache *cache,
> -                                       const struct page *page, void *obj)
> +                                       const struct slab *slab, void *obj)
>  {
>         if (is_kfence_address(obj))
>                 return 0;
> -       return __obj_to_index(cache, page_address(page), obj);
> +       return __obj_to_index(cache, slab_address(slab), obj);
>  }
>
> -static inline int objs_per_slab_page(const struct kmem_cache *cache,
> -                                    const struct page *page)
> +static inline int objs_per_slab(const struct kmem_cache *cache,
> +                                    const struct slab *slab)
>  {
> -       return page->objects;
> +       return slab->objects;
>  }
>  #endif /* _LINUX_SLUB_DEF_H */
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index 8428da2aaf17..6a1cd2d38bff 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -298,7 +298,7 @@ static inline u8 assign_tag(struct kmem_cache *cache,
>         /* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
>  #ifdef CONFIG_SLAB
>         /* For SLAB assign tags based on the object index in the freelist. */
> -       return (u8)obj_to_index(cache, virt_to_head_page(object), (void *)object);
> +       return (u8)obj_to_index(cache, virt_to_slab(object), (void *)object);
>  #else
>         /*
>          * For SLUB assign a random tag during slab creation, otherwise reuse
> @@ -341,7 +341,7 @@ static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
>         if (is_kfence_address(object))
>                 return false;
>
> -       if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
> +       if (unlikely(nearest_obj(cache, virt_to_slab(object), object) !=
>             object)) {
>                 kasan_report_invalid_free(tagged_object, ip);
>                 return true;
> diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
> index 84a038b07c6f..5d0b79416c4e 100644
> --- a/mm/kasan/generic.c
> +++ b/mm/kasan/generic.c
> @@ -339,7 +339,7 @@ static void __kasan_record_aux_stack(void *addr, bool can_alloc)
>                 return;
>
>         cache = page->slab_cache;
> -       object = nearest_obj(cache, page, addr);
> +       object = nearest_obj(cache, page_slab(page), addr);
>         alloc_meta = kasan_get_alloc_meta(cache, object);
>         if (!alloc_meta)
>                 return;
> diff --git a/mm/kasan/report.c b/mm/kasan/report.c
> index 0bc10f452f7e..e00999dc6499 100644
> --- a/mm/kasan/report.c
> +++ b/mm/kasan/report.c
> @@ -249,7 +249,7 @@ static void print_address_description(void *addr, u8 tag)
>
>         if (page && PageSlab(page)) {
>                 struct kmem_cache *cache = page->slab_cache;
> -               void *object = nearest_obj(cache, page, addr);
> +               void *object = nearest_obj(cache, page_slab(page), addr);
>
>                 describe_object(cache, object, addr, tag);
>         }
> diff --git a/mm/kasan/report_tags.c b/mm/kasan/report_tags.c
> index 8a319fc16dab..06c21dd77493 100644
> --- a/mm/kasan/report_tags.c
> +++ b/mm/kasan/report_tags.c
> @@ -23,7 +23,7 @@ const char *kasan_get_bug_type(struct kasan_access_info *info)
>         page = kasan_addr_to_page(addr);
>         if (page && PageSlab(page)) {
>                 cache = page->slab_cache;
> -               object = nearest_obj(cache, page, (void *)addr);
> +               object = nearest_obj(cache, page_slab(page), (void *)addr);
>                 alloc_meta = kasan_get_alloc_meta(cache, object);
>
>                 if (alloc_meta) {
> diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
> index 695030c1fff8..f7276711d7b9 100644
> --- a/mm/kfence/kfence_test.c
> +++ b/mm/kfence/kfence_test.c
> @@ -291,8 +291,8 @@ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocat
>                          * even for KFENCE objects; these are required so that
>                          * memcg accounting works correctly.
>                          */
> -                       KUNIT_EXPECT_EQ(test, obj_to_index(s, page, alloc), 0U);
> -                       KUNIT_EXPECT_EQ(test, objs_per_slab_page(s, page), 1);
> +                       KUNIT_EXPECT_EQ(test, obj_to_index(s, page_slab(page), alloc), 0U);
> +                       KUNIT_EXPECT_EQ(test, objs_per_slab(s, page_slab(page)), 1);
>
>                         if (policy == ALLOCATE_ANY)
>                                 return alloc;
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 6863a834ed42..906edbd92436 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -2819,7 +2819,7 @@ static struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
>  int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
>                                  gfp_t gfp, bool new_page)
>  {
> -       unsigned int objects = objs_per_slab_page(s, page);
> +       unsigned int objects = objs_per_slab(s, page_slab(page));
>         unsigned long memcg_data;
>         void *vec;
>
> @@ -2881,7 +2881,7 @@ struct mem_cgroup *mem_cgroup_from_obj(void *p)
>                 struct obj_cgroup *objcg;
>                 unsigned int off;
>
> -               off = obj_to_index(page->slab_cache, page, p);
> +               off = obj_to_index(page->slab_cache, page_slab(page), p);
>                 objcg = page_objcgs(page)[off];
>                 if (objcg)
>                         return obj_cgroup_memcg(objcg);
> diff --git a/mm/slab.c b/mm/slab.c
> index f0447b087d02..785fffd527fe 100644
> --- a/mm/slab.c
> +++ b/mm/slab.c
> @@ -1560,7 +1560,7 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
>                 struct slab *slab = virt_to_slab(objp);
>                 unsigned int objnr;
>
> -               objnr = obj_to_index(cachep, slab_page(slab), objp);
> +               objnr = obj_to_index(cachep, slab, objp);
>                 if (objnr) {
>                         objp = index_to_obj(cachep, slab, objnr - 1);
>                         realobj = (char *)objp + obj_offset(cachep);
> @@ -2530,7 +2530,7 @@ static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slab)
>  static void slab_put_obj(struct kmem_cache *cachep,
>                         struct slab *slab, void *objp)
>  {
> -       unsigned int objnr = obj_to_index(cachep, slab_page(slab), objp);
> +       unsigned int objnr = obj_to_index(cachep, slab, objp);
>  #if DEBUG
>         unsigned int i;
>
> @@ -2717,7 +2717,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
>         if (cachep->flags & SLAB_STORE_USER)
>                 *dbg_userword(cachep, objp) = (void *)caller;
>
> -       objnr = obj_to_index(cachep, slab_page(slab), objp);
> +       objnr = obj_to_index(cachep, slab, objp);
>
>         BUG_ON(objnr >= cachep->num);
>         BUG_ON(objp != index_to_obj(cachep, slab, objnr));
> @@ -3663,7 +3663,7 @@ void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
>         objp = object - obj_offset(cachep);
>         kpp->kp_data_offset = obj_offset(cachep);
>         slab = virt_to_slab(objp);
> -       objnr = obj_to_index(cachep, slab_page(slab), objp);
> +       objnr = obj_to_index(cachep, slab, objp);
>         objp = index_to_obj(cachep, slab, objnr);
>         kpp->kp_objp = objp;
>         if (DEBUG && cachep->flags & SLAB_STORE_USER)
> @@ -4181,7 +4181,7 @@ void __check_heap_object(const void *ptr, unsigned long n,
>
>         /* Find and validate object. */
>         cachep = slab->slab_cache;
> -       objnr = obj_to_index(cachep, slab_page(slab), (void *)ptr);
> +       objnr = obj_to_index(cachep, slab, (void *)ptr);
>         BUG_ON(objnr >= cachep->num);
>
>         /* Find offset within object. */
> diff --git a/mm/slab.h b/mm/slab.h
> index 7376c9d8aa2b..15d109d8ec89 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -483,7 +483,7 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
>                                 continue;
>                         }
>
> -                       off = obj_to_index(s, page, p[i]);
> +                       off = obj_to_index(s, page_slab(page), p[i]);
>                         obj_cgroup_get(objcg);
>                         page_objcgs(page)[off] = objcg;
>                         mod_objcg_state(objcg, page_pgdat(page),
> @@ -522,7 +522,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
>                 else
>                         s = s_orig;
>
> -               off = obj_to_index(s, page, p[i]);
> +               off = obj_to_index(s, page_slab(page), p[i]);
>                 objcg = objcgs[off];
>                 if (!objcg)
>                         continue;
> diff --git a/mm/slub.c b/mm/slub.c
> index f5344211d8cc..61aaaa662c5e 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -4342,7 +4342,7 @@ void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
>  #else
>         objp = objp0;
>  #endif
> -       objnr = obj_to_index(s, slab_page(slab), objp);
> +       objnr = obj_to_index(s, slab, objp);
>         kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp);
>         objp = base + s->size * objnr;
>         kpp->kp_objp = objp;
> --
> 2.33.1
>

Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>

Thanks!



Thread overview: 89+ messages
2021-12-01 18:14 [PATCH v2 00/33] Separate struct slab from struct page Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 01/33] mm: add virt_to_folio() and folio_address() Vlastimil Babka
2021-12-14 14:20   ` Johannes Weiner
2021-12-14 14:27     ` Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 02/33] mm/slab: Dissolve slab_map_pages() in its caller Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 03/33] mm/slub: Make object_err() static Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 04/33] mm: Split slab into its own type Vlastimil Babka
2021-12-14 14:24   ` Johannes Weiner
2021-12-01 18:14 ` [PATCH v2 05/33] mm: Add account_slab() and unaccount_slab() Vlastimil Babka
2021-12-14 14:25   ` Johannes Weiner
2021-12-01 18:14 ` [PATCH v2 06/33] mm: Convert virt_to_cache() to use struct slab Vlastimil Babka
2021-12-14 14:26   ` Johannes Weiner
2021-12-01 18:14 ` [PATCH v2 07/33] mm: Convert __ksize() to " Vlastimil Babka
2021-12-14 14:28   ` Johannes Weiner
2021-12-01 18:14 ` [PATCH v2 08/33] mm: Use struct slab in kmem_obj_info() Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 09/33] mm: Convert check_heap_object() to use struct slab Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 10/33] mm/slub: Convert detached_freelist to use a " Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 11/33] mm/slub: Convert kfree() " Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 12/33] mm/slub: Convert __slab_lock() and __slab_unlock() to " Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 13/33] mm/slub: Convert print_page_info() to print_slab_info() Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 14/33] mm/slub: Convert alloc_slab_page() to return a struct slab Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 15/33] mm/slub: Convert __free_slab() to use " Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 16/33] mm/slub: Convert pfmemalloc_match() to take a " Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 17/33] mm/slub: Convert most struct page to struct slab by spatch Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 18/33] mm/slub: Finish struct page to struct slab conversion Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 19/33] mm/slab: Convert kmem_getpages() and kmem_freepages() to struct slab Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 20/33] mm/slab: Convert most struct page to struct slab by spatch Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 21/33] mm/slab: Finish struct page to struct slab conversion Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 22/33] mm: Convert struct page to struct slab in functions used by other subsystems Vlastimil Babka
2021-12-02 17:16   ` Andrey Konovalov [this message]
2021-12-14 14:31   ` Johannes Weiner
2021-12-01 18:15 ` [PATCH v2 23/33] mm/memcg: Convert slab objcgs from struct page to struct slab Vlastimil Babka
2021-12-14 14:43   ` Johannes Weiner
2021-12-20 23:31     ` Vlastimil Babka
2021-12-01 18:15 ` [PATCH v2 24/33] mm/slob: Convert SLOB to use " Vlastimil Babka
2021-12-10 10:44   ` Hyeonggon Yoo
2021-12-10 11:44     ` Vlastimil Babka
2021-12-10 15:29       ` Hyeonggon Yoo
2021-12-10 18:09         ` Vlastimil Babka
2021-12-11 10:54           ` Hyeonggon Yoo
2021-12-01 18:15 ` [PATCH v2 25/33] mm/kasan: Convert to struct folio and " Vlastimil Babka
2021-12-02 17:16   ` Andrey Konovalov
2021-12-01 18:15 ` [PATCH v2 26/33] mm/kfence: Convert kfence_guarded_alloc() to " Vlastimil Babka
2021-12-01 18:15 ` [PATCH v2 27/33] zsmalloc: Stop using slab fields in struct page Vlastimil Babka
2021-12-01 23:34   ` Minchan Kim
2021-12-14 14:58   ` Johannes Weiner
2021-12-01 18:15 ` [PATCH v2 28/33] bootmem: Use page->index instead of page->freelist Vlastimil Babka
2021-12-14 14:59   ` Johannes Weiner
2021-12-01 18:15 ` [PATCH v2 29/33] iommu: Use put_pages_list Vlastimil Babka
2021-12-01 19:07   ` Matthew Wilcox
2021-12-01 19:45     ` Robin Murphy
2021-12-01 18:15 ` [PATCH v2 30/33] mm: Remove slab from struct page Vlastimil Babka
2021-12-14 14:46   ` Johannes Weiner
2021-12-01 18:15 ` [PATCH v2 31/33] mm/sl*b: Differentiate struct slab fields by sl*b implementations Vlastimil Babka
     [not found]   ` <20211210163757.GA717823@odroid>
2021-12-10 18:26     ` Vlastimil Babka
2021-12-11 16:23       ` Matthew Wilcox
     [not found]       ` <20211211115527.GA822127@odroid>
2021-12-11 16:52         ` Matthew Wilcox
2021-12-01 18:15 ` [PATCH v2 32/33] mm/slub: Simplify struct slab slabs field definition Vlastimil Babka
2021-12-14 15:06   ` Johannes Weiner
2021-12-01 18:15 ` [PATCH v2 33/33] mm/slub: Define struct slab fields for CONFIG_SLUB_CPU_PARTIAL only when enabled Vlastimil Babka
2021-12-01 18:39 ` slab tree for next Vlastimil Babka
2021-12-01 20:34   ` Vlastimil Babka
2021-12-02 16:36     ` Vlastimil Babka
2021-12-02 20:39       ` Stephen Rothwell
2022-01-04  0:21   ` Vlastimil Babka
2022-01-04  8:44     ` Stephen Rothwell
2023-08-29  9:55     ` Vlastimil Babka
2023-08-29 21:33       ` Stephen Rothwell
2024-11-05 16:33         ` Vlastimil Babka
2024-11-05 21:08           ` Stephen Rothwell
2021-12-02 12:25 ` [PATCH v2 00/33] Separate struct slab from struct page Vlastimil Babka
2021-12-14 12:57 ` Vlastimil Babka
     [not found]   ` <20211214143822.GA1063445@odroid>
2021-12-14 14:43     ` Vlastimil Babka
2021-12-15  1:03   ` Roman Gushchin
2021-12-15 23:38     ` Roman Gushchin
2021-12-16  9:19       ` Vlastimil Babka
2021-12-20  0:47       ` Vlastimil Babka
2021-12-20  1:42         ` Matthew Wilcox
2021-12-20  0:24     ` Vlastimil Babka
2021-12-16 15:00   ` Hyeonggon Yoo
2021-12-20 23:58     ` Vlastimil Babka
2021-12-21 17:25       ` Robin Murphy
2021-12-22  7:36       ` Hyeonggon Yoo
2021-12-22 16:56   ` Vlastimil Babka
2021-12-25  9:16     ` Hyeonggon Yoo
2021-12-25 17:53       ` Matthew Wilcox
2021-12-27  2:43         ` Hyeonggon Yoo
2021-12-29 11:22     ` Hyeonggon Yoo
2022-01-03 17:56       ` Vlastimil Babka
