* [PATCH v3 1/1] mm/slab: mark alloc tags empty for sheaves allocated with __GFP_NO_OBJ_EXT
@ 2026-02-25 16:34 Suren Baghdasaryan
2026-02-25 19:08 ` Suren Baghdasaryan
0 siblings, 1 reply; 4+ messages in thread
From: Suren Baghdasaryan @ 2026-02-25 16:34 UTC (permalink / raw)
To: akpm
Cc: vbabka, harry.yoo, 00107082, cl, rientjes, roman.gushchin,
linux-mm, linux-kernel, Suren Baghdasaryan
alloc_empty_sheaf() allocates sheaves from SLAB_KMALLOC caches using
__GFP_NO_OBJ_EXT to avoid recursion, however it does not mark their
allocation tags empty before freeing, which results in a warning when
CONFIG_MEM_ALLOC_PROFILING_DEBUG is set. Fix this by marking allocation
tags for such sheaves as empty.
Reported-by: David Wang <00107082@163.com>
Closes: https://lore.kernel.org/all/20260223155128.3849-1-00107082@163.com/
Analyzed-by: Harry Yoo <harry.yoo@oracle.com>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Tested-by: Harry Yoo <harry.yoo@oracle.com>
Tested-by: David Wang <00107082@163.com>
---
include/linux/gfp_types.h | 2 ++
mm/slab.h | 4 ++--
mm/slub.c | 33 +++++++++++++++++++++++----------
3 files changed, 27 insertions(+), 12 deletions(-)
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
index 814bb2892f99..6c75df30a281 100644
--- a/include/linux/gfp_types.h
+++ b/include/linux/gfp_types.h
@@ -139,6 +139,8 @@ enum {
* %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
*
* %__GFP_NO_OBJ_EXT causes slab allocation to have no object extension.
+ * mark_obj_codetag_empty() should be called upon freeing for objects allocated
+ * with this flag to indicate that their NULL tags are expected and normal.
*/
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
diff --git a/mm/slab.h b/mm/slab.h
index 71c7261bf822..f6ef862b60ef 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -290,14 +290,14 @@ static inline void *nearest_obj(struct kmem_cache *cache,
/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
- void *addr, void *obj)
+ void *addr, const void *obj)
{
return reciprocal_divide(kasan_reset_tag(obj) - addr,
cache->reciprocal_size);
}
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
- const struct slab *slab, void *obj)
+ const struct slab *slab, const void *obj)
{
if (is_kfence_address(obj))
return 0;
diff --git a/mm/slub.c b/mm/slub.c
index 862642c165ed..34c32749f091 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2041,18 +2041,18 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
-static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
+static inline void mark_obj_codetag_empty(const void *obj)
{
- struct slab *obj_exts_slab;
+ struct slab *obj_slab;
unsigned long slab_exts;
- obj_exts_slab = virt_to_slab(obj_exts);
- slab_exts = slab_obj_exts(obj_exts_slab);
+ obj_slab = virt_to_slab(obj);
+ slab_exts = slab_obj_exts(obj_slab);
if (slab_exts) {
get_slab_obj_exts(slab_exts);
- unsigned int offs = obj_to_index(obj_exts_slab->slab_cache,
- obj_exts_slab, obj_exts);
- struct slabobj_ext *ext = slab_obj_ext(obj_exts_slab,
+ unsigned int offs = obj_to_index(obj_slab->slab_cache,
+ obj_slab, obj);
+ struct slabobj_ext *ext = slab_obj_ext(obj_slab,
slab_exts, offs);
if (unlikely(is_codetag_empty(&ext->ref))) {
@@ -2090,7 +2090,7 @@ static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
-static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {}
+static inline void mark_obj_codetag_empty(const void *obj) {}
static inline bool mark_failed_objexts_alloc(struct slab *slab) { return false; }
static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
struct slabobj_ext *vec, unsigned int objects) {}
@@ -2211,7 +2211,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
* assign slabobj_exts in parallel. In this case the existing
* objcg vector should be reused.
*/
- mark_objexts_empty(vec);
+ mark_obj_codetag_empty(vec);
if (unlikely(!allow_spin))
kfree_nolock(vec);
else
@@ -2254,7 +2254,7 @@ static inline void free_slab_obj_exts(struct slab *slab, bool allow_spin)
* NULL, therefore replace NULL with CODETAG_EMPTY to indicate that
* the extension for obj_exts is expected to be NULL.
*/
- mark_objexts_empty(obj_exts);
+ mark_obj_codetag_empty(obj_exts);
if (allow_spin)
kfree(obj_exts);
else
@@ -2312,6 +2312,10 @@ static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
#else /* CONFIG_SLAB_OBJ_EXT */
+static inline void mark_obj_codetag_empty(const void *obj)
+{
+}
+
static inline void init_slab_obj_exts(struct slab *slab)
{
}
@@ -2783,6 +2787,15 @@ static inline struct slab_sheaf *alloc_empty_sheaf(struct kmem_cache *s,
static void free_empty_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf)
{
+ /*
+ * If the sheaf was created with __GFP_NO_OBJ_EXT flag then its
+ * corresponding extension is NULL and alloc_tag_sub() will throw a
+ * warning, therefore replace NULL with CODETAG_EMPTY to indicate
+ * that the extension for this sheaf is expected to be NULL.
+ */
+ if (s->flags & SLAB_KMALLOC)
+ mark_obj_codetag_empty(sheaf);
+
kfree(sheaf);
stat(s, SHEAF_FREE);
base-commit: 7dff99b354601dd01829e1511711846e04340a69
--
2.53.0.414.gf7e9f6c205-goog
^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH v3 1/1] mm/slab: mark alloc tags empty for sheaves allocated with __GFP_NO_OBJ_EXT
2026-02-25 16:34 [PATCH v3 1/1] mm/slab: mark alloc tags empty for sheaves allocated with __GFP_NO_OBJ_EXT Suren Baghdasaryan
@ 2026-02-25 19:08 ` Suren Baghdasaryan
2026-02-25 21:23 ` Vlastimil Babka
0 siblings, 1 reply; 4+ messages in thread
From: Suren Baghdasaryan @ 2026-02-25 19:08 UTC (permalink / raw)
To: akpm
Cc: vbabka, harry.yoo, 00107082, cl, rientjes, roman.gushchin,
linux-mm, linux-kernel
On Wed, Feb 25, 2026 at 4:34 PM Suren Baghdasaryan <surenb@google.com> wrote:
>
> alloc_empty_sheaf() allocates sheaves from SLAB_KMALLOC caches using
> __GFP_NO_OBJ_EXT to avoid recursion, however it does not mark their
> allocation tags empty before freeing, which results in a warning when
> CONFIG_MEM_ALLOC_PROFILING_DEBUG is set. Fix this by marking allocation
> tags for such sheaves as empty.
>
I think this should also have:
Fixes: 4c0a17e28340 ("slab: prevent recursive kmalloc() in alloc_empty_sheaf()")
and CC to stable for inclusion into 6.19.
Andrew, Vlastimil, should I post another version CC'ing stable or you
can add that line and forward to stable?
> Reported-by: David Wang <00107082@163.com>
> Closes: https://lore.kernel.org/all/20260223155128.3849-1-00107082@163.com/
> Analyzed-by: Harry Yoo <harry.yoo@oracle.com>
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
> Tested-by: Harry Yoo <harry.yoo@oracle.com>
> Tested-by: David Wang <00107082@163.com>
> ---
> include/linux/gfp_types.h | 2 ++
> mm/slab.h | 4 ++--
> mm/slub.c | 33 +++++++++++++++++++++++----------
> 3 files changed, 27 insertions(+), 12 deletions(-)
>
> diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
> index 814bb2892f99..6c75df30a281 100644
> --- a/include/linux/gfp_types.h
> +++ b/include/linux/gfp_types.h
> @@ -139,6 +139,8 @@ enum {
> * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
> *
> * %__GFP_NO_OBJ_EXT causes slab allocation to have no object extension.
> + * mark_obj_codetag_empty() should be called upon freeing for objects allocated
> + * with this flag to indicate that their NULL tags are expected and normal.
> */
> #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
> #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
> diff --git a/mm/slab.h b/mm/slab.h
> index 71c7261bf822..f6ef862b60ef 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -290,14 +290,14 @@ static inline void *nearest_obj(struct kmem_cache *cache,
>
> /* Determine object index from a given position */
> static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
> - void *addr, void *obj)
> + void *addr, const void *obj)
> {
> return reciprocal_divide(kasan_reset_tag(obj) - addr,
> cache->reciprocal_size);
> }
>
> static inline unsigned int obj_to_index(const struct kmem_cache *cache,
> - const struct slab *slab, void *obj)
> + const struct slab *slab, const void *obj)
> {
> if (is_kfence_address(obj))
> return 0;
> diff --git a/mm/slub.c b/mm/slub.c
> index 862642c165ed..34c32749f091 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -2041,18 +2041,18 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
>
> #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
>
> -static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
> +static inline void mark_obj_codetag_empty(const void *obj)
> {
> - struct slab *obj_exts_slab;
> + struct slab *obj_slab;
> unsigned long slab_exts;
>
> - obj_exts_slab = virt_to_slab(obj_exts);
> - slab_exts = slab_obj_exts(obj_exts_slab);
> + obj_slab = virt_to_slab(obj);
> + slab_exts = slab_obj_exts(obj_slab);
> if (slab_exts) {
> get_slab_obj_exts(slab_exts);
> - unsigned int offs = obj_to_index(obj_exts_slab->slab_cache,
> - obj_exts_slab, obj_exts);
> - struct slabobj_ext *ext = slab_obj_ext(obj_exts_slab,
> + unsigned int offs = obj_to_index(obj_slab->slab_cache,
> + obj_slab, obj);
> + struct slabobj_ext *ext = slab_obj_ext(obj_slab,
> slab_exts, offs);
>
> if (unlikely(is_codetag_empty(&ext->ref))) {
> @@ -2090,7 +2090,7 @@ static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
>
> #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
>
> -static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {}
> +static inline void mark_obj_codetag_empty(const void *obj) {}
> static inline bool mark_failed_objexts_alloc(struct slab *slab) { return false; }
> static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
> struct slabobj_ext *vec, unsigned int objects) {}
> @@ -2211,7 +2211,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
> * assign slabobj_exts in parallel. In this case the existing
> * objcg vector should be reused.
> */
> - mark_objexts_empty(vec);
> + mark_obj_codetag_empty(vec);
> if (unlikely(!allow_spin))
> kfree_nolock(vec);
> else
> @@ -2254,7 +2254,7 @@ static inline void free_slab_obj_exts(struct slab *slab, bool allow_spin)
> * NULL, therefore replace NULL with CODETAG_EMPTY to indicate that
> * the extension for obj_exts is expected to be NULL.
> */
> - mark_objexts_empty(obj_exts);
> + mark_obj_codetag_empty(obj_exts);
> if (allow_spin)
> kfree(obj_exts);
> else
> @@ -2312,6 +2312,10 @@ static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
>
> #else /* CONFIG_SLAB_OBJ_EXT */
>
> +static inline void mark_obj_codetag_empty(const void *obj)
> +{
> +}
> +
> static inline void init_slab_obj_exts(struct slab *slab)
> {
> }
> @@ -2783,6 +2787,15 @@ static inline struct slab_sheaf *alloc_empty_sheaf(struct kmem_cache *s,
>
> static void free_empty_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf)
> {
> + /*
> + * If the sheaf was created with __GFP_NO_OBJ_EXT flag then its
> + * corresponding extension is NULL and alloc_tag_sub() will throw a
> + * warning, therefore replace NULL with CODETAG_EMPTY to indicate
> + * that the extension for this sheaf is expected to be NULL.
> + */
> + if (s->flags & SLAB_KMALLOC)
> + mark_obj_codetag_empty(sheaf);
> +
> kfree(sheaf);
>
> stat(s, SHEAF_FREE);
>
> base-commit: 7dff99b354601dd01829e1511711846e04340a69
> --
> 2.53.0.414.gf7e9f6c205-goog
>
^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH v3 1/1] mm/slab: mark alloc tags empty for sheaves allocated with __GFP_NO_OBJ_EXT
2026-02-25 19:08 ` Suren Baghdasaryan
@ 2026-02-25 21:23 ` Vlastimil Babka
2026-02-25 21:28 ` Suren Baghdasaryan
0 siblings, 1 reply; 4+ messages in thread
From: Vlastimil Babka @ 2026-02-25 21:23 UTC (permalink / raw)
To: Suren Baghdasaryan, akpm
Cc: vbabka, harry.yoo, 00107082, cl, rientjes, roman.gushchin,
linux-mm, linux-kernel
On 2/25/26 8:08 PM, Suren Baghdasaryan wrote:
> On Wed, Feb 25, 2026 at 4:34 PM Suren Baghdasaryan <surenb@google.com> wrote:
>>
>> alloc_empty_sheaf() allocates sheaves from SLAB_KMALLOC caches using
>> __GFP_NO_OBJ_EXT to avoid recursion, however it does not mark their
>> allocation tags empty before freeing, which results in a warning when
>> CONFIG_MEM_ALLOC_PROFILING_DEBUG is set. Fix this by marking allocation
>> tags for such sheaves as empty.
>>
>
> I think this should also have:
>
> Fixes: 4c0a17e28340 ("slab: prevent recursive kmalloc() in alloc_empty_sheaf()")
>
> and CC to stable for inclusion into 6.19.
> Andrew, Vlastimil, should I post another version CC'ing stable or you
> can add that line and forward to stable?
I will add it. Note, they don't care about getting an actual email, but
seeing the Cc: line in the mainline commit.
>> Reported-by: David Wang <00107082@163.com>
>> Closes: https://lore.kernel.org/all/20260223155128.3849-1-00107082@163.com/
>> Analyzed-by: Harry Yoo <harry.yoo@oracle.com>
>> Signed-off-by: Suren Baghdasaryan <surenb@google.com>
>> Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
>> Tested-by: Harry Yoo <harry.yoo@oracle.com>
>> Tested-by: David Wang <00107082@163.com>
>> ---
>> include/linux/gfp_types.h | 2 ++
>> mm/slab.h | 4 ++--
>> mm/slub.c | 33 +++++++++++++++++++++++----------
>> 3 files changed, 27 insertions(+), 12 deletions(-)
>>
>> diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
>> index 814bb2892f99..6c75df30a281 100644
>> --- a/include/linux/gfp_types.h
>> +++ b/include/linux/gfp_types.h
>> @@ -139,6 +139,8 @@ enum {
>> * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
>> *
>> * %__GFP_NO_OBJ_EXT causes slab allocation to have no object extension.
>> + * mark_obj_codetag_empty() should be called upon freeing for objects allocated
>> + * with this flag to indicate that their NULL tags are expected and normal.
>> */
>> #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
>> #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
>> diff --git a/mm/slab.h b/mm/slab.h
>> index 71c7261bf822..f6ef862b60ef 100644
>> --- a/mm/slab.h
>> +++ b/mm/slab.h
>> @@ -290,14 +290,14 @@ static inline void *nearest_obj(struct kmem_cache *cache,
>>
>> /* Determine object index from a given position */
>> static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
>> - void *addr, void *obj)
>> + void *addr, const void *obj)
>> {
>> return reciprocal_divide(kasan_reset_tag(obj) - addr,
>> cache->reciprocal_size);
>> }
>>
>> static inline unsigned int obj_to_index(const struct kmem_cache *cache,
>> - const struct slab *slab, void *obj)
>> + const struct slab *slab, const void *obj)
>> {
>> if (is_kfence_address(obj))
>> return 0;
>> diff --git a/mm/slub.c b/mm/slub.c
>> index 862642c165ed..34c32749f091 100644
>> --- a/mm/slub.c
>> +++ b/mm/slub.c
>> @@ -2041,18 +2041,18 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
>>
>> #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
>>
>> -static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
>> +static inline void mark_obj_codetag_empty(const void *obj)
>> {
>> - struct slab *obj_exts_slab;
>> + struct slab *obj_slab;
>> unsigned long slab_exts;
>>
>> - obj_exts_slab = virt_to_slab(obj_exts);
>> - slab_exts = slab_obj_exts(obj_exts_slab);
>> + obj_slab = virt_to_slab(obj);
>> + slab_exts = slab_obj_exts(obj_slab);
>> if (slab_exts) {
>> get_slab_obj_exts(slab_exts);
>> - unsigned int offs = obj_to_index(obj_exts_slab->slab_cache,
>> - obj_exts_slab, obj_exts);
>> - struct slabobj_ext *ext = slab_obj_ext(obj_exts_slab,
>> + unsigned int offs = obj_to_index(obj_slab->slab_cache,
>> + obj_slab, obj);
>> + struct slabobj_ext *ext = slab_obj_ext(obj_slab,
>> slab_exts, offs);
>>
>> if (unlikely(is_codetag_empty(&ext->ref))) {
>> @@ -2090,7 +2090,7 @@ static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
>>
>> #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
>>
>> -static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {}
>> +static inline void mark_obj_codetag_empty(const void *obj) {}
>> static inline bool mark_failed_objexts_alloc(struct slab *slab) { return false; }
>> static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
>> struct slabobj_ext *vec, unsigned int objects) {}
>> @@ -2211,7 +2211,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
>> * assign slabobj_exts in parallel. In this case the existing
>> * objcg vector should be reused.
>> */
>> - mark_objexts_empty(vec);
>> + mark_obj_codetag_empty(vec);
>> if (unlikely(!allow_spin))
>> kfree_nolock(vec);
>> else
>> @@ -2254,7 +2254,7 @@ static inline void free_slab_obj_exts(struct slab *slab, bool allow_spin)
>> * NULL, therefore replace NULL with CODETAG_EMPTY to indicate that
>> * the extension for obj_exts is expected to be NULL.
>> */
>> - mark_objexts_empty(obj_exts);
>> + mark_obj_codetag_empty(obj_exts);
>> if (allow_spin)
>> kfree(obj_exts);
>> else
>> @@ -2312,6 +2312,10 @@ static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
>>
>> #else /* CONFIG_SLAB_OBJ_EXT */
>>
>> +static inline void mark_obj_codetag_empty(const void *obj)
>> +{
>> +}
>> +
>> static inline void init_slab_obj_exts(struct slab *slab)
>> {
>> }
>> @@ -2783,6 +2787,15 @@ static inline struct slab_sheaf *alloc_empty_sheaf(struct kmem_cache *s,
>>
>> static void free_empty_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf)
>> {
>> + /*
>> + * If the sheaf was created with __GFP_NO_OBJ_EXT flag then its
>> + * corresponding extension is NULL and alloc_tag_sub() will throw a
>> + * warning, therefore replace NULL with CODETAG_EMPTY to indicate
>> + * that the extension for this sheaf is expected to be NULL.
>> + */
>> + if (s->flags & SLAB_KMALLOC)
>> + mark_obj_codetag_empty(sheaf);
>> +
>> kfree(sheaf);
>>
>> stat(s, SHEAF_FREE);
>>
>> base-commit: 7dff99b354601dd01829e1511711846e04340a69
>> --
>> 2.53.0.414.gf7e9f6c205-goog
>>
^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH v3 1/1] mm/slab: mark alloc tags empty for sheaves allocated with __GFP_NO_OBJ_EXT
2026-02-25 21:23 ` Vlastimil Babka
@ 2026-02-25 21:28 ` Suren Baghdasaryan
0 siblings, 0 replies; 4+ messages in thread
From: Suren Baghdasaryan @ 2026-02-25 21:28 UTC (permalink / raw)
To: Vlastimil Babka
Cc: akpm, vbabka, harry.yoo, 00107082, cl, rientjes, roman.gushchin,
linux-mm, linux-kernel
On Wed, Feb 25, 2026 at 9:23 PM Vlastimil Babka <vbabka@suse.com> wrote:
>
> On 2/25/26 8:08 PM, Suren Baghdasaryan wrote:
> > On Wed, Feb 25, 2026 at 4:34 PM Suren Baghdasaryan <surenb@google.com> wrote:
> >>
> >> alloc_empty_sheaf() allocates sheaves from SLAB_KMALLOC caches using
> >> __GFP_NO_OBJ_EXT to avoid recursion, however it does not mark their
> >> allocation tags empty before freeing, which results in a warning when
> >> CONFIG_MEM_ALLOC_PROFILING_DEBUG is set. Fix this by marking allocation
> >> tags for such sheaves as empty.
> >>
> >
> > I think this should also have:
> >
> > Fixes: 4c0a17e28340 ("slab: prevent recursive kmalloc() in alloc_empty_sheaf()")
> >
> > and CC to stable for inclusion into 6.19.
> > Andrew, Vlastimil, should I post another version CC'ing stable or you
> > can add that line and forward to stable?
>
> I will add it. Note, they don't care about getting an actual email, but
> seeing the Cc: line in the mainline commit.
Perfect! Thank you. Let me know if anything else is needed on my end.
>
> >> Reported-by: David Wang <00107082@163.com>
> >> Closes: https://lore.kernel.org/all/20260223155128.3849-1-00107082@163.com/
> >> Analyzed-by: Harry Yoo <harry.yoo@oracle.com>
> >> Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> >> Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
> >> Tested-by: Harry Yoo <harry.yoo@oracle.com>
> >> Tested-by: David Wang <00107082@163.com>
> >> ---
> >> include/linux/gfp_types.h | 2 ++
> >> mm/slab.h | 4 ++--
> >> mm/slub.c | 33 +++++++++++++++++++++++----------
> >> 3 files changed, 27 insertions(+), 12 deletions(-)
> >>
> >> diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
> >> index 814bb2892f99..6c75df30a281 100644
> >> --- a/include/linux/gfp_types.h
> >> +++ b/include/linux/gfp_types.h
> >> @@ -139,6 +139,8 @@ enum {
> >> * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
> >> *
> >> * %__GFP_NO_OBJ_EXT causes slab allocation to have no object extension.
> >> + * mark_obj_codetag_empty() should be called upon freeing for objects allocated
> >> + * with this flag to indicate that their NULL tags are expected and normal.
> >> */
> >> #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
> >> #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
> >> diff --git a/mm/slab.h b/mm/slab.h
> >> index 71c7261bf822..f6ef862b60ef 100644
> >> --- a/mm/slab.h
> >> +++ b/mm/slab.h
> >> @@ -290,14 +290,14 @@ static inline void *nearest_obj(struct kmem_cache *cache,
> >>
> >> /* Determine object index from a given position */
> >> static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
> >> - void *addr, void *obj)
> >> + void *addr, const void *obj)
> >> {
> >> return reciprocal_divide(kasan_reset_tag(obj) - addr,
> >> cache->reciprocal_size);
> >> }
> >>
> >> static inline unsigned int obj_to_index(const struct kmem_cache *cache,
> >> - const struct slab *slab, void *obj)
> >> + const struct slab *slab, const void *obj)
> >> {
> >> if (is_kfence_address(obj))
> >> return 0;
> >> diff --git a/mm/slub.c b/mm/slub.c
> >> index 862642c165ed..34c32749f091 100644
> >> --- a/mm/slub.c
> >> +++ b/mm/slub.c
> >> @@ -2041,18 +2041,18 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
> >>
> >> #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
> >>
> >> -static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
> >> +static inline void mark_obj_codetag_empty(const void *obj)
> >> {
> >> - struct slab *obj_exts_slab;
> >> + struct slab *obj_slab;
> >> unsigned long slab_exts;
> >>
> >> - obj_exts_slab = virt_to_slab(obj_exts);
> >> - slab_exts = slab_obj_exts(obj_exts_slab);
> >> + obj_slab = virt_to_slab(obj);
> >> + slab_exts = slab_obj_exts(obj_slab);
> >> if (slab_exts) {
> >> get_slab_obj_exts(slab_exts);
> >> - unsigned int offs = obj_to_index(obj_exts_slab->slab_cache,
> >> - obj_exts_slab, obj_exts);
> >> - struct slabobj_ext *ext = slab_obj_ext(obj_exts_slab,
> >> + unsigned int offs = obj_to_index(obj_slab->slab_cache,
> >> + obj_slab, obj);
> >> + struct slabobj_ext *ext = slab_obj_ext(obj_slab,
> >> slab_exts, offs);
> >>
> >> if (unlikely(is_codetag_empty(&ext->ref))) {
> >> @@ -2090,7 +2090,7 @@ static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
> >>
> >> #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
> >>
> >> -static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {}
> >> +static inline void mark_obj_codetag_empty(const void *obj) {}
> >> static inline bool mark_failed_objexts_alloc(struct slab *slab) { return false; }
> >> static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
> >> struct slabobj_ext *vec, unsigned int objects) {}
> >> @@ -2211,7 +2211,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
> >> * assign slabobj_exts in parallel. In this case the existing
> >> * objcg vector should be reused.
> >> */
> >> - mark_objexts_empty(vec);
> >> + mark_obj_codetag_empty(vec);
> >> if (unlikely(!allow_spin))
> >> kfree_nolock(vec);
> >> else
> >> @@ -2254,7 +2254,7 @@ static inline void free_slab_obj_exts(struct slab *slab, bool allow_spin)
> >> * NULL, therefore replace NULL with CODETAG_EMPTY to indicate that
> >> * the extension for obj_exts is expected to be NULL.
> >> */
> >> - mark_objexts_empty(obj_exts);
> >> + mark_obj_codetag_empty(obj_exts);
> >> if (allow_spin)
> >> kfree(obj_exts);
> >> else
> >> @@ -2312,6 +2312,10 @@ static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
> >>
> >> #else /* CONFIG_SLAB_OBJ_EXT */
> >>
> >> +static inline void mark_obj_codetag_empty(const void *obj)
> >> +{
> >> +}
> >> +
> >> static inline void init_slab_obj_exts(struct slab *slab)
> >> {
> >> }
> >> @@ -2783,6 +2787,15 @@ static inline struct slab_sheaf *alloc_empty_sheaf(struct kmem_cache *s,
> >>
> >> static void free_empty_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf)
> >> {
> >> + /*
> >> + * If the sheaf was created with __GFP_NO_OBJ_EXT flag then its
> >> + * corresponding extension is NULL and alloc_tag_sub() will throw a
> >> + * warning, therefore replace NULL with CODETAG_EMPTY to indicate
> >> + * that the extension for this sheaf is expected to be NULL.
> >> + */
> >> + if (s->flags & SLAB_KMALLOC)
> >> + mark_obj_codetag_empty(sheaf);
> >> +
> >> kfree(sheaf);
> >>
> >> stat(s, SHEAF_FREE);
> >>
> >> base-commit: 7dff99b354601dd01829e1511711846e04340a69
> >> --
> >> 2.53.0.414.gf7e9f6c205-goog
> >>
>
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2026-02-25 21:29 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-02-25 16:34 [PATCH v3 1/1] mm/slab: mark alloc tags empty for sheaves allocated with __GFP_NO_OBJ_EXT Suren Baghdasaryan
2026-02-25 19:08 ` Suren Baghdasaryan
2026-02-25 21:23 ` Vlastimil Babka
2026-02-25 21:28 ` Suren Baghdasaryan
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox