linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: Harry Yoo <harry.yoo@oracle.com>
To: venkat88@linux.ibm.com
Cc: akpm@linux-foundation.org, ast@kernel.org,
	cgroups@vger.kernel.org, cl@gentwo.org, hannes@cmpxchg.org,
	hao.li@linux.dev, harry.yoo@oracle.com, linux-mm@kvack.org,
	mhocko@kernel.org, muchun.song@linux.dev, rientjes@google.com,
	roman.gushchin@linux.dev, shakeel.butt@linux.dev,
	surenb@google.com, vbabka@suse.cz
Subject: [PATCH] mm/slab: a debug patch to investigate the issue further
Date: Fri, 27 Feb 2026 12:07:33 +0900	[thread overview]
Message-ID: <20260227030733.9517-1-harry.yoo@oracle.com> (raw)
In-Reply-To: <84492f08-04c2-485c-9a18-cdafd5a9c3e5@linux.ibm.com>

Hi Venkat, could you please help test this patch and
check if it hits any warnings? It's based on the v7.0-rc1 tag.

This (hopefully) should give us more information
that would help debug the issue.

1. set stride early in alloc_slab_obj_exts_early()
2. move some obj_exts helpers to slab.h
3. in slab_obj_ext(), check three things:
   3-1. is the obj_ext address the right one for this object?
   3-2. does the obj_ext address change after smp_rmb()?
   3-3. does obj_ext->objcg change after smp_rmb()?

No smp_wmb() is used, intentionally.

It is expected that the issue will still reproduce.

Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
---
 mm/slab.h | 131 ++++++++++++++++++++++++++++++++++++++++++++++++++++--
 mm/slub.c | 100 ++---------------------------------------
 2 files changed, 130 insertions(+), 101 deletions(-)

diff --git a/mm/slab.h b/mm/slab.h
index 71c7261bf822..d1e44cd01ea1 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -578,6 +578,101 @@ static inline unsigned short slab_get_stride(struct slab *slab)
 }
 #endif
 
+#ifdef CONFIG_SLAB_OBJ_EXT
+
+/*
+ * Check if memory cgroup or memory allocation profiling is enabled.
+ * If enabled, SLUB tries to reduce memory overhead of accounting
+ * slab objects. If neither is enabled when this function is called,
+ * the optimization is simply skipped to avoid affecting caches that do not
+ * need slabobj_ext metadata.
+ *
+ * However, this may disable optimization when memory cgroup or memory
+ * allocation profiling is used, but slabs are created too early
+ * even before those subsystems are initialized.
+ */
+static inline bool need_slab_obj_exts(struct kmem_cache *s)
+{
+	if (s->flags & SLAB_NO_OBJ_EXT)
+		return false;
+
+	if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
+		return true;
+
+	if (mem_alloc_profiling_enabled())
+		return true;
+
+	return false;
+}
+
+static inline unsigned int obj_exts_size_in_slab(struct slab *slab)
+{
+	return sizeof(struct slabobj_ext) * slab->objects;
+}
+
+static inline unsigned long obj_exts_offset_in_slab(struct kmem_cache *s,
+						    struct slab *slab)
+{
+	unsigned long objext_offset;
+
+	objext_offset = s->size * slab->objects;
+	objext_offset = ALIGN(objext_offset, sizeof(struct slabobj_ext));
+	return objext_offset;
+}
+
+static inline bool obj_exts_fit_within_slab_leftover(struct kmem_cache *s,
+						     struct slab *slab)
+{
+	unsigned long objext_offset = obj_exts_offset_in_slab(s, slab);
+	unsigned long objext_size = obj_exts_size_in_slab(slab);
+
+	return objext_offset + objext_size <= slab_size(slab);
+}
+
+static inline bool obj_exts_in_slab(struct kmem_cache *s, struct slab *slab)
+{
+	unsigned long obj_exts;
+	unsigned long start;
+	unsigned long end;
+
+	obj_exts = slab_obj_exts(slab);
+	if (!obj_exts)
+		return false;
+
+	start = (unsigned long)slab_address(slab);
+	end = start + slab_size(slab);
+	return (obj_exts >= start) && (obj_exts < end);
+}
+#else
+static inline bool need_slab_obj_exts(struct kmem_cache *s)
+{
+	return false;
+}
+
+static inline unsigned int obj_exts_size_in_slab(struct slab *slab)
+{
+	return 0;
+}
+
+static inline unsigned long obj_exts_offset_in_slab(struct kmem_cache *s,
+						    struct slab *slab)
+{
+	return 0;
+}
+
+static inline bool obj_exts_fit_within_slab_leftover(struct kmem_cache *s,
+						     struct slab *slab)
+{
+	return false;
+}
+
+static inline bool obj_exts_in_slab(struct kmem_cache *s, struct slab *slab)
+{
+	return false;
+}
+
+#endif
+
 /*
  * slab_obj_ext - get the pointer to the slab object extension metadata
  * associated with an object in a slab.
@@ -592,13 +687,41 @@ static inline struct slabobj_ext *slab_obj_ext(struct slab *slab,
 					       unsigned long obj_exts,
 					       unsigned int index)
 {
-	struct slabobj_ext *obj_ext;
+	struct slabobj_ext *ext_before;
+	struct slabobj_ext *ext_after;
+	struct obj_cgroup *objcg_before;
+	struct obj_cgroup *objcg_after;
 
 	VM_WARN_ON_ONCE(obj_exts != slab_obj_exts(slab));
 
-	obj_ext = (struct slabobj_ext *)(obj_exts +
-					 slab_get_stride(slab) * index);
-	return kasan_reset_tag(obj_ext);
+	ext_before = (struct slabobj_ext *)(obj_exts +
+					    slab_get_stride(slab) * index);
+	objcg_before = ext_before->objcg;
+	// re-read things after rmb
+	smp_rmb();
+	// is ext_before the right obj_ext for this object?
+	if (obj_exts_in_slab(slab->slab_cache, slab)) {
+		struct kmem_cache *s = slab->slab_cache;
+
+		if (obj_exts_fit_within_slab_leftover(s, slab))
+			WARN(ext_before != (struct slabobj_ext *)(obj_exts + sizeof(struct slabobj_ext) * index),
+			     "obj_exts array in leftover");
+		else
+			WARN(ext_before != (struct slabobj_ext *)(obj_exts + s->size * index),
+			     "obj_ext in object");
+
+	} else {
+		WARN(ext_before != (struct slabobj_ext *)(obj_exts + sizeof(struct slabobj_ext) * index),
+		     "obj_exts array allocated from slab");
+	}
+
+	ext_after = (struct slabobj_ext *)(obj_exts +
+					   slab_get_stride(slab) * index);
+	objcg_after = ext_after->objcg;
+
+	WARN(ext_before != ext_after, "obj_ext pointer has changed");
+	WARN(objcg_before != objcg_after, "obj_ext->objcg has changed");
+	return kasan_reset_tag(ext_before);
 }
 
 int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
diff --git a/mm/slub.c b/mm/slub.c
index 862642c165ed..8eb64534370e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -757,101 +757,6 @@ static inline unsigned long get_orig_size(struct kmem_cache *s, void *object)
 	return *(unsigned long *)p;
 }
 
-#ifdef CONFIG_SLAB_OBJ_EXT
-
-/*
- * Check if memory cgroup or memory allocation profiling is enabled.
- * If enabled, SLUB tries to reduce memory overhead of accounting
- * slab objects. If neither is enabled when this function is called,
- * the optimization is simply skipped to avoid affecting caches that do not
- * need slabobj_ext metadata.
- *
- * However, this may disable optimization when memory cgroup or memory
- * allocation profiling is used, but slabs are created too early
- * even before those subsystems are initialized.
- */
-static inline bool need_slab_obj_exts(struct kmem_cache *s)
-{
-	if (s->flags & SLAB_NO_OBJ_EXT)
-		return false;
-
-	if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
-		return true;
-
-	if (mem_alloc_profiling_enabled())
-		return true;
-
-	return false;
-}
-
-static inline unsigned int obj_exts_size_in_slab(struct slab *slab)
-{
-	return sizeof(struct slabobj_ext) * slab->objects;
-}
-
-static inline unsigned long obj_exts_offset_in_slab(struct kmem_cache *s,
-						    struct slab *slab)
-{
-	unsigned long objext_offset;
-
-	objext_offset = s->size * slab->objects;
-	objext_offset = ALIGN(objext_offset, sizeof(struct slabobj_ext));
-	return objext_offset;
-}
-
-static inline bool obj_exts_fit_within_slab_leftover(struct kmem_cache *s,
-						     struct slab *slab)
-{
-	unsigned long objext_offset = obj_exts_offset_in_slab(s, slab);
-	unsigned long objext_size = obj_exts_size_in_slab(slab);
-
-	return objext_offset + objext_size <= slab_size(slab);
-}
-
-static inline bool obj_exts_in_slab(struct kmem_cache *s, struct slab *slab)
-{
-	unsigned long obj_exts;
-	unsigned long start;
-	unsigned long end;
-
-	obj_exts = slab_obj_exts(slab);
-	if (!obj_exts)
-		return false;
-
-	start = (unsigned long)slab_address(slab);
-	end = start + slab_size(slab);
-	return (obj_exts >= start) && (obj_exts < end);
-}
-#else
-static inline bool need_slab_obj_exts(struct kmem_cache *s)
-{
-	return false;
-}
-
-static inline unsigned int obj_exts_size_in_slab(struct slab *slab)
-{
-	return 0;
-}
-
-static inline unsigned long obj_exts_offset_in_slab(struct kmem_cache *s,
-						    struct slab *slab)
-{
-	return 0;
-}
-
-static inline bool obj_exts_fit_within_slab_leftover(struct kmem_cache *s,
-						     struct slab *slab)
-{
-	return false;
-}
-
-static inline bool obj_exts_in_slab(struct kmem_cache *s, struct slab *slab)
-{
-	return false;
-}
-
-#endif
-
 #if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT)
 static bool obj_exts_in_object(struct kmem_cache *s, struct slab *slab)
 {
@@ -2196,7 +2101,6 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 retry:
 	old_exts = READ_ONCE(slab->obj_exts);
 	handle_failed_objexts_alloc(old_exts, vec, objects);
-	slab_set_stride(slab, sizeof(struct slabobj_ext));
 
 	if (new_slab) {
 		/*
@@ -2272,6 +2176,9 @@ static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
 	void *addr;
 	unsigned long obj_exts;
 
+	/* Initialize stride early to avoid memory ordering issues */
+	slab_set_stride(slab, sizeof(struct slabobj_ext));
+
 	if (!need_slab_obj_exts(s))
 		return;
 
@@ -2288,7 +2195,6 @@ static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
 		obj_exts |= MEMCG_DATA_OBJEXTS;
 #endif
 		slab->obj_exts = obj_exts;
-		slab_set_stride(slab, sizeof(struct slabobj_ext));
 	} else if (s->flags & SLAB_OBJ_EXT_IN_OBJ) {
 		unsigned int offset = obj_exts_offset_in_object(s);
 
-- 
2.43.0



  parent reply	other threads:[~2026-02-27  3:07 UTC|newest]

Thread overview: 11+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-02-23  7:58 [PATCH] mm/slab: initialize slab->stride early to avoid memory ordering issues Harry Yoo
2026-02-23 11:44 ` Harry Yoo
2026-02-23 17:04   ` Vlastimil Babka
2026-02-23 20:23 ` Shakeel Butt
2026-02-24  9:04 ` Venkat Rao Bagalkote
2026-02-24 11:10   ` Harry Yoo
2026-02-25  9:14     ` Venkat Rao Bagalkote
2026-02-25 10:15       ` Harry Yoo
2026-02-27  3:07       ` Harry Yoo [this message]
2026-02-27  5:52         ` [PATCH] mm/slab: a debug patch to investigate the issue further kernel test robot
2026-02-27  6:02         ` kernel test robot

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260227030733.9517-1-harry.yoo@oracle.com \
    --to=harry.yoo@oracle.com \
    --cc=akpm@linux-foundation.org \
    --cc=ast@kernel.org \
    --cc=cgroups@vger.kernel.org \
    --cc=cl@gentwo.org \
    --cc=hannes@cmpxchg.org \
    --cc=hao.li@linux.dev \
    --cc=linux-mm@kvack.org \
    --cc=mhocko@kernel.org \
    --cc=muchun.song@linux.dev \
    --cc=rientjes@google.com \
    --cc=roman.gushchin@linux.dev \
    --cc=shakeel.butt@linux.dev \
    --cc=surenb@google.com \
    --cc=vbabka@suse.cz \
    --cc=venkat88@linux.ibm.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox