From: Harry Yoo <harry.yoo@oracle.com>
To: akpm@linux-foundation.org, vbabka@suse.cz
Cc: andreyknvl@gmail.com, cl@gentwo.org, dvyukov@google.com,
glider@google.com, hannes@cmpxchg.org, linux-mm@kvack.org,
mhocko@kernel.org, muchun.song@linux.dev, rientjes@google.com,
roman.gushchin@linux.dev, ryabinin.a.a@gmail.com,
shakeel.butt@linux.dev, surenb@google.com,
vincenzo.frascino@arm.com, yeoreum.yun@arm.com,
harry.yoo@oracle.com, tytso@mit.edu, adilger.kernel@dilger.ca,
linux-ext4@vger.kernel.org, linux-kernel@vger.kernel.org,
cgroups@vger.kernel.org, hao.li@linux.dev
Subject: [PATCH V6 8/9] mm/slab: move [__]ksize and slab_ksize() to mm/slub.c
Date: Tue, 13 Jan 2026 15:18:44 +0900
Message-ID: <20260113061845.159790-9-harry.yoo@oracle.com>
In-Reply-To: <20260113061845.159790-1-harry.yoo@oracle.com>
ksize() will need access to SLUB's internal implementation details
beyond cache flags, so move __ksize(), ksize(), and slab_ksize() out of
mm/slab.h and mm/slab_common.c into mm/slub.c. This prepares for the
next patch in the series, which places slabobj_ext metadata in the
unused space within s->size.
Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
---
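A note on the kerneldoc moved below: __ksize() is for internal use only;
callers that want to know the usable size of an allocation up front
should round the request with kmalloc_size_roundup() rather than probing
after the fact with ksize(). A minimal caller-side sketch of that
pattern (hypothetical names, not part of this patch):

	size_t bytes;
	u8 *buf;

	/* Round up front so every byte of the block may be used without
	 * tripping KASAN, UBSAN_BOUNDS or FORTIFY_SOURCE. */
	bytes = kmalloc_size_roundup(len);
	buf = kmalloc(bytes, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;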
mm/slab.h | 25 --------------
mm/slab_common.c | 61 ----------------------------------
mm/slub.c | 86 ++++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 86 insertions(+), 86 deletions(-)
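Similarly, the comment in ksize() below explains why it returns 0 for a
pointer KASAN considers invalid: it keeps callers from "growing" a
buffer in place over freed or corrupt memory. A hedged sketch of the
caller pattern being protected (hypothetical helper, not from this
series; assumes old_len <= new_len):

	static u8 *grow_zeroed(u8 *buf, size_t old_len, size_t new_len)
	{
		u8 *nbuf;

		/* ksize() returns 0 when KASAN flags buf as invalid, so
		 * the in-place path is never taken on bad memory. */
		if (new_len <= ksize(buf)) {
			memset(buf + old_len, 0, new_len - old_len);
			return buf;
		}

		nbuf = krealloc(buf, new_len, GFP_KERNEL);
		if (nbuf)
			memset(nbuf + old_len, 0, new_len - old_len);
		return nbuf;
	}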
diff --git a/mm/slab.h b/mm/slab.h
index 5176c762ec7c..957586d68b3c 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -665,31 +665,6 @@ void kvfree_rcu_cb(struct rcu_head *head);
size_t __ksize(const void *objp);
-static inline size_t slab_ksize(const struct kmem_cache *s)
-{
-#ifdef CONFIG_SLUB_DEBUG
- /*
- * Debugging requires use of the padding between object
- * and whatever may come after it.
- */
- if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
- return s->object_size;
-#endif
- if (s->flags & SLAB_KASAN)
- return s->object_size;
- /*
- * If we have the need to store the freelist pointer
- * back there or track user information then we can
- * only use the space before that information.
- */
- if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
- return s->inuse;
- /*
- * Else we can use all the padding etc for the allocation
- */
- return s->size;
-}
-
static inline unsigned int large_kmalloc_order(const struct page *page)
{
return page[1].flags.f & 0xff;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index c4cf9ed2ec92..aed91fd6fd10 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -983,43 +983,6 @@ void __init create_kmalloc_caches(void)
0, SLAB_NO_MERGE, NULL);
}
-/**
- * __ksize -- Report full size of underlying allocation
- * @object: pointer to the object
- *
- * This should only be used internally to query the true size of allocations.
- * It is not meant to be a way to discover the usable size of an allocation
- * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
- * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
- * and/or FORTIFY_SOURCE.
- *
- * Return: size of the actual memory used by @object in bytes
- */
-size_t __ksize(const void *object)
-{
- const struct page *page;
- const struct slab *slab;
-
- if (unlikely(object == ZERO_SIZE_PTR))
- return 0;
-
- page = virt_to_page(object);
-
- if (unlikely(PageLargeKmalloc(page)))
- return large_kmalloc_size(page);
-
- slab = page_slab(page);
- /* Delete this after we're sure there are no users */
- if (WARN_ON(!slab))
- return page_size(page);
-
-#ifdef CONFIG_SLUB_DEBUG
- skip_orig_size_check(slab->slab_cache, object);
-#endif
-
- return slab_ksize(slab->slab_cache);
-}
-
gfp_t kmalloc_fix_flags(gfp_t flags)
{
gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
@@ -1235,30 +1198,6 @@ void kfree_sensitive(const void *p)
}
EXPORT_SYMBOL(kfree_sensitive);
-size_t ksize(const void *objp)
-{
- /*
- * We need to first check that the pointer to the object is valid.
- * The KASAN report printed from ksize() is more useful, then when
- * it's printed later when the behaviour could be undefined due to
- * a potential use-after-free or double-free.
- *
- * We use kasan_check_byte(), which is supported for the hardware
- * tag-based KASAN mode, unlike kasan_check_read/write().
- *
- * If the pointed to memory is invalid, we return 0 to avoid users of
- * ksize() writing to and potentially corrupting the memory region.
- *
- * We want to perform the check before __ksize(), to avoid potentially
- * crashing in __ksize() due to accessing invalid metadata.
- */
- if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
- return 0;
-
- return kfence_ksize(objp) ?: __ksize(objp);
-}
-EXPORT_SYMBOL(ksize);
-
#ifdef CONFIG_BPF_SYSCALL
#include <linux/btf.h>
diff --git a/mm/slub.c b/mm/slub.c
index e4a4e01de42f..2b76f352c3b0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -6948,6 +6948,92 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
}
EXPORT_SYMBOL(kmem_cache_free);
+static inline size_t slab_ksize(const struct kmem_cache *s)
+{
+#ifdef CONFIG_SLUB_DEBUG
+ /*
+ * Debugging requires use of the padding between object
+ * and whatever may come after it.
+ */
+ if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
+ return s->object_size;
+#endif
+ if (s->flags & SLAB_KASAN)
+ return s->object_size;
+ /*
+ * If we have the need to store the freelist pointer
+ * back there or track user information then we can
+ * only use the space before that information.
+ */
+ if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
+ return s->inuse;
+ /*
+ * Else we can use all the padding etc for the allocation
+ */
+ return s->size;
+}
+
+/**
+ * __ksize -- Report full size of underlying allocation
+ * @object: pointer to the object
+ *
+ * This should only be used internally to query the true size of allocations.
+ * It is not meant to be a way to discover the usable size of an allocation
+ * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
+ * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
+ * and/or FORTIFY_SOURCE.
+ *
+ * Return: size of the actual memory used by @object in bytes
+ */
+size_t __ksize(const void *object)
+{
+ const struct page *page;
+ const struct slab *slab;
+
+ if (unlikely(object == ZERO_SIZE_PTR))
+ return 0;
+
+ page = virt_to_page(object);
+
+ if (unlikely(PageLargeKmalloc(page)))
+ return large_kmalloc_size(page);
+
+ slab = page_slab(page);
+ /* Delete this after we're sure there are no users */
+ if (WARN_ON(!slab))
+ return page_size(page);
+
+#ifdef CONFIG_SLUB_DEBUG
+ skip_orig_size_check(slab->slab_cache, object);
+#endif
+
+ return slab_ksize(slab->slab_cache);
+}
+
+size_t ksize(const void *objp)
+{
+ /*
+ * We need to first check that the pointer to the object is valid.
+ * The KASAN report printed from ksize() is more useful, then when
+ * it's printed later when the behaviour could be undefined due to
+ * a potential use-after-free or double-free.
+ *
+ * We use kasan_check_byte(), which is supported for the hardware
+ * tag-based KASAN mode, unlike kasan_check_read/write().
+ *
+ * If the pointed to memory is invalid, we return 0 to avoid users of
+ * ksize() writing to and potentially corrupting the memory region.
+ *
+ * We want to perform the check before __ksize(), to avoid potentially
+ * crashing in __ksize() due to accessing invalid metadata.
+ */
+ if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
+ return 0;
+
+ return kfence_ksize(objp) ?: __ksize(objp);
+}
+EXPORT_SYMBOL(ksize);
+
static void free_large_kmalloc(struct page *page, void *object)
{
unsigned int order = compound_order(page);
--
2.43.0
Thread overview: 10+ messages
2026-01-13 6:18 [PATCH V6 0/9] mm/slab: reduce slab accounting memory overhead by allocating slabobj_ext metadata within unused slab space Harry Yoo
2026-01-13 6:18 ` [PATCH V6 1/9] mm/slab: use unsigned long for orig_size to ensure proper metadata alignment Harry Yoo
2026-01-13 6:18 ` [PATCH V6 2/9] mm/slab: allow specifying free pointer offset when using constructor Harry Yoo
2026-01-13 6:18 ` [PATCH V6 3/9] ext4: specify the free pointer offset for ext4_inode_cache Harry Yoo
2026-01-13 6:18 ` [PATCH V6 4/9] mm/slab: abstract slabobj_ext access via new slab_obj_ext() helper Harry Yoo
2026-01-13 6:18 ` [PATCH V6 5/9] mm/slab: use stride to access slabobj_ext Harry Yoo
2026-01-13 6:18 ` [PATCH V6 6/9] mm/memcontrol,alloc_tag: handle slabobj_ext access under KASAN poison Harry Yoo
2026-01-13 6:18 ` [PATCH V6 7/9] mm/slab: save memory by allocating slabobj_ext array from leftover Harry Yoo
2026-01-13 6:18 ` [PATCH V6 8/9] mm/slab: move [__]ksize and slab_ksize() to mm/slub.c Harry Yoo [this message]
2026-01-13 6:18 ` [PATCH V6 9/9] mm/slab: place slabobj_ext metadata in unused space within s->size Harry Yoo