From: Vlastimil Babka <vbabka@suse.cz>
To: Harry Yoo <harry.yoo@oracle.com>,
Petr Tesarik <ptesarik@suse.com>,
Christoph Lameter <cl@gentwo.org>,
David Rientjes <rientjes@google.com>,
Roman Gushchin <roman.gushchin@linux.dev>
Cc: Hao Li <hao.li@linux.dev>,
Andrew Morton <akpm@linux-foundation.org>,
Uladzislau Rezki <urezki@gmail.com>,
"Liam R. Howlett" <Liam.Howlett@oracle.com>,
Suren Baghdasaryan <surenb@google.com>,
Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
Alexei Starovoitov <ast@kernel.org>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
linux-rt-devel@lists.linux.dev, bpf@vger.kernel.org,
kasan-dev@googlegroups.com, Vlastimil Babka <vbabka@suse.cz>
Subject: [PATCH RFC v2 07/20] slab: handle kmalloc sheaves bootstrap
Date: Mon, 12 Jan 2026 16:17:01 +0100 [thread overview]
Message-ID: <20260112-sheaves-for-all-v2-7-98225cfb50cf@suse.cz> (raw)
In-Reply-To: <20260112-sheaves-for-all-v2-0-98225cfb50cf@suse.cz>
Enable sheaves for kmalloc caches. For other types than KMALLOC_NORMAL,
we can simply allow them in calculate_sizes() as they are created later
than KMALLOC_NORMAL caches and can allocate sheaves and barns from
those.
For KMALLOC_NORMAL caches we perform an additional step after first
creating them without sheaves. Then bootstrap_cache_sheaves() simply
allocates and initializes barns and sheaves and finally sets
s->sheaf_capacity to make them actually used.
Afterwards the only caches left without sheaves (unless SLUB_TINY or
debugging is enabled) are kmem_cache and kmem_cache_node. These are only
used when creating or destroying other kmem_caches. Thus they are not
performance critical and we can simply leave it that way.
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
mm/slub.c | 88 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 84 insertions(+), 4 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 0177a654a06a..f2de44f8bda4 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2593,7 +2593,8 @@ static void *setup_object(struct kmem_cache *s, void *object)
return object;
}
-static struct slab_sheaf *alloc_empty_sheaf(struct kmem_cache *s, gfp_t gfp)
+static struct slab_sheaf *__alloc_empty_sheaf(struct kmem_cache *s, gfp_t gfp,
+ unsigned int capacity)
{
struct slab_sheaf *sheaf;
size_t sheaf_size;
@@ -2611,7 +2612,7 @@ static struct slab_sheaf *alloc_empty_sheaf(struct kmem_cache *s, gfp_t gfp)
if (s->flags & SLAB_KMALLOC)
gfp |= __GFP_NO_OBJ_EXT;
- sheaf_size = struct_size(sheaf, objects, s->sheaf_capacity);
+ sheaf_size = struct_size(sheaf, objects, capacity);
sheaf = kzalloc(sheaf_size, gfp);
if (unlikely(!sheaf))
@@ -2624,6 +2625,12 @@ static struct slab_sheaf *alloc_empty_sheaf(struct kmem_cache *s, gfp_t gfp)
return sheaf;
}
+static inline struct slab_sheaf *alloc_empty_sheaf(struct kmem_cache *s,
+ gfp_t gfp)
+{
+ return __alloc_empty_sheaf(s, gfp, s->sheaf_capacity);
+}
+
static void free_empty_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf)
{
kfree(sheaf);
@@ -8117,8 +8124,11 @@ static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s)
if (s->flags & SLAB_RECLAIM_ACCOUNT)
s->allocflags |= __GFP_RECLAIMABLE;
- /* kmalloc caches need extra care to support sheaves */
- if (!is_kmalloc_cache(s))
+ /*
+ * For KMALLOC_NORMAL caches we enable sheaves later by
+ * bootstrap_kmalloc_sheaves() to avoid recursion
+ */
+ if (!is_kmalloc_normal(s))
s->sheaf_capacity = calculate_sheaf_capacity(s, args);
/*
@@ -8613,6 +8623,74 @@ static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
return s;
}
+/*
+ * Finish the sheaves initialization done normally by init_percpu_sheaves() and
+ * init_kmem_cache_nodes(). For normal kmalloc caches we have to bootstrap it
+ * since sheaves and barns are allocated by kmalloc.
+ */
+static void __init bootstrap_cache_sheaves(struct kmem_cache *s)
+{
+ struct kmem_cache_args empty_args = {};
+ unsigned int capacity;
+ bool failed = false;
+ int node, cpu;
+
+ capacity = calculate_sheaf_capacity(s, &empty_args);
+
+ /* capacity can be 0 due to debugging or SLUB_TINY */
+ if (!capacity)
+ return;
+
+ for_each_node_mask(node, slab_nodes) {
+ struct node_barn *barn;
+
+ barn = kmalloc_node(sizeof(*barn), GFP_KERNEL, node);
+
+ if (!barn) {
+ failed = true;
+ goto out;
+ }
+
+ barn_init(barn);
+ get_node(s, node)->barn = barn;
+ }
+
+ for_each_possible_cpu(cpu) {
+ struct slub_percpu_sheaves *pcs;
+
+ pcs = per_cpu_ptr(s->cpu_sheaves, cpu);
+
+ pcs->main = __alloc_empty_sheaf(s, GFP_KERNEL, capacity);
+
+ if (!pcs->main) {
+ failed = true;
+ break;
+ }
+ }
+
+out:
+ /*
+	 * It's still early in boot so treat this the same as a failure to
+	 * create the kmalloc cache in the first place.
+ */
+ if (failed)
+ panic("Out of memory when creating kmem_cache %s\n", s->name);
+
+ s->sheaf_capacity = capacity;
+}
+
+static void __init bootstrap_kmalloc_sheaves(void)
+{
+ enum kmalloc_cache_type type;
+
+ for (type = KMALLOC_NORMAL; type <= KMALLOC_RANDOM_END; type++) {
+ for (int idx = 0; idx < KMALLOC_SHIFT_HIGH + 1; idx++) {
+ if (kmalloc_caches[type][idx])
+ bootstrap_cache_sheaves(kmalloc_caches[type][idx]);
+ }
+ }
+}
+
void __init kmem_cache_init(void)
{
static __initdata struct kmem_cache boot_kmem_cache,
@@ -8656,6 +8734,8 @@ void __init kmem_cache_init(void)
setup_kmalloc_cache_index_table();
create_kmalloc_caches();
+ bootstrap_kmalloc_sheaves();
+
/* Setup random freelists for each cache */
init_freelist_randomization();
--
2.52.0
next prev parent reply other threads:[~2026-01-12 15:17 UTC|newest]
Thread overview: 22+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-01-12 15:16 [PATCH RFC v2 00/20] slab: replace cpu (partial) slabs with sheaves Vlastimil Babka
2026-01-12 15:16 ` [PATCH RFC v2 01/20] mm/slab: add rcu_barrier() to kvfree_rcu_barrier_on_cache() Vlastimil Babka
2026-01-12 15:16 ` [PATCH RFC v2 02/20] mm/slab: move and refactor __kmem_cache_alias() Vlastimil Babka
2026-01-12 15:16 ` [PATCH RFC v2 03/20] mm/slab: make caches with sheaves mergeable Vlastimil Babka
2026-01-12 15:16 ` [PATCH RFC v2 04/20] slab: add sheaves to most caches Vlastimil Babka
2026-01-12 15:16 ` [PATCH RFC v2 05/20] slab: introduce percpu sheaves bootstrap Vlastimil Babka
2026-01-12 15:17 ` [PATCH RFC v2 06/20] slab: make percpu sheaves compatible with kmalloc_nolock()/kfree_nolock() Vlastimil Babka
2026-01-12 15:17 ` Vlastimil Babka [this message]
2026-01-12 15:17 ` [PATCH RFC v2 08/20] slab: add optimized sheaf refill from partial list Vlastimil Babka
2026-01-12 15:17 ` [PATCH RFC v2 09/20] slab: remove cpu (partial) slabs usage from allocation paths Vlastimil Babka
2026-01-12 15:17 ` [PATCH RFC v2 10/20] slab: remove SLUB_CPU_PARTIAL Vlastimil Babka
2026-01-12 15:17 ` [PATCH RFC v2 11/20] slab: remove the do_slab_free() fastpath Vlastimil Babka
2026-01-12 15:17 ` [PATCH RFC v2 12/20] slab: remove defer_deactivate_slab() Vlastimil Babka
2026-01-12 15:17 ` [PATCH RFC v2 13/20] slab: simplify kmalloc_nolock() Vlastimil Babka
2026-01-12 15:17 ` [PATCH RFC v2 14/20] slab: remove struct kmem_cache_cpu Vlastimil Babka
2026-01-12 15:17 ` [PATCH RFC v2 15/20] slab: remove unused PREEMPT_RT specific macros Vlastimil Babka
2026-01-12 15:17 ` [PATCH RFC v2 16/20] slab: refill sheaves from all nodes Vlastimil Babka
2026-01-12 15:17 ` [PATCH RFC v2 17/20] slab: update overview comments Vlastimil Babka
2026-01-12 15:17 ` [PATCH RFC v2 18/20] slab: remove frozen slab checks from __slab_free() Vlastimil Babka
2026-01-12 15:17 ` [PATCH RFC v2 19/20] mm/slub: remove DEACTIVATE_TO_* stat items Vlastimil Babka
2026-01-12 15:17 ` [PATCH RFC v2 20/20] mm/slub: cleanup and repurpose some " Vlastimil Babka
2026-01-12 15:20 ` [PATCH v2 00/20] slab: replace cpu (partial) slabs with sheaves Vlastimil Babka
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260112-sheaves-for-all-v2-7-98225cfb50cf@suse.cz \
--to=vbabka@suse.cz \
--cc=Liam.Howlett@oracle.com \
--cc=akpm@linux-foundation.org \
--cc=ast@kernel.org \
--cc=bigeasy@linutronix.de \
--cc=bpf@vger.kernel.org \
--cc=cl@gentwo.org \
--cc=hao.li@linux.dev \
--cc=harry.yoo@oracle.com \
--cc=kasan-dev@googlegroups.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux-rt-devel@lists.linux.dev \
--cc=ptesarik@suse.com \
--cc=rientjes@google.com \
--cc=roman.gushchin@linux.dev \
--cc=surenb@google.com \
--cc=urezki@gmail.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox