From: Vlastimil Babka <vbabka@suse.cz>
To: Harry Yoo <harry.yoo@oracle.com>,
Petr Tesarik <ptesarik@suse.com>,
Christoph Lameter <cl@gentwo.org>,
David Rientjes <rientjes@google.com>,
Roman Gushchin <roman.gushchin@linux.dev>
Cc: Hao Li <hao.li@linux.dev>,
Andrew Morton <akpm@linux-foundation.org>,
Uladzislau Rezki <urezki@gmail.com>,
"Liam R. Howlett" <Liam.Howlett@oracle.com>,
Suren Baghdasaryan <surenb@google.com>,
Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
Alexei Starovoitov <ast@kernel.org>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
linux-rt-devel@lists.linux.dev, bpf@vger.kernel.org,
kasan-dev@googlegroups.com, Vlastimil Babka <vbabka@suse.cz>
Subject: [PATCH v4 14/22] slab: remove defer_deactivate_slab()
Date: Fri, 23 Jan 2026 07:52:52 +0100
Message-ID: <20260123-sheaves-for-all-v4-14-041323d506f7@suse.cz>
In-Reply-To: <20260123-sheaves-for-all-v4-0-041323d506f7@suse.cz>
There are no more cpu slabs, so we don't need their deferred
deactivation. The function is now only used from places where we
allocate a new slab but then can't spin on the node's list_lock to put
it on the partial list. Instead of deferring the action, we can free
the slab directly via __free_slab(); we just need to tell it to use
_nolock() freeing of the underlying pages, and take care of the
accounting.
Since a free_frozen_pages_nolock() variant does not yet exist for code
outside of the page allocator, create it as a trivial wrapper around
__free_frozen_pages(..., FPI_TRYLOCK).
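For reviewers stitching the change together from the individual hunks
below, the resulting failure path is roughly the following condensed
sketch (all of this code appears verbatim in the patch; nothing new is
introduced here):

  /* page allocator: trivial wrapper, now available outside page_alloc.c */
  void free_frozen_pages_nolock(struct page *page, unsigned int order)
  {
  	__free_frozen_pages(page, order, FPI_TRYLOCK);
  }

  /* slub: free a freshly allocated slab without spinning on list_lock */
  static void free_new_slab_nolock(struct kmem_cache *s, struct slab *slab)
  {
  	/*
  	 * Since it was just allocated, we can skip the actions in
  	 * discard_slab() and free_slab().
  	 */
  	__free_slab(s, slab, false);	/* allow_spin == false */
  }

  /* callers such as alloc_single_from_new_slab(): */
  if (!allow_spin && !spin_trylock_irqsave(&n->list_lock, flags)) {
  	/* Unlucky, discard newly allocated slab. */
  	free_new_slab_nolock(s, slab);
  	return NULL;
  }

With allow_spin == false, __free_slab() performs its usual accounting
but frees the underlying pages via free_frozen_pages_nolock() instead
of free_frozen_pages().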
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Reviewed-by: Hao Li <hao.li@linux.dev>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
mm/internal.h | 1 +
mm/page_alloc.c | 5 +++++
mm/slab.h | 8 +-------
mm/slub.c | 58 +++++++++++++++++++++------------------------------------
4 files changed, 28 insertions(+), 44 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index e430da900430..1f44ccb4badf 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -846,6 +846,7 @@ static inline struct page *alloc_frozen_pages_noprof(gfp_t gfp, unsigned int ord
struct page *alloc_frozen_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order);
#define alloc_frozen_pages_nolock(...) \
alloc_hooks(alloc_frozen_pages_nolock_noprof(__VA_ARGS__))
+void free_frozen_pages_nolock(struct page *page, unsigned int order);
extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c380f063e8b7..0127e9d661ad 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2981,6 +2981,11 @@ void free_frozen_pages(struct page *page, unsigned int order)
__free_frozen_pages(page, order, FPI_NONE);
}
+void free_frozen_pages_nolock(struct page *page, unsigned int order)
+{
+ __free_frozen_pages(page, order, FPI_TRYLOCK);
+}
+
/*
* Free a batch of folios
*/
diff --git a/mm/slab.h b/mm/slab.h
index 0fbe13bec864..37090a7dffb6 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -71,13 +71,7 @@ struct slab {
struct kmem_cache *slab_cache;
union {
struct {
- union {
- struct list_head slab_list;
- struct { /* For deferred deactivate_slab() */
- struct llist_node llnode;
- void *flush_freelist;
- };
- };
+ struct list_head slab_list;
/* Double-word boundary */
struct freelist_counters;
};
diff --git a/mm/slub.c b/mm/slub.c
index a63a0eed2c55..82950c2bc26d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3262,7 +3262,7 @@ static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node)
flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
}
-static void __free_slab(struct kmem_cache *s, struct slab *slab)
+static void __free_slab(struct kmem_cache *s, struct slab *slab, bool allow_spin)
{
struct page *page = slab_page(slab);
int order = compound_order(page);
@@ -3273,14 +3273,26 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
__ClearPageSlab(page);
mm_account_reclaimed_pages(pages);
unaccount_slab(slab, order, s);
- free_frozen_pages(page, order);
+ if (allow_spin)
+ free_frozen_pages(page, order);
+ else
+ free_frozen_pages_nolock(page, order);
+}
+
+static void free_new_slab_nolock(struct kmem_cache *s, struct slab *slab)
+{
+ /*
+ * Since it was just allocated, we can skip the actions in
+ * discard_slab() and free_slab().
+ */
+ __free_slab(s, slab, false);
}
static void rcu_free_slab(struct rcu_head *h)
{
struct slab *slab = container_of(h, struct slab, rcu_head);
- __free_slab(slab->slab_cache, slab);
+ __free_slab(slab->slab_cache, slab, true);
}
static void free_slab(struct kmem_cache *s, struct slab *slab)
@@ -3296,7 +3308,7 @@ static void free_slab(struct kmem_cache *s, struct slab *slab)
if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU))
call_rcu(&slab->rcu_head, rcu_free_slab);
else
- __free_slab(s, slab);
+ __free_slab(s, slab, true);
}
static void discard_slab(struct kmem_cache *s, struct slab *slab)
@@ -3389,8 +3401,6 @@ static void *alloc_single_from_partial(struct kmem_cache *s,
return object;
}
-static void defer_deactivate_slab(struct slab *slab, void *flush_freelist);
-
/*
* Called only for kmem_cache_debug() caches to allocate from a freshly
* allocated slab. Allocate a single object instead of whole freelist
@@ -3406,8 +3416,8 @@ static void *alloc_single_from_new_slab(struct kmem_cache *s, struct slab *slab,
void *object;
if (!allow_spin && !spin_trylock_irqsave(&n->list_lock, flags)) {
- /* Unlucky, discard newly allocated slab */
- defer_deactivate_slab(slab, NULL);
+ /* Unlucky, discard newly allocated slab. */
+ free_new_slab_nolock(s, slab);
return NULL;
}
@@ -4279,7 +4289,7 @@ static unsigned int alloc_from_new_slab(struct kmem_cache *s, struct slab *slab,
if (!spin_trylock_irqsave(&n->list_lock, flags)) {
/* Unlucky, discard newly allocated slab */
- defer_deactivate_slab(slab, NULL);
+ free_new_slab_nolock(s, slab);
return 0;
}
}
@@ -6056,7 +6066,6 @@ static void free_to_pcs_bulk(struct kmem_cache *s, size_t size, void **p)
struct defer_free {
struct llist_head objects;
- struct llist_head slabs;
struct irq_work work;
};
@@ -6064,23 +6073,21 @@ static void free_deferred_objects(struct irq_work *work);
static DEFINE_PER_CPU(struct defer_free, defer_free_objects) = {
.objects = LLIST_HEAD_INIT(objects),
- .slabs = LLIST_HEAD_INIT(slabs),
.work = IRQ_WORK_INIT(free_deferred_objects),
};
/*
* In PREEMPT_RT irq_work runs in per-cpu kthread, so it's safe
- * to take sleeping spin_locks from __slab_free() and deactivate_slab().
+ * to take sleeping spin_locks from __slab_free().
* In !PREEMPT_RT irq_work will run after local_unlock_irqrestore().
*/
static void free_deferred_objects(struct irq_work *work)
{
struct defer_free *df = container_of(work, struct defer_free, work);
struct llist_head *objs = &df->objects;
- struct llist_head *slabs = &df->slabs;
struct llist_node *llnode, *pos, *t;
- if (llist_empty(objs) && llist_empty(slabs))
+ if (llist_empty(objs))
return;
llnode = llist_del_all(objs);
@@ -6104,16 +6111,6 @@ static void free_deferred_objects(struct irq_work *work)
__slab_free(s, slab, x, x, 1, _THIS_IP_);
}
-
- llnode = llist_del_all(slabs);
- llist_for_each_safe(pos, t, llnode) {
- struct slab *slab = container_of(pos, struct slab, llnode);
-
- if (slab->frozen)
- deactivate_slab(slab->slab_cache, slab, slab->flush_freelist);
- else
- free_slab(slab->slab_cache, slab);
- }
}
static void defer_free(struct kmem_cache *s, void *head)
@@ -6129,19 +6126,6 @@ static void defer_free(struct kmem_cache *s, void *head)
irq_work_queue(&df->work);
}
-static void defer_deactivate_slab(struct slab *slab, void *flush_freelist)
-{
- struct defer_free *df;
-
- slab->flush_freelist = flush_freelist;
-
- guard(preempt)();
-
- df = this_cpu_ptr(&defer_free_objects);
- if (llist_add(&slab->llnode, &df->slabs))
- irq_work_queue(&df->work);
-}
-
void defer_free_barrier(void)
{
int cpu;
--
2.52.0