From: Harry Yoo <harry.yoo@oracle.com>
To: surenb@google.com
Cc: Liam.Howlett@oracle.com, atomlin@atomlin.com,
	bpf@vger.kernel.org, cl@gentwo.org, da.gomez@kernel.org,
	harry.yoo@oracle.com, linux-kernel@vger.kernel.org,
	linux-mm@kvack.org, linux-modules@vger.kernel.org,
	lucas.demarchi@intel.com, maple-tree@lists.infradead.org,
	mcgrof@kernel.org, petr.pavlu@suse.com, rcu@vger.kernel.org,
	rientjes@google.com, roman.gushchin@linux.dev,
	samitolvanen@google.com, sidhartha.kumar@oracle.com,
	urezki@gmail.com, vbabka@suse.cz, jonathanh@nvidia.com
Subject: [PATCH V1] mm/slab: introduce kvfree_rcu_barrier_on_cache() for cache destruction
Date: Fri, 28 Nov 2025 20:37:40 +0900
Message-ID: <20251128113740.90129-1-harry.yoo@oracle.com>
In-Reply-To: <CAJuCfpFTMQD6oyR_Q1ds7XL4Km7h2mmzSv4z7f5fFnQ14=+g_A@mail.gmail.com>

Currently, destroying a slab cache calls kvfree_rcu_barrier(), which
flushes RCU sheaves across all slab caches. This is unnecessary: only
the RCU sheaves belonging to the cache being destroyed need to be
flushed.

As suggested by Vlastimil Babka, introduce a weaker form of
kvfree_rcu_barrier() that operates on a specific slab cache and call it
on cache destruction.
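
As an illustration, here is a minimal sketch of the caller pattern this
affects (the my_cache / my_obj names are hypothetical and not part of
this patch): a module frees objects into its own cache with kfree_rcu()
and relies on kmem_cache_destroy() to wait for them at module exit.
With this patch, destroying the cache flushes only that cache's RCU
sheaves instead of every cache's:

  struct my_obj {
  	struct rcu_head rcu;
  	long payload;
  };

  static struct kmem_cache *my_cache;

  static void my_obj_free(struct my_obj *obj)
  {
  	/* May be batched into my_cache's rcu_free sheaf. */
  	kfree_rcu(obj, rcu);
  }

  static void __exit my_module_exit(void)
  {
  	/*
  	 * Internally does kvfree_rcu_barrier_on_cache(my_cache), so
  	 * only in-flight kfree_rcu() objects of my_cache are waited on.
  	 */
  	kmem_cache_destroy(my_cache);
  }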

The performance benefit was evaluated on a 12-core, 24-thread AMD Ryzen
9 5900X machine (1 socket) by loading the slub_kunit module.

Before:
  Total calls: 19
  Average latency (us): 8529
  Total time (us): 162069

After:
  Total calls: 19
  Average latency (us): 3804
  Total time (us): 72287
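
(The numbers above come from ad-hoc instrumentation; a rough sketch of
one way to collect them, using a hypothetical ktime-based wrapper that
is not part of this patch:)

  static u64 barrier_calls, barrier_total_us;

  static void timed_barrier(struct kmem_cache *s)
  {
  	ktime_t start = ktime_get();

  	kvfree_rcu_barrier_on_cache(s);

  	barrier_total_us += ktime_us_delta(ktime_get(), start);
  	barrier_calls++;
  	pr_info("calls=%llu avg_us=%llu total_us=%llu\n",
  		barrier_calls, barrier_total_us / barrier_calls,
  		barrier_total_us);
  }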

Link: https://lore.kernel.org/linux-mm/0406562e-2066-4cf8-9902-b2b0616dd742@kernel.org
Link: https://lore.kernel.org/linux-mm/e988eff6-1287-425e-a06c-805af5bbf262@nvidia.com
Link: https://lore.kernel.org/linux-mm/1bda09da-93be-4737-aef0-d47f8c5c9301@suse.cz
Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
---

I'm not sure whether the regression is worse on the reporters' machines
because of a higher core count, or because some cores were busy doing
other things.

Hopefully this will reduce the time it takes to complete the tests,
and Suren can add his patch on top of this ;)

 include/linux/slab.h |  5 ++++
 mm/slab.h            |  1 +
 mm/slab_common.c     | 52 +++++++++++++++++++++++++++++------------
 mm/slub.c            | 55 ++++++++++++++++++++++++--------------------
 4 files changed, 73 insertions(+), 40 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index cf443f064a66..937c93d44e8c 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -1149,6 +1149,10 @@ static inline void kvfree_rcu_barrier(void)
 {
 	rcu_barrier();
 }
+static inline void kvfree_rcu_barrier_on_cache(struct kmem_cache *s)
+{
+	rcu_barrier();
+}
 
 static inline void kfree_rcu_scheduler_running(void) { }
 #else
@@ -1156,6 +1160,7 @@ void kvfree_rcu_barrier(void);
 
 void kfree_rcu_scheduler_running(void);
 #endif
+void kvfree_rcu_barrier_on_cache(struct kmem_cache *s);
 
 /**
  * kmalloc_size_roundup - Report allocation bucket size for the given size
diff --git a/mm/slab.h b/mm/slab.h
index f730e012553c..e767aa7e91b0 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -422,6 +422,7 @@ static inline bool is_kmalloc_normal(struct kmem_cache *s)
 
 bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj);
 void flush_all_rcu_sheaves(void);
+void flush_rcu_sheaves_on_cache(struct kmem_cache *s);
 
 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
 			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 84dfff4f7b1f..dd8a49d6f9cc 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -492,7 +492,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
 		return;
 
 	/* in-flight kfree_rcu()'s may include objects from our cache */
-	kvfree_rcu_barrier();
+	kvfree_rcu_barrier_on_cache(s);
 
 	if (IS_ENABLED(CONFIG_SLUB_RCU_DEBUG) &&
 	    (s->flags & SLAB_TYPESAFE_BY_RCU)) {
@@ -2038,25 +2038,13 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
 }
 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
 
-/**
- * kvfree_rcu_barrier - Wait until all in-flight kvfree_rcu() complete.
- *
- * Note that a single argument of kvfree_rcu() call has a slow path that
- * triggers synchronize_rcu() following by freeing a pointer. It is done
- * before the return from the function. Therefore for any single-argument
- * call that will result in a kfree() to a cache that is to be destroyed
- * during module exit, it is developer's responsibility to ensure that all
- * such calls have returned before the call to kmem_cache_destroy().
- */
-void kvfree_rcu_barrier(void)
+static inline void __kvfree_rcu_barrier(void)
 {
 	struct kfree_rcu_cpu_work *krwp;
 	struct kfree_rcu_cpu *krcp;
 	bool queued;
 	int i, cpu;
 
-	flush_all_rcu_sheaves();
-
 	/*
 	 * Firstly we detach objects and queue them over an RCU-batch
 	 * for all CPUs. Finally queued works are flushed for each CPU.
@@ -2118,8 +2106,43 @@ void kvfree_rcu_barrier(void)
 		}
 	}
 }
+
+/**
+ * kvfree_rcu_barrier - Wait until all in-flight kvfree_rcu() complete.
+ *
+ * Note that a single-argument kvfree_rcu() call has a slow path that
+ * triggers synchronize_rcu() followed by freeing the pointer. This is
+ * done before the function returns. Therefore, for any single-argument
+ * call that will result in a kfree() to a cache that is to be destroyed
+ * during module exit, it is the developer's responsibility to ensure
+ * that all such calls have returned before calling kmem_cache_destroy().
+ */
+void kvfree_rcu_barrier(void)
+{
+	flush_all_rcu_sheaves();
+	__kvfree_rcu_barrier();
+}
 EXPORT_SYMBOL_GPL(kvfree_rcu_barrier);
 
+/**
+ * kvfree_rcu_barrier_on_cache - Wait for in-flight kvfree_rcu() calls on a
+ *                               specific slab cache.
+ * @s: slab cache to wait for
+ *
+ * See the description of kvfree_rcu_barrier() for details.
+ */
+void kvfree_rcu_barrier_on_cache(struct kmem_cache *s)
+{
+	if (s->cpu_sheaves)
+		flush_rcu_sheaves_on_cache(s);
+	/*
+	 * TODO: Introduce a version of __kvfree_rcu_barrier() that works
+	 * on a specific slab cache.
+	 */
+	__kvfree_rcu_barrier();
+}
+EXPORT_SYMBOL_GPL(kvfree_rcu_barrier_on_cache);
+
 static unsigned long
 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
@@ -2215,4 +2238,3 @@ void __init kvfree_rcu_init(void)
 }
 
 #endif /* CONFIG_KVFREE_RCU_BATCHED */
-
diff --git a/mm/slub.c b/mm/slub.c
index 785e25a14999..7cec2220712b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4118,42 +4118,47 @@ static void flush_rcu_sheaf(struct work_struct *w)
 
 
 /* needed for kvfree_rcu_barrier() */
-void flush_all_rcu_sheaves(void)
+void flush_rcu_sheaves_on_cache(struct kmem_cache *s)
 {
 	struct slub_flush_work *sfw;
-	struct kmem_cache *s;
 	unsigned int cpu;
 
-	cpus_read_lock();
-	mutex_lock(&slab_mutex);
+	mutex_lock(&flush_lock);
 
-	list_for_each_entry(s, &slab_caches, list) {
-		if (!s->cpu_sheaves)
-			continue;
+	for_each_online_cpu(cpu) {
+		sfw = &per_cpu(slub_flush, cpu);
 
-		mutex_lock(&flush_lock);
+		/*
+		 * We don't check if an rcu_free sheaf exists - a racing
+		 * __kfree_rcu_sheaf() might have just removed it.
+		 * By executing flush_rcu_sheaf() on the CPU we make
+		 * sure the __kfree_rcu_sheaf() finished its call_rcu().
+		 */
 
-		for_each_online_cpu(cpu) {
-			sfw = &per_cpu(slub_flush, cpu);
+		INIT_WORK(&sfw->work, flush_rcu_sheaf);
+		sfw->s = s;
+		queue_work_on(cpu, flushwq, &sfw->work);
+	}
 
-			/*
-			 * we don't check if rcu_free sheaf exists - racing
-			 * __kfree_rcu_sheaf() might have just removed it.
-			 * by executing flush_rcu_sheaf() on the cpu we make
-			 * sure the __kfree_rcu_sheaf() finished its call_rcu()
-			 */
+	for_each_online_cpu(cpu) {
+		sfw = &per_cpu(slub_flush, cpu);
+		flush_work(&sfw->work);
+	}
 
-			INIT_WORK(&sfw->work, flush_rcu_sheaf);
-			sfw->s = s;
-			queue_work_on(cpu, flushwq, &sfw->work);
-		}
+	mutex_unlock(&flush_lock);
+}
 
-		for_each_online_cpu(cpu) {
-			sfw = &per_cpu(slub_flush, cpu);
-			flush_work(&sfw->work);
-		}
+void flush_all_rcu_sheaves(void)
+{
+	struct kmem_cache *s;
+
+	cpus_read_lock();
+	mutex_lock(&slab_mutex);
 
-		mutex_unlock(&flush_lock);
+	list_for_each_entry(s, &slab_caches, list) {
+		if (!s->cpu_sheaves)
+			continue;
+		flush_rcu_sheaves_on_cache(s);
 	}
 
 	mutex_unlock(&slab_mutex);
-- 
2.43.0


