From: Vlastimil Babka <vbabka@suse.cz>
To: Andrew Morton <akpm@linux-foundation.org>,
	 Christoph Lameter <cl@gentwo.org>,
	David Rientjes <rientjes@google.com>,
	 Roman Gushchin <roman.gushchin@linux.dev>,
	Harry Yoo <harry.yoo@oracle.com>
Cc: Uladzislau Rezki <urezki@gmail.com>,
	 "Liam R. Howlett" <Liam.Howlett@oracle.com>,
	 Suren Baghdasaryan <surenb@google.com>,
	 Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	 Alexei Starovoitov <ast@kernel.org>,
	linux-mm@kvack.org,  linux-kernel@vger.kernel.org,
	linux-rt-devel@lists.linux.dev,  bpf@vger.kernel.org,
	kasan-dev@googlegroups.com,  Vlastimil Babka <vbabka@suse.cz>
Subject: [PATCH RFC 02/19] slab: handle pfmemalloc slabs properly with sheaves
Date: Thu, 23 Oct 2025 15:52:24 +0200	[thread overview]
Message-ID: <20251023-sheaves-for-all-v1-2-6ffa2c9941c0@suse.cz> (raw)
In-Reply-To: <20251023-sheaves-for-all-v1-0-6ffa2c9941c0@suse.cz>

When a pfmemalloc allocation actually dips into memory reserves, the
slab is marked accordingly and non-pfmemalloc allocations should not be
allowed to allocate from it. The percpu sheaves caching currently
doesn't follow this rule, so implement it before we expand sheaves
usage to all caches.
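
As a condensed sketch of the rule (may_alloc_from() is a hypothetical
helper for illustration only; slab_test_pfmemalloc() and
gfp_pfmemalloc_allowed() are the existing mm helpers):

	/* pfmemalloc slabs may only serve pfmemalloc-allowed contexts */
	static inline bool may_alloc_from(struct slab *slab, gfp_t gfpflags)
	{
		if (slab_test_pfmemalloc(slab))
			return gfp_pfmemalloc_allowed(gfpflags);

		return true;
	}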

Make sure objects from pfmemalloc slabs don't end up in percpu sheaves.
When freeing an object from a pfmemalloc slab, bypass the sheaves. When
refilling sheaves, use __GFP_NOMEMALLOC to override any pfmemalloc
context - the allocation will fall back to regular slab allocations
when sheaves are depleted and can't be refilled because of the
override. In sketch form (see below):
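
(free_to_pcs(), refill_sheaf() and slab_test_pfmemalloc() are the real
functions involved; the two fragments are illustrative, not verbatim)

	/* freeing: objects from pfmemalloc slabs bypass the percpu sheaves */
	if (!slab_test_pfmemalloc(slab) && likely(free_to_pcs(s, object)))
		return;

	/* refilling: never dip into memory reserves for cached objects */
	ret = refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC);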

For kfree_rcu(), detect pfmemalloc slabs while processing the rcu_sheaf
after the grace period in __rcu_free_sheaf_prepare(), and simply flush
the whole sheaf if any object comes from a pfmemalloc slab.
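
Condensed, the detection amounts to the following (the real loop in
__rcu_free_sheaf_prepare() also handles debugging-triggered frees;
p points at sheaf->objects):

	bool pfmemalloc = false;

	for (i = 0; i < sheaf->size; i++)
		if (slab_test_pfmemalloc(virt_to_slab(p[i])))
			pfmemalloc = true;

	return pfmemalloc;	/* caller flushes the whole sheaf if true */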

For prefilled sheaves, try to refill them first with __GFP_NOMEMALLOC
and, if that fails, retry without __GFP_NOMEMALLOC but mark the sheaf
pfmemalloc, so that it is flushed back to slabs when returned.
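
Roughly, mirroring the new __prefill_sheaf_pfmemalloc() helper below:

	ret = refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC);
	if (ret && gfp_pfmemalloc_allowed(gfp)) {
		/*
		 * allowed to use reserves: retry, but taint the sheaf so
		 * kmem_cache_return_sheaf() flushes it instead of caching it
		 */
		ret = refill_sheaf(s, sheaf, gfp);
		sheaf->pfmemalloc = true;
	}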

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
 mm/slub.c | 65 +++++++++++++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 51 insertions(+), 14 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 4731b9e461c2..ab03f29dc3bf 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -469,7 +469,10 @@ struct slab_sheaf {
 		struct rcu_head rcu_head;
 		struct list_head barn_list;
 		/* only used for prefilled sheafs */
-		unsigned int capacity;
+		struct {
+			unsigned int capacity;
+			bool pfmemalloc;
+		};
 	};
 	struct kmem_cache *cache;
 	unsigned int size;
@@ -2645,7 +2648,7 @@ static struct slab_sheaf *alloc_full_sheaf(struct kmem_cache *s, gfp_t gfp)
 	if (!sheaf)
 		return NULL;
 
-	if (refill_sheaf(s, sheaf, gfp)) {
+	if (refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC)) {
 		free_empty_sheaf(s, sheaf);
 		return NULL;
 	}
@@ -2723,12 +2726,13 @@ static void sheaf_flush_unused(struct kmem_cache *s, struct slab_sheaf *sheaf)
 	sheaf->size = 0;
 }
 
-static void __rcu_free_sheaf_prepare(struct kmem_cache *s,
+static bool __rcu_free_sheaf_prepare(struct kmem_cache *s,
 				     struct slab_sheaf *sheaf)
 {
 	bool init = slab_want_init_on_free(s);
 	void **p = &sheaf->objects[0];
 	unsigned int i = 0;
+	bool pfmemalloc = false;
 
 	while (i < sheaf->size) {
 		struct slab *slab = virt_to_slab(p[i]);
@@ -2741,8 +2745,13 @@ static void __rcu_free_sheaf_prepare(struct kmem_cache *s,
 			continue;
 		}
 
+		if (slab_test_pfmemalloc(slab))
+			pfmemalloc = true;
+
 		i++;
 	}
+
+	return pfmemalloc;
 }
 
 static void rcu_free_sheaf_nobarn(struct rcu_head *head)
@@ -5031,7 +5040,7 @@ __pcs_replace_empty_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs,
 		return NULL;
 
 	if (empty) {
-		if (!refill_sheaf(s, empty, gfp)) {
+		if (!refill_sheaf(s, empty, gfp | __GFP_NOMEMALLOC)) {
 			full = empty;
 		} else {
 			/*
@@ -5331,6 +5340,26 @@ void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t gfpflags, int nod
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node_noprof);
 
+static int __prefill_sheaf_pfmemalloc(struct kmem_cache *s,
+				      struct slab_sheaf *sheaf, gfp_t gfp)
+{
+	int ret = 0;
+
+	ret = refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC);
+
+	if (likely(!ret || !gfp_pfmemalloc_allowed(gfp)))
+		return ret;
+
+	/*
+	 * if we are allowed to, refill sheaf with pfmemalloc but then remember
+	 * it for when it's returned
+	 */
+	ret = refill_sheaf(s, sheaf, gfp);
+	sheaf->pfmemalloc = true;
+
+	return ret;
+}
+
 /*
  * returns a sheaf that has at least the requested size
  * when prefilling is needed, do so with given gfp flags
@@ -5401,17 +5430,18 @@ kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size)
 	if (!sheaf)
 		sheaf = alloc_empty_sheaf(s, gfp);
 
-	if (sheaf && sheaf->size < size) {
-		if (refill_sheaf(s, sheaf, gfp)) {
+	if (sheaf) {
+		sheaf->capacity = s->sheaf_capacity;
+		sheaf->pfmemalloc = false;
+
+		if (sheaf->size < size &&
+		    __prefill_sheaf_pfmemalloc(s, sheaf, gfp)) {
 			sheaf_flush_unused(s, sheaf);
 			free_empty_sheaf(s, sheaf);
 			sheaf = NULL;
 		}
 	}
 
-	if (sheaf)
-		sheaf->capacity = s->sheaf_capacity;
-
 	return sheaf;
 }
 
@@ -5431,7 +5461,8 @@ void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
 	struct slub_percpu_sheaves *pcs;
 	struct node_barn *barn;
 
-	if (unlikely(sheaf->capacity != s->sheaf_capacity)) {
+	if (unlikely((sheaf->capacity != s->sheaf_capacity)
+		     || sheaf->pfmemalloc)) {
 		sheaf_flush_unused(s, sheaf);
 		kfree(sheaf);
 		return;
@@ -5497,7 +5528,7 @@ int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
 
 	if (likely(sheaf->capacity >= size)) {
 		if (likely(sheaf->capacity == s->sheaf_capacity))
-			return refill_sheaf(s, sheaf, gfp);
+			return __prefill_sheaf_pfmemalloc(s, sheaf, gfp);
 
 		if (!__kmem_cache_alloc_bulk(s, gfp, sheaf->capacity - sheaf->size,
 					     &sheaf->objects[sheaf->size])) {
@@ -6177,8 +6208,12 @@ static void rcu_free_sheaf(struct rcu_head *head)
 	 * handles it fine. The only downside is that sheaf will serve fewer
 	 * allocations when reused. It only happens due to debugging, which is a
 	 * performance hit anyway.
+	 *
+	 * If it returns true, there was at least one object from pfmemalloc
+	 * slab so simply flush everything.
 	 */
-	__rcu_free_sheaf_prepare(s, sheaf);
+	if (__rcu_free_sheaf_prepare(s, sheaf))
+		goto flush;
 
 	n = get_node(s, sheaf->node);
 	if (!n)
@@ -6333,7 +6368,8 @@ static void free_to_pcs_bulk(struct kmem_cache *s, size_t size, void **p)
 			continue;
 		}
 
-		if (unlikely(IS_ENABLED(CONFIG_NUMA) && slab_nid(slab) != node)) {
+		if (unlikely((IS_ENABLED(CONFIG_NUMA) && slab_nid(slab) != node)
+			     || slab_test_pfmemalloc(slab))) {
 			remote_objects[remote_nr] = p[i];
 			p[i] = p[--size];
 			if (++remote_nr >= PCS_BATCH_MAX)
@@ -6631,7 +6667,8 @@ void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
 		return;
 
 	if (s->cpu_sheaves && likely(!IS_ENABLED(CONFIG_NUMA) ||
-				     slab_nid(slab) == numa_mem_id())) {
+				     slab_nid(slab) == numa_mem_id())
+			   && likely(!slab_test_pfmemalloc(slab))) {
 		if (likely(free_to_pcs(s, object)))
 			return;
 	}

-- 
2.51.1



Thread overview: 61+ messages
2025-10-23 13:52 [PATCH RFC 00/19] slab: replace cpu (partial) slabs with sheaves Vlastimil Babka
2025-10-23 13:52 ` [PATCH RFC 01/19] slab: move kfence_alloc() out of internal bulk alloc Vlastimil Babka
2025-10-23 15:20   ` Marco Elver
2025-10-29 14:38     ` Vlastimil Babka
2025-10-29 15:30       ` Marco Elver
2025-10-23 13:52 ` Vlastimil Babka [this message]
2025-10-24 14:21   ` [PATCH RFC 02/19] slab: handle pfmemalloc slabs properly with sheaves Chris Mason
2025-10-29 15:00     ` Vlastimil Babka
2025-10-29 16:06       ` Chris Mason
2025-10-23 13:52 ` [PATCH RFC 03/19] slub: remove CONFIG_SLUB_TINY specific code paths Vlastimil Babka
2025-10-24 22:34   ` Alexei Starovoitov
2025-10-29 15:37     ` Vlastimil Babka
2025-10-23 13:52 ` [PATCH RFC 04/19] slab: prevent recursive kmalloc() in alloc_empty_sheaf() Vlastimil Babka
2025-10-23 13:52 ` [PATCH RFC 05/19] slab: add sheaves to most caches Vlastimil Babka
2025-10-27  0:24   ` Harry Yoo
2025-10-29 15:42     ` Vlastimil Babka
2025-10-23 13:52 ` [PATCH RFC 06/19] slab: introduce percpu sheaves bootstrap Vlastimil Babka
2025-10-24 15:29   ` Chris Mason
2025-10-29 15:51     ` Vlastimil Babka
2025-10-23 13:52 ` [PATCH RFC 07/19] slab: make percpu sheaves compatible with kmalloc_nolock()/kfree_nolock() Vlastimil Babka
2025-10-24 14:04   ` Chris Mason
2025-10-29 17:30     ` Vlastimil Babka
2025-10-24 19:43   ` Alexei Starovoitov
2025-10-29 17:46     ` Vlastimil Babka
2025-10-23 13:52 ` [PATCH RFC 08/19] slab: handle kmalloc sheaves bootstrap Vlastimil Babka
2025-10-27  6:12   ` Harry Yoo
2025-10-29 20:06     ` Vlastimil Babka
2025-10-29 20:06       ` Vlastimil Babka
2025-10-30  0:11         ` Harry Yoo
2025-10-23 13:52 ` [PATCH RFC 09/19] slab: add optimized sheaf refill from partial list Vlastimil Babka
2025-10-27  7:20   ` Harry Yoo
2025-10-27  9:11     ` Harry Yoo
2025-10-29 20:48     ` Vlastimil Babka
2025-10-30  0:07       ` Harry Yoo
2025-10-30 13:18         ` Vlastimil Babka
2025-10-23 13:52 ` [PATCH RFC 10/19] slab: remove cpu (partial) slabs usage from allocation paths Vlastimil Babka
2025-10-24 14:29   ` Chris Mason
2025-10-29 21:31     ` Vlastimil Babka
2025-10-30  4:32   ` Harry Yoo
2025-10-30 13:09     ` Vlastimil Babka
2025-10-30 15:27       ` Alexei Starovoitov
2025-10-30 15:35         ` Vlastimil Babka
2025-10-30 15:59           ` Alexei Starovoitov
2025-11-03  3:44           ` Harry Yoo
2025-10-23 13:52 ` [PATCH RFC 11/19] slab: remove SLUB_CPU_PARTIAL Vlastimil Babka
2025-10-24 20:43   ` Alexei Starovoitov
2025-10-29 22:31     ` Vlastimil Babka
2025-10-30  0:26       ` Alexei Starovoitov
2025-10-23 13:52 ` [PATCH RFC 12/19] slab: remove the do_slab_free() fastpath Vlastimil Babka
2025-10-24 22:32   ` Alexei Starovoitov
2025-10-29 22:44     ` Vlastimil Babka
2025-10-30  0:24       ` Alexei Starovoitov
2025-10-23 13:52 ` [PATCH RFC 13/19] slab: remove defer_deactivate_slab() Vlastimil Babka
2025-10-23 13:52 ` [PATCH RFC 14/19] slab: simplify kmalloc_nolock() Vlastimil Babka
2025-10-23 13:52 ` [PATCH RFC 15/19] slab: remove struct kmem_cache_cpu Vlastimil Babka
2025-10-23 13:52 ` [PATCH RFC 16/19] slab: remove unused PREEMPT_RT specific macros Vlastimil Babka
2025-10-23 13:52 ` [PATCH RFC 17/19] slab: refill sheaves from all nodes Vlastimil Babka
2025-10-23 13:52 ` [PATCH RFC 18/19] slab: update overview comments Vlastimil Babka
2025-10-23 13:52 ` [PATCH RFC 19/19] slab: remove frozen slab checks from __slab_free() Vlastimil Babka
2025-10-24 23:57 ` [PATCH RFC 00/19] slab: replace cpu (partial) slabs with sheaves Alexei Starovoitov
2025-11-04 22:11 ` Christoph Lameter (Ampere)
