From: chengming.zhou@linux.dev
To: cl@linux.com, penberg@kernel.org
Cc: rientjes@google.com, iamjoonsoo.kim@lge.com,
akpm@linux-foundation.org, vbabka@suse.cz,
roman.gushchin@linux.dev, 42.hyeyoo@gmail.com,
willy@infradead.org, pcc@google.com, tytso@mit.edu,
maz@kernel.org, ruansy.fnst@fujitsu.com, vishal.moola@gmail.com,
lrh2000@pku.edu.cn, hughd@google.com,
linux-kernel@vger.kernel.org, linux-mm@kvack.org,
chengming.zhou@linux.dev,
Chengming Zhou <zhouchengming@bytedance.com>
Subject: [RFC PATCH v2 5/6] slub: Introduce get_cpu_partial()
Date: Sat, 21 Oct 2023 14:43:16 +0000 [thread overview]
Message-ID: <20231021144317.3400916-6-chengming.zhou@linux.dev> (raw)
In-Reply-To: <20231021144317.3400916-1-chengming.zhou@linux.dev>
From: Chengming Zhou <zhouchengming@bytedance.com>
Since slabs on the cpu partial list are no longer frozen, we introduce
get_cpu_partial() to get a frozen slab with its freelist from the cpu
partial list. It's now much like getting a frozen slab with its freelist
from the node partial list.
Another change is in get_partial(): it can now return no frozen slab even
though it made progress — when acquire_slab() fails for every slab tried,
some unfrozen slabs may still have been put on the cpu partial list. So we
need to check for this rare case (cpu partial list non-empty after
get_partial() returns NULL) to avoid needlessly allocating a new slab.
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
---
mm/slub.c | 87 +++++++++++++++++++++++++++++++++++++++++++------------
1 file changed, 68 insertions(+), 19 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 9f0b80fefc70..7fae959c56eb 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3055,6 +3055,68 @@ static inline void *get_freelist(struct kmem_cache *s, struct slab *slab)
return freelist;
}
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+
+static void *get_cpu_partial(struct kmem_cache *s, struct kmem_cache_cpu *c,
+ struct slab **slabptr, int node, gfp_t gfpflags)
+{
+ unsigned long flags;
+ struct slab *slab;
+ struct slab new;
+ unsigned long counters;
+ void *freelist;
+
+ while (slub_percpu_partial(c)) {
+ local_lock_irqsave(&s->cpu_slab->lock, flags);
+ if (unlikely(!slub_percpu_partial(c))) {
+ local_unlock_irqrestore(&s->cpu_slab->lock, flags);
+ /* we were preempted and partial list got empty */
+ return NULL;
+ }
+
+ slab = slub_percpu_partial(c);
+ slub_set_percpu_partial(c, slab);
+ local_unlock_irqrestore(&s->cpu_slab->lock, flags);
+ stat(s, CPU_PARTIAL_ALLOC);
+
+ if (unlikely(!node_match(slab, node) ||
+ !pfmemalloc_match(slab, gfpflags))) {
+ slab->next = NULL;
+ __unfreeze_partials(s, slab);
+ continue;
+ }
+
+ do {
+ freelist = slab->freelist;
+ counters = slab->counters;
+
+ new.counters = counters;
+ VM_BUG_ON(new.frozen);
+
+ new.inuse = slab->objects;
+ new.frozen = 1;
+ } while (!__slab_update_freelist(s, slab,
+ freelist, counters,
+ NULL, new.counters,
+ "get_cpu_partial"));
+
+ *slabptr = slab;
+ return freelist;
+ }
+
+ return NULL;
+}
+
+#else /* CONFIG_SLUB_CPU_PARTIAL */
+
+static void *get_cpu_partial(struct kmem_cache *s, struct kmem_cache_cpu *c,
+ struct slab **slabptr, int node, gfp_t gfpflags)
+{
+ return NULL;
+}
+
+#endif
+
/*
* Slow path. The lockless freelist is empty or we need to perform
* debugging duties.
@@ -3097,7 +3159,6 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
node = NUMA_NO_NODE;
goto new_slab;
}
-redo:
if (unlikely(!node_match(slab, node))) {
/*
@@ -3173,24 +3234,9 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
new_slab:
- if (slub_percpu_partial(c)) {
- local_lock_irqsave(&s->cpu_slab->lock, flags);
- if (unlikely(c->slab)) {
- local_unlock_irqrestore(&s->cpu_slab->lock, flags);
- goto reread_slab;
- }
- if (unlikely(!slub_percpu_partial(c))) {
- local_unlock_irqrestore(&s->cpu_slab->lock, flags);
- /* we were preempted and partial list got empty */
- goto new_objects;
- }
-
- slab = c->slab = slub_percpu_partial(c);
- slub_set_percpu_partial(c, slab);
- local_unlock_irqrestore(&s->cpu_slab->lock, flags);
- stat(s, CPU_PARTIAL_ALLOC);
- goto redo;
- }
+ freelist = get_cpu_partial(s, c, &slab, node, gfpflags);
+ if (freelist)
+ goto retry_load_slab;
new_objects:
@@ -3201,6 +3247,9 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
if (freelist)
goto check_new_slab;
+ if (slub_percpu_partial(c))
+ goto new_slab;
+
slub_put_cpu_ptr(s->cpu_slab);
slab = new_slab(s, gfpflags, node);
c = slub_get_cpu_ptr(s->cpu_slab);
--
2.20.1
next prev parent reply other threads:[~2023-10-21 14:44 UTC|newest]
Thread overview: 22+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-10-21 14:43 [RFC PATCH v2 0/6] slub: Delay freezing of CPU partial slabs chengming.zhou
2023-10-21 14:43 ` [RFC PATCH v2 1/6] slub: Keep track of whether slub is on the per-node partial list chengming.zhou
2023-10-23 12:32 ` Matthew Wilcox
2023-10-23 16:22 ` Matthew Wilcox
2023-10-24 1:57 ` Chengming Zhou
2023-10-21 14:43 ` [RFC PATCH v2 2/6] slub: Prepare __slab_free() for unfrozen partial slab out of node " chengming.zhou
2023-10-21 14:43 ` [RFC PATCH v2 3/6] slub: Don't freeze slabs for cpu partial chengming.zhou
2023-10-23 16:00 ` Vlastimil Babka
2023-10-24 2:39 ` Chengming Zhou
2023-10-21 14:43 ` [RFC PATCH v2 4/6] slub: Simplify acquire_slab() chengming.zhou
2023-10-21 14:43 ` chengming.zhou [this message]
2023-10-21 14:43 ` [RFC PATCH v2 6/6] slub: Optimize deactivate_slab() chengming.zhou
2023-10-22 14:52 ` [RFC PATCH v2 0/6] slub: Delay freezing of CPU partial slabs Hyeonggon Yoo
2023-10-24 2:02 ` Chengming Zhou
2023-10-23 15:46 ` Vlastimil Babka
2023-10-23 17:00 ` Christoph Lameter (Ampere)
2023-10-23 18:44 ` Vlastimil Babka
2023-10-23 21:05 ` Christoph Lameter (Ampere)
2023-10-24 8:19 ` Vlastimil Babka
2023-10-24 11:03 ` Chengming Zhou
2023-10-24 2:20 ` Chengming Zhou
2023-10-24 8:20 ` Vlastimil Babka
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20231021144317.3400916-6-chengming.zhou@linux.dev \
--to=chengming.zhou@linux.dev \
--cc=42.hyeyoo@gmail.com \
--cc=akpm@linux-foundation.org \
--cc=cl@linux.com \
--cc=hughd@google.com \
--cc=iamjoonsoo.kim@lge.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=lrh2000@pku.edu.cn \
--cc=maz@kernel.org \
--cc=pcc@google.com \
--cc=penberg@kernel.org \
--cc=rientjes@google.com \
--cc=roman.gushchin@linux.dev \
--cc=ruansy.fnst@fujitsu.com \
--cc=tytso@mit.edu \
--cc=vbabka@suse.cz \
--cc=vishal.moola@gmail.com \
--cc=willy@infradead.org \
--cc=zhouchengming@bytedance.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox