From: Hyeonggon Yoo <42.hyeyoo@gmail.com>
To: Christoph Lameter <cl@linux.com>,
	Pekka Enberg <penberg@kernel.org>,
	David Rientjes <rientjes@google.com>,
	Joonsoo Kim <iamjoonsoo.kim@lge.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	Vlastimil Babka <vbabka@suse.cz>,
	Roman Gushchin <roman.gushchin@linux.dev>,
	Hyeonggon Yoo <42.hyeyoo@gmail.com>,
	Joe Perches <joe@perches.com>,
	Vasily Averin <vasily.averin@linux.dev>,
	Matthew Wilcox <willy@infradead.org>
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org
Subject: [PATCH v3 1/15] mm/slab: move NUMA-related code to __do_cache_alloc()
Date: Tue, 12 Jul 2022 13:39:31 +0000
Message-ID: <20220712133946.307181-2-42.hyeyoo@gmail.com>
In-Reply-To: <20220712133946.307181-1-42.hyeyoo@gmail.com>

To implement slab_alloc_node() independently of the NUMA configuration,
move the NUMA fallback/alternate-node allocation code into
__do_cache_alloc().

One functional change here is that the availability of the requested node
is no longer checked when allocating from the local node (a simplified
sketch of the resulting flow follows the v3 note below).

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
---

v3:
	Fixed an uninitialized-variable bug caused by the missing
	NULL initialization of the local variable objp.
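
For readers who want the end state before wading through the hunks, here
is a minimal, hypothetical userspace model of the new __do_cache_alloc()
dispatch. The stubs (numa_mem_id(), node_bootstrapped() and the *_alloc()
helpers) only stand in for the real slab internals and are not kernel API;
only the branch structure mirrors the patch:

	#include <stdio.h>

	#define NUMA_NO_NODE	(-1)

	/* Hypothetical stand-ins for the slab internals the patch touches. */
	static int numa_mem_id(void) { return 0; }	/* pretend we run on node 0 */
	static int node_bootstrapped(int nodeid) { return nodeid == 0; }

	static void *local_alloc(void)		/* models ____cache_alloc() */
	{
		return NULL;			/* pretend the local caches are empty */
	}

	static void *node_alloc(int nodeid)	/* models ____cache_alloc_node() */
	{
		(void)nodeid;			/* the real helper searches this node first */
		return "per-node list";
	}

	static void *fallback(void)		/* models fallback_alloc() */
	{
		return "fallback";
	}

	static void *do_cache_alloc(int nodeid)
	{
		void *objp = NULL;	/* must start NULL: not every branch sets it (the v3 fix) */
		int slab_node = numa_mem_id();

		if (nodeid == NUMA_NO_NODE) {
			/* No preference (mempolicy/cpuset handling elided): try local caches. */
			objp = local_alloc();
			nodeid = slab_node;
		} else if (nodeid == slab_node) {
			/* Local node requested: straight to local caches, no get_node() check. */
			objp = local_alloc();
		} else if (!node_bootstrapped(nodeid)) {
			/* Remote node not bootstrapped yet. */
			return fallback();
		}

		/* Local caches may be empty; the per-node path can fall back to other nodes. */
		if (!objp)
			objp = node_alloc(nodeid);
		return objp;
	}

	int main(void)
	{
		printf("NUMA_NO_NODE -> %s\n", (char *)do_cache_alloc(NUMA_NO_NODE));
		printf("node 1       -> %s\n", (char *)do_cache_alloc(1));
		return 0;
	}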

 mm/slab.c | 68 +++++++++++++++++++++++++------------------------------
 1 file changed, 31 insertions(+), 37 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index 764cbadba69c..3d83d17ff3b3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3186,13 +3186,14 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 	return obj ? obj : fallback_alloc(cachep, flags);
 }
 
+static void *__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid);
+
 static __always_inline void *
 slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_size,
 		   unsigned long caller)
 {
 	unsigned long save_flags;
 	void *ptr;
-	int slab_node = numa_mem_id();
 	struct obj_cgroup *objcg = NULL;
 	bool init = false;
 
@@ -3207,30 +3208,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
 
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
-
-	if (nodeid == NUMA_NO_NODE)
-		nodeid = slab_node;
-
-	if (unlikely(!get_node(cachep, nodeid))) {
-		/* Node not bootstrapped yet */
-		ptr = fallback_alloc(cachep, flags);
-		goto out;
-	}
-
-	if (nodeid == slab_node) {
-		/*
-		 * Use the locally cached objects if possible.
-		 * However ____cache_alloc does not allow fallback
-		 * to other nodes. It may fail while we still have
-		 * objects on other nodes available.
-		 */
-		ptr = ____cache_alloc(cachep, flags);
-		if (ptr)
-			goto out;
-	}
-	/* ___cache_alloc_node can fall back to other nodes */
-	ptr = ____cache_alloc_node(cachep, flags, nodeid);
-out:
+	ptr = __do_cache_alloc(cachep, flags, nodeid);
 	local_irq_restore(save_flags);
 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
 	init = slab_want_init_on_alloc(flags, cachep);
@@ -3241,31 +3219,46 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
 }
 
 static __always_inline void *
-__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
+__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-	void *objp;
+	void *objp = NULL;
+	int slab_node = numa_mem_id();
 
-	if (current->mempolicy || cpuset_do_slab_mem_spread()) {
-		objp = alternate_node_alloc(cache, flags);
-		if (objp)
-			goto out;
+	if (nodeid == NUMA_NO_NODE) {
+		if (current->mempolicy || cpuset_do_slab_mem_spread()) {
+			objp = alternate_node_alloc(cachep, flags);
+			if (objp)
+				goto out;
+		}
+		/*
+		 * Use the locally cached objects if possible.
+		 * However ____cache_alloc does not allow fallback
+		 * to other nodes. It may fail while we still have
+		 * objects on other nodes available.
+		 */
+		objp = ____cache_alloc(cachep, flags);
+		nodeid = slab_node;
+	} else if (nodeid == slab_node) {
+		objp = ____cache_alloc(cachep, flags);
+	} else if (!get_node(cachep, nodeid)) {
+		/* Node not bootstrapped yet */
+		objp = fallback_alloc(cachep, flags);
+		goto out;
 	}
-	objp = ____cache_alloc(cache, flags);
 
 	/*
 	 * We may just have run out of memory on the local node.
 	 * ____cache_alloc_node() knows how to locate memory on other nodes
 	 */
 	if (!objp)
-		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
-
+		objp = ____cache_alloc_node(cachep, flags, nodeid);
 out:
 	return objp;
 }
 #else
 
 static __always_inline void *
-__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid __maybe_unused)
 {
 	return ____cache_alloc(cachep, flags);
 }
@@ -3292,7 +3285,7 @@ slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
 
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
-	objp = __do_cache_alloc(cachep, flags);
+	objp = __do_cache_alloc(cachep, flags, NUMA_NO_NODE);
 	local_irq_restore(save_flags);
 	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
 	prefetchw(objp);
@@ -3531,7 +3524,8 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 
 	local_irq_disable();
 	for (i = 0; i < size; i++) {
-		void *objp = kfence_alloc(s, s->object_size, flags) ?: __do_cache_alloc(s, flags);
+		void *objp = kfence_alloc(s, s->object_size, flags) ?:
+			     __do_cache_alloc(s, flags, NUMA_NO_NODE);
 
 		if (unlikely(!objp))
 			goto error;
-- 
2.34.1
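
After this patch, both entry points funnel into the same helper:
slab_alloc() passes NUMA_NO_NODE, meaning "no node preference, local
caches first", while slab_alloc_node() forwards its caller's node id. A
simplified view of the two call sites from the hunks above (surrounding
code elided):

	/* UMA path / no preference: local per-CPU caches, then any node */
	objp = __do_cache_alloc(cachep, flags, NUMA_NO_NODE);

	/* explicit node request: local fast path only if nodeid is local */
	ptr = __do_cache_alloc(cachep, flags, nodeid);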




Thread overview: 49+ messages
2022-07-12 13:39 [PATCH v3 00/15] common kmalloc v3 Hyeonggon Yoo
2022-07-12 13:39 ` Hyeonggon Yoo [this message]
2022-07-12 14:29   ` [PATCH v3 1/15] mm/slab: move NUMA-related code to __do_cache_alloc() Christoph Lameter
2022-07-13  9:39     ` Hyeonggon Yoo
2022-07-12 13:39 ` [PATCH v3 2/15] mm/slab: cleanup slab_alloc() and slab_alloc_node() Hyeonggon Yoo
2022-07-12 13:39 ` [PATCH v3 03/15] mm/slab_common: remove CONFIG_NUMA ifdefs for common kmalloc functions Hyeonggon Yoo
2022-07-12 13:39 ` [PATCH v3 04/15] mm/slab_common: cleanup kmalloc_track_caller() Hyeonggon Yoo
2022-07-12 13:39 ` [PATCH v3 05/15] mm/sl[au]b: factor out __do_kmalloc_node() Hyeonggon Yoo
2022-07-28 14:45   ` Vlastimil Babka
2022-07-12 13:39 ` [PATCH v3 06/15] mm/slab_common: fold kmalloc_order_trace() into kmalloc_large() Hyeonggon Yoo
2022-07-28 15:23   ` Vlastimil Babka
2022-08-01 13:26     ` Hyeonggon Yoo
2022-08-01 13:36       ` Vlastimil Babka
2022-08-02  2:54         ` Hyeonggon Yoo
2022-07-12 13:39 ` [PATCH v3 07/15] mm/slub: move kmalloc_large_node() to slab_common.c Hyeonggon Yoo
2022-07-12 13:39 ` [PATCH v3 08/15] mm/slab_common: kmalloc_node: pass large requests to page allocator Hyeonggon Yoo
2022-07-28 16:09   ` Vlastimil Babka
2022-08-01 14:37     ` Hyeonggon Yoo
2022-08-01 14:44       ` Vlastimil Babka
2022-08-02  8:59         ` Hyeonggon Yoo
2022-08-02  9:32           ` Vlastimil Babka
2022-07-12 13:39 ` [PATCH v3 09/15] mm/slab_common: cleanup kmalloc_large() Hyeonggon Yoo
2022-07-28 16:13   ` Vlastimil Babka
2022-07-12 13:39 ` [PATCH v3 10/15] mm/slab: kmalloc: pass requests larger than order-1 page to page allocator Hyeonggon Yoo
2022-07-28 16:25   ` Vlastimil Babka
2022-07-12 13:39 ` [PATCH v3 11/15] mm/sl[au]b: introduce common alloc/free functions without tracepoint Hyeonggon Yoo
2022-07-29  9:49   ` Vlastimil Babka
2022-07-12 13:39 ` [PATCH v3 12/15] mm/sl[au]b: generalize kmalloc subsystem Hyeonggon Yoo
2022-07-29 10:25   ` Vlastimil Babka
2022-07-12 13:39 ` [PATCH v3 13/15] mm/slab_common: unify NUMA and UMA version of tracepoints Hyeonggon Yoo
2022-07-29 10:52   ` Vlastimil Babka
2022-07-12 13:39 ` [PATCH 14/16] mm/slab_common: drop kmem_alloc & avoid dereferencing fields when not using Hyeonggon Yoo
2022-07-29 11:23   ` Vlastimil Babka
2022-08-02  9:22     ` Hyeonggon Yoo
2022-07-12 13:39 ` [PATCH 15/16] mm/slab_common: move definition of __ksize() to mm/slab.h Hyeonggon Yoo
2022-07-29 11:47   ` Vlastimil Babka
2022-08-02  9:25     ` Hyeonggon Yoo
2022-07-12 13:39 ` [PATCH 16/16] mm/sl[au]b: check if large object is valid in __ksize() Hyeonggon Yoo
2022-07-12 15:13   ` Christoph Lameter
2022-07-13  9:25     ` Hyeonggon Yoo
2022-07-13 10:07       ` Christoph Lameter
2022-07-13 10:33         ` Marco Elver
2022-07-14  9:15           ` Christoph Lameter
2022-07-14 10:30             ` Marco Elver
2022-07-20 10:05               ` Hyeonggon Yoo
2022-07-29 11:50   ` Vlastimil Babka
2022-07-29 15:08 ` [PATCH v3 00/15] common kmalloc v3 Vlastimil Babka
2022-08-14 10:06   ` Hyeonggon Yoo
2022-08-15 12:59     ` Vlastimil Babka
