From: Vlastimil Babka <vbabka@suse.cz>
To: David Rientjes <rientjes@google.com>,
Christoph Lameter <cl@linux.com>,
Pekka Enberg <penberg@kernel.org>,
Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrew Morton <akpm@linux-foundation.org>,
Hyeonggon Yoo <42.hyeyoo@gmail.com>,
Roman Gushchin <roman.gushchin@linux.dev>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
patches@lists.linux.dev, Andrey Ryabinin <ryabinin.a.a@gmail.com>,
Alexander Potapenko <glider@google.com>,
Andrey Konovalov <andreyknvl@gmail.com>,
Dmitry Vyukov <dvyukov@google.com>,
Vincenzo Frascino <vincenzo.frascino@arm.com>,
Marco Elver <elver@google.com>,
Johannes Weiner <hannes@cmpxchg.org>,
Michal Hocko <mhocko@kernel.org>,
Shakeel Butt <shakeelb@google.com>,
Muchun Song <muchun.song@linux.dev>,
Kees Cook <keescook@chromium.org>,
kasan-dev@googlegroups.com, cgroups@vger.kernel.org,
Vlastimil Babka <vbabka@suse.cz>
Subject: [PATCH 17/20] mm/slab: move kmalloc() functions from slab_common.c to slub.c
Date: Mon, 13 Nov 2023 20:13:58 +0100
Message-ID: <20231113191340.17482-39-vbabka@suse.cz>
In-Reply-To: <20231113191340.17482-22-vbabka@suse.cz>
This eliminates a call between compilation units through
__kmem_cache_alloc_node() and allows the allocation fast path to be
inlined into the kmalloc() entry points.
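(A minimal standalone sketch of the inlining argument, using made-up
names rather than kernel code: while the callee lives in another
compilation unit the compiler must emit a real call, whereas a static
inline callee in the same file folds into its caller.)

#include <stddef.h>

typedef unsigned int gfp_t;     /* stand-in for the kernel type */

/* Before the move: defined in another .c file, opaque to the
 * optimizer here, so every caller pays for a cross-unit call. */
extern void *alloc_in_other_unit(size_t size, gfp_t flags);

/* After the move: visible in this unit, so the fast path can be
 * folded directly into each entry point. */
static inline void *alloc_fastpath(size_t size, gfp_t flags)
{
        static char pool[4096]; /* placeholder backing store */
        (void)flags;
        return size && size <= sizeof(pool) ? pool : NULL;
}

void *kmalloc_before(size_t size, gfp_t flags)
{
        return alloc_in_other_unit(size, flags); /* call retained */
}

void *kmalloc_after(size_t size, gfp_t flags)
{
        return alloc_fastpath(size, flags); /* call inlined away */
}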
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
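A side note on the __GFP_COMP comment that moves verbatim below: the
flag matters on the free side, where the allocation order is read back
from the compound page. Roughly (a simplified sketch, not the kernel's
actual large-kmalloc free path):

static void free_large_sketch(void *ptr)
{
        struct folio *folio = virt_to_folio(ptr);
        unsigned int order = folio_order(folio);

        /* Without __GFP_COMP these pages would not form a compound
         * page and the order could not be recovered here. */
        mod_lruvec_page_state(folio_page(folio, 0),
                              NR_SLAB_UNRECLAIMABLE_B,
                              -(PAGE_SIZE << order));
        __free_pages(folio_page(folio, 0), order);
}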
mm/slab.h | 3 --
mm/slab_common.c | 119 --------------------------------------------
mm/slub.c | 126 ++++++++++++++++++++++++++++++++++++++++++++---
3 files changed, 118 insertions(+), 130 deletions(-)
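For readers skimming the diff: the dispatch logic that moves into
slub.c boils down to the following (an annotated sketch with the
tracing and KASAN hooks omitted, not the verbatim code):

static __always_inline
void *do_kmalloc_node_sketch(size_t size, gfp_t flags, int node,
                             unsigned long caller)
{
        if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
                /* Too big for any kmalloc cache: straight to the page
                 * allocator (with __GFP_COMP added there). */
                return __kmalloc_large_node(size, flags, node);

        if (unlikely(!size))
                /* Zero-byte request: a unique poison value, not NULL. */
                return ZERO_SIZE_PTR;

        /* Pick the per-size cache, then call the fast path that used
         * to hide behind __kmem_cache_alloc_node() in another unit. */
        return slab_alloc_node(kmalloc_slab(size, flags, caller),
                               NULL, flags, node, caller, size);
}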
diff --git a/mm/slab.h b/mm/slab.h
index 744384efa7be..eb04c8a5dbd1 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -415,9 +415,6 @@ kmalloc_slab(size_t size, gfp_t flags, unsigned long caller)
return kmalloc_caches[kmalloc_type(flags, caller)][index];
}
-void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
- int node, size_t orig_size,
- unsigned long caller);
gfp_t kmalloc_fix_flags(gfp_t flags);
/* Functions provided by the slab allocators */
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 31ade17a7ad9..238293b1dbe1 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -936,50 +936,6 @@ void __init create_kmalloc_caches(slab_flags_t flags)
slab_state = UP;
}
-static void *__kmalloc_large_node(size_t size, gfp_t flags, int node);
-static __always_inline
-void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
-{
- struct kmem_cache *s;
- void *ret;
-
- if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
- ret = __kmalloc_large_node(size, flags, node);
- trace_kmalloc(caller, ret, size,
- PAGE_SIZE << get_order(size), flags, node);
- return ret;
- }
-
- if (unlikely(!size))
- return ZERO_SIZE_PTR;
-
- s = kmalloc_slab(size, flags, caller);
-
- ret = __kmem_cache_alloc_node(s, flags, node, size, caller);
- ret = kasan_kmalloc(s, ret, size, flags);
- trace_kmalloc(caller, ret, size, s->size, flags, node);
- return ret;
-}
-
-void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
- return __do_kmalloc_node(size, flags, node, _RET_IP_);
-}
-EXPORT_SYMBOL(__kmalloc_node);
-
-void *__kmalloc(size_t size, gfp_t flags)
-{
- return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
-}
-EXPORT_SYMBOL(__kmalloc);
-
-void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
- int node, unsigned long caller)
-{
- return __do_kmalloc_node(size, flags, node, caller);
-}
-EXPORT_SYMBOL(__kmalloc_node_track_caller);
-
/**
* __ksize -- Report full size of underlying allocation
* @object: pointer to the object
@@ -1016,30 +972,6 @@ size_t __ksize(const void *object)
return slab_ksize(folio_slab(folio)->slab_cache);
}
-void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
-{
- void *ret = __kmem_cache_alloc_node(s, gfpflags, NUMA_NO_NODE,
- size, _RET_IP_);
-
- trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE);
-
- ret = kasan_kmalloc(s, ret, size, gfpflags);
- return ret;
-}
-EXPORT_SYMBOL(kmalloc_trace);
-
-void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
- int node, size_t size)
-{
- void *ret = __kmem_cache_alloc_node(s, gfpflags, node, size, _RET_IP_);
-
- trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);
-
- ret = kasan_kmalloc(s, ret, size, gfpflags);
- return ret;
-}
-EXPORT_SYMBOL(kmalloc_node_trace);
-
gfp_t kmalloc_fix_flags(gfp_t flags)
{
gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
@@ -1052,57 +984,6 @@ gfp_t kmalloc_fix_flags(gfp_t flags)
return flags;
}
-/*
- * To avoid unnecessary overhead, we pass through large allocation requests
- * directly to the page allocator. We use __GFP_COMP, because we will need to
- * know the allocation order to free the pages properly in kfree.
- */
-
-static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
-{
- struct page *page;
- void *ptr = NULL;
- unsigned int order = get_order(size);
-
- if (unlikely(flags & GFP_SLAB_BUG_MASK))
- flags = kmalloc_fix_flags(flags);
-
- flags |= __GFP_COMP;
- page = alloc_pages_node(node, flags, order);
- if (page) {
- ptr = page_address(page);
- mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
- PAGE_SIZE << order);
- }
-
- ptr = kasan_kmalloc_large(ptr, size, flags);
- /* As ptr might get tagged, call kmemleak hook after KASAN. */
- kmemleak_alloc(ptr, size, 1, flags);
- kmsan_kmalloc_large(ptr, size, flags);
-
- return ptr;
-}
-
-void *kmalloc_large(size_t size, gfp_t flags)
-{
- void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE);
-
- trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
- flags, NUMA_NO_NODE);
- return ret;
-}
-EXPORT_SYMBOL(kmalloc_large);
-
-void *kmalloc_large_node(size_t size, gfp_t flags, int node)
-{
- void *ret = __kmalloc_large_node(size, flags, node);
-
- trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
- flags, node);
- return ret;
-}
-EXPORT_SYMBOL(kmalloc_large_node);
-
#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Randomize a generic freelist */
static void freelist_randomize(unsigned int *list,
diff --git a/mm/slub.c b/mm/slub.c
index 52e2a65b1b11..b44243e7cc5e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3851,14 +3851,6 @@ void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
}
EXPORT_SYMBOL(kmem_cache_alloc_lru);
-void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
- int node, size_t orig_size,
- unsigned long caller)
-{
- return slab_alloc_node(s, NULL, gfpflags, node,
- caller, orig_size);
-}
-
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
{
void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
@@ -3869,6 +3861,124 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
+/*
+ * To avoid unnecessary overhead, we pass through large allocation requests
+ * directly to the page allocator. We use __GFP_COMP, because we will need to
+ * know the allocation order to free the pages properly in kfree.
+ */
+static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
+{
+ struct page *page;
+ void *ptr = NULL;
+ unsigned int order = get_order(size);
+
+ if (unlikely(flags & GFP_SLAB_BUG_MASK))
+ flags = kmalloc_fix_flags(flags);
+
+ flags |= __GFP_COMP;
+ page = alloc_pages_node(node, flags, order);
+ if (page) {
+ ptr = page_address(page);
+ mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
+ PAGE_SIZE << order);
+ }
+
+ ptr = kasan_kmalloc_large(ptr, size, flags);
+ /* As ptr might get tagged, call kmemleak hook after KASAN. */
+ kmemleak_alloc(ptr, size, 1, flags);
+ kmsan_kmalloc_large(ptr, size, flags);
+
+ return ptr;
+}
+
+void *kmalloc_large(size_t size, gfp_t flags)
+{
+ void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE);
+
+ trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
+ flags, NUMA_NO_NODE);
+ return ret;
+}
+EXPORT_SYMBOL(kmalloc_large);
+
+void *kmalloc_large_node(size_t size, gfp_t flags, int node)
+{
+ void *ret = __kmalloc_large_node(size, flags, node);
+
+ trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
+ flags, node);
+ return ret;
+}
+EXPORT_SYMBOL(kmalloc_large_node);
+
+static __always_inline
+void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
+ unsigned long caller)
+{
+ struct kmem_cache *s;
+ void *ret;
+
+ if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
+ ret = __kmalloc_large_node(size, flags, node);
+ trace_kmalloc(caller, ret, size,
+ PAGE_SIZE << get_order(size), flags, node);
+ return ret;
+ }
+
+ if (unlikely(!size))
+ return ZERO_SIZE_PTR;
+
+ s = kmalloc_slab(size, flags, caller);
+
+ ret = slab_alloc_node(s, NULL, flags, node, caller, size);
+ ret = kasan_kmalloc(s, ret, size, flags);
+ trace_kmalloc(caller, ret, size, s->size, flags, node);
+ return ret;
+}
+
+void *__kmalloc_node(size_t size, gfp_t flags, int node)
+{
+ return __do_kmalloc_node(size, flags, node, _RET_IP_);
+}
+EXPORT_SYMBOL(__kmalloc_node);
+
+void *__kmalloc(size_t size, gfp_t flags)
+{
+ return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
+}
+EXPORT_SYMBOL(__kmalloc);
+
+void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
+ int node, unsigned long caller)
+{
+ return __do_kmalloc_node(size, flags, node, caller);
+}
+EXPORT_SYMBOL(__kmalloc_node_track_caller);
+
+void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
+{
+ void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE,
+ _RET_IP_, size);
+
+ trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE);
+
+ ret = kasan_kmalloc(s, ret, size, gfpflags);
+ return ret;
+}
+EXPORT_SYMBOL(kmalloc_trace);
+
+void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
+ int node, size_t size)
+{
+ void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
+
+ trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);
+
+ ret = kasan_kmalloc(s, ret, size, gfpflags);
+ return ret;
+}
+EXPORT_SYMBOL(kmalloc_node_trace);
+
static noinline void free_to_partial_list(
struct kmem_cache *s, struct slab *slab,
void *head, void *tail, int bulk_cnt,
--
2.42.1