From: Vlastimil Babka <vbabka@suse.cz>
To: David Rientjes <rientjes@google.com>,
Christoph Lameter <cl@linux.com>,
Pekka Enberg <penberg@kernel.org>,
Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrew Morton <akpm@linux-foundation.org>,
Hyeonggon Yoo <42.hyeyoo@gmail.com>,
Roman Gushchin <roman.gushchin@linux.dev>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
patches@lists.linux.dev, Andrey Ryabinin <ryabinin.a.a@gmail.com>,
Alexander Potapenko <glider@google.com>,
Andrey Konovalov <andreyknvl@gmail.com>,
Dmitry Vyukov <dvyukov@google.com>,
Vincenzo Frascino <vincenzo.frascino@arm.com>,
Marco Elver <elver@google.com>,
Johannes Weiner <hannes@cmpxchg.org>,
Michal Hocko <mhocko@kernel.org>,
Shakeel Butt <shakeelb@google.com>,
Muchun Song <muchun.song@linux.dev>,
Kees Cook <keescook@chromium.org>,
kasan-dev@googlegroups.com, cgroups@vger.kernel.org,
Vlastimil Babka <vbabka@suse.cz>
Subject: [PATCH 15/20] mm/slab: move kfree() from slab_common.c to slub.c
Date: Mon, 13 Nov 2023 20:13:56 +0100
Message-ID: <20231113191340.17482-37-vbabka@suse.cz>
In-Reply-To: <20231113191340.17482-22-vbabka@suse.cz>

This should result in better code. Currently kfree() makes a function
call between compilation units to __kmem_cache_free(), which does its
own virt_to_slab(), throwing away the struct slab pointer we already
had in kfree(). Now it can be reused. Additionally, kfree() can now
inline the whole SLUB freeing fastpath.

Also move over free_large_kmalloc(), as its only callsites are now in
slub.c, and make it static.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
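A condensed before/after sketch of the kfree() call path, distilled
from the hunks below (ZERO_OR_NULL_PTR handling, large-kmalloc folios
and tracing omitted for brevity):

/* Before: kfree() lived in slab_common.c; the slab pointer it
 * computed was discarded at the compilation-unit boundary.
 */
void kfree(const void *object)
{
	struct slab *slab = folio_slab(virt_to_folio(object));

	/* cross-unit call into slub.c, cannot be inlined */
	__kmem_cache_free(slab->slab_cache, (void *)object, _RET_IP_);
}

/* ... and __kmem_cache_free() in slub.c redid the lookup: */
void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller)
{
	slab_free(s, virt_to_slab(x), x, NULL, &x, 1, caller);
}

/* After: kfree() is in slub.c, reuses the slab pointer and lets the
 * compiler inline slab_free(), i.e. the SLUB freeing fastpath.
 */
void kfree(const void *object)
{
	void *x = (void *)object;
	struct slab *slab = folio_slab(virt_to_folio(object));

	slab_free(slab->slab_cache, slab, x, NULL, &x, 1, _RET_IP_);
}
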
 mm/slab.h        |  4 ----
 mm/slab_common.c | 45 ------------------------------------------
 mm/slub.c        | 51 +++++++++++++++++++++++++++++++++++++++++++-----
 3 files changed, 46 insertions(+), 54 deletions(-)

diff --git a/mm/slab.h b/mm/slab.h
index 1b09fd1b4b04..179467e8aacc 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -394,8 +394,6 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags, unsigned long caller);
void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
int node, size_t orig_size,
unsigned long caller);
-void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller);
-
gfp_t kmalloc_fix_flags(gfp_t flags);
/* Functions provided by the slab allocators */
@@ -558,8 +556,6 @@ static inline int memcg_alloc_slab_cgroups(struct slab *slab,
}
#endif /* CONFIG_MEMCG_KMEM */
-void free_large_kmalloc(struct folio *folio, void *object);
-
size_t __ksize(const void *objp);
static inline size_t slab_ksize(const struct kmem_cache *s)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index bbc2e3f061f1..f4f275613d2a 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -963,22 +963,6 @@ void __init create_kmalloc_caches(slab_flags_t flags)
slab_state = UP;
}
-void free_large_kmalloc(struct folio *folio, void *object)
-{
- unsigned int order = folio_order(folio);
-
- if (WARN_ON_ONCE(order == 0))
- pr_warn_once("object pointer: 0x%p\n", object);
-
- kmemleak_free(object);
- kasan_kfree_large(object);
- kmsan_kfree_large(object);
-
- mod_lruvec_page_state(folio_page(folio, 0), NR_SLAB_UNRECLAIMABLE_B,
- -(PAGE_SIZE << order));
- __free_pages(folio_page(folio, 0), order);
-}
-
static void *__kmalloc_large_node(size_t size, gfp_t flags, int node);
static __always_inline
void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
@@ -1023,35 +1007,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
}
EXPORT_SYMBOL(__kmalloc_node_track_caller);
-/**
- * kfree - free previously allocated memory
- * @object: pointer returned by kmalloc() or kmem_cache_alloc()
- *
- * If @object is NULL, no operation is performed.
- */
-void kfree(const void *object)
-{
- struct folio *folio;
- struct slab *slab;
- struct kmem_cache *s;
-
- trace_kfree(_RET_IP_, object);
-
- if (unlikely(ZERO_OR_NULL_PTR(object)))
- return;
-
- folio = virt_to_folio(object);
- if (unlikely(!folio_test_slab(folio))) {
- free_large_kmalloc(folio, (void *)object);
- return;
- }
-
- slab = folio_slab(folio);
- s = slab->slab_cache;
- __kmem_cache_free(s, (void *)object, _RET_IP_);
-}
-EXPORT_SYMBOL(kfree);
-
/**
* __ksize -- Report full size of underlying allocation
* @object: pointer to the object
diff --git a/mm/slub.c b/mm/slub.c
index 0dbb966e28a7..52e2a65b1b11 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4184,11 +4184,6 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
return cachep;
}
-void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller)
-{
- slab_free(s, virt_to_slab(x), x, NULL, &x, 1, caller);
-}
-
void kmem_cache_free(struct kmem_cache *s, void *x)
{
s = cache_from_obj(s, x);
@@ -4199,6 +4194,52 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
}
EXPORT_SYMBOL(kmem_cache_free);
+static void free_large_kmalloc(struct folio *folio, void *object)
+{
+ unsigned int order = folio_order(folio);
+
+ if (WARN_ON_ONCE(order == 0))
+ pr_warn_once("object pointer: 0x%p\n", object);
+
+ kmemleak_free(object);
+ kasan_kfree_large(object);
+ kmsan_kfree_large(object);
+
+ mod_lruvec_page_state(folio_page(folio, 0), NR_SLAB_UNRECLAIMABLE_B,
+ -(PAGE_SIZE << order));
+ __free_pages(folio_page(folio, 0), order);
+}
+
+/**
+ * kfree - free previously allocated memory
+ * @object: pointer returned by kmalloc() or kmem_cache_alloc()
+ *
+ * If @object is NULL, no operation is performed.
+ */
+void kfree(const void *object)
+{
+ struct folio *folio;
+ struct slab *slab;
+ struct kmem_cache *s;
+ void *x = (void *)object;
+
+ trace_kfree(_RET_IP_, object);
+
+ if (unlikely(ZERO_OR_NULL_PTR(object)))
+ return;
+
+ folio = virt_to_folio(object);
+ if (unlikely(!folio_test_slab(folio))) {
+ free_large_kmalloc(folio, (void *)object);
+ return;
+ }
+
+ slab = folio_slab(folio);
+ s = slab->slab_cache;
+ slab_free(s, slab, x, NULL, &x, 1, _RET_IP_);
+}
+EXPORT_SYMBOL(kfree);
+
struct detached_freelist {
struct slab *slab;
void *tail;
--
2.42.1