From: Vlastimil Babka <vbabka@suse.cz>
To: Hyeonggon Yoo <42.hyeyoo@gmail.com>,
Christoph Lameter <cl@linux.com>,
Pekka Enberg <penberg@kernel.org>,
David Rientjes <rientjes@google.com>,
Joonsoo Kim <iamjoonsoo.kim@lge.com>,
Andrew Morton <akpm@linux-foundation.org>,
Roman Gushchin <roman.gushchin@linux.dev>,
Joe Perches <joe@perches.com>,
Vasily Averin <vasily.averin@linux.dev>,
Matthew Wilcox <willy@infradead.org>
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org
Subject: Re: [PATCH v3 05/15] mm/sl[au]b: factor out __do_kmalloc_node()
Date: Thu, 28 Jul 2022 16:45:03 +0200 [thread overview]
Message-ID: <4fcd01a6-4a52-3389-3cd5-be8a17448892@suse.cz> (raw)
In-Reply-To: <20220712133946.307181-6-42.hyeyoo@gmail.com>
On 7/12/22 15:39, Hyeonggon Yoo wrote:
> __kmalloc(), __kmalloc_node(), __kmalloc_node_track_caller()
> mostly do same job. Factor out common code into __do_kmalloc_node().
>
> Note that this patch also fixes missing kasan_kmalloc() in SLUB's
> __kmalloc_node_track_caller().
>
> Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
> ---
> mm/slab.c | 30 +----------------------
> mm/slub.c | 71 +++++++++++++++----------------------------------------
> 2 files changed, 20 insertions(+), 81 deletions(-)
>
> diff --git a/mm/slab.c b/mm/slab.c
> index da2f6a5dd8fa..ab34727d61b2 100644
> --- a/mm/slab.c
> +++ b/mm/slab.c
> @@ -3631,37 +3631,9 @@ void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
> }
> #endif
>
> -/**
> - * __do_kmalloc - allocate memory
> - * @size: how many bytes of memory are required.
> - * @flags: the type of memory to allocate (see kmalloc).
> - * @caller: function caller for debug tracking of the caller
> - *
> - * Return: pointer to the allocated memory or %NULL in case of error
> - */
> -static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
> - unsigned long caller)
> -{
> - struct kmem_cache *cachep;
> - void *ret;
> -
> - if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
> - return NULL;
> - cachep = kmalloc_slab(size, flags);
> - if (unlikely(ZERO_OR_NULL_PTR(cachep)))
> - return cachep;
> - ret = slab_alloc(cachep, NULL, flags, size, caller);
> -
> - ret = kasan_kmalloc(cachep, ret, size, flags);
> - trace_kmalloc(caller, ret, cachep,
> - size, cachep->size, flags);
> -
> - return ret;
> -}
> -
> void *__kmalloc(size_t size, gfp_t flags)
> {
> - return __do_kmalloc(size, flags, _RET_IP_);
> + return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
> }
> EXPORT_SYMBOL(__kmalloc);
>
> diff --git a/mm/slub.c b/mm/slub.c
> index 7c284535a62b..2ccc473e0ae7 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -4402,29 +4402,6 @@ static int __init setup_slub_min_objects(char *str)
>
> __setup("slub_min_objects=", setup_slub_min_objects);
>
> -void *__kmalloc(size_t size, gfp_t flags)
> -{
> - struct kmem_cache *s;
> - void *ret;
> -
> - if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
> - return kmalloc_large(size, flags);
> -
> - s = kmalloc_slab(size, flags);
> -
> - if (unlikely(ZERO_OR_NULL_PTR(s)))
> - return s;
> -
> - ret = slab_alloc(s, NULL, flags, _RET_IP_, size);
> -
> - trace_kmalloc(_RET_IP_, ret, s, size, s->size, flags);
> -
> - ret = kasan_kmalloc(s, ret, size, flags);
> -
> - return ret;
> -}
> -EXPORT_SYMBOL(__kmalloc);
> -
> static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
> {
> struct page *page;
> @@ -4442,7 +4419,8 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
> return kmalloc_large_node_hook(ptr, size, flags);
> }
>
> -void *__kmalloc_node(size_t size, gfp_t flags, int node)
> +static __always_inline
> +void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
> {
> struct kmem_cache *s;
> void *ret;
> @@ -4450,7 +4428,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
> if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
> ret = kmalloc_large_node(size, flags, node);
>
> - trace_kmalloc_node(_RET_IP_, ret, NULL,
> + trace_kmalloc_node(caller, ret, NULL,
> size, PAGE_SIZE << get_order(size),
> flags, node);
>
> @@ -4462,16 +4440,28 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
> if (unlikely(ZERO_OR_NULL_PTR(s)))
> return s;
>
> - ret = slab_alloc_node(s, NULL, flags, node, _RET_IP_, size);
> + ret = slab_alloc_node(s, NULL, flags, node, caller, size);
>
> - trace_kmalloc_node(_RET_IP_, ret, s, size, s->size, flags, node);
> + trace_kmalloc_node(caller, ret, s, size, s->size, flags, node);
>
> ret = kasan_kmalloc(s, ret, size, flags);
>
> return ret;
> }
> +
> +void *__kmalloc_node(size_t size, gfp_t flags, int node)
> +{
> + return __do_kmalloc_node(size, flags, node, _RET_IP_);
> +}
> EXPORT_SYMBOL(__kmalloc_node);
>
> +void *__kmalloc(size_t size, gfp_t flags)
> +{
> + return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
> +}
> +EXPORT_SYMBOL(__kmalloc);
> +
> +
> #ifdef CONFIG_HARDENED_USERCOPY
> /*
> * Rejects incorrectly sized objects and objects that are to be copied
> @@ -4905,32 +4895,9 @@ int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
> }
>
> void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
> - int node, unsigned long caller)
> + int node, unsigned long caller)
> {
> - struct kmem_cache *s;
> - void *ret;
> -
> - if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
> - ret = kmalloc_large_node(size, gfpflags, node);
> -
> - trace_kmalloc_node(caller, ret, NULL,
> - size, PAGE_SIZE << get_order(size),
> - gfpflags, node);
> -
> - return ret;
> - }
> -
> - s = kmalloc_slab(size, gfpflags);
> -
> - if (unlikely(ZERO_OR_NULL_PTR(s)))
> - return s;
> -
> - ret = slab_alloc_node(s, NULL, gfpflags, node, caller, size);
> -
> - /* Honor the call site pointer we received. */
> - trace_kmalloc_node(caller, ret, s, size, s->size, gfpflags, node);
> -
> - return ret;
> + return __do_kmalloc_node(size, gfpflags, node, caller);
> }
> EXPORT_SYMBOL(__kmalloc_node_track_caller);
>
next prev parent reply other threads:[~2022-07-28 14:45 UTC|newest]
Thread overview: 49+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-07-12 13:39 [PATCH v3 00/15] common kmalloc v3 Hyeonggon Yoo
2022-07-12 13:39 ` [PATCH v3 1/15] mm/slab: move NUMA-related code to __do_cache_alloc() Hyeonggon Yoo
2022-07-12 14:29 ` Christoph Lameter
2022-07-13 9:39 ` Hyeonggon Yoo
2022-07-12 13:39 ` [PATCH v3 2/15] mm/slab: cleanup slab_alloc() and slab_alloc_node() Hyeonggon Yoo
2022-07-12 13:39 ` [PATCH v3 03/15] mm/slab_common: remove CONFIG_NUMA ifdefs for common kmalloc functions Hyeonggon Yoo
2022-07-12 13:39 ` [PATCH v3 04/15] mm/slab_common: cleanup kmalloc_track_caller() Hyeonggon Yoo
2022-07-12 13:39 ` [PATCH v3 05/15] mm/sl[au]b: factor out __do_kmalloc_node() Hyeonggon Yoo
2022-07-28 14:45 ` Vlastimil Babka [this message]
2022-07-12 13:39 ` [PATCH v3 06/15] mm/slab_common: fold kmalloc_order_trace() into kmalloc_large() Hyeonggon Yoo
2022-07-28 15:23 ` Vlastimil Babka
2022-08-01 13:26 ` Hyeonggon Yoo
2022-08-01 13:36 ` Vlastimil Babka
2022-08-02 2:54 ` Hyeonggon Yoo
2022-07-12 13:39 ` [PATCH v3 07/15] mm/slub: move kmalloc_large_node() to slab_common.c Hyeonggon Yoo
2022-07-12 13:39 ` [PATCH v3 08/15] mm/slab_common: kmalloc_node: pass large requests to page allocator Hyeonggon Yoo
2022-07-28 16:09 ` Vlastimil Babka
2022-08-01 14:37 ` Hyeonggon Yoo
2022-08-01 14:44 ` Vlastimil Babka
2022-08-02 8:59 ` Hyeonggon Yoo
2022-08-02 9:32 ` Vlastimil Babka
2022-07-12 13:39 ` [PATCH v3 09/15] mm/slab_common: cleanup kmalloc_large() Hyeonggon Yoo
2022-07-28 16:13 ` Vlastimil Babka
2022-07-12 13:39 ` [PATCH v3 10/15] mm/slab: kmalloc: pass requests larger than order-1 page to page allocator Hyeonggon Yoo
2022-07-28 16:25 ` Vlastimil Babka
2022-07-12 13:39 ` [PATCH v3 11/15] mm/sl[au]b: introduce common alloc/free functions without tracepoint Hyeonggon Yoo
2022-07-29 9:49 ` Vlastimil Babka
2022-07-12 13:39 ` [PATCH v3 12/15] mm/sl[au]b: generalize kmalloc subsystem Hyeonggon Yoo
2022-07-29 10:25 ` Vlastimil Babka
2022-07-12 13:39 ` [PATCH v3 13/15] mm/slab_common: unify NUMA and UMA version of tracepoints Hyeonggon Yoo
2022-07-29 10:52 ` Vlastimil Babka
2022-07-12 13:39 ` [PATCH 14/16] mm/slab_common: drop kmem_alloc & avoid dereferencing fields when not using Hyeonggon Yoo
2022-07-29 11:23 ` Vlastimil Babka
2022-08-02 9:22 ` Hyeonggon Yoo
2022-07-12 13:39 ` [PATCH 15/16] mm/slab_common: move definition of __ksize() to mm/slab.h Hyeonggon Yoo
2022-07-29 11:47 ` Vlastimil Babka
2022-08-02 9:25 ` Hyeonggon Yoo
2022-07-12 13:39 ` [PATCH 16/16] mm/sl[au]b: check if large object is valid in __ksize() Hyeonggon Yoo
2022-07-12 15:13 ` Christoph Lameter
2022-07-13 9:25 ` Hyeonggon Yoo
2022-07-13 10:07 ` Christoph Lameter
2022-07-13 10:33 ` Marco Elver
2022-07-14 9:15 ` Christoph Lameter
2022-07-14 10:30 ` Marco Elver
2022-07-20 10:05 ` Hyeonggon Yoo
2022-07-29 11:50 ` Vlastimil Babka
2022-07-29 15:08 ` [PATCH v3 00/15] common kmalloc v3 Vlastimil Babka
2022-08-14 10:06 ` Hyeonggon Yoo
2022-08-15 12:59 ` Vlastimil Babka
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=4fcd01a6-4a52-3389-3cd5-be8a17448892@suse.cz \
--to=vbabka@suse.cz \
--cc=42.hyeyoo@gmail.com \
--cc=akpm@linux-foundation.org \
--cc=cl@linux.com \
--cc=iamjoonsoo.kim@lge.com \
--cc=joe@perches.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=penberg@kernel.org \
--cc=rientjes@google.com \
--cc=roman.gushchin@linux.dev \
--cc=vasily.averin@linux.dev \
--cc=willy@infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox