From: Kees Cook <kees@kernel.org>
To: Vlastimil Babka <vbabka@suse.cz>
Cc: Kees Cook <kees@kernel.org>,
Suren Baghdasaryan <surenb@google.com>,
Kent Overstreet <kent.overstreet@linux.dev>,
Christoph Lameter <cl@linux.com>,
Pekka Enberg <penberg@kernel.org>,
David Rientjes <rientjes@google.com>,
Joonsoo Kim <iamjoonsoo.kim@lge.com>,
Andrew Morton <akpm@linux-foundation.org>,
Roman Gushchin <roman.gushchin@linux.dev>,
Hyeonggon Yoo <42.hyeyoo@gmail.com>,
linux-mm@kvack.org, "GONG, Ruiqi" <gongruiqi@huaweicloud.com>,
Jann Horn <jannh@google.com>,
Matteo Rizzo <matteorizzo@google.com>,
jvoisin <julien.voisin@dustri.org>,
Xiu Jianfeng <xiujianfeng@huawei.com>,
linux-kernel@vger.kernel.org, linux-hardening@vger.kernel.org
Subject: [PATCH 4/5] alloc_tag: Track fixed vs dynamic sized kmalloc calls
Date: Fri, 9 Aug 2024 00:33:05 -0700 [thread overview]
Message-ID: <20240809073309.2134488-4-kees@kernel.org> (raw)
In-Reply-To: <20240809072532.work.266-kees@kernel.org>
For slab allocations, record whether the call site is using a fixed
size (i.e. compile time constant) or a dynamic size. Report the results
in /proc/allocinfo.
Improvements needed:
- examine realloc routines for needed coverage
Signed-off-by: Kees Cook <kees@kernel.org>
---
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: linux-mm@kvack.org
---
include/linux/alloc_tag.h | 30 ++++++++++++++++++++++++++----
include/linux/slab.h | 16 ++++++++--------
lib/alloc_tag.c | 8 ++++++++
mm/Kconfig | 8 ++++++++
4 files changed, 50 insertions(+), 12 deletions(-)
diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
index 8c61ccd161ba..f5d8c5849b82 100644
--- a/include/linux/alloc_tag.h
+++ b/include/linux/alloc_tag.h
@@ -20,6 +20,19 @@ struct alloc_tag_counters {
u64 calls;
};
+#ifdef CONFIG_SLAB_PER_SITE
+struct alloc_meta {
+ /* 0 means non-slab, SIZE_MAX means dynamic, and everything else is fixed-size. */
+ size_t sized;
+};
+#define ALLOC_META_INIT(_size) { \
+ .sized = (__builtin_constant_p(_size) ? (_size) : SIZE_MAX), \
+ }
+#else
+struct alloc_meta { };
+#define ALLOC_META_INIT(_size) { }
+#endif
+
/*
* An instance of this structure is created in a special ELF section at every
* allocation callsite. At runtime, the special section is treated as
@@ -27,6 +40,7 @@ struct alloc_tag_counters {
*/
struct alloc_tag {
struct codetag ct;
+ struct alloc_meta meta;
struct alloc_tag_counters __percpu *counters;
} __aligned(8);
@@ -74,19 +88,21 @@ static inline struct alloc_tag *ct_to_alloc_tag(struct codetag *ct)
*/
DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
-#define DEFINE_ALLOC_TAG(_alloc_tag) \
+#define DEFINE_ALLOC_TAG(_alloc_tag, _meta_init) \
static struct alloc_tag _alloc_tag __used __aligned(8) \
__section("alloc_tags") = { \
.ct = CODE_TAG_INIT, \
+ .meta = _meta_init, \
.counters = &_shared_alloc_tag };
#else /* ARCH_NEEDS_WEAK_PER_CPU */
-#define DEFINE_ALLOC_TAG(_alloc_tag) \
+#define DEFINE_ALLOC_TAG(_alloc_tag, _meta_init) \
static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr); \
static struct alloc_tag _alloc_tag __used __aligned(8) \
__section("alloc_tags") = { \
.ct = CODE_TAG_INIT, \
+ .meta = _meta_init, \
.counters = &_alloc_tag_cntr };
#endif /* ARCH_NEEDS_WEAK_PER_CPU */
@@ -191,7 +207,7 @@ static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes)
#else /* CONFIG_MEM_ALLOC_PROFILING */
-#define DEFINE_ALLOC_TAG(_alloc_tag)
+#define DEFINE_ALLOC_TAG(_alloc_tag, _meta_init)
static inline bool mem_alloc_profiling_enabled(void) { return false; }
static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag,
size_t bytes) {}
@@ -210,8 +226,14 @@ static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
#define alloc_hooks(_do_alloc) \
({ \
- DEFINE_ALLOC_TAG(_alloc_tag); \
+ DEFINE_ALLOC_TAG(_alloc_tag, { }); \
alloc_hooks_tag(&_alloc_tag, _do_alloc); \
})
+#define alloc_sized_hooks(_do_alloc, _size, ...) \
+({ \
+ DEFINE_ALLOC_TAG(_alloc_tag, ALLOC_META_INIT(_size)); \
+ alloc_hooks_tag(&_alloc_tag, _do_alloc(_size, __VA_ARGS__)); \
+})
+
#endif /* _LINUX_ALLOC_TAG_H */
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 86cb61a0102c..314d24c79e05 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -541,7 +541,7 @@ static_assert(PAGE_SHIFT <= 20);
*/
void *kmem_cache_alloc_noprof(struct kmem_cache *cachep,
gfp_t flags) __assume_slab_alignment __malloc;
-#define kmem_cache_alloc(...) alloc_hooks(kmem_cache_alloc_noprof(__VA_ARGS__))
+#define kmem_cache_alloc(...) alloc_hooks(kmem_cache_alloc_noprof(__VA_ARGS__))
void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
gfp_t gfpflags) __assume_slab_alignment __malloc;
@@ -685,7 +685,7 @@ static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t f
}
return __kmalloc_noprof(size, flags);
}
-#define kmalloc(...) alloc_hooks(kmalloc_noprof(__VA_ARGS__))
+#define kmalloc(size, ...) alloc_sized_hooks(kmalloc_noprof, size, __VA_ARGS__)
#define kmem_buckets_alloc(_b, _size, _flags) \
alloc_hooks(__kmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))
@@ -708,7 +708,7 @@ static __always_inline __alloc_size(1) void *kmalloc_node_noprof(size_t size, gf
}
return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node);
}
-#define kmalloc_node(...) alloc_hooks(kmalloc_node_noprof(__VA_ARGS__))
+#define kmalloc_node(size, ...) alloc_sized_hooks(kmalloc_node_noprof, size, __VA_ARGS__)
/**
* kmalloc_array - allocate memory for an array.
@@ -726,7 +726,7 @@ static inline __alloc_size(1, 2) void *kmalloc_array_noprof(size_t n, size_t siz
return kmalloc_noprof(bytes, flags);
 	return __kmalloc_noprof(bytes, flags);
}
-#define kmalloc_array(...) alloc_hooks(kmalloc_array_noprof(__VA_ARGS__))
+#define kmalloc_array(...) alloc_hooks(kmalloc_array_noprof(__VA_ARGS__))
/**
* krealloc_array - reallocate memory for an array.
@@ -761,8 +761,8 @@ void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flag
unsigned long caller) __alloc_size(1);
#define kmalloc_node_track_caller_noprof(size, flags, node, caller) \
__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node, caller)
-#define kmalloc_node_track_caller(...) \
- alloc_hooks(kmalloc_node_track_caller_noprof(__VA_ARGS__, _RET_IP_))
+#define kmalloc_node_track_caller(size, ...) \
+ alloc_sized_hooks(kmalloc_node_track_caller_noprof, size, __VA_ARGS__, _RET_IP_)
/*
* kmalloc_track_caller is a special version of kmalloc that records the
@@ -807,13 +807,13 @@ static inline __alloc_size(1) void *kzalloc_noprof(size_t size, gfp_t flags)
{
return kmalloc_noprof(size, flags | __GFP_ZERO);
}
-#define kzalloc(...) alloc_hooks(kzalloc_noprof(__VA_ARGS__))
+#define kzalloc(size, ...) alloc_sized_hooks(kzalloc_noprof, size, __VA_ARGS__)
#define kzalloc_node(_size, _flags, _node) kmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node) __alloc_size(1);
#define kvmalloc_node_noprof(size, flags, node) \
__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node)
-#define kvmalloc_node(...) alloc_hooks(kvmalloc_node_noprof(__VA_ARGS__))
+#define kvmalloc_node(size, ...) alloc_sized_hooks(kvmalloc_node_noprof, size, __VA_ARGS__)
#define kvmalloc(_size, _flags) kvmalloc_node(_size, _flags, NUMA_NO_NODE)
#define kvmalloc_noprof(_size, _flags) kvmalloc_node_noprof(_size, _flags, NUMA_NO_NODE)
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 81e5f9a70f22..6d2cb72bf269 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -78,6 +78,14 @@ static void alloc_tag_to_text(struct seq_buf *out, struct codetag *ct)
seq_buf_printf(out, "%12lli %8llu ", bytes, counter.calls);
codetag_to_text(out, ct);
+#ifdef CONFIG_SLAB_PER_SITE
+ seq_buf_putc(out, ' ');
+	seq_buf_printf(out, "size:%s(%zu)",
+		       tag->meta.sized == 0 ? "non-slab" :
+		       tag->meta.sized == SIZE_MAX ? "dynamic" : "fixed",
+		       tag->meta.sized == SIZE_MAX ? 0 : tag->meta.sized);
+#endif
seq_buf_putc(out, ' ');
seq_buf_putc(out, '\n');
}
diff --git a/mm/Kconfig b/mm/Kconfig
index b72e7d040f78..855c63c3270d 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -296,6 +296,14 @@ config SLAB_BUCKETS
If unsure, say Y.
+config SLAB_PER_SITE
+ bool "Separate slab allocations by call site"
+ depends on !SLUB_TINY
+ default SLAB_FREELIST_HARDENED
+ select SLAB_BUCKETS
+ help
+ Track sizes of kmalloc() call sites.
+
config SLUB_STATS
default n
bool "Enable performance statistics"
--
2.34.1
next prev parent reply other threads:[~2024-08-09 7:33 UTC|newest]
Thread overview: 17+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-08-09 7:33 [RFC][PATCH 0/5] slab: Allocate and use per-call-site caches Kees Cook
2024-08-09 7:33 ` [PATCH 1/5] slab: Introduce kmem_buckets_destroy() Kees Cook
2024-08-09 7:33 ` [PATCH 2/5] codetag: Run module_load hooks for builtin codetags Kees Cook
2024-08-29 15:02 ` Suren Baghdasaryan
2024-09-11 22:17 ` Kees Cook
2024-08-09 7:33 ` [PATCH 3/5] codetag: Introduce codetag_early_walk() Kees Cook
2024-08-29 15:39 ` Suren Baghdasaryan
2024-09-11 22:18 ` Kees Cook
2024-08-09 7:33 ` Kees Cook [this message]
2024-08-29 16:00 ` [PATCH 4/5] alloc_tag: Track fixed vs dynamic sized kmalloc calls Suren Baghdasaryan
2024-09-11 22:23 ` Kees Cook
2024-08-09 7:33 ` [PATCH 5/5] slab: Allocate and use per-call-site caches Kees Cook
2024-08-17 1:30 ` Xiu Jianfeng
2024-08-22 17:47 ` Kees Cook
2024-08-29 17:03 ` Suren Baghdasaryan
2024-09-11 22:30 ` Kees Cook
2024-09-12 15:58 ` Suren Baghdasaryan
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240809073309.2134488-4-kees@kernel.org \
--to=kees@kernel.org \
--cc=42.hyeyoo@gmail.com \
--cc=akpm@linux-foundation.org \
--cc=cl@linux.com \
--cc=gongruiqi@huaweicloud.com \
--cc=iamjoonsoo.kim@lge.com \
--cc=jannh@google.com \
--cc=julien.voisin@dustri.org \
--cc=kent.overstreet@linux.dev \
--cc=linux-hardening@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=matteorizzo@google.com \
--cc=penberg@kernel.org \
--cc=rientjes@google.com \
--cc=roman.gushchin@linux.dev \
--cc=surenb@google.com \
--cc=vbabka@suse.cz \
--cc=xiujianfeng@huawei.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox