* [PATCH 1/1] alloc_tag: allocate percpu counters for module tags dynamically
From: Suren Baghdasaryan @ 2025-05-17 0:07 UTC
To: akpm
Cc: kent.overstreet, 00107082, dennis, tj, cl, pasha.tatashin,
linux-mm, linux-kernel, surenb
When a module is unloaded, we check whether any of its allocation tags
are still in use and, if so, keep the memory containing the module's
tags alive until all tags become unused. However, the percpu counters
referenced by the tags are freed by free_module(), which leads to a
use-after-free if memory allocated by the module is accessed after the
module has been unloaded. To fix this, allocate the percpu counters for
module allocation tags dynamically and keep them alive for tags that
are still in use after module unload. This also removes the need for a
larger PERCPU_MODULE_RESERVE when memory allocation profiling is
enabled, because percpu memory for the counters no longer has to be
reserved.
Fixes: 0db6f8d7820a ("alloc_tag: load module tags into separate contiguous memory")
Reported-by: David Wang <00107082@163.com>
Closes: https://lore.kernel.org/all/20250516131246.6244-1-00107082@163.com/
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
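For reference, the failing sequence before this patch looks roughly like
this (an illustrative sketch with abbreviated call chains, not literal
code from the tree):

  /* module unload: the tag area is kept alive, its counters are not */
  delete_module()
    free_module(mod)
      release_module_tags(mod, true)  /* tags still in use: keep tag area */
      percpu_modfree(mod)             /* frees the static percpu counters */

  /* later, memory allocated by the module is freed */
  kfree(obj)
    alloc_tag_sub(ref, bytes)
      this_cpu_sub(tag->counters->bytes, bytes)  /* UAF: counters gone */

With dynamically allocated counters, the unload path keeps each
still-referenced tag->counters allocation alive together with the tag
area, so the last step above remains valid.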
include/linux/alloc_tag.h | 12 ++++++
include/linux/codetag.h | 8 ++--
include/linux/percpu.h | 4 --
lib/alloc_tag.c | 87 +++++++++++++++++++++++++++++++--------
lib/codetag.c | 5 ++-
5 files changed, 88 insertions(+), 28 deletions(-)
diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
index a946e0203e6d..8f7931eb7d16 100644
--- a/include/linux/alloc_tag.h
+++ b/include/linux/alloc_tag.h
@@ -104,6 +104,16 @@ DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
#else /* ARCH_NEEDS_WEAK_PER_CPU */
+#ifdef MODULE
+
+#define DEFINE_ALLOC_TAG(_alloc_tag) \
+ static struct alloc_tag _alloc_tag __used __aligned(8) \
+ __section(ALLOC_TAG_SECTION_NAME) = { \
+ .ct = CODE_TAG_INIT, \
+ .counters = NULL };
+
+#else /* MODULE */
+
#define DEFINE_ALLOC_TAG(_alloc_tag) \
static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr); \
static struct alloc_tag _alloc_tag __used __aligned(8) \
@@ -111,6 +121,8 @@ DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
.ct = CODE_TAG_INIT, \
.counters = &_alloc_tag_cntr };
+#endif /* MODULE */
+
#endif /* ARCH_NEEDS_WEAK_PER_CPU */
DECLARE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
diff --git a/include/linux/codetag.h b/include/linux/codetag.h
index d14dbd26b370..0ee4c21c6dbc 100644
--- a/include/linux/codetag.h
+++ b/include/linux/codetag.h
@@ -36,10 +36,10 @@ union codetag_ref {
struct codetag_type_desc {
const char *section;
size_t tag_size;
- void (*module_load)(struct codetag_type *cttype,
- struct codetag_module *cmod);
- void (*module_unload)(struct codetag_type *cttype,
- struct codetag_module *cmod);
+ void (*module_load)(struct module *mod,
+ struct codetag *start, struct codetag *end);
+ void (*module_unload)(struct module *mod,
+ struct codetag *start, struct codetag *end);
#ifdef CONFIG_MODULES
void (*module_replaced)(struct module *mod, struct module *new_mod);
bool (*needs_section_mem)(struct module *mod, unsigned long size);
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 52b5ea663b9f..85bf8dd9f087 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -15,11 +15,7 @@
/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
-#ifdef CONFIG_MEM_ALLOC_PROFILING
-#define PERCPU_MODULE_RESERVE (8 << 13)
-#else
#define PERCPU_MODULE_RESERVE (8 << 10)
-#endif
#else
#define PERCPU_MODULE_RESERVE 0
#endif
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 25ecc1334b67..c7f602fa7b23 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -350,18 +350,28 @@ static bool needs_section_mem(struct module *mod, unsigned long size)
return size >= sizeof(struct alloc_tag);
}
-static struct alloc_tag *find_used_tag(struct alloc_tag *from, struct alloc_tag *to)
+static bool clean_unused_counters(struct alloc_tag *start_tag,
+ struct alloc_tag *end_tag)
{
- while (from <= to) {
+ struct alloc_tag *tag;
+ bool ret = true;
+
+ for (tag = start_tag; tag <= end_tag; tag++) {
struct alloc_tag_counters counter;
- counter = alloc_tag_read(from);
- if (counter.bytes)
- return from;
- from++;
+ if (!tag->counters)
+ continue;
+
+ counter = alloc_tag_read(tag);
+ if (!counter.bytes) {
+ free_percpu(tag->counters);
+ tag->counters = NULL;
+ } else {
+ ret = false;
+ }
}
- return NULL;
+ return ret;
}
/* Called with mod_area_mt locked */
@@ -371,12 +381,16 @@ static void clean_unused_module_areas_locked(void)
struct module *val;
mas_for_each(&mas, val, module_tags.size) {
+ struct alloc_tag *start_tag;
+ struct alloc_tag *end_tag;
+
if (val != &unloaded_mod)
continue;
/* Release area if all tags are unused */
- if (!find_used_tag((struct alloc_tag *)(module_tags.start_addr + mas.index),
- (struct alloc_tag *)(module_tags.start_addr + mas.last)))
+ start_tag = (struct alloc_tag *)(module_tags.start_addr + mas.index);
+ end_tag = (struct alloc_tag *)(module_tags.start_addr + mas.last);
+ if (clean_unused_counters(start_tag, end_tag))
mas_erase(&mas);
}
}
@@ -561,7 +575,8 @@ static void *reserve_module_tags(struct module *mod, unsigned long size,
static void release_module_tags(struct module *mod, bool used)
{
MA_STATE(mas, &mod_area_mt, module_tags.size, module_tags.size);
- struct alloc_tag *tag;
+ struct alloc_tag *start_tag;
+ struct alloc_tag *end_tag;
struct module *val;
mas_lock(&mas);
@@ -575,15 +590,22 @@ static void release_module_tags(struct module *mod, bool used)
if (!used)
goto release_area;
- /* Find out if the area is used */
- tag = find_used_tag((struct alloc_tag *)(module_tags.start_addr + mas.index),
- (struct alloc_tag *)(module_tags.start_addr + mas.last));
- if (tag) {
- struct alloc_tag_counters counter = alloc_tag_read(tag);
+ start_tag = (struct alloc_tag *)(module_tags.start_addr + mas.index);
+ end_tag = (struct alloc_tag *)(module_tags.start_addr + mas.last);
+ if (!clean_unused_counters(start_tag, end_tag)) {
+ struct alloc_tag *tag;
+
+ for (tag = start_tag; tag <= end_tag; tag++) {
+ struct alloc_tag_counters counter;
+
+ if (!tag->counters)
+ continue;
- pr_info("%s:%u module %s func:%s has %llu allocated at module unload\n",
- tag->ct.filename, tag->ct.lineno, tag->ct.modname,
- tag->ct.function, counter.bytes);
+ counter = alloc_tag_read(tag);
+ pr_info("%s:%u module %s func:%s has %llu allocated at module unload\n",
+ tag->ct.filename, tag->ct.lineno, tag->ct.modname,
+ tag->ct.function, counter.bytes);
+ }
} else {
used = false;
}
@@ -596,6 +618,34 @@ static void release_module_tags(struct module *mod, bool used)
mas_unlock(&mas);
}
+static void load_module(struct module *mod, struct codetag *start, struct codetag *stop)
+{
+ /* Allocate module alloc_tag percpu counters */
+ struct alloc_tag *start_tag;
+ struct alloc_tag *stop_tag;
+ struct alloc_tag *tag;
+
+ if (!mod)
+ return;
+
+ start_tag = ct_to_alloc_tag(start);
+ stop_tag = ct_to_alloc_tag(stop);
+ for (tag = start_tag; tag < stop_tag; tag++) {
+ WARN_ON(tag->counters);
+ tag->counters = alloc_percpu(struct alloc_tag_counters);
+ if (!tag->counters) {
+ while (--tag >= start_tag) {
+ free_percpu(tag->counters);
+ tag->counters = NULL;
+ }
+ shutdown_mem_profiling(true);
+ pr_err("Failed to allocate memory for allocation tag percpu counters in the module %s. Memory allocation profiling is disabled!\n",
+ mod->name);
+ break;
+ }
+ }
+}
+
static void replace_module(struct module *mod, struct module *new_mod)
{
MA_STATE(mas, &mod_area_mt, 0, module_tags.size);
@@ -757,6 +807,7 @@ static int __init alloc_tag_init(void)
.needs_section_mem = needs_section_mem,
.alloc_section_mem = reserve_module_tags,
.free_section_mem = release_module_tags,
+ .module_load = load_module,
.module_replaced = replace_module,
#endif
};
diff --git a/lib/codetag.c b/lib/codetag.c
index 42aadd6c1454..de332e98d6f5 100644
--- a/lib/codetag.c
+++ b/lib/codetag.c
@@ -194,7 +194,7 @@ static int codetag_module_init(struct codetag_type *cttype, struct module *mod)
if (err >= 0) {
cttype->count += range_size(cttype, &range);
if (cttype->desc.module_load)
- cttype->desc.module_load(cttype, cmod);
+ cttype->desc.module_load(mod, range.start, range.stop);
}
up_write(&cttype->mod_lock);
@@ -333,7 +333,8 @@ void codetag_unload_module(struct module *mod)
}
if (found) {
if (cttype->desc.module_unload)
- cttype->desc.module_unload(cttype, cmod);
+ cttype->desc.module_unload(cmod->mod,
+ cmod->range.start, cmod->range.stop);
cttype->count -= range_size(cttype, &cmod->range);
idr_remove(&cttype->mod_idr, mod_id);
base-commit: 6e68a205c07b3c311f19a4c9c5de95d191d5a459
--
2.49.0.1101.gccaa498523-goog
* Re: [PATCH 1/1] alloc_tag: allocate percpu counters for module tags dynamically
From: David Wang @ 2025-05-17 6:09 UTC
To: Suren Baghdasaryan
Cc: akpm, kent.overstreet, dennis, tj, cl, pasha.tatashin, linux-mm,
linux-kernel
At 2025-05-17 08:07:39, "Suren Baghdasaryan" <surenb@google.com> wrote:
>When a module is unloaded, we check whether any of its allocation tags
>are still in use and, if so, keep the memory containing the module's
>tags alive until all tags become unused. [...]
This patch fixes the page fault error.
Tested-by: David Wang <00107082@163.com>
My tests:
1. enter recovery mode
2. install nvidia driver 570.144; it fails with "Unknown symbol drm_client_setup"
3. modprobe drm_client_lib
4. install nvidia driver 570.144
5. install nvidia driver 550.144.03
6. reboot and repeat from step 1
I ran 10 rounds of the above test and detected nothing abnormal with this patch applied.
Without the patch, a page fault error would occur at step 4 with very high probability.
Thanks
David
* Re: [PATCH 1/1] alloc_tag: allocate percpu counters for module tags dynamically
From: Andrew Morton @ 2025-05-19 22:51 UTC
To: Suren Baghdasaryan
Cc: kent.overstreet, 00107082, dennis, tj, cl, pasha.tatashin,
linux-mm, linux-kernel
On Fri, 16 May 2025 17:07:39 -0700 Suren Baghdasaryan <surenb@google.com> wrote:
> When a module is unloaded, we check whether any of its allocation tags
> are still in use and, if so, keep the memory containing the module's
> tags alive until all tags become unused. However, the percpu counters
> referenced by the tags are freed by free_module(), which leads to a
> use-after-free if memory allocated by the module is accessed after the
> module has been unloaded. To fix this, allocate the percpu counters for
> module allocation tags dynamically and keep them alive for tags that
> are still in use after module unload. This also removes the need for a
> larger PERCPU_MODULE_RESERVE when memory allocation profiling is
> enabled, because percpu memory for the counters no longer has to be
> reserved.
>
> Fixes: 0db6f8d7820a ("alloc_tag: load module tags into separate contiguous memory")
> Reported-by: David Wang <00107082@163.com>
> Closes: https://lore.kernel.org/all/20250516131246.6244-1-00107082@163.com/
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> ---
> include/linux/alloc_tag.h | 12 ++++++
> include/linux/codetag.h | 8 ++--
> include/linux/percpu.h | 4 --
> lib/alloc_tag.c | 87 +++++++++++++++++++++++++++++++--------
> lib/codetag.c | 5 ++-
> 5 files changed, 88 insertions(+), 28 deletions(-)
Should we backport this fix into -stable kernels? I'm thinking yes.
* Re: [PATCH 1/1] alloc_tag: allocate percpu counters for module tags dynamically
From: Suren Baghdasaryan @ 2025-05-19 23:13 UTC
To: Andrew Morton
Cc: kent.overstreet, 00107082, dennis, tj, cl, pasha.tatashin,
linux-mm, linux-kernel
On Mon, May 19, 2025 at 3:51 PM Andrew Morton <akpm@linux-foundation.org> wrote:
>
> On Fri, 16 May 2025 17:07:39 -0700 Suren Baghdasaryan <surenb@google.com> wrote:
>
> > When a module is unloaded, we check whether any of its allocation tags
> > are still in use and, if so, keep the memory containing the module's
> > tags alive until all tags become unused. However, the percpu counters
> > referenced by the tags are freed by free_module(), which leads to a
> > use-after-free if memory allocated by the module is accessed after the
> > module has been unloaded. To fix this, allocate the percpu counters for
> > module allocation tags dynamically and keep them alive for tags that
> > are still in use after module unload. This also removes the need for a
> > larger PERCPU_MODULE_RESERVE when memory allocation profiling is
> > enabled, because percpu memory for the counters no longer has to be
> > reserved.
> >
> > Fixes: 0db6f8d7820a ("alloc_tag: load module tags into separate contiguous memory")
> > Reported-by: David Wang <00107082@163.com>
> > Closes: https://lore.kernel.org/all/20250516131246.6244-1-00107082@163.com/
> > Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> > ---
> > include/linux/alloc_tag.h | 12 ++++++
> > include/linux/codetag.h | 8 ++--
> > include/linux/percpu.h | 4 --
> > lib/alloc_tag.c | 87 +++++++++++++++++++++++++++++++--------
> > lib/codetag.c | 5 ++-
> > 5 files changed, 88 insertions(+), 28 deletions(-)
>
> Should we backport this fix into -stable kernels? I'm thinking yes.
Yes, I should have CC'ed stable. The patch this one is fixing was
first introduced in 6.13. I just tried and it applies cleanly to
stable linux-6.13.y and linux-6.14.y.
Should I forward this email to stable or send a separate patch to them?
* Re: [PATCH 1/1] alloc_tag: allocate percpu counters for module tags dynamically
From: Andrew Morton @ 2025-05-20 0:21 UTC
To: Suren Baghdasaryan
Cc: kent.overstreet, 00107082, dennis, tj, cl, pasha.tatashin,
linux-mm, linux-kernel
On Mon, 19 May 2025 16:13:28 -0700 Suren Baghdasaryan <surenb@google.com> wrote:
> > > Fixes: 0db6f8d7820a ("alloc_tag: load module tags into separate contiguous memory")
> > > Reported-by: David Wang <00107082@163.com>
> > > Closes: https://lore.kernel.org/all/20250516131246.6244-1-00107082@163.com/
> > > Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> > > ---
> > > include/linux/alloc_tag.h | 12 ++++++
> > > include/linux/codetag.h | 8 ++--
> > > include/linux/percpu.h | 4 --
> > > lib/alloc_tag.c | 87 +++++++++++++++++++++++++++++++--------
> > > lib/codetag.c | 5 ++-
> > > 5 files changed, 88 insertions(+), 28 deletions(-)
> >
> > Should we backport this fix into -stable kernels? I'm thinking yes.
>
> Yes, I should have CC'ed stable. The patch this one is fixing was
> first introduced in 6.13. I just tried and it applies cleanly to
> stable linux-6.13.y and linux-6.14.y.
> Should I forward this email to stable or send a separate patch to them?
I added cc:stable to the mm.git copy so all is OK. That's the usual
workflow.
* Re: [PATCH 1/1] alloc_tag: allocate percpu counters for module tags dynamically
From: Suren Baghdasaryan @ 2025-05-20 3:19 UTC
To: Andrew Morton
Cc: kent.overstreet, 00107082, dennis, tj, cl, pasha.tatashin,
linux-mm, linux-kernel
On Mon, May 19, 2025 at 5:21 PM Andrew Morton <akpm@linux-foundation.org> wrote:
>
> On Mon, 19 May 2025 16:13:28 -0700 Suren Baghdasaryan <surenb@google.com> wrote:
>
> > > > Fixes: 0db6f8d7820a ("alloc_tag: load module tags into separate contiguous memory")
> > > > Reported-by: David Wang <00107082@163.com>
> > > > Closes: https://lore.kernel.org/all/20250516131246.6244-1-00107082@163.com/
> > > > Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> > > > ---
> > > > include/linux/alloc_tag.h | 12 ++++++
> > > > include/linux/codetag.h | 8 ++--
> > > > include/linux/percpu.h | 4 --
> > > > lib/alloc_tag.c | 87 +++++++++++++++++++++++++++++++--------
> > > > lib/codetag.c | 5 ++-
> > > > 5 files changed, 88 insertions(+), 28 deletions(-)
> > >
> > > Should we backport this fix into -stable kernels? I'm thinking yes.
> >
> > Yes, I should have CC'ed stable. The patch this one is fixing was
> > first introduced in 6.13. I just tried and it applies cleanly to
> > stable linux-6.13.y and linux-6.14.y.
> > Should I forward this email to stable or send a separate patch to them?
>
> I added cc:stable to the mm.git copy so all is OK. That's the usual
> workflow.
Perfect. Thanks!
* comments on patch "alloc_tag: allocate percpu counters for module tags dynamically"
From: Casey Chen @ 2025-05-20 23:16 UTC
To: surenb
Cc: 00107082, akpm, cl, dennis, kent.overstreet, linux-kernel,
linux-mm, pasha.tatashin, tj, yzhong
Hi Suren,
I have two questions on this patch.
1. If load_module() fails to allocate memory for percpu counters, should we call codetag_free_module_sections() to clean up module tags memory ?
2. How about moving percpu counters allocation to move_module() where codetag_alloc_module_section() is called ? So they can be cleaned up together.
Thanks,
Casey
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: comments on patch "alloc_tag: allocate percpu counters for module tags dynamically"
2025-05-20 23:16 ` comments on patch "alloc_tag: allocate percpu counters for module tags dynamically" Casey Chen
@ 2025-05-20 23:26 ` Suren Baghdasaryan
2025-05-20 23:48 ` Casey Chen
0 siblings, 1 reply; 12+ messages in thread
From: Suren Baghdasaryan @ 2025-05-20 23:26 UTC (permalink / raw)
To: Casey Chen
Cc: 00107082, akpm, cl, dennis, kent.overstreet, linux-kernel,
linux-mm, pasha.tatashin, tj, yzhong
On Tue, May 20, 2025 at 4:16 PM Casey Chen <cachen@purestorage.com> wrote:
>
> Hi Suren,
Hi Casey,
>
> I have two questions on this patch.
> 1. If load_module() fails to allocate memory for percpu counters, should we call codetag_free_module_sections() to clean up module tags memory ?
Does this address your question:
https://lore.kernel.org/all/20250518101212.19930-1-00107082@163.com/
> 2. How about moving percpu counters allocation to move_module() where codetag_alloc_module_section() is called ? So they can be cleaned up together.
That would not work because tag->counters are initialized with NULL
after move_module() executes, so if we allocate there our allocations
will be overridden. We have to do that at the end of load_module()
where codetag_load_module() is.
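The ordering, as a rough sketch (call chain abbreviated):

  load_module()
    layout_and_allocate()
      move_module()
        codetag_alloc_module_section()    /* reserves dest for the tag section */
        memcpy(dest, section_data, size)  /* copies .counters = NULL into dest */
    ...
    codetag_load_module(mod)              /* section data is final here, so it
                                             is safe to allocate tag->counters */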
Thanks,
Suren.
>
> Thanks,
> Casey
* Re: comments on patch "alloc_tag: allocate percpu counters for module tags dynamically"
From: Casey Chen @ 2025-05-20 23:48 UTC
To: Suren Baghdasaryan
Cc: 00107082, akpm, cl, dennis, kent.overstreet, linux-kernel,
linux-mm, pasha.tatashin, tj, yzhong
On Tue, May 20, 2025 at 4:26 PM Suren Baghdasaryan <surenb@google.com> wrote:
>
> On Tue, May 20, 2025 at 4:16 PM Casey Chen <cachen@purestorage.com> wrote:
> >
> > Hi Suren,
>
> Hi Casey,
>
> >
> > I have two questions on this patch.
> > 1. If load_module() fails to allocate memory for percpu counters, should we call codetag_free_module_sections() to clean up module tags memory ?
>
> Does this address your question:
> https://lore.kernel.org/all/20250518101212.19930-1-00107082@163.com/
>
module_deallocate() is called in error handling of load_module(). And
codetag_load_module() is at the very end of load_module(). If counter
allocation fails, it doesn't go to the error path to clean up module
tag memory.
My code base is at a5806cd506af ("Linux 6.15-rc7")
3250 /*
3251 * Allocate and load the module: note that size of section 0 is always
3252 * zero, and we rely on this for optional sections.
3253 */
3254 static int load_module(struct load_info *info, const char __user *uargs,
3255 int flags)
3256 {
...
3403
3404 codetag_load_module(mod);
3405
3406 /* Done! */
3407 trace_module_load(mod);
3408
3409 return do_init_module(mod);
...
3445 free_module:
3446 mod_stat_bump_invalid(info, flags);
3447 /* Free lock-classes; relies on the preceding sync_rcu() */
3448 for_class_mod_mem_type(type, core_data) {
3449 lockdep_free_key_range(mod->mem[type].base,
3450 mod->mem[type].size);
3451 }
3452
3453 module_memory_restore_rox(mod);
3454 module_deallocate(mod, info);
> > 2. How about moving percpu counters allocation to move_module() where codetag_alloc_module_section() is called ? So they can be cleaned up together.
>
> That would not work because tag->counters are initialized with NULL
> after move_module() executes, so if we allocate there our allocations
> will be overridden. We have to do that at the end of load_module()
> where codetag_load_module() is.
codetag_alloc_module_section() is called in move_module() to allocate
module tag memory. I mean we can also allocate memory for percpu
counters inside move_module().
We have implemented such a thing in our code base and it works fine.
Just do it right after copying ELF sections to memory. If it fails it
goes to the error path and calls codetag_free_module_sections() to
clean up.
2650         if (codetag_needs_module_section(mod, sname, shdr->sh_size)) {
2651                 dest = codetag_alloc_module_section(mod, sname, shdr->sh_size,
2652                         arch_mod_section_prepend(mod, i), shdr->sh_addralign);
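In our tree that is followed, roughly, by something like this (a sketch;
codetag_alloc_module_counters() is a hypothetical helper, not an existing
kernel function):

        memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
        /* section data (with .counters = NULL) is now in place; allocate
         * the percpu counters here so a failure takes move_module()'s
         * error path, which ends up in codetag_free_module_sections() */
        if (codetag_alloc_module_counters(mod, dest, shdr->sh_size))
                goto out_err;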
> Thanks,
> Suren.
>
> >
> > Thanks,
> > Casey
* Re: comments on patch "alloc_tag: allocate percpu counters for module tags dynamically"
From: Suren Baghdasaryan @ 2025-05-21 0:45 UTC
To: Casey Chen
Cc: 00107082, akpm, cl, dennis, kent.overstreet, linux-kernel,
linux-mm, pasha.tatashin, tj, yzhong
On Tue, May 20, 2025 at 11:48 PM Casey Chen <cachen@purestorage.com> wrote:
>
> On Tue, May 20, 2025 at 4:26 PM Suren Baghdasaryan <surenb@google.com> wrote:
> >
> > On Tue, May 20, 2025 at 4:16 PM Casey Chen <cachen@purestorage.com> wrote:
> > >
> > > Hi Suren,
> >
> > Hi Casey,
> >
> > >
> > > I have two questions on this patch.
> > > 1. If load_module() fails to allocate memory for percpu counters, should we call codetag_free_module_sections() to clean up module tags memory ?
> >
> > Does this address your question:
> > https://lore.kernel.org/all/20250518101212.19930-1-00107082@163.com/
> >
>
> module_deallocate() is called in error handling of load_module(). And
> codetag_load_module() is at the very end of load_module(). If counter
> allocation fails, it doesn't go to the error path to clean up module
> tag memory.
Ah, right. I didn't have the code in front of me, but now I see what
you mean: codetag_load_module() does not return an error if the percpu
counters fail to allocate.
>
> My code base is at a5806cd506af ("Linux 6.15-rc7")
> 3250 /*
> 3251 * Allocate and load the module: note that size of section 0 is always
> 3252 * zero, and we rely on this for optional sections.
> 3253 */
> 3254 static int load_module(struct load_info *info, const char __user *uargs,
> 3255 int flags)
> 3256 {
> ...
> 3403
> 3404 codetag_load_module(mod);
> 3405
> 3406 /* Done! */
> 3407 trace_module_load(mod);
> 3408
> 3409 return do_init_module(mod);
> ...
> 3445 free_module:
> 3446 mod_stat_bump_invalid(info, flags);
> 3447 /* Free lock-classes; relies on the preceding sync_rcu() */
> 3448 for_class_mod_mem_type(type, core_data) {
> 3449 lockdep_free_key_range(mod->mem[type].base,
> 3450 mod->mem[type].size);
> 3451 }
> 3452
> 3453 module_memory_restore_rox(mod);
> 3454 module_deallocate(mod, info);
>
>
> > > 2. How about moving percpu counters allocation to move_module() where codetag_alloc_module_section() is called ? So they can be cleaned up together.
> >
> > That would not work because tag->counters are initialized with NULL
> > after move_module() executes, so if we allocate there our allocations
> > will be overridden. We have to do that at the end of load_module()
> > where codetag_load_module() is.
>
> codetag_alloc_module_section() is called in move_module() to allocate
> module tag memory. I mean we can also allocate memory for percpu
> counters inside move_module().
I thought you were suggesting allocating the percpu counters inside
codetag_alloc_module_section(). I guess we could introduce another
callback to allocate these counters at the end of move_module(). I
think the simpler option is to let codetag_load_module() fail and
handle that failure; OTOH, that means we do more work before
failing... Let me think some more about which way is preferable and
I'll post a fixup to my earlier patch.
> We have implemented such a thing in our code base and it works fine.
> Just do it right after copying ELF sections to memory. If it fails it
> goes to the error path and calls codetag_free_module_sections() to
> clean up.
>
> 2650         if (codetag_needs_module_section(mod, sname, shdr->sh_size)) {
> 2651                 dest = codetag_alloc_module_section(mod, sname, shdr->sh_size,
> 2652                         arch_mod_section_prepend(mod, i), shdr->sh_addralign);
>
> > Thanks,
> > Suren.
> >
> > >
> > > Thanks,
> > > Casey
* Re: comments on patch "alloc_tag: allocate percpu counters for module tags dynamically"
From: Suren Baghdasaryan @ 2025-05-21 1:22 UTC
To: Casey Chen
Cc: 00107082, akpm, cl, dennis, kent.overstreet, linux-kernel,
linux-mm, pasha.tatashin, tj, yzhong
On Wed, May 21, 2025 at 12:45 AM Suren Baghdasaryan <surenb@google.com> wrote:
>
> On Tue, May 20, 2025 at 11:48 PM Casey Chen <cachen@purestorage.com> wrote:
> >
> > On Tue, May 20, 2025 at 4:26 PM Suren Baghdasaryan <surenb@google.com> wrote:
> > >
> > > On Tue, May 20, 2025 at 4:16 PM Casey Chen <cachen@purestorage.com> wrote:
> > > >
> > > > Hi Suren,
> > >
> > > Hi Casey,
> > >
> > > >
> > > > I have two questions on this patch.
> > > > 1. If load_module() fails to allocate memory for percpu counters, should we call codetag_free_module_sections() to clean up module tags memory ?
> > >
> > > Does this address your question:
> > > https://lore.kernel.org/all/20250518101212.19930-1-00107082@163.com/
> > >
> >
> > module_deallocate() is called in error handling of load_module(). And
> > codetag_load_module() is at the very end of load_module(). If counter
> > allocation fails, it doesn't go to the error path to clean up module
> > tag memory.
>
> Ah, right. I didn't have the code in front of me, but now I see what
> you mean: codetag_load_module() does not return an error if the percpu
> counters fail to allocate.
>
> >
> > My code base is at a5806cd506af ("Linux 6.15-rc7")
> > 3250 /*
> > 3251 * Allocate and load the module: note that size of section 0 is always
> > 3252 * zero, and we rely on this for optional sections.
> > 3253 */
> > 3254 static int load_module(struct load_info *info, const char __user *uargs,
> > 3255 int flags)
> > 3256 {
> > ...
> > 3403
> > 3404 codetag_load_module(mod);
> > 3405
> > 3406 /* Done! */
> > 3407 trace_module_load(mod);
> > 3408
> > 3409 return do_init_module(mod);
> > ...
> > 3445 free_module:
> > 3446 mod_stat_bump_invalid(info, flags);
> > 3447 /* Free lock-classes; relies on the preceding sync_rcu() */
> > 3448 for_class_mod_mem_type(type, core_data) {
> > 3449 lockdep_free_key_range(mod->mem[type].base,
> > 3450 mod->mem[type].size);
> > 3451 }
> > 3452
> > 3453 module_memory_restore_rox(mod);
> > 3454 module_deallocate(mod, info);
> >
> >
> > > > 2. How about moving percpu counters allocation to move_module() where codetag_alloc_module_section() is called ? So they can be cleaned up together.
> > >
> > > That would not work because tag->counters are initialized with NULL
> > > after move_module() executes, so if we allocate there our allocations
> > > will be overridden. We have to do that at the end of load_module()
> > > where codetag_load_module() is.
> >
> > codetag_alloc_module_section() is called in move_module() to allocate
> > module tag memory. I mean we can also allocate memory for percpu
> > counters inside move_module().
>
> I thought you were suggesting allocating the percpu counters inside
> codetag_alloc_module_section(). I guess we could introduce another
> callback to allocate these counters at the end of move_module(). I
> think the simpler option is to let codetag_load_module() fail and
> handle that failure; OTOH, that means we do more work before
> failing... Let me think some more about which way is preferable and
> I'll post a fixup to my earlier patch.
After inspecting the code some more, I'm leaning towards the simpler
solution of letting codetag_load_module() return a failure and handling
it with codetag_free_module_sections(). This would avoid introducing an
additional hook inside move_module(). I'll wait until tomorrow and, if
no objections are received, will post a fixup to my previous patch.
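Roughly, the direction would be (a sketch, not the final fixup; the
unwind label is illustrative):

  /* codetag_load_module() changed to return int */
  ret = codetag_load_module(mod);
  if (ret)
          goto sysfs_cleanup;  /* unwind path ends up calling
                                  codetag_free_module_sections() */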
>
> > We have implemented such a thing in our code base and it works fine.
> > Just do it right after copying ELF sections to memory. If it fails it
> > goes to the error path and calls codetag_free_module_sections() to
> > clean up.
> >
> > 2650         if (codetag_needs_module_section(mod, sname, shdr->sh_size)) {
> > 2651                 dest = codetag_alloc_module_section(mod, sname, shdr->sh_size,
> > 2652                         arch_mod_section_prepend(mod, i), shdr->sh_addralign);
> >
> > > Thanks,
> > > Suren.
> > >
> > > >
> > > > Thanks,
> > > > Casey
* Re: comments on patch "alloc_tag: allocate percpu counters for module tags dynamically"
From: Suren Baghdasaryan @ 2025-05-21 16:16 UTC
To: Casey Chen
Cc: 00107082, akpm, cl, dennis, kent.overstreet, linux-kernel,
linux-mm, pasha.tatashin, tj, yzhong
On Tue, May 20, 2025 at 6:22 PM Suren Baghdasaryan <surenb@google.com> wrote:
>
> On Wed, May 21, 2025 at 12:45 AM Suren Baghdasaryan <surenb@google.com> wrote:
> >
> > On Tue, May 20, 2025 at 11:48 PM Casey Chen <cachen@purestorage.com> wrote:
> > >
> > > On Tue, May 20, 2025 at 4:26 PM Suren Baghdasaryan <surenb@google.com> wrote:
> > > >
> > > > On Tue, May 20, 2025 at 4:16 PM Casey Chen <cachen@purestorage.com> wrote:
> > > > >
> > > > > Hi Suren,
> > > >
> > > > Hi Casey,
> > > >
> > > > >
> > > > > I have two questions on this patch.
> > > > > 1. If load_module() fails to allocate memory for percpu counters, should we call codetag_free_module_sections() to clean up module tags memory ?
> > > >
> > > > Does this address your question:
> > > > https://lore.kernel.org/all/20250518101212.19930-1-00107082@163.com/
> > > >
> > >
> > > module_deallocate() is called in error handling of load_module(). And
> > > codetag_load_module() is at the very end of load_module(). If counter
> > > allocation fails, it doesn't go to the error path to clean up module
> > > tag memory.
> >
> > Ah, right. I didn't have the code in front of me, but now I see what
> > you mean: codetag_load_module() does not return an error if the percpu
> > counters fail to allocate.
> >
> > >
> > > My code base is at a5806cd506af ("Linux 6.15-rc7")
> > > 3250 /*
> > > 3251 * Allocate and load the module: note that size of section 0 is always
> > > 3252 * zero, and we rely on this for optional sections.
> > > 3253 */
> > > 3254 static int load_module(struct load_info *info, const char __user *uargs,
> > > 3255 int flags)
> > > 3256 {
> > > ...
> > > 3403
> > > 3404 codetag_load_module(mod);
> > > 3405
> > > 3406 /* Done! */
> > > 3407 trace_module_load(mod);
> > > 3408
> > > 3409 return do_init_module(mod);
> > > ...
> > > 3445 free_module:
> > > 3446 mod_stat_bump_invalid(info, flags);
> > > 3447 /* Free lock-classes; relies on the preceding sync_rcu() */
> > > 3448 for_class_mod_mem_type(type, core_data) {
> > > 3449 lockdep_free_key_range(mod->mem[type].base,
> > > 3450 mod->mem[type].size);
> > > 3451 }
> > > 3452
> > > 3453 module_memory_restore_rox(mod);
> > > 3454 module_deallocate(mod, info);
> > >
> > >
> > > > > 2. How about moving percpu counters allocation to move_module() where codetag_alloc_module_section() is called ? So they can be cleaned up together.
> > > >
> > > > That would not work because tag->counters are initialized with NULL
> > > > after move_module() executes, so if we allocate there our allocations
> > > > will be overridden. We have to do that at the end of load_module()
> > > > where codetag_load_module() is.
> > >
> > > codetag_alloc_module_section() is called in move_module() to allocate
> > > module tag memory. I mean we can also allocate memory for percpu
> > > counters inside move_module().
> >
> > I thought you were suggesting allocating the percpu counters inside
> > codetag_alloc_module_section(). I guess we could introduce another
> > callback to allocate these counters at the end of move_module(). I
> > think the simpler option is to let codetag_load_module() fail and
> > handle that failure; OTOH, that means we do more work before
> > failing... Let me think some more about which way is preferable and
> > I'll post a fixup to my earlier patch.
>
> After inspecting the code some more, I'm leaning towards the simpler
> solution of letting codetag_load_module() return a failure and handling
> it with codetag_free_module_sections(). This would avoid introducing an
> additional hook inside move_module(). I'll wait until tomorrow and, if
> no objections are received, will post a fixup to my previous patch.
Posted the fix at
https://lore.kernel.org/all/20250521160602.1940771-1-surenb@google.com/
>
> >
> > > We have implemented such a thing in our code base and it works fine.
> > > Just do it right after copying ELF sections to memory. If it fails it
> > > goes to the error path and calls codetag_free_module_sections() to
> > > clean up.
> > >
> > > 2650         if (codetag_needs_module_section(mod, sname, shdr->sh_size)) {
> > > 2651                 dest = codetag_alloc_module_section(mod, sname, shdr->sh_size,
> > > 2652                         arch_mod_section_prepend(mod, i), shdr->sh_addralign);
> > >
> > > > Thanks,
> > > > Suren.
> > > >
> > > > >
> > > > > Thanks,
> > > > > Casey