* [PATCH v2 1/1] alloc_tag: avoid execmem_vmap() when !MMU
From: Suren Baghdasaryan @ 2024-10-31 23:36 UTC
  To: akpm
  Cc: arnd, arnd, rppt, pasha.tatashin, mcgrof, song, mhiramat,
	linux-mm, linux-kernel, surenb, kernel test robot

With CONFIG_MMU=n, __get_vm_area_node() is not available. Add a CONFIG_MMU
dependency for memory allocation tagging, since it uses __get_vm_area_node()
via execmem_vmap().

Fixes: 57bc3834fb6f ("alloc_tag: populate memory for module tags as needed")
Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202410250808.dQGyYjlk-lkp@intel.com/
Closes: https://lore.kernel.org/oe-lkp/202410251525.9f85854d-oliver.sang@intel.com
Closes: https://lore.kernel.org/oe-kbuild-all/202410261016.IO7C6Cml-lkp@intel.com/
Closes: https://lore.kernel.org/oe-kbuild-all/202410270919.LebQlmxD-lkp@intel.com/
Suggested-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Cc: Arnd Bergmann <arnd@arndb.de>
---
Replaces the old version with the same name in mm-unstable (current SHA 88e136f0950d).

Changes since v1 [1]:
- drop support for (CONFIG_MEM_ALLOC_PROFILING && !CONFIG_MMU) to simplify the
change, per Mike Rapoport

[1] https://lore.kernel.org/all/20241028202935.1047017-1-surenb@google.com/
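
Not part of the patch, just an illustrative sketch of the caller side this
dependency comes from. The function and variable names below are assumptions
for illustration, not the literal lib/alloc_tag.c code; the point is that
memory allocation profiling reserves its module-tag area through
execmem_vmap(), which lands in __get_vm_area_node() and therefore only
exists with CONFIG_MMU=y:

/*
 * Illustrative sketch only (assumed names), loosely modeled on the
 * module-tag reservation added by 57bc3834fb6f.
 */
#include <linux/errno.h>
#include <linux/execmem.h>
#include <linux/vmalloc.h>

static struct vm_struct *vm_module_tags;

static int reserve_module_tag_space(size_t size)
{
	/*
	 * Reserve EXECMEM_MODULE_DATA address space up front; pages are
	 * populated later, as needed.  Internally this goes through
	 * __get_vm_area_node(), which is only built with CONFIG_MMU=y,
	 * hence the new "depends on MMU" for MEM_ALLOC_PROFILING.
	 */
	vm_module_tags = execmem_vmap(size);
	if (!vm_module_tags)
		return -ENOMEM;

	return 0;
}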

 include/linux/execmem.h |  2 ++
 lib/Kconfig.debug       |  1 +
 mm/execmem.c            | 32 ++++++++++++++++----------------
 3 files changed, 19 insertions(+), 16 deletions(-)

diff --git a/include/linux/execmem.h b/include/linux/execmem.h
index 5a5e2917f870..64130ae19690 100644
--- a/include/linux/execmem.h
+++ b/include/linux/execmem.h
@@ -139,6 +139,7 @@ void *execmem_alloc(enum execmem_type type, size_t size);
  */
 void execmem_free(void *ptr);
 
+#ifdef CONFIG_MMU
 /**
  * execmem_vmap - create virtual mapping for EXECMEM_MODULE_DATA memory
  * @size: size of the virtual mapping in bytes
@@ -148,6 +149,7 @@ void execmem_free(void *ptr);
  * Return: the area descriptor on success or %NULL on failure.
  */
 struct vm_struct *execmem_vmap(size_t size);
+#endif
 
 /**
  * execmem_update_copy - copy an update to executable memory
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 7312ae7c3cc5..6798bbbcbd32 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -993,6 +993,7 @@ config CODE_TAGGING
 config MEM_ALLOC_PROFILING
 	bool "Enable memory allocation profiling"
 	default n
+	depends on MMU
 	depends on PROC_FS
 	depends on !DEBUG_FORCE_WEAK_PER_CPU
 	select CODE_TAGGING
diff --git a/mm/execmem.c b/mm/execmem.c
index 5c0f9f2d6f83..317b6a8d35be 100644
--- a/mm/execmem.c
+++ b/mm/execmem.c
@@ -64,6 +64,22 @@ static void *execmem_vmalloc(struct execmem_range *range, size_t size,
 
 	return p;
 }
+
+struct vm_struct *execmem_vmap(size_t size)
+{
+	struct execmem_range *range = &execmem_info->ranges[EXECMEM_MODULE_DATA];
+	struct vm_struct *area;
+
+	area = __get_vm_area_node(size, range->alignment, PAGE_SHIFT, VM_ALLOC,
+				  range->start, range->end, NUMA_NO_NODE,
+				  GFP_KERNEL, __builtin_return_address(0));
+	if (!area && range->fallback_start)
+		area = __get_vm_area_node(size, range->alignment, PAGE_SHIFT, VM_ALLOC,
+					  range->fallback_start, range->fallback_end,
+					  NUMA_NO_NODE, GFP_KERNEL, __builtin_return_address(0));
+
+	return area;
+}
 #else
 static void *execmem_vmalloc(struct execmem_range *range, size_t size,
 			     pgprot_t pgprot, unsigned long vm_flags)
@@ -368,22 +384,6 @@ void execmem_free(void *ptr)
 		vfree(ptr);
 }
 
-struct vm_struct *execmem_vmap(size_t size)
-{
-	struct execmem_range *range = &execmem_info->ranges[EXECMEM_MODULE_DATA];
-	struct vm_struct *area;
-
-	area = __get_vm_area_node(size, range->alignment, PAGE_SHIFT, VM_ALLOC,
-				  range->start, range->end, NUMA_NO_NODE,
-				  GFP_KERNEL, __builtin_return_address(0));
-	if (!area && range->fallback_start)
-		area = __get_vm_area_node(size, range->alignment, PAGE_SHIFT, VM_ALLOC,
-					  range->fallback_start, range->fallback_end,
-					  NUMA_NO_NODE, GFP_KERNEL, __builtin_return_address(0));
-
-	return area;
-}
-
 void *execmem_update_copy(void *dst, const void *src, size_t size)
 {
 	return text_poke_copy(dst, src, size);
-- 
2.47.0.163.g1226f6d8fa-goog


