linux-mm.kvack.org archive mirror
* [PATCH v2] mm/memmap: Prevent double scanning of memmap by kmemleak
@ 2025-01-03 11:01 Guo Weikang
  2025-01-03 14:27 ` Catalin Marinas
  0 siblings, 1 reply; 3+ messages in thread
From: Guo Weikang @ 2025-01-03 11:01 UTC (permalink / raw)
  To: Mike Rapoport, Andrew Morton; +Cc: linux-mm, linux-kernel, Guo Weikang

kmemleak explicitly scans the mem_map by traversing all valid struct page
objects. However, memmap_alloc() was also adding this memory to the gray
object list, causing it to be scanned twice. Remove the memory allocated by
memmap_alloc() from the kmemleak scan list and add a comment to clarify the
behavior.
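
For reference, kmemleak_scan() in mm/kmemleak.c already walks every online
pfn and scans the struct page it maps to. A simplified, paraphrased sketch
of that loop (details may vary between kernel versions):

	struct zone *zone;
	unsigned long pfn;

	for_each_populated_zone(zone) {
		for (pfn = zone->zone_start_pfn; pfn < zone_end_pfn(zone); pfn++) {
			struct page *page = pfn_to_online_page(pfn);

			if (!page || page_zone(page) != zone)
				continue;
			/* scan the struct page itself for references */
			scan_block(page, page + 1, NULL);
		}
	}

Registering the same memmap region as a gray object from memblock therefore
only makes kmemleak scan that memory a second time.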

Link: https://lore.kernel.org/lkml/CAOm6qn=FVeTpH54wGDFMHuCOeYtvoTx30ktnv9-w3Nh8RMofEA@mail.gmail.com/

Signed-off-by: Guo Weikang <guoweikang.kernel@gmail.com>
---
v1 -> v2: Fix CI: restore __earlyonly_bootmem_alloc(), which is marked
__ref, to prevent an init section mismatch WARNING.
Link: https://lore.kernel.org/oe-kbuild-all/202501021601.jub4p3EM-lkp@intel.com/
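
For context (not part of the diff below): __ref, from include/linux/init.h,
places code in .ref.text, which modpost treats as an intentional reference
into __init sections. A hedged, minimal illustration of the pattern, using
hypothetical functions that are not part of this patch:

	#include <linux/init.h>

	static int __init early_setup_value(void)
	{
		return 42;	/* .init.text, discarded after boot */
	}

	/*
	 * A plain (non-__init) caller referencing early_setup_value() would
	 * trigger a modpost "section mismatch" warning; marking the caller
	 * __ref documents that the early-only reference is intentional.
	 */
	static int __ref early_only_caller(void)
	{
		return early_setup_value();
	}
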
---
 include/linux/memblock.h | 4 ++++
 mm/mm_init.c             | 8 ++++++--
 mm/sparse-vmemmap.c      | 5 ++---
 3 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index dee628350cd1..e79eb6ac516f 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -378,6 +378,10 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
 /* Flags for memblock allocation APIs */
 #define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
 #define MEMBLOCK_ALLOC_ACCESSIBLE	0
+/*
+ * MEMBLOCK_ALLOC_NOLEAKTRACE avoids kmemleak tracing. It implies
+ * MEMBLOCK_ALLOC_ACCESSIBLE.
+ */
 #define MEMBLOCK_ALLOC_NOLEAKTRACE	1
 
 /* We are using top down, so it is safe to use 0 here */
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 24b68b425afb..71b58f5f2492 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1580,6 +1580,10 @@ static void __init free_area_init_core(struct pglist_data *pgdat)
 	}
 }
 
+/*
+ * Kmemleak scans the mem_map explicitly by traversing all valid struct
+ * page objects, so this memory does not need to be added to the scan list.
+ */
 void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
 			  phys_addr_t min_addr, int nid, bool exact_nid)
 {
@@ -1587,11 +1591,11 @@ void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
 
 	if (exact_nid)
 		ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
-						   MEMBLOCK_ALLOC_ACCESSIBLE,
+						   MEMBLOCK_ALLOC_NOLEAKTRACE,
 						   nid);
 	else
 		ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
-						 MEMBLOCK_ALLOC_ACCESSIBLE,
+						 MEMBLOCK_ALLOC_NOLEAKTRACE,
 						 nid);
 
 	if (ptr && size > 0)
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index cec67c5f37d8..903a5422907b 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -27,7 +27,7 @@
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
 #include <linux/sched.h>
-
+#include "internal.h"
 #include <asm/dma.h>
 #include <asm/pgalloc.h>
 
@@ -42,8 +42,7 @@ static void * __ref __earlyonly_bootmem_alloc(int node,
 				unsigned long align,
 				unsigned long goal)
 {
-	return memblock_alloc_try_nid_raw(size, align, goal,
-					       MEMBLOCK_ALLOC_ACCESSIBLE, node);
+	return memmap_alloc(size, align, goal, node, false);
 }
 
 void * __meminit vmemmap_alloc_block(unsigned long size, int node)
-- 
2.25.1



Thread overview: 3 messages
2025-01-03 11:01 [PATCH v2] mm/memmap: Prevent double scanning of memmap by kmemleak Guo Weikang
2025-01-03 14:27 ` Catalin Marinas
2025-01-06  1:45   ` Weikang Guo
