linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] memblock: mark init_deferred_page as __init_memblock
@ 2025-04-23 16:08 Arnd Bergmann
  2025-04-24 16:29 ` Mike Rapoport
  0 siblings, 1 reply; 4+ messages in thread
From: Arnd Bergmann @ 2025-04-23 16:08 UTC (permalink / raw)
  To: Andrew Morton, Mike Rapoport, Changyuan Lyu
  Cc: Arnd Bergmann, David Hildenbrand, Vlastimil Babka,
	Matthew Wilcox (Oracle),
	Lorenzo Stoakes, Kefeng Wang, Ryan Roberts, Barry Song, Jeff Xu,
	Wei Yang, Baoquan He, Suren Baghdasaryan, Frank van der Linden,
	York Jasper Niebuhr, linux-mm, linux-kernel

From: Arnd Bergmann <arnd@arndb.de>

On architectures that set CONFIG_ARCH_KEEP_MEMBLOCK, memmap_init_kho_scratch_pages
is not discarded after boot, but it calls a function that is discarded:

WARNING: modpost: vmlinux: section mismatch in reference: memmap_init_kho_scratch_pages+0x120 (section: .text) -> init_deferred_page (section: .init.text)
ERROR: modpost: Section mismatches detected.
Set CONFIG_SECTION_MISMATCH_WARN_ONLY=y to allow them.

Mark init_deferred_page the same way as memmap_init_kho_scratch_pages
to avoid that warning. Unfortunately this requires marking additional
functions the same way to have them stay around as well.

Ideally memmap_init_kho_scratch_pages would become __meminit instead
of __init_memblock, but I could not convince myself that this is safe.

Fixes: 1b7936623970 ("memblock: introduce memmap_init_kho_scratch()")
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
---
 mm/internal.h | 7 ++++---
 mm/mm_init.c  | 8 ++++----
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index 838f840ded83..40464f755092 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -9,6 +9,7 @@
 
 #include <linux/fs.h>
 #include <linux/khugepaged.h>
+#include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/mm_inline.h>
 #include <linux/pagemap.h>
@@ -543,7 +544,7 @@ extern int defrag_mode;
 
 void setup_per_zone_wmarks(void);
 void calculate_min_free_kbytes(void);
-int __meminit init_per_zone_wmark_min(void);
+int __init_memblock init_per_zone_wmark_min(void);
 void page_alloc_sysctl_init(void);
 
 /*
@@ -1532,9 +1533,9 @@ static inline bool pte_needs_soft_dirty_wp(struct vm_area_struct *vma, pte_t pte
 	return vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte);
 }
 
-void __meminit __init_single_page(struct page *page, unsigned long pfn,
+void __init_memblock __init_single_page(struct page *page, unsigned long pfn,
 				unsigned long zone, int nid);
-void __meminit __init_page_from_nid(unsigned long pfn, int nid);
+void __init_memblock __init_page_from_nid(unsigned long pfn, int nid);
 
 /* shrinker related functions */
 unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 7bb5f77cf195..31cf8bc31cc2 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -578,7 +578,7 @@ static void __init find_zone_movable_pfns_for_nodes(void)
 	node_states[N_MEMORY] = saved_node_state;
 }
 
-void __meminit __init_single_page(struct page *page, unsigned long pfn,
+void __init_memblock __init_single_page(struct page *page, unsigned long pfn,
 				unsigned long zone, int nid)
 {
 	mm_zero_struct_page(page);
@@ -669,7 +669,7 @@ static inline void fixup_hashdist(void) {}
 /*
  * Initialize a reserved page unconditionally, finding its zone first.
  */
-void __meminit __init_page_from_nid(unsigned long pfn, int nid)
+void __init_memblock __init_page_from_nid(unsigned long pfn, int nid)
 {
 	pg_data_t *pgdat;
 	int zid;
@@ -744,7 +744,7 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
 	return false;
 }
 
-static void __meminit __init_deferred_page(unsigned long pfn, int nid)
+static void __init_memblock __init_deferred_page(unsigned long pfn, int nid)
 {
 	if (early_page_initialised(pfn, nid))
 		return;
@@ -769,7 +769,7 @@ static inline void __init_deferred_page(unsigned long pfn, int nid)
 }
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 
-void __meminit init_deferred_page(unsigned long pfn, int nid)
+void __init_memblock init_deferred_page(unsigned long pfn, int nid)
 {
 	__init_deferred_page(pfn, nid);
 }
-- 
2.39.5



^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2025-04-24 21:29 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-04-23 16:08 [PATCH] memblock: mark init_deferred_page as __init_memblock Arnd Bergmann
2025-04-24 16:29 ` Mike Rapoport
2025-04-24 19:21   ` Mike Rapoport
2025-04-24 21:28     ` Arnd Bergmann

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox