From: Changyuan Lyu <changyuanl@google.com>
To: linux-kernel@vger.kernel.org
Cc: akpm@linux-foundation.org, anthony.yznaga@oracle.com,
	arnd@arndb.de,  ashish.kalra@amd.com, benh@kernel.crashing.org,
	bp@alien8.de,  catalin.marinas@arm.com, corbet@lwn.net,
	dave.hansen@linux.intel.com,  devicetree@vger.kernel.org,
	dwmw2@infradead.org, ebiederm@xmission.com,  graf@amazon.com,
	hpa@zytor.com, jgowans@amazon.com, kexec@lists.infradead.org,
	 krzk@kernel.org, linux-arm-kernel@lists.infradead.org,
	 linux-doc@vger.kernel.org, linux-mm@kvack.org, luto@kernel.org,
	 mark.rutland@arm.com, mingo@redhat.com,
	pasha.tatashin@soleen.com,  pbonzini@redhat.com,
	peterz@infradead.org, ptyadav@amazon.de, robh@kernel.org,
	 rostedt@goodmis.org, rppt@kernel.org, saravanak@google.com,
	 skinsburskii@linux.microsoft.com, tglx@linutronix.de,
	thomas.lendacky@amd.com,  will@kernel.org, x86@kernel.org,
	Changyuan Lyu <changyuanl@google.com>
Subject: [PATCH v6 03/14] memblock: introduce memmap_init_kho_scratch()
Date: Thu, 10 Apr 2025 22:37:34 -0700	[thread overview]
Message-ID: <20250411053745.1817356-4-changyuanl@google.com> (raw)
In-Reply-To: <20250411053745.1817356-1-changyuanl@google.com>

From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>

With deferred initialization of struct page, it will be necessary to
initialize the memory map for KHO scratch regions early.

Add a memmap_init_kho_scratch_pages() helper that will allow such
initialization in upcoming patches.
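
To illustrate the intended use, here is a minimal sketch (not part of this
patch) of an early-boot caller invoking the new helper;
kho_memory_init_sketch() is a placeholder name, not an existing kernel
function:

	void __init kho_memory_init_sketch(void)
	{
		/*
		 * With CONFIG_DEFERRED_STRUCT_PAGE_INIT, struct pages for
		 * most memory are only set up later by the deferred init
		 * path.  KHO scratch regions must be usable before that,
		 * so initialize their memory map up front; the helper is a
		 * no-op when deferred struct page init is disabled.
		 */
		memmap_init_kho_scratch_pages();
	}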

Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Signed-off-by: Changyuan Lyu <changyuanl@google.com>
---
 include/linux/memblock.h |  2 ++
 mm/internal.h            |  2 ++
 mm/memblock.c            | 22 ++++++++++++++++++++++
 mm/mm_init.c             | 11 ++++++++---
 4 files changed, 34 insertions(+), 3 deletions(-)

diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 993937a6b9620..bb19a25342246 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -635,9 +635,11 @@ static inline void memtest_report_meminfo(struct seq_file *m) { }
 #ifdef CONFIG_MEMBLOCK_KHO_SCRATCH
 void memblock_set_kho_scratch_only(void);
 void memblock_clear_kho_scratch_only(void);
+void memmap_init_kho_scratch_pages(void);
 #else
 static inline void memblock_set_kho_scratch_only(void) { }
 static inline void memblock_clear_kho_scratch_only(void) { }
+static inline void memmap_init_kho_scratch_pages(void) {}
 #endif
 
 #endif /* _LINUX_MEMBLOCK_H */
diff --git a/mm/internal.h b/mm/internal.h
index 50c2f590b2d04..a47e5539321b4 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1121,6 +1121,8 @@ DECLARE_STATIC_KEY_TRUE(deferred_pages);
 bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 
+void init_deferred_page(unsigned long pfn, int nid);
+
 enum mminit_level {
 	MMINIT_WARNING,
 	MMINIT_VERIFY,
diff --git a/mm/memblock.c b/mm/memblock.c
index 3a213e2a485bc..c2633003ed8ea 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -946,6 +946,28 @@ __init_memblock void memblock_clear_kho_scratch_only(void)
 {
 	kho_scratch_only = false;
 }
+
+void __init_memblock memmap_init_kho_scratch_pages(void)
+{
+	phys_addr_t start, end;
+	unsigned long pfn;
+	int nid;
+	u64 i;
+
+	if (!IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT))
+		return;
+
+	/*
+	 * Initialize struct pages for free scratch memory.
+	 * The struct pages for reserved scratch memory will be set up in
+	 * reserve_bootmem_region()
+	 */
+	__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
+			     MEMBLOCK_KHO_SCRATCH, &start, &end, &nid) {
+		for (pfn = PFN_UP(start); pfn < PFN_DOWN(end); pfn++)
+			init_deferred_page(pfn, nid);
+	}
+}
 #endif
 
 /**
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 84f14fa12d0dd..1451cb250fd3f 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -743,7 +743,7 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
 	return false;
 }
 
-static void __meminit init_deferred_page(unsigned long pfn, int nid)
+static void __meminit __init_deferred_page(unsigned long pfn, int nid)
 {
 	if (early_page_initialised(pfn, nid))
 		return;
@@ -763,11 +763,16 @@ static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
 	return false;
 }
 
-static inline void init_deferred_page(unsigned long pfn, int nid)
+static inline void __init_deferred_page(unsigned long pfn, int nid)
 {
 }
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 
+void __meminit init_deferred_page(unsigned long pfn, int nid)
+{
+	__init_deferred_page(pfn, nid);
+}
+
 /*
  * Initialised pages do not have PageReserved set. This function is
  * called for each range allocated by the bootmem allocator and
@@ -784,7 +789,7 @@ void __meminit reserve_bootmem_region(phys_addr_t start,
 		if (pfn_valid(start_pfn)) {
 			struct page *page = pfn_to_page(start_pfn);
 
-			init_deferred_page(start_pfn, nid);
+			__init_deferred_page(start_pfn, nid);
 
 			/*
 			 * no need for atomic set_bit because the struct
-- 
2.49.0.604.gff1f9ca942-goog
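
Note on the mm/mm_init.c change above: the existing static helper is renamed
to __init_deferred_page() and exposed through a thin non-static
init_deferred_page() wrapper declared in mm/internal.h, so mm/memblock.c can
call it whether or not CONFIG_DEFERRED_STRUCT_PAGE_INIT is enabled. Condensed
to its shape (sketch only, with the real per-page work elided):

	#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	static void __meminit __init_deferred_page(unsigned long pfn, int nid)
	{
		/* initialize the struct page for @pfn on node @nid */
	}
	#else
	static inline void __init_deferred_page(unsigned long pfn, int nid)
	{
	}
	#endif

	/* Declared in mm/internal.h; callable from mm/memblock.c. */
	void __meminit init_deferred_page(unsigned long pfn, int nid)
	{
		__init_deferred_page(pfn, nid);
	}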



Thread overview: 26+ messages
2025-04-11  5:37 [PATCH v6 00/14] kexec: introduce Kexec HandOver (KHO) Changyuan Lyu
2025-04-11  5:37 ` [PATCH v6 01/14] memblock: add MEMBLOCK_RSRV_KERN flag Changyuan Lyu
2025-04-11  5:37 ` [PATCH v6 02/14] memblock: Add support for scratch memory Changyuan Lyu
2025-04-11  5:37 ` Changyuan Lyu [this message]
2025-04-11  5:37 ` [PATCH v6 04/14] kexec: add Kexec HandOver (KHO) generation helpers Changyuan Lyu
2025-04-11  5:37 ` [PATCH v6 05/14] kexec: add KHO parsing support Changyuan Lyu
2025-04-11  5:37 ` [PATCH v6 06/14] kexec: enable KHO support for memory preservation Changyuan Lyu
2025-04-11  5:37 ` [PATCH v6 07/14] kexec: add KHO support to kexec file loads Changyuan Lyu
2025-04-11  5:37 ` [PATCH v6 08/14] kexec: add config option for KHO Changyuan Lyu
2025-04-11  5:37 ` [PATCH v6 09/14] arm64: add KHO support Changyuan Lyu
2025-04-11  5:37 ` [PATCH v6 10/14] x86/setup: use memblock_reserve_kern for memory used by kernel Changyuan Lyu
2025-04-28 22:15   ` Dave Hansen
2025-04-11  5:37 ` [PATCH v6 11/14] x86: add KHO support Changyuan Lyu
2025-04-28 22:05   ` Dave Hansen
2025-04-29  8:06     ` Mike Rapoport
2025-04-29 16:06       ` Dave Hansen
2025-04-29 16:32         ` Mike Rapoport
2025-04-29 15:53     ` Mike Rapoport
2025-04-29 16:05       ` Dave Hansen
2025-04-29 16:34         ` Mike Rapoport
2025-04-11  5:37 ` [PATCH v6 12/14] memblock: add KHO support for reserve_mem Changyuan Lyu
2025-04-22 13:31   ` Mike Rapoport
2025-04-24  8:32     ` Changyuan Lyu
2025-04-11  5:37 ` [PATCH v6 13/14] Documentation: add documentation for KHO Changyuan Lyu
2025-04-11  5:37 ` [PATCH v6 14/14] Documentation: KHO: Add memblock bindings Changyuan Lyu
2025-04-28 22:19 ` [PATCH v6 00/14] kexec: introduce Kexec HandOver (KHO) Dave Hansen
