From: Changyuan Lyu <changyuanl@google.com>
To: linux-kernel@vger.kernel.org
Cc: graf@amazon.com, akpm@linux-foundation.org, luto@kernel.org,
anthony.yznaga@oracle.com, arnd@arndb.de, ashish.kalra@amd.com,
benh@kernel.crashing.org, bp@alien8.de, catalin.marinas@arm.com,
dave.hansen@linux.intel.com, dwmw2@infradead.org,
ebiederm@xmission.com, mingo@redhat.com, jgowans@amazon.com,
corbet@lwn.net, krzk@kernel.org, rppt@kernel.org,
mark.rutland@arm.com, pbonzini@redhat.com,
pasha.tatashin@soleen.com, hpa@zytor.com, peterz@infradead.org,
ptyadav@amazon.de, robh+dt@kernel.org, robh@kernel.org,
saravanak@google.com, skinsburskii@linux.microsoft.com,
rostedt@goodmis.org, tglx@linutronix.de,
thomas.lendacky@amd.com, usama.arif@bytedance.com,
will@kernel.org, devicetree@vger.kernel.org,
kexec@lists.infradead.org, linux-arm-kernel@lists.infradead.org,
linux-doc@vger.kernel.org, linux-mm@kvack.org, x86@kernel.org,
Changyuan Lyu <changyuanl@google.com>
Subject: [PATCH v5 14/16] x86: add KHO support
Date: Wed, 19 Mar 2025 18:55:49 -0700
Message-ID: <20250320015551.2157511-15-changyuanl@google.com>
In-Reply-To: <20250320015551.2157511-1-changyuanl@google.com>
From: Alexander Graf <graf@amazon.com>
We now have all the bits in place to support KHO kexecs. This patch adds
awareness of KHO to the kexec file load and boot paths for x86 and adds
the respective Kconfig option to the architecture so that it can use KHO
successfully.
In addition, it enlightens the decompression code with KHO so that its
KASLR location finder only considers memory regions that are not already
occupied by KHO memory.
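For illustration only (not part of the diff below), a consumer of the new
SETUP_KEXEC_KHO entry walks the setup_data chain much like the decompressor
hunk in this patch does; a minimal sketch, assuming the SETUP_KEXEC_KHO type
and struct kho_data layout introduced here, might look like:

	/*
	 * Sketch only: return the KHO metadata passed by the previous
	 * kernel, or NULL if none was handed over. Assumes an
	 * identity-mapped environment such as the decompressor.
	 */
	static struct kho_data *find_kho_setup_data(struct boot_params *bp)
	{
		struct setup_data *sd;

		for (sd = (struct setup_data *)(unsigned long)bp->hdr.setup_data;
		     sd;
		     sd = (struct setup_data *)(unsigned long)sd->next) {
			if (sd->type == SETUP_KEXEC_KHO)
				return (struct kho_data *)sd->data;
		}

		return NULL;
	}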
Signed-off-by: Alexander Graf <graf@amazon.com>
Co-developed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Co-developed-by: Changyuan Lyu <changyuanl@google.com>
Signed-off-by: Changyuan Lyu <changyuanl@google.com>
---
arch/x86/Kconfig | 3 ++
arch/x86/boot/compressed/kaslr.c | 52 +++++++++++++++++++++++++-
arch/x86/include/asm/setup.h | 4 ++
arch/x86/include/uapi/asm/setup_data.h | 13 ++++++-
arch/x86/kernel/e820.c | 18 +++++++++
arch/x86/kernel/kexec-bzimage64.c | 36 ++++++++++++++++++
arch/x86/kernel/setup.c | 25 +++++++++++++
arch/x86/realmode/init.c | 2 +
include/linux/kexec_handover.h | 13 +++++--
9 files changed, 161 insertions(+), 5 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 0e27ebd7e36a..acd180e3002f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2091,6 +2091,9 @@ config ARCH_SUPPORTS_KEXEC_BZIMAGE_VERIFY_SIG
config ARCH_SUPPORTS_KEXEC_JUMP
def_bool y
+config ARCH_SUPPORTS_KEXEC_HANDOVER
+ def_bool y
+
config ARCH_SUPPORTS_CRASH_DUMP
def_bool X86_64 || (X86_32 && HIGHMEM)
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index f03d59ea6e40..ff1168881016 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -760,6 +760,55 @@ static void process_e820_entries(unsigned long minimum,
}
}
+/*
+ * If KHO is active, only process its scratch areas to ensure we are not
+ * stepping onto preserved memory.
+ */
+#ifdef CONFIG_KEXEC_HANDOVER
+static bool process_kho_entries(unsigned long minimum, unsigned long image_size)
+{
+ struct kho_scratch *kho_scratch;
+ struct setup_data *ptr;
+ int i, nr_areas = 0;
+
+ ptr = (struct setup_data *)(unsigned long)boot_params_ptr->hdr.setup_data;
+ while (ptr) {
+ if (ptr->type == SETUP_KEXEC_KHO) {
+ struct kho_data *kho = (struct kho_data *)ptr->data;
+
+ kho_scratch = (void *)kho->scratch_addr;
+ nr_areas = kho->scratch_size / sizeof(*kho_scratch);
+
+ break;
+ }
+
+ ptr = (struct setup_data *)(unsigned long)ptr->next;
+ }
+
+ if (!nr_areas)
+ return false;
+
+ for (i = 0; i < nr_areas; i++) {
+ struct kho_scratch *area = &kho_scratch[i];
+ struct mem_vector region = {
+ .start = area->addr,
+ .size = area->size,
+ };
+
+ if (process_mem_region(&region, minimum, image_size))
+ break;
+ }
+
+ return true;
+}
+#else
+static inline bool process_kho_entries(unsigned long minimum,
+ unsigned long image_size)
+{
+ return false;
+}
+#endif
+
static unsigned long find_random_phys_addr(unsigned long minimum,
unsigned long image_size)
{
@@ -775,7 +824,8 @@ static unsigned long find_random_phys_addr(unsigned long minimum,
return 0;
}
- if (!process_efi_entries(minimum, image_size))
+ if (!process_kho_entries(minimum, image_size) &&
+ !process_efi_entries(minimum, image_size))
process_e820_entries(minimum, image_size);
phys_addr = slots_fetch_random();
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 85f4fde3515c..70e045321d4b 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -66,6 +66,10 @@ extern void x86_ce4100_early_setup(void);
static inline void x86_ce4100_early_setup(void) { }
#endif
+#ifdef CONFIG_KEXEC_HANDOVER
+#include <linux/kexec_handover.h>
+#endif
+
#ifndef _SETUP
#include <asm/espfix.h>
diff --git a/arch/x86/include/uapi/asm/setup_data.h b/arch/x86/include/uapi/asm/setup_data.h
index b111b0c18544..c258c37768ee 100644
--- a/arch/x86/include/uapi/asm/setup_data.h
+++ b/arch/x86/include/uapi/asm/setup_data.h
@@ -13,7 +13,8 @@
#define SETUP_CC_BLOB 7
#define SETUP_IMA 8
#define SETUP_RNG_SEED 9
-#define SETUP_ENUM_MAX SETUP_RNG_SEED
+#define SETUP_KEXEC_KHO 10
+#define SETUP_ENUM_MAX SETUP_KEXEC_KHO
#define SETUP_INDIRECT (1<<31)
#define SETUP_TYPE_MAX (SETUP_ENUM_MAX | SETUP_INDIRECT)
@@ -78,6 +79,16 @@ struct ima_setup_data {
__u64 size;
} __attribute__((packed));
+/*
+ * Locations of kexec handover metadata
+ */
+struct kho_data {
+ __u64 dt_addr;
+ __u64 dt_size;
+ __u64 scratch_addr;
+ __u64 scratch_size;
+} __attribute__((packed));
+
#endif /* __ASSEMBLY__ */
#endif /* _UAPI_ASM_X86_SETUP_DATA_H */
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 82b96ed9890a..0b81cd70b02a 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1329,6 +1329,24 @@ void __init e820__memblock_setup(void)
memblock_add(entry->addr, entry->size);
}
+ /*
+ * At this point with KHO we only allocate from scratch memory.
+ * At the same time, we configure memblock to only allow
+ * allocations from memory below ISA_END_ADDRESS which is not
+ * a natural scratch region, because Linux ignores memory below
+ * ISA_END_ADDRESS at runtime. Besides very few (if any) early
+ * allocations, we must allocate the real-mode trampoline below
+ * ISA_END_ADDRESS.
+ *
+ * To make sure that we can actually perform allocations during
+ * this phase, let's mark memory below ISA_END_ADDRESS as scratch
+ * so we can allocate from there in a scratch-only world.
+ *
+ * After the real-mode trampoline is allocated, we clear the scratch
+ * marking from the memory below ISA_END_ADDRESS.
+ */
+ memblock_mark_kho_scratch(0, ISA_END_ADDRESS);
+
/* Throw away partial pages: */
memblock_trim_memory(PAGE_SIZE);
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 68530fad05f7..09d6a068b14c 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -233,6 +233,31 @@ setup_ima_state(const struct kimage *image, struct boot_params *params,
#endif /* CONFIG_IMA_KEXEC */
}
+static void setup_kho(const struct kimage *image, struct boot_params *params,
+ unsigned long params_load_addr,
+ unsigned int setup_data_offset)
+{
+#ifdef CONFIG_KEXEC_HANDOVER
+ struct setup_data *sd = (void *)params + setup_data_offset;
+ struct kho_data *kho = (void *)sd + sizeof(*sd);
+
+ sd->type = SETUP_KEXEC_KHO;
+ sd->len = sizeof(struct kho_data);
+
+ /* Only add if we have all KHO images in place */
+ if (!image->kho.fdt || !image->kho.scratch)
+ return;
+
+ /* Add setup data */
+ kho->dt_addr = image->kho.fdt->mem;
+ kho->dt_size = image->kho.fdt->memsz;
+ kho->scratch_addr = image->kho.scratch->mem;
+ kho->scratch_size = image->kho.scratch->bufsz;
+ sd->next = params->hdr.setup_data;
+ params->hdr.setup_data = params_load_addr + setup_data_offset;
+#endif /* CONFIG_KEXEC_HANDOVER */
+}
+
static int
setup_boot_parameters(struct kimage *image, struct boot_params *params,
unsigned long params_load_addr,
@@ -312,6 +337,13 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params,
sizeof(struct ima_setup_data);
}
+ if (IS_ENABLED(CONFIG_KEXEC_HANDOVER)) {
+ /* Setup space to store preservation metadata */
+ setup_kho(image, params, params_load_addr, setup_data_offset);
+ setup_data_offset += sizeof(struct setup_data) +
+ sizeof(struct kho_data);
+ }
+
/* Setup RNG seed */
setup_rng_seed(params, params_load_addr, setup_data_offset);
@@ -479,6 +511,10 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
kbuf.bufsz += sizeof(struct setup_data) +
sizeof(struct ima_setup_data);
+ if (IS_ENABLED(CONFIG_KEXEC_HANDOVER))
+ kbuf.bufsz += sizeof(struct setup_data) +
+ sizeof(struct kho_data);
+
params = kzalloc(kbuf.bufsz, GFP_KERNEL);
if (!params)
return ERR_PTR(-ENOMEM);
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index ead370570eb2..e2c54181405b 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -385,6 +385,28 @@ int __init ima_get_kexec_buffer(void **addr, size_t *size)
}
#endif
+static void __init add_kho(u64 phys_addr, u32 data_len)
+{
+#ifdef CONFIG_KEXEC_HANDOVER
+ struct kho_data *kho;
+ u64 addr = phys_addr + sizeof(struct setup_data);
+ u64 size = data_len - sizeof(struct setup_data);
+
+ kho = early_memremap(addr, size);
+ if (!kho) {
+ pr_warn("setup: failed to memremap kho data (0x%llx, 0x%llx)\n",
+ addr, size);
+ return;
+ }
+
+ kho_populate(kho->dt_addr, kho->scratch_addr, kho->scratch_size);
+
+ early_memunmap(kho, size);
+#else
+ pr_warn("Passed KHO data, but CONFIG_KEXEC_HANDOVER not set. Ignoring.\n");
+#endif
+}
+
static void __init parse_setup_data(void)
{
struct setup_data *data;
@@ -413,6 +435,9 @@ static void __init parse_setup_data(void)
case SETUP_IMA:
add_early_ima_buffer(pa_data);
break;
+ case SETUP_KEXEC_KHO:
+ add_kho(pa_data, data_len);
+ break;
case SETUP_RNG_SEED:
data = early_memremap(pa_data, data_len);
add_bootloader_randomness(data->data, data->len);
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index f9bc444a3064..9b9f4534086d 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -65,6 +65,8 @@ void __init reserve_real_mode(void)
* setup_arch().
*/
memblock_reserve(0, SZ_1M);
+
+ memblock_clear_kho_scratch(0, SZ_1M);
}
static void __init sme_sev_setup_real_mode(struct trampoline_header *th)
diff --git a/include/linux/kexec_handover.h b/include/linux/kexec_handover.h
index d52a7b500f4c..2dd51a77d56c 100644
--- a/include/linux/kexec_handover.h
+++ b/include/linux/kexec_handover.h
@@ -3,9 +3,6 @@
#define LINUX_KEXEC_HANDOVER_H
#include <linux/types.h>
-#include <linux/hashtable.h>
-#include <linux/notifier.h>
-#include <linux/mm_types.h>
struct kho_scratch {
phys_addr_t addr;
@@ -18,6 +15,15 @@ enum kho_event {
KEXEC_KHO_UNFREEZE = 1,
};
+#ifdef _SETUP
+struct notifier_block;
+struct kho_node;
+struct folio;
+#else
+#include <linux/notifier.h>
+#include <linux/hashtable.h>
+#include <linux/mm_types.h>
+
#define KHO_HASHTABLE_BITS 3
#define KHO_NODE_INIT \
{ \
@@ -35,6 +41,7 @@ struct kho_node {
struct list_head list;
bool visited;
};
+#endif /* _SETUP */
struct kho_in_node {
int offset;
--
2.48.1.711.g2feabab25a-goog
Thread overview: 103+ messages
2025-03-20 1:55 [PATCH v5 00/16] kexec: introduce Kexec HandOver (KHO) Changyuan Lyu
2025-03-20 1:55 ` [PATCH v5 01/16] kexec: define functions to map and unmap segments Changyuan Lyu
2025-03-20 1:55 ` [PATCH v5 02/16] mm/mm_init: rename init_reserved_page to init_deferred_page Changyuan Lyu
2025-03-20 7:10 ` Krzysztof Kozlowski
2025-03-20 17:15 ` Changyuan Lyu
2025-03-20 1:55 ` [PATCH v5 03/16] memblock: add MEMBLOCK_RSRV_KERN flag Changyuan Lyu
2025-03-20 1:55 ` [PATCH v5 04/16] memblock: Add support for scratch memory Changyuan Lyu
2025-03-20 1:55 ` [PATCH v5 05/16] memblock: introduce memmap_init_kho_scratch() Changyuan Lyu
2025-03-20 1:55 ` [PATCH v5 06/16] hashtable: add macro HASHTABLE_INIT Changyuan Lyu
2025-03-20 1:55 ` [PATCH v5 07/16] kexec: add Kexec HandOver (KHO) generation helpers Changyuan Lyu
2025-03-21 13:34 ` Jason Gunthorpe
2025-03-23 19:02 ` Changyuan Lyu
2025-03-24 16:28 ` Jason Gunthorpe
2025-03-25 0:21 ` Changyuan Lyu
2025-03-25 2:20 ` Jason Gunthorpe
2025-03-24 18:40 ` Frank van der Linden
2025-03-25 19:19 ` Mike Rapoport
2025-03-25 21:56 ` Frank van der Linden
2025-03-26 11:59 ` Mike Rapoport
2025-03-26 16:25 ` Frank van der Linden
2025-03-20 1:55 ` [PATCH v5 08/16] kexec: add KHO parsing support Changyuan Lyu
2025-03-20 1:55 ` [PATCH v5 09/16] kexec: enable KHO support for memory preservation Changyuan Lyu
2025-03-21 13:46 ` Jason Gunthorpe
2025-03-22 19:12 ` Mike Rapoport
2025-03-23 18:55 ` Jason Gunthorpe
2025-03-24 18:18 ` Mike Rapoport
2025-03-24 20:07 ` Jason Gunthorpe
2025-03-26 12:07 ` Mike Rapoport
2025-03-23 19:07 ` Changyuan Lyu
2025-03-25 2:04 ` Jason Gunthorpe
2025-03-27 10:03 ` Pratyush Yadav
2025-03-27 13:31 ` Jason Gunthorpe
2025-03-27 17:28 ` Pratyush Yadav
2025-03-28 12:53 ` Jason Gunthorpe
2025-04-02 16:44 ` Changyuan Lyu
2025-04-02 16:47 ` Pratyush Yadav
2025-04-02 18:37 ` Pasha Tatashin
2025-04-02 18:49 ` Pratyush Yadav
2025-04-02 19:16 ` Pratyush Yadav
2025-04-03 11:42 ` Jason Gunthorpe
2025-04-03 13:58 ` Mike Rapoport
2025-04-03 14:24 ` Jason Gunthorpe
2025-04-04 9:54 ` Mike Rapoport
2025-04-04 12:47 ` Jason Gunthorpe
2025-04-04 13:53 ` Mike Rapoport
2025-04-04 14:30 ` Jason Gunthorpe
2025-04-04 16:24 ` Pratyush Yadav
2025-04-04 17:31 ` Jason Gunthorpe
2025-04-06 16:13 ` Mike Rapoport
2025-04-06 16:11 ` Mike Rapoport
2025-04-07 14:16 ` Jason Gunthorpe
2025-04-07 16:31 ` Mike Rapoport
2025-04-07 17:03 ` Jason Gunthorpe
2025-04-09 9:06 ` Mike Rapoport
2025-04-09 12:56 ` Jason Gunthorpe
2025-04-09 13:58 ` Mike Rapoport
2025-04-09 15:37 ` Jason Gunthorpe
2025-04-09 16:19 ` Mike Rapoport
2025-04-09 16:28 ` Jason Gunthorpe
2025-04-10 16:51 ` Matthew Wilcox
2025-04-10 17:31 ` Jason Gunthorpe
2025-04-09 16:28 ` Mike Rapoport
2025-04-09 18:32 ` Jason Gunthorpe
2025-04-04 16:15 ` Pratyush Yadav
2025-04-06 16:34 ` Mike Rapoport
2025-04-07 14:23 ` Jason Gunthorpe
2025-04-03 13:57 ` Mike Rapoport
2025-04-11 4:02 ` Changyuan Lyu
2025-04-03 15:50 ` Pratyush Yadav
2025-04-03 16:10 ` Jason Gunthorpe
2025-04-03 17:37 ` Pratyush Yadav
2025-04-04 12:54 ` Jason Gunthorpe
2025-04-04 15:39 ` Pratyush Yadav
2025-04-09 8:35 ` Mike Rapoport
2025-03-20 1:55 ` [PATCH v5 10/16] kexec: add KHO support to kexec file loads Changyuan Lyu
2025-03-21 13:48 ` Jason Gunthorpe
2025-03-20 1:55 ` [PATCH v5 11/16] kexec: add config option for KHO Changyuan Lyu
2025-03-20 7:10 ` Krzysztof Kozlowski
2025-03-20 17:18 ` Changyuan Lyu
2025-03-24 4:18 ` Dave Young
2025-03-24 19:26 ` Pasha Tatashin
2025-03-25 1:24 ` Dave Young
2025-03-25 3:07 ` Dave Young
2025-03-25 6:57 ` Baoquan He
2025-03-25 8:36 ` Dave Young
2025-03-26 9:17 ` Dave Young
2025-03-26 11:28 ` Mike Rapoport
2025-03-26 12:09 ` Dave Young
2025-03-25 14:04 ` Pasha Tatashin
2025-03-20 1:55 ` [PATCH v5 12/16] arm64: add KHO support Changyuan Lyu
2025-03-20 7:13 ` Krzysztof Kozlowski
2025-03-20 8:30 ` Krzysztof Kozlowski
2025-03-20 23:29 ` Changyuan Lyu
2025-04-11 3:47 ` Changyuan Lyu
2025-03-20 1:55 ` [PATCH v5 13/16] x86/setup: use memblock_reserve_kern for memory used by kernel Changyuan Lyu
2025-03-20 1:55 ` Changyuan Lyu [this message]
2025-03-20 1:55 ` [PATCH v5 15/16] memblock: add KHO support for reserve_mem Changyuan Lyu
2025-03-20 1:55 ` [PATCH v5 16/16] Documentation: add documentation for KHO Changyuan Lyu
2025-03-20 14:45 ` Jonathan Corbet
2025-03-21 6:33 ` Changyuan Lyu
2025-03-21 13:46 ` Jonathan Corbet
2025-03-25 14:19 ` [PATCH v5 00/16] kexec: introduce Kexec HandOver (KHO) Pasha Tatashin
2025-03-25 15:03 ` Mike Rapoport