From: Changyuan Lyu <changyuanl@google.com>
To: linux-kernel@vger.kernel.org
Cc: graf@amazon.com, akpm@linux-foundation.org, luto@kernel.org,
anthony.yznaga@oracle.com, arnd@arndb.de, ashish.kalra@amd.com,
benh@kernel.crashing.org, bp@alien8.de, catalin.marinas@arm.com,
dave.hansen@linux.intel.com, dwmw2@infradead.org,
ebiederm@xmission.com, mingo@redhat.com, jgowans@amazon.com,
corbet@lwn.net, krzk@kernel.org, rppt@kernel.org,
mark.rutland@arm.com, pbonzini@redhat.com,
pasha.tatashin@soleen.com, hpa@zytor.com, peterz@infradead.org,
ptyadav@amazon.de, robh+dt@kernel.org, robh@kernel.org,
saravanak@google.com, skinsburskii@linux.microsoft.com,
rostedt@goodmis.org, tglx@linutronix.de,
thomas.lendacky@amd.com, usama.arif@bytedance.com,
will@kernel.org, devicetree@vger.kernel.org,
kexec@lists.infradead.org, linux-arm-kernel@lists.infradead.org,
linux-doc@vger.kernel.org, linux-mm@kvack.org, x86@kernel.org,
Jason Gunthorpe <jgg@nvidia.com>,
Changyuan Lyu <changyuanl@google.com>
Subject: [PATCH v5 09/16] kexec: enable KHO support for memory preservation
Date: Wed, 19 Mar 2025 18:55:44 -0700
Message-ID: <20250320015551.2157511-10-changyuanl@google.com>
In-Reply-To: <20250320015551.2157511-1-changyuanl@google.com>
From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
Introduce APIs that allow KHO users to preserve memory across kexec and
to access that memory after the kexeced kernel boots:
kho_preserve_folio() - records a folio to be preserved over kexec
kho_restore_folio() - recreates the folio from the preserved memory
kho_preserve_phys() - records a physically contiguous range to be
preserved over kexec
kho_restore_phys() - recreates order-0 pages corresponding to the
preserved physical range
The memory preservations are tracked by two levels of xarrays that manage
chunks of per-order 512-byte bitmaps. For instance, the entire 1G order of
a 1TB x86 system would fit inside a single 512-byte bitmap. For order-0
allocations each bitmap will cover 16M of address space. Thus, for 16G of
memory at most 512K of bitmap memory will be needed for order 0.
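As an illustration of the bookkeeping only (this snippet is not part of the
patch; it merely mirrors the arithmetic used by __kho_preserve() in the code
below):

  /*
   * Illustrative only: how a pfn of a given order maps onto the per-order
   * bitmaps (each bitmap chunk is 512 bytes = 4096 bits).
   */
  #define PRESERVE_BITS (512 * 8)

  static void locate_bit(unsigned long pfn, unsigned int order,
                         unsigned long *chunk_idx, unsigned long *bit)
  {
          unsigned long pfn_hi = pfn >> order;  /* one bit per order-sized block */

          *chunk_idx = pfn_hi / PRESERVE_BITS;  /* which bitmap in the order's xarray */
          *bit = pfn_hi % PRESERVE_BITS;        /* bit within that 512-byte bitmap */
  }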
At serialization time all bitmaps are recorded in a linked list of pages
for the next kernel to process, and the physical address of that list is
recorded in the KHO FDT.
The next kernel then processes that list and reserves the memory ranges;
later, when a user requests a folio or a physical range, KHO restores the
corresponding memory map entries.
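For illustration, a KHO user could drive these APIs roughly as follows. This
sketch is not part of the patch: the helper names are made up, and how the
physical address reaches the next kernel (for example as a property in the
user's own KHO FDT node) is left to the caller.

  #include <linux/gfp.h>
  #include <linux/kexec_handover.h>
  #include <linux/mm.h>
  #include <linux/pfn.h>

  static phys_addr_t my_state_phys; /* handed to the next kernel, e.g. via the KHO FDT */

  /* Before kexec: allocate state and record it for preservation. */
  static int my_save_state(void)
  {
          struct folio *folio = folio_alloc(GFP_KERNEL, 2); /* order-2 folio */
          int err;

          if (!folio)
                  return -ENOMEM;

          /* ... fill the folio with data that must survive kexec ... */

          err = kho_preserve_folio(folio);
          if (err) {
                  folio_put(folio);
                  return err;
          }

          my_state_phys = PFN_PHYS(folio_pfn(folio));
          return 0;
  }

  /* In the new kernel: recreate the folio from the preserved range. */
  static int my_restore_state(void)
  {
          struct folio *folio = kho_restore_folio(my_state_phys);

          if (!folio)
                  return -ENOENT;

          /* ... read the data back; the folio order is preserved as well ... */
          return 0;
  }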
Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Co-developed-by: Changyuan Lyu <changyuanl@google.com>
Signed-off-by: Changyuan Lyu <changyuanl@google.com>
---
include/linux/kexec_handover.h | 38 +++
kernel/kexec_handover.c | 486 ++++++++++++++++++++++++++++++++-
2 files changed, 522 insertions(+), 2 deletions(-)
diff --git a/include/linux/kexec_handover.h b/include/linux/kexec_handover.h
index c665ff6cd728..d52a7b500f4c 100644
--- a/include/linux/kexec_handover.h
+++ b/include/linux/kexec_handover.h
@@ -5,6 +5,7 @@
#include <linux/types.h>
#include <linux/hashtable.h>
#include <linux/notifier.h>
+#include <linux/mm_types.h>
struct kho_scratch {
phys_addr_t addr;
@@ -54,6 +55,13 @@ int kho_add_string_prop(struct kho_node *node, const char *key,
int register_kho_notifier(struct notifier_block *nb);
int unregister_kho_notifier(struct notifier_block *nb);
+int kho_preserve_folio(struct folio *folio);
+int kho_unpreserve_folio(struct folio *folio);
+int kho_preserve_phys(phys_addr_t phys, size_t size);
+int kho_unpreserve_phys(phys_addr_t phys, size_t size);
+struct folio *kho_restore_folio(phys_addr_t phys);
+void *kho_restore_phys(phys_addr_t phys, size_t size);
+
void kho_memory_init(void);
void kho_populate(phys_addr_t handover_fdt_phys, phys_addr_t scratch_phys,
@@ -118,6 +126,36 @@ static inline int unregister_kho_notifier(struct notifier_block *nb)
return -EOPNOTSUPP;
}
+static inline int kho_preserve_folio(struct folio *folio)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int kho_unpreserve_folio(struct folio *folio)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int kho_preserve_phys(phys_addr_t phys, size_t size)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int kho_unpreserve_phys(phys_addr_t phys, size_t size)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline struct folio *kho_restore_folio(phys_addr_t phys)
+{
+ return NULL;
+}
+
+static inline void *kho_restore_phys(phys_addr_t phys, size_t size)
+{
+ return NULL;
+}
+
static inline void kho_memory_init(void)
{
}
diff --git a/kernel/kexec_handover.c b/kernel/kexec_handover.c
index 6ebad2f023f9..592563c21369 100644
--- a/kernel/kexec_handover.c
+++ b/kernel/kexec_handover.c
@@ -62,6 +62,13 @@ struct kho_out {
struct rw_semaphore tree_lock;
struct kho_node root;
+ /**
+ * Physical address of the first struct khoser_mem_chunk containing
+ * serialized data from struct kho_mem_track.
+ */
+ phys_addr_t first_chunk_phys;
+ struct kho_node preserved_memory;
+
void *fdt;
u64 fdt_max;
};
@@ -70,6 +77,7 @@ static struct kho_out kho_out = {
.chain_head = BLOCKING_NOTIFIER_INIT(kho_out.chain_head),
.tree_lock = __RWSEM_INITIALIZER(kho_out.tree_lock),
.root = KHO_NODE_INIT,
+ .preserved_memory = KHO_NODE_INIT,
.fdt_max = 10 * SZ_1M,
};
@@ -237,6 +245,461 @@ int kho_node_check_compatible(const struct kho_in_node *node,
}
EXPORT_SYMBOL_GPL(kho_node_check_compatible);
+/*
+ * Keep track of memory that is to be preserved across KHO.
+ *
+ * The serializing side uses two levels of xarrays to manage chunks of
+ * per-order 512-byte bitmaps. For instance, the entire 1G order of a 1TB
+ * system would fit inside a single 512-byte bitmap. For order-0 allocations
+ * each bitmap will cover 16M of address space. Thus, for 16G of memory at
+ * most 512K of bitmap memory will be needed for order 0.
+ *
+ * This approach is fully incremental: as the serialization progresses,
+ * folios can continue to be added to the tracker. The final step,
+ * immediately prior to kexec, serializes the xarray information into a
+ * linked list for the successor kernel to parse.
+ */
+
+#define PRESERVE_BITS (512 * 8)
+
+struct kho_mem_phys_bits {
+ DECLARE_BITMAP(preserve, PRESERVE_BITS);
+};
+
+struct kho_mem_phys {
+ /*
+ * Points to kho_mem_phys_bits, a sparse bitmap array. Each bit
+ * represents one page of the tracked order.
+ */
+ struct xarray phys_bits;
+};
+
+struct kho_mem_track {
+ /* Points to kho_mem_phys, each order gets its own bitmap tree */
+ struct xarray orders;
+};
+
+static struct kho_mem_track kho_mem_track;
+
+static void *xa_load_or_alloc(struct xarray *xa, unsigned long index, size_t sz)
+{
+ void *elm, *res;
+
+ elm = xa_load(xa, index);
+ if (elm)
+ return elm;
+
+ elm = kzalloc(sz, GFP_KERNEL);
+ if (!elm)
+ return ERR_PTR(-ENOMEM);
+
+ res = xa_cmpxchg(xa, index, NULL, elm, GFP_KERNEL);
+ if (xa_is_err(res))
+ res = ERR_PTR(xa_err(res));
+
+ if (res) {
+ kfree(elm);
+ return res;
+ }
+
+ return elm;
+}
+
+static void __kho_unpreserve(struct kho_mem_track *tracker, unsigned long pfn,
+ unsigned int order)
+{
+ struct kho_mem_phys_bits *bits;
+ struct kho_mem_phys *physxa;
+ unsigned long pfn_hi = pfn >> order;
+
+ physxa = xa_load(&tracker->orders, order);
+ if (!physxa)
+ return;
+
+ bits = xa_load(&physxa->phys_bits, pfn_hi / PRESERVE_BITS);
+ if (!bits)
+ return;
+
+ clear_bit(pfn_hi % PRESERVE_BITS, bits->preserve);
+}
+
+static int __kho_preserve(struct kho_mem_track *tracker, unsigned long pfn,
+ unsigned int order)
+{
+ struct kho_mem_phys_bits *bits;
+ struct kho_mem_phys *physxa;
+ unsigned long pfn_hi = pfn >> order;
+
+ might_sleep();
+
+ physxa = xa_load_or_alloc(&tracker->orders, order, sizeof(*physxa));
+ if (IS_ERR(physxa))
+ return PTR_ERR(physxa);
+
+ bits = xa_load_or_alloc(&physxa->phys_bits, pfn_hi / PRESERVE_BITS,
+ sizeof(*bits));
+ if (IS_ERR(bits))
+ return PTR_ERR(bits);
+
+ set_bit(pfn_hi % PRESERVE_BITS, bits->preserve);
+
+ return 0;
+}
+
+/**
+ * kho_preserve_folio - preserve a folio across KHO.
+ * @folio: folio to preserve
+ *
+ * Records that the entire folio is preserved across KHO. The order
+ * will be preserved as well.
+ *
+ * Return: 0 on success, error code on failure
+ */
+int kho_preserve_folio(struct folio *folio)
+{
+ unsigned long pfn = folio_pfn(folio);
+ unsigned int order = folio_order(folio);
+ int err;
+
+ if (!kho_enable)
+ return -EOPNOTSUPP;
+
+ down_read(&kho_out.tree_lock);
+ if (kho_out.fdt) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ err = __kho_preserve(&kho_mem_track, pfn, order);
+
+unlock:
+ up_read(&kho_out.tree_lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(kho_preserve_folio);
+
+/**
+ * kho_unpreserve_folio - unpreserve a folio
+ * @folio: folio to unpreserve
+ *
+ * Remove the record of a folio previously preserved by kho_preserve_folio().
+ *
+ * Return: 0 on success, error code on failure
+ */
+int kho_unpreserve_folio(struct folio *folio)
+{
+ unsigned long pfn = folio_pfn(folio);
+ unsigned int order = folio_order(folio);
+ int err = 0;
+
+ down_read(&kho_out.tree_lock);
+ if (kho_out.fdt) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ __kho_unpreserve(&kho_mem_track, pfn, order);
+
+unlock:
+ up_read(&kho_out.tree_lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(kho_unpreserve_folio);
+
+/**
+ * kho_preserve_phys - preserve a physically contiguous range across KHO.
+ * @phys: physical address of the range
+ * @size: size of the range
+ *
+ * Records that the entire range from @phys to @phys + @size is preserved
+ * across KHO.
+ *
+ * Return: 0 on success, error code on failure
+ */
+int kho_preserve_phys(phys_addr_t phys, size_t size)
+{
+ unsigned long pfn = PHYS_PFN(phys), end_pfn = PHYS_PFN(phys + size);
+ unsigned int order = ilog2(end_pfn - pfn);
+ unsigned long failed_pfn;
+ int err = 0;
+
+ if (!kho_enable)
+ return -EOPNOTSUPP;
+
+ down_read(&kho_out.tree_lock);
+ if (kho_out.fdt) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ for (; pfn < end_pfn;
+ pfn += (1 << order), order = ilog2(end_pfn - pfn)) {
+ err = __kho_preserve(&kho_mem_track, pfn, order);
+ if (err) {
+ failed_pfn = pfn;
+ break;
+ }
+ }
+
+ if (err)
+ for (pfn = PHYS_PFN(phys), order = ilog2(end_pfn - pfn);
+ pfn < failed_pfn; pfn += (1 << order), order = ilog2(end_pfn - pfn))
+ __kho_unpreserve(&kho_mem_track, pfn, order);
+
+unlock:
+ up_read(&kho_out.tree_lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(kho_preserve_phys);
+
+/**
+ * kho_unpreserve_phys - unpreserve a physically contiguous range
+ * @phys: physical address of the range
+ * @size: size of the range
+ *
+ * Remove the record of a range previously preserved by kho_preserve_phys().
+ *
+ * Return: 0 on success, error code on failure
+ */
+int kho_unpreserve_phys(phys_addr_t phys, size_t size)
+{
+ unsigned long pfn = PHYS_PFN(phys), end_pfn = PHYS_PFN(phys + size);
+ unsigned int order = ilog2(end_pfn - pfn);
+ int err = 0;
+
+ down_read(&kho_out.tree_lock);
+ if (kho_out.fdt) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ for (; pfn < end_pfn; pfn += (1 << order), order = ilog2(end_pfn - pfn))
+ __kho_unpreserve(&kho_mem_track, pfn, order);
+
+unlock:
+ up_read(&kho_out.tree_lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(kho_unpreserve_phys);
+
+/* almost like free_reserved_page(), just don't free the page */
+static void kho_restore_page(struct page *page)
+{
+ ClearPageReserved(page);
+ init_page_count(page);
+ adjust_managed_page_count(page, 1);
+}
+
+struct folio *kho_restore_folio(phys_addr_t phys)
+{
+ struct page *page = pfn_to_online_page(PHYS_PFN(phys));
+ unsigned long order;
+
+ if (!page)
+ return NULL;
+
+ order = page->private;
+ if (order)
+ prep_compound_page(page, order);
+ else
+ kho_restore_page(page);
+
+ return page_folio(page);
+}
+EXPORT_SYMBOL_GPL(kho_restore_folio);
+
+void *kho_restore_phys(phys_addr_t phys, size_t size)
+{
+ unsigned long start_pfn, end_pfn, pfn;
+ void *va = __va(phys);
+
+ start_pfn = PFN_DOWN(phys);
+ end_pfn = PFN_UP(phys + size);
+
+ for (pfn = start_pfn; pfn < end_pfn; pfn++) {
+ struct page *page = pfn_to_online_page(pfn);
+
+ if (!page)
+ return NULL;
+ kho_restore_page(page);
+ }
+
+ return va;
+}
+EXPORT_SYMBOL_GPL(kho_restore_phys);
+
+#define KHOSER_PTR(type) \
+ union { \
+ phys_addr_t phys; \
+ type ptr; \
+ }
+#define KHOSER_STORE_PTR(dest, val) \
+ ({ \
+ (dest).phys = virt_to_phys(val); \
+ typecheck(typeof((dest).ptr), val); \
+ })
+#define KHOSER_LOAD_PTR(src) \
+ ((src).phys ? (typeof((src).ptr))(phys_to_virt((src).phys)) : NULL)
+
+struct khoser_mem_bitmap_ptr {
+ phys_addr_t phys_start;
+ KHOSER_PTR(struct kho_mem_phys_bits *) bitmap;
+};
+
+struct khoser_mem_chunk;
+
+struct khoser_mem_chunk_hdr {
+ KHOSER_PTR(struct khoser_mem_chunk *) next;
+ unsigned int order;
+ unsigned int num_elms;
+};
+
+#define KHOSER_BITMAP_SIZE \
+ ((PAGE_SIZE - sizeof(struct khoser_mem_chunk_hdr)) / \
+ sizeof(struct khoser_mem_bitmap_ptr))
+
+struct khoser_mem_chunk {
+ struct khoser_mem_chunk_hdr hdr;
+ struct khoser_mem_bitmap_ptr bitmaps[KHOSER_BITMAP_SIZE];
+};
+static_assert(sizeof(struct khoser_mem_chunk) == PAGE_SIZE);
+
+static struct khoser_mem_chunk *new_chunk(struct khoser_mem_chunk *cur_chunk,
+ unsigned long order)
+{
+ struct khoser_mem_chunk *chunk;
+
+ chunk = (struct khoser_mem_chunk *)get_zeroed_page(GFP_KERNEL);
+ if (!chunk)
+ return NULL;
+ chunk->hdr.order = order;
+ if (cur_chunk)
+ KHOSER_STORE_PTR(cur_chunk->hdr.next, chunk);
+ return chunk;
+}
+
+static void kho_mem_ser_free(struct khoser_mem_chunk *first_chunk)
+{
+ struct khoser_mem_chunk *chunk = first_chunk;
+
+ while (chunk) {
+ unsigned long chunk_page = (unsigned long)chunk;
+
+ chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
+ free_page(chunk_page);
+ }
+}
+
+/*
+ * Record all the bitmaps in a linked list of pages for the next kernel to
+ * process. Each chunk holds bitmaps of the same order and each block of bitmaps
+ * starts at a given physical address. This allows the bitmaps to be sparse. The
+ * xarray is used to store them in a tree while building up the data structure,
+ * but the KHO successor kernel only needs to process them once in order.
+ *
+ * All of this memory is normal kmalloc() memory and is not marked for
+ * preservation. The successor kernel will remain isolated to the scratch space
+ * until it completes processing this list. Once processed all the memory
+ * storing these ranges will be marked as free.
+ */
+static struct khoser_mem_chunk *kho_mem_serialize(void)
+{
+ struct kho_mem_track *tracker = &kho_mem_track;
+ struct khoser_mem_chunk *first_chunk = NULL;
+ struct khoser_mem_chunk *chunk = NULL;
+ struct kho_mem_phys *physxa;
+ unsigned long order;
+
+ xa_for_each(&tracker->orders, order, physxa) {
+ struct kho_mem_phys_bits *bits;
+ unsigned long phys;
+
+ chunk = new_chunk(chunk, order);
+ if (!chunk)
+ goto err_free;
+
+ if (!first_chunk)
+ first_chunk = chunk;
+
+ xa_for_each(&physxa->phys_bits, phys, bits) {
+ struct khoser_mem_bitmap_ptr *elm;
+
+ if (chunk->hdr.num_elms == ARRAY_SIZE(chunk->bitmaps)) {
+ chunk = new_chunk(chunk, order);
+ if (!chunk)
+ goto err_free;
+ }
+
+ elm = &chunk->bitmaps[chunk->hdr.num_elms];
+ chunk->hdr.num_elms++;
+ elm->phys_start = (phys * PRESERVE_BITS)
+ << (order + PAGE_SHIFT);
+ KHOSER_STORE_PTR(elm->bitmap, bits);
+ }
+ }
+
+ return first_chunk;
+
+err_free:
+ kho_mem_ser_free(first_chunk);
+ return ERR_PTR(-ENOMEM);
+}
+
+static void deserialize_bitmap(unsigned int order,
+ struct khoser_mem_bitmap_ptr *elm)
+{
+ struct kho_mem_phys_bits *bitmap = KHOSER_LOAD_PTR(elm->bitmap);
+ unsigned long bit;
+
+ for_each_set_bit(bit, bitmap->preserve, PRESERVE_BITS) {
+ int sz = 1 << (order + PAGE_SHIFT);
+ phys_addr_t phys =
+ elm->phys_start + (bit << (order + PAGE_SHIFT));
+ struct page *page = phys_to_page(phys);
+
+ memblock_reserve(phys, sz);
+ memblock_reserved_mark_noinit(phys, sz);
+ page->private = order;
+ }
+}
+
+static void __init kho_mem_deserialize(void)
+{
+ struct khoser_mem_chunk *chunk;
+ struct kho_in_node preserved_mem;
+ const phys_addr_t *mem;
+ int err;
+ u32 len;
+
+ err = kho_get_node(NULL, "preserved-memory", &preserved_mem);
+ if (err) {
+ pr_err("no preserved-memory node: %d\n", err);
+ return;
+ }
+
+ mem = kho_get_prop(&preserved_mem, "metadata", &len);
+ if (!mem || len != sizeof(*mem)) {
+ pr_err("failed to get preserved memory bitmaps\n");
+ return;
+ }
+
+ chunk = *mem ? phys_to_virt(*mem) : NULL;
+ while (chunk) {
+ unsigned int i;
+
+ memblock_reserve(virt_to_phys(chunk), sizeof(*chunk));
+
+ for (i = 0; i != chunk->hdr.num_elms; i++)
+ deserialize_bitmap(chunk->hdr.order,
+ &chunk->bitmaps[i]);
+ chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
+ }
+}
+
/* Helper functions for KHO state tree */
struct kho_prop {
@@ -545,6 +1008,11 @@ static int kho_unfreeze(void)
if (fdt)
kvfree(fdt);
+ if (kho_out.first_chunk_phys) {
+ kho_mem_ser_free(phys_to_virt(kho_out.first_chunk_phys));
+ kho_out.first_chunk_phys = 0;
+ }
+
err = blocking_notifier_call_chain(&kho_out.chain_head,
KEXEC_KHO_UNFREEZE, NULL);
err = notifier_to_errno(err);
@@ -633,6 +1101,7 @@ static int kho_finalize(void)
{
int err = 0;
void *fdt;
+ struct khoser_mem_chunk *first_chunk;
fdt = kvmalloc(kho_out.fdt_max, GFP_KERNEL);
if (!fdt)
@@ -648,6 +1117,13 @@ static int kho_finalize(void)
kho_out.fdt = fdt;
up_write(&kho_out.tree_lock);
+ first_chunk = kho_mem_serialize();
+ if (IS_ERR(first_chunk)) {
+ err = PTR_ERR(first_chunk);
+ goto unfreeze;
+ }
+ kho_out.first_chunk_phys = first_chunk ? virt_to_phys(first_chunk) : 0;
+
err = kho_convert_tree(fdt, kho_out.fdt_max);
unfreeze:
@@ -829,6 +1305,10 @@ static __init int kho_init(void)
kho_out.root.name = "";
err = kho_add_string_prop(&kho_out.root, "compatible", "kho-v1");
+ err |= kho_add_prop(&kho_out.preserved_memory, "metadata",
+ &kho_out.first_chunk_phys, sizeof(phys_addr_t));
+ err |= kho_add_node(&kho_out.root, "preserved-memory",
+ &kho_out.preserved_memory);
if (err)
goto err_free_scratch;
@@ -1079,10 +1559,12 @@ static void __init kho_release_scratch(void)
void __init kho_memory_init(void)
{
- if (!kho_get_fdt())
+ if (!kho_get_fdt()) {
kho_reserve_scratch();
- else
+ } else {
+ kho_mem_deserialize();
kho_release_scratch();
+ }
}
void __init kho_populate(phys_addr_t handover_fdt_phys,
--
2.48.1.711.g2feabab25a-goog