From: Pasha Tatashin <pasha.tatashin@soleen.com>
To: akpm@linux-foundation.org, brauner@kernel.org, corbet@lwn.net,
graf@amazon.com, jgg@ziepe.ca, linux-kernel@vger.kernel.org,
linux-kselftest@vger.kernel.org, linux-mm@kvack.org,
masahiroy@kernel.org, ojeda@kernel.org,
pasha.tatashin@soleen.com, pratyush@kernel.org,
rdunlap@infradead.org, rppt@kernel.org, tj@kernel.org
Subject: [PATCH v8 4/8] kho: add interfaces to unpreserve folios and page ranges
Date: Fri, 24 Oct 2025 12:09:58 -0400
Message-ID: <20251024161002.747372-5-pasha.tatashin@soleen.com>
In-Reply-To: <20251024161002.747372-1-pasha.tatashin@soleen.com>

Allow users of KHO to cancel a previous preservation by adding the
necessary interfaces to unpreserve folios and page ranges.

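For illustration only, a caller that fails partway through its own setup
can now roll back a preservation. This is a minimal sketch, not code from
this series; example_setup() and example_next_step() are hypothetical:

    static int example_setup(struct folio *folio)
    {
            int err;

            err = kho_preserve_folio(folio);
            if (err)
                    return err;

            err = example_next_step();      /* hypothetical follow-up step */
            if (err)
                    kho_unpreserve_folio(folio);    /* roll back preservation */

            return err;
    }
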
Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Reviewed-by: Pratyush Yadav <pratyush@kernel.org>
Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
---
 include/linux/kexec_handover.h | 12 +++++
 kernel/kexec_handover.c        | 84 ++++++++++++++++++++++++++++------
 2 files changed, 83 insertions(+), 13 deletions(-)
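
Note (illustrative, not part of the patch): kho_unpreserve_pages() is the
rollback counterpart of kho_preserve_pages(), e.g. when preserving several
ranges and one of them fails. A hypothetical sketch of such a caller:

    /* Hypothetical: preserve n page ranges, undo all on first failure. */
    static int example_preserve_ranges(struct page **pages,
                                       unsigned int *counts, int n)
    {
            int i, err;

            for (i = 0; i < n; i++) {
                    err = kho_preserve_pages(pages[i], counts[i]);
                    if (err)
                            goto undo;
            }

            return 0;

    undo:
            while (--i >= 0)
                    kho_unpreserve_pages(pages[i], counts[i]);

            return err;
    }
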
diff --git a/include/linux/kexec_handover.h b/include/linux/kexec_handover.h
index 2faf290803ce..4ba145713838 100644
--- a/include/linux/kexec_handover.h
+++ b/include/linux/kexec_handover.h
@@ -43,7 +43,9 @@ bool kho_is_enabled(void);
bool is_kho_boot(void);
int kho_preserve_folio(struct folio *folio);
+int kho_unpreserve_folio(struct folio *folio);
int kho_preserve_pages(struct page *page, unsigned int nr_pages);
+int kho_unpreserve_pages(struct page *page, unsigned int nr_pages);
int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation);
struct folio *kho_restore_folio(phys_addr_t phys);
struct page *kho_restore_pages(phys_addr_t phys, unsigned int nr_pages);
@@ -76,11 +78,21 @@ static inline int kho_preserve_folio(struct folio *folio)
return -EOPNOTSUPP;
}
+static inline int kho_unpreserve_folio(struct folio *folio)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int kho_preserve_pages(struct page *page, unsigned int nr_pages)
{
return -EOPNOTSUPP;
}
+static inline int kho_unpreserve_pages(struct page *page, unsigned int nr_pages)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int kho_preserve_vmalloc(void *ptr,
struct kho_vmalloc *preservation)
{
diff --git a/kernel/kexec_handover.c b/kernel/kexec_handover.c
index 82137eba1474..994ee0b70757 100644
--- a/kernel/kexec_handover.c
+++ b/kernel/kexec_handover.c
@@ -157,26 +157,33 @@ static void *xa_load_or_alloc(struct xarray *xa, unsigned long index)
return no_free_ptr(elm);
}
-static void __kho_unpreserve(struct kho_mem_track *track, unsigned long pfn,
- unsigned long end_pfn)
+static void __kho_unpreserve_order(struct kho_mem_track *track, unsigned long pfn,
+ unsigned int order)
{
struct kho_mem_phys_bits *bits;
struct kho_mem_phys *physxa;
+ const unsigned long pfn_high = pfn >> order;
- while (pfn < end_pfn) {
- const unsigned int order =
- min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
- const unsigned long pfn_high = pfn >> order;
+ physxa = xa_load(&track->orders, order);
+ if (!physxa)
+ return;
+
+ bits = xa_load(&physxa->phys_bits, pfn_high / PRESERVE_BITS);
+ if (!bits)
+ return;
- physxa = xa_load(&track->orders, order);
- if (!physxa)
- continue;
+ clear_bit(pfn_high % PRESERVE_BITS, bits->preserve);
+}
+
+static void __kho_unpreserve(struct kho_mem_track *track, unsigned long pfn,
+ unsigned long end_pfn)
+{
+ unsigned int order;
- bits = xa_load(&physxa->phys_bits, pfn_high / PRESERVE_BITS);
- if (!bits)
- continue;
+ while (pfn < end_pfn) {
+ order = min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
- clear_bit(pfn_high % PRESERVE_BITS, bits->preserve);
+ __kho_unpreserve_order(track, pfn, order);
pfn += 1 << order;
}
@@ -745,6 +752,30 @@ int kho_preserve_folio(struct folio *folio)
}
EXPORT_SYMBOL_GPL(kho_preserve_folio);
+/**
+ * kho_unpreserve_folio - unpreserve a folio.
+ * @folio: folio to unpreserve.
+ *
+ * Instructs KHO to unpreserve a folio that was preserved by
+ * kho_preserve_folio() before. The provided @folio (pfn and order)
+ * must exactly match a previously preserved folio.
+ *
+ * Return: 0 on success, error code on failure
+ */
+int kho_unpreserve_folio(struct folio *folio)
+{
+ const unsigned long pfn = folio_pfn(folio);
+ const unsigned int order = folio_order(folio);
+ struct kho_mem_track *track = &kho_out.track;
+
+ if (kho_out.finalized)
+ return -EBUSY;
+
+ __kho_unpreserve_order(track, pfn, order);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kho_unpreserve_folio);
+
/**
* kho_preserve_pages - preserve contiguous pages across kexec
* @page: first page in the list.
@@ -789,6 +820,33 @@ int kho_preserve_pages(struct page *page, unsigned int nr_pages)
}
EXPORT_SYMBOL_GPL(kho_preserve_pages);
+/**
+ * kho_unpreserve_pages - unpreserve contiguous pages.
+ * @page: first page in the list.
+ * @nr_pages: number of pages.
+ *
+ * Instructs KHO to unpreserve @nr_pages contiguous pages starting from @page.
+ * This must be called with the same @page and @nr_pages as the corresponding
+ * kho_preserve_pages() call. Unpreserving arbitrary sub-ranges of larger
+ * preserved blocks is not supported.
+ *
+ * Return: 0 on success, error code on failure
+ */
+int kho_unpreserve_pages(struct page *page, unsigned int nr_pages)
+{
+ struct kho_mem_track *track = &kho_out.track;
+ const unsigned long start_pfn = page_to_pfn(page);
+ const unsigned long end_pfn = start_pfn + nr_pages;
+
+ if (kho_out.finalized)
+ return -EBUSY;
+
+ __kho_unpreserve(track, start_pfn, end_pfn);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kho_unpreserve_pages);
+
struct kho_vmalloc_hdr {
DECLARE_KHOSER_PTR(next, struct kho_vmalloc_chunk *);
};
--
2.51.1.821.gb6fe4d2222-goog