* [PATCH v3 1/6] KVM: SVM: Fix a missing kunmap_local() in sev_gmem_post_populate()
From: Michael Roth @ 2026-01-08 21:46 UTC
To: kvm
Cc: linux-coco, linux-mm, linux-kernel, thomas.lendacky, pbonzini,
seanjc, vbabka, ashish.kalra, liam.merwick, david, vannapurve,
ackerleytng, aik, ira.weiny, yan.y.zhao, pankaj.gupta
From: Yan Zhao <yan.y.zhao@intel.com>
sev_gmem_post_populate() needs to unmap the target vaddr when the
copy_from_user() into that vaddr fails; otherwise the error path leaks
the kmap_local() mapping.
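For reference, a condensed sketch of the fixed error path, mirroring the
hunk below:

    void *vaddr = kmap_local_pfn(pfn + i);

    if (copy_from_user(vaddr, src + i * PAGE_SIZE, PAGE_SIZE)) {
            kunmap_local(vaddr);    /* previously missing, leaking the mapping */
            ret = -EFAULT;
            goto err;
    }
    kunmap_local(vaddr);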
Fixes: dee5a47cc7a4 ("KVM: SEV: Add KVM_SEV_SNP_LAUNCH_UPDATE command")
Signed-off-by: Yan Zhao <yan.y.zhao@intel.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
---
arch/x86/kvm/svm/sev.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index f59c65abe3cf..261d9ef8631b 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2296,6 +2296,7 @@ static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pf
void *vaddr = kmap_local_pfn(pfn + i);
if (copy_from_user(vaddr, src + i * PAGE_SIZE, PAGE_SIZE)) {
+ kunmap_local(vaddr);
ret = -EFAULT;
goto err;
}
--
2.25.1
* [PATCH v3 2/6] KVM: guest_memfd: Remove partial hugepage handling from kvm_gmem_populate()
From: Michael Roth @ 2026-01-08 21:46 UTC
To: kvm
Cc: linux-coco, linux-mm, linux-kernel, thomas.lendacky, pbonzini,
seanjc, vbabka, ashish.kalra, liam.merwick, david, vannapurve,
ackerleytng, aik, ira.weiny, yan.y.zhao, pankaj.gupta, Kai Huang
kvm_gmem_populate() and the associated post-populate callbacks have some
limited support for dealing with guests backed by hugepages: the order
information is passed along to each post-populate callback, and the
pages passed to kvm_gmem_populate() are iterated through in
hugepage-sized chunks.
However, guest_memfd doesn't yet support hugepages, and in most cases
additional changes in the kvm_gmem_populate() path would also be needed
to actually allow for this functionality.
This makes the existing code unnecessarily complex, and makes changes
difficult to work through upstream due to theoretical impacts on
hugepage support that can't be assessed properly without an actual
hugepage implementation to reference. So, for now, remove what's there
so that changes for things like in-place conversion can be
implemented/reviewed more efficiently.
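As a rough sketch of the net effect (condensed from the guest_memfd.c
hunk below, error handling elided), the population loop now walks
strictly order-0 pages:

    for (i = 0; i < npages; i++) {
            gfn_t gfn = start_gfn + i;
            pgoff_t index = kvm_gmem_get_index(slot, gfn);
            kvm_pfn_t pfn;

            /* max_order is no longer requested; every page is order-0. */
            folio = __kvm_gmem_get_pfn(file, slot, index, &pfn, &is_prepared, NULL);
            ...
            p = src ? src + i * PAGE_SIZE : NULL;
            ret = post_populate(kvm, gfn, pfn, p, opaque);
            ...
    }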
Suggested-by: Vishal Annapurve <vannapurve@google.com>
Co-developed-by: Vishal Annapurve <vannapurve@google.com>
Signed-off-by: Vishal Annapurve <vannapurve@google.com>
Tested-by: Vishal Annapurve <vannapurve@google.com>
Tested-by: Kai Huang <kai.huang@intel.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
---
arch/x86/kvm/svm/sev.c | 94 ++++++++++++++++------------------------
arch/x86/kvm/vmx/tdx.c | 2 +-
include/linux/kvm_host.h | 2 +-
virt/kvm/guest_memfd.c | 30 +++++++------
4 files changed, 56 insertions(+), 72 deletions(-)
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 261d9ef8631b..a70bd3f19e29 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2267,67 +2267,53 @@ struct sev_gmem_populate_args {
int fw_error;
};
-static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pfn,
- void __user *src, int order, void *opaque)
+static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
+ void __user *src, void *opaque)
{
struct sev_gmem_populate_args *sev_populate_args = opaque;
+ struct sev_data_snp_launch_update fw_args = {0};
struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
- int n_private = 0, ret, i;
- int npages = (1 << order);
- gfn_t gfn;
+ bool assigned = false;
+ int level;
+ int ret;
if (WARN_ON_ONCE(sev_populate_args->type != KVM_SEV_SNP_PAGE_TYPE_ZERO && !src))
return -EINVAL;
- for (gfn = gfn_start, i = 0; gfn < gfn_start + npages; gfn++, i++) {
- struct sev_data_snp_launch_update fw_args = {0};
- bool assigned = false;
- int level;
-
- ret = snp_lookup_rmpentry((u64)pfn + i, &assigned, &level);
- if (ret || assigned) {
- pr_debug("%s: Failed to ensure GFN 0x%llx RMP entry is initial shared state, ret: %d assigned: %d\n",
- __func__, gfn, ret, assigned);
- ret = ret ? -EINVAL : -EEXIST;
- goto err;
- }
+ ret = snp_lookup_rmpentry((u64)pfn, &assigned, &level);
+ if (ret || assigned) {
+ pr_debug("%s: Failed to ensure GFN 0x%llx RMP entry is initial shared state, ret: %d assigned: %d\n",
+ __func__, gfn, ret, assigned);
+ ret = ret ? -EINVAL : -EEXIST;
+ goto out;
+ }
- if (src) {
- void *vaddr = kmap_local_pfn(pfn + i);
+ if (src) {
+ void *vaddr = kmap_local_pfn(pfn);
- if (copy_from_user(vaddr, src + i * PAGE_SIZE, PAGE_SIZE)) {
- kunmap_local(vaddr);
- ret = -EFAULT;
- goto err;
- }
+ if (copy_from_user(vaddr, src, PAGE_SIZE)) {
kunmap_local(vaddr);
+ ret = -EFAULT;
+ goto out;
}
-
- ret = rmp_make_private(pfn + i, gfn << PAGE_SHIFT, PG_LEVEL_4K,
- sev_get_asid(kvm), true);
- if (ret)
- goto err;
-
- n_private++;
-
- fw_args.gctx_paddr = __psp_pa(sev->snp_context);
- fw_args.address = __sme_set(pfn_to_hpa(pfn + i));
- fw_args.page_size = PG_LEVEL_TO_RMP(PG_LEVEL_4K);
- fw_args.page_type = sev_populate_args->type;
-
- ret = __sev_issue_cmd(sev_populate_args->sev_fd, SEV_CMD_SNP_LAUNCH_UPDATE,
- &fw_args, &sev_populate_args->fw_error);
- if (ret)
- goto fw_err;
+ kunmap_local(vaddr);
}
- return 0;
+ ret = rmp_make_private(pfn, gfn << PAGE_SHIFT, PG_LEVEL_4K,
+ sev_get_asid(kvm), true);
+ if (ret)
+ goto out;
+
+ fw_args.gctx_paddr = __psp_pa(sev->snp_context);
+ fw_args.address = __sme_set(pfn_to_hpa(pfn));
+ fw_args.page_size = PG_LEVEL_TO_RMP(PG_LEVEL_4K);
+ fw_args.page_type = sev_populate_args->type;
-fw_err:
+ ret = __sev_issue_cmd(sev_populate_args->sev_fd, SEV_CMD_SNP_LAUNCH_UPDATE,
+ &fw_args, &sev_populate_args->fw_error);
/*
* If the firmware command failed handle the reclaim and cleanup of that
- * PFN specially vs. prior pages which can be cleaned up below without
- * needing to reclaim in advance.
+ * PFN before reporting an error.
*
* Additionally, when invalid CPUID function entries are detected,
* firmware writes the expected values into the page and leaves it
@@ -2337,26 +2323,20 @@ static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pf
* information to provide information on which CPUID leaves/fields
* failed CPUID validation.
*/
- if (!snp_page_reclaim(kvm, pfn + i) &&
+ if (ret && !snp_page_reclaim(kvm, pfn) &&
sev_populate_args->type == KVM_SEV_SNP_PAGE_TYPE_CPUID &&
sev_populate_args->fw_error == SEV_RET_INVALID_PARAM) {
- void *vaddr = kmap_local_pfn(pfn + i);
+ void *vaddr = kmap_local_pfn(pfn);
- if (copy_to_user(src + i * PAGE_SIZE, vaddr, PAGE_SIZE))
+ if (copy_to_user(src, vaddr, PAGE_SIZE))
pr_debug("Failed to write CPUID page back to userspace\n");
kunmap_local(vaddr);
}
- /* pfn + i is hypervisor-owned now, so skip below cleanup for it. */
- n_private--;
-
-err:
- pr_debug("%s: exiting with error ret %d (fw_error %d), restoring %d gmem PFNs to shared.\n",
- __func__, ret, sev_populate_args->fw_error, n_private);
- for (i = 0; i < n_private; i++)
- kvm_rmp_make_shared(kvm, pfn + i, PG_LEVEL_4K);
-
+out:
+ pr_debug("%s: exiting with return code %d (fw_error %d)\n",
+ __func__, ret, sev_populate_args->fw_error);
return ret;
}
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 2d7a4d52ccfb..4fb042ce8ed1 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -3118,7 +3118,7 @@ struct tdx_gmem_post_populate_arg {
};
static int tdx_gmem_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
- void __user *src, int order, void *_arg)
+ void __user *src, void *_arg)
{
struct tdx_gmem_post_populate_arg *arg = _arg;
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d93f75b05ae2..1d0cee72e560 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2581,7 +2581,7 @@ int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_ord
* Returns the number of pages that were populated.
*/
typedef int (*kvm_gmem_populate_cb)(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
- void __user *src, int order, void *opaque);
+ void __user *src, void *opaque);
long kvm_gmem_populate(struct kvm *kvm, gfn_t gfn, void __user *src, long npages,
kvm_gmem_populate_cb post_populate, void *opaque);
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index fdaea3422c30..9dafa44838fe 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -151,6 +151,15 @@ static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
mapping_gfp_mask(inode->i_mapping), policy);
mpol_cond_put(policy);
+ /*
+ * External interfaces like kvm_gmem_get_pfn() support dealing
+ * with hugepages to a degree, but internally, guest_memfd currently
+ * assumes that all folios are order-0 and handling would need
+ * to be updated for anything otherwise (e.g. page-clearing
+ * operations).
+ */
+ WARN_ON_ONCE(folio_order(folio));
+
return folio;
}
@@ -829,7 +838,7 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
struct kvm_memory_slot *slot;
void __user *p;
- int ret = 0, max_order;
+ int ret = 0;
long i;
lockdep_assert_held(&kvm->slots_lock);
@@ -848,7 +857,7 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
filemap_invalidate_lock(file->f_mapping);
npages = min_t(ulong, slot->npages - (start_gfn - slot->base_gfn), npages);
- for (i = 0; i < npages; i += (1 << max_order)) {
+ for (i = 0; i < npages; i++) {
struct folio *folio;
gfn_t gfn = start_gfn + i;
pgoff_t index = kvm_gmem_get_index(slot, gfn);
@@ -860,7 +869,7 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
break;
}
- folio = __kvm_gmem_get_pfn(file, slot, index, &pfn, &is_prepared, &max_order);
+ folio = __kvm_gmem_get_pfn(file, slot, index, &pfn, &is_prepared, NULL);
if (IS_ERR(folio)) {
ret = PTR_ERR(folio);
break;
@@ -874,20 +883,15 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
}
folio_unlock(folio);
- WARN_ON(!IS_ALIGNED(gfn, 1 << max_order) ||
- (npages - i) < (1 << max_order));
ret = -EINVAL;
- while (!kvm_range_has_memory_attributes(kvm, gfn, gfn + (1 << max_order),
- KVM_MEMORY_ATTRIBUTE_PRIVATE,
- KVM_MEMORY_ATTRIBUTE_PRIVATE)) {
- if (!max_order)
- goto put_folio_and_exit;
- max_order--;
- }
+ if (!kvm_range_has_memory_attributes(kvm, gfn, gfn + 1,
+ KVM_MEMORY_ATTRIBUTE_PRIVATE,
+ KVM_MEMORY_ATTRIBUTE_PRIVATE))
+ goto put_folio_and_exit;
p = src ? src + i * PAGE_SIZE : NULL;
- ret = post_populate(kvm, gfn, pfn, p, max_order, opaque);
+ ret = post_populate(kvm, gfn, pfn, p, opaque);
if (!ret)
kvm_gmem_mark_prepared(folio);
--
2.25.1
* [PATCH v3 3/6] KVM: guest_memfd: Remove preparation tracking
From: Michael Roth @ 2026-01-08 21:46 UTC
To: kvm
Cc: linux-coco, linux-mm, linux-kernel, thomas.lendacky, pbonzini,
seanjc, vbabka, ashish.kalra, liam.merwick, david, vannapurve,
ackerleytng, aik, ira.weiny, yan.y.zhao, pankaj.gupta, Kai Huang
guest_memfd currently uses the folio uptodate flag to track:
1) whether or not a page has been cleared before initial usage
2) whether or not the architecture hooks have been issued to put the
page in a private state as defined by the architecture
In practice, 2) is only actually being tracked for SEV-SNP VMs, and
there do not seem to be any plans/reasons that would suggest this will
change in the future, so this additional tracking/complexity is not
really providing any general benefit to guest_memfd users.

Future plans around in-place conversion and hugepage support will make
the burden of tracking this information within guest_memfd even more
complex: the per-folio uptodate flag is planned to be used purely to
track the initial clearing of folios, whereas conversion operations
could trigger multiple transitions between 'prepared' and 'unprepared'
and thus need separate tracking. In addition, preparation generally
happens at fault time, on the "read-side" of any global locks that might
protect state tracked by guest_memfd, so updating "preparedness" state
while handling concurrent page faults for multiple vCPUs may require
more complex locking schemes.
Instead of keeping this current/future complexity within guest_memfd for
what is essentially just SEV-SNP, drop the tracking for 2) and trigger
the arch-specific preparation hooks unconditionally on every fault, so
that the hooks themselves can check the preparation state directly and
decide whether or not a folio still needs additional preparation. In the
case of SEV-SNP, the preparation state is already checked via the
preparation hooks to avoid double-preparation, so nothing extra needs to
be done there.
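Condensed from the hunk below, kvm_gmem_get_pfn() then uses the uptodate
flag purely to track initial clearing, and calls the preparation hook on
every fault:

    if (!folio_test_uptodate(folio)) {
            clear_highpage(folio_page(folio, 0));
            folio_mark_uptodate(folio);
    }

    /* Arch hook runs unconditionally; SEV-SNP avoids double-preparation itself. */
    r = kvm_gmem_prepare_folio(kvm, slot, gfn, folio);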
Reviewed-by: Vishal Annapurve <vannapurve@google.com>
Tested-by: Vishal Annapurve <vannapurve@google.com>
Reviewed-by: Pankaj Gupta <pankaj.gupta@amd.com>
Tested-by: Kai Huang <kai.huang@intel.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
---
virt/kvm/guest_memfd.c | 44 ++++++++++++------------------------------
1 file changed, 12 insertions(+), 32 deletions(-)
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 9dafa44838fe..8b1248f42aae 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -76,11 +76,6 @@ static int __kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slo
return 0;
}
-static inline void kvm_gmem_mark_prepared(struct folio *folio)
-{
- folio_mark_uptodate(folio);
-}
-
/*
* Process @folio, which contains @gfn, so that the guest can use it.
* The folio must be locked and the gfn must be contained in @slot.
@@ -90,13 +85,7 @@ static inline void kvm_gmem_mark_prepared(struct folio *folio)
static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
gfn_t gfn, struct folio *folio)
{
- unsigned long nr_pages, i;
pgoff_t index;
- int r;
-
- nr_pages = folio_nr_pages(folio);
- for (i = 0; i < nr_pages; i++)
- clear_highpage(folio_page(folio, i));
/*
* Preparing huge folios should always be safe, since it should
@@ -114,11 +103,8 @@ static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
WARN_ON(!IS_ALIGNED(slot->gmem.pgoff, folio_nr_pages(folio)));
index = kvm_gmem_get_index(slot, gfn);
index = ALIGN_DOWN(index, folio_nr_pages(folio));
- r = __kvm_gmem_prepare_folio(kvm, slot, index, folio);
- if (!r)
- kvm_gmem_mark_prepared(folio);
- return r;
+ return __kvm_gmem_prepare_folio(kvm, slot, index, folio);
}
/*
@@ -429,7 +415,7 @@ static vm_fault_t kvm_gmem_fault_user_mapping(struct vm_fault *vmf)
if (!folio_test_uptodate(folio)) {
clear_highpage(folio_page(folio, 0));
- kvm_gmem_mark_prepared(folio);
+ folio_mark_uptodate(folio);
}
vmf->page = folio_file_page(folio, vmf->pgoff);
@@ -766,7 +752,7 @@ void kvm_gmem_unbind(struct kvm_memory_slot *slot)
static struct folio *__kvm_gmem_get_pfn(struct file *file,
struct kvm_memory_slot *slot,
pgoff_t index, kvm_pfn_t *pfn,
- bool *is_prepared, int *max_order)
+ int *max_order)
{
struct file *slot_file = READ_ONCE(slot->gmem.file);
struct gmem_file *f = file->private_data;
@@ -796,7 +782,6 @@ static struct folio *__kvm_gmem_get_pfn(struct file *file,
if (max_order)
*max_order = 0;
- *is_prepared = folio_test_uptodate(folio);
return folio;
}
@@ -806,19 +791,22 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
{
pgoff_t index = kvm_gmem_get_index(slot, gfn);
struct folio *folio;
- bool is_prepared = false;
int r = 0;
CLASS(gmem_get_file, file)(slot);
if (!file)
return -EFAULT;
- folio = __kvm_gmem_get_pfn(file, slot, index, pfn, &is_prepared, max_order);
+ folio = __kvm_gmem_get_pfn(file, slot, index, pfn, max_order);
if (IS_ERR(folio))
return PTR_ERR(folio);
- if (!is_prepared)
- r = kvm_gmem_prepare_folio(kvm, slot, gfn, folio);
+ if (!folio_test_uptodate(folio)) {
+ clear_highpage(folio_page(folio, 0));
+ folio_mark_uptodate(folio);
+ }
+
+ r = kvm_gmem_prepare_folio(kvm, slot, gfn, folio);
folio_unlock(folio);
@@ -861,7 +849,6 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
struct folio *folio;
gfn_t gfn = start_gfn + i;
pgoff_t index = kvm_gmem_get_index(slot, gfn);
- bool is_prepared = false;
kvm_pfn_t pfn;
if (signal_pending(current)) {
@@ -869,19 +856,12 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
break;
}
- folio = __kvm_gmem_get_pfn(file, slot, index, &pfn, &is_prepared, NULL);
+ folio = __kvm_gmem_get_pfn(file, slot, index, &pfn, NULL);
if (IS_ERR(folio)) {
ret = PTR_ERR(folio);
break;
}
- if (is_prepared) {
- folio_unlock(folio);
- folio_put(folio);
- ret = -EEXIST;
- break;
- }
-
folio_unlock(folio);
ret = -EINVAL;
@@ -893,7 +873,7 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
p = src ? src + i * PAGE_SIZE : NULL;
ret = post_populate(kvm, gfn, pfn, p, opaque);
if (!ret)
- kvm_gmem_mark_prepared(folio);
+ folio_mark_uptodate(folio);
put_folio_and_exit:
folio_put(folio);
--
2.25.1
* [PATCH v3 4/6] KVM: SEV: Document/enforce page-alignment for KVM_SEV_SNP_LAUNCH_UPDATE
From: Michael Roth @ 2026-01-08 21:46 UTC
To: kvm
Cc: linux-coco, linux-mm, linux-kernel, thomas.lendacky, pbonzini,
seanjc, vbabka, ashish.kalra, liam.merwick, david, vannapurve,
ackerleytng, aik, ira.weiny, yan.y.zhao, pankaj.gupta, Kai Huang
KVM_SEV_SNP_LAUNCH_UPDATE has historically accepted a non-page-aligned
'uaddr' parameter to copy data from, but continuing to support this
alongside new functionality like in-place conversion and hugepages in
the pipeline has proven to be more trouble than it is worth, and no
users have been identified that actually rely on a non-page-aligned
'uaddr'.

Rather than locking guest_memfd into continuing to support this, go
ahead and document page-alignment as a requirement and begin enforcing
it in the handling function.
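The enforcement itself is minimal (from the sev.c hunk below); note that
a NULL src for KVM_SEV_SNP_PAGE_TYPE_ZERO is trivially page-aligned and
remains allowed:

    src = params.type == KVM_SEV_SNP_PAGE_TYPE_ZERO ? NULL : u64_to_user_ptr(params.uaddr);

    if (!PAGE_ALIGNED(src))
            return -EINVAL;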
Reviewed-by: Vishal Annapurve <vannapurve@google.com>
Tested-by: Kai Huang <kai.huang@intel.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
---
Documentation/virt/kvm/x86/amd-memory-encryption.rst | 2 +-
arch/x86/kvm/svm/sev.c | 6 +++++-
2 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/Documentation/virt/kvm/x86/amd-memory-encryption.rst b/Documentation/virt/kvm/x86/amd-memory-encryption.rst
index 1ddb6a86ce7f..5a88d0197cb3 100644
--- a/Documentation/virt/kvm/x86/amd-memory-encryption.rst
+++ b/Documentation/virt/kvm/x86/amd-memory-encryption.rst
@@ -523,7 +523,7 @@ Returns: 0 on success, < 0 on error, -EAGAIN if caller should retry
struct kvm_sev_snp_launch_update {
__u64 gfn_start; /* Guest page number to load/encrypt data into. */
- __u64 uaddr; /* Userspace address of data to be loaded/encrypted. */
+ __u64 uaddr; /* 4k-aligned address of data to be loaded/encrypted. */
__u64 len; /* 4k-aligned length in bytes to copy into guest memory.*/
__u8 type; /* The type of the guest pages being initialized. */
__u8 pad0;
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index a70bd3f19e29..b4409bc652d1 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2367,6 +2367,11 @@ static int snp_launch_update(struct kvm *kvm, struct kvm_sev_cmd *argp)
params.type != KVM_SEV_SNP_PAGE_TYPE_CPUID))
return -EINVAL;
+ src = params.type == KVM_SEV_SNP_PAGE_TYPE_ZERO ? NULL : u64_to_user_ptr(params.uaddr);
+
+ if (!PAGE_ALIGNED(src))
+ return -EINVAL;
+
npages = params.len / PAGE_SIZE;
/*
@@ -2398,7 +2403,6 @@ static int snp_launch_update(struct kvm *kvm, struct kvm_sev_cmd *argp)
sev_populate_args.sev_fd = argp->sev_fd;
sev_populate_args.type = params.type;
- src = params.type == KVM_SEV_SNP_PAGE_TYPE_ZERO ? NULL : u64_to_user_ptr(params.uaddr);
count = kvm_gmem_populate(kvm, params.gfn_start, src, npages,
sev_gmem_post_populate, &sev_populate_args);
--
2.25.1
* [PATCH v3 5/6] KVM: TDX: Document alignment requirements for KVM_TDX_INIT_MEM_REGION
From: Michael Roth @ 2026-01-08 21:46 UTC
To: kvm
Cc: linux-coco, linux-mm, linux-kernel, thomas.lendacky, pbonzini,
seanjc, vbabka, ashish.kalra, liam.merwick, david, vannapurve,
ackerleytng, aik, ira.weiny, yan.y.zhao, pankaj.gupta, Kai Huang
Since it was never possible to use a non-PAGE_SIZE-aligned @source_addr,
go ahead and document this as a requirement. This is in preparation for
enforcing page-aligned @source_addr for all architectures in
guest_memfd.
Reviewed-by: Vishal Annapurve <vannapurve@google.com>
Tested-by: Kai Huang <kai.huang@intel.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
---
Documentation/virt/kvm/x86/intel-tdx.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Documentation/virt/kvm/x86/intel-tdx.rst b/Documentation/virt/kvm/x86/intel-tdx.rst
index 5efac62c92c7..6a222e9d0954 100644
--- a/Documentation/virt/kvm/x86/intel-tdx.rst
+++ b/Documentation/virt/kvm/x86/intel-tdx.rst
@@ -156,7 +156,7 @@ KVM_TDX_INIT_MEM_REGION
:Returns: 0 on success, <0 on error
Initialize @nr_pages TDX guest private memory starting from @gpa with userspace
-provided data from @source_addr.
+provided data from @source_addr. @source_addr must be PAGE_SIZE-aligned.
Note, before calling this sub command, memory attribute of the range
[gpa, gpa + nr_pages] needs to be private. Userspace can use
--
2.25.1
* [PATCH v3 6/6] KVM: guest_memfd: GUP source pages prior to populating guest memory
From: Michael Roth @ 2026-01-08 21:46 UTC
To: kvm
Cc: linux-coco, linux-mm, linux-kernel, thomas.lendacky, pbonzini,
seanjc, vbabka, ashish.kalra, liam.merwick, david, vannapurve,
ackerleytng, aik, ira.weiny, yan.y.zhao, pankaj.gupta, Kai Huang
Currently the post-populate callbacks handle copying source pages into
private GPA ranges backed by guest_memfd: kvm_gmem_populate() acquires
the filemap invalidate lock, then calls a post-populate callback which
may issue get_user_pages() on the source pages prior to copying them
into the private GPA (e.g. for TDX).
This will not be compatible with in-place conversion, where the
userspace page fault path will attempt to acquire the filemap invalidate
lock while holding the mm->mmap_lock, leading to a potential ABBA
deadlock.
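Roughly, the problematic ordering would look like this (a sketch,
assuming an in-place-conversion world where guest_memfd pages can be
faulted into userspace page tables, and GUP falls back to its slow
path):

    kvm_gmem_populate()                userspace fault on gmem page
    -------------------                ----------------------------
    filemap_invalidate_lock()          mmap_read_lock()
    post_populate() -> GUP ->
      mmap_read_lock()                 filemap_invalidate_lock()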
Address this by hoisting the GUP above the filemap invalidate lock, so
that any page faults on the source pages are taken early, prior to
acquiring the filemap invalidate lock.
It's not currently clear whether this issue is reachable with the
current implementation of guest_memfd, which doesn't support in-place
conversion. However, hoisting the GUP does provide a consistent
mechanism for handing stable source/target PFNs to the callbacks rather
than punting to vendor-specific code, allowing for more commonality
across architectures, which may be worthwhile even without in-place
conversion.
As part of this change, also begin enforcing that the 'src' argument to
kvm_gmem_populate() must be page-aligned, as this greatly reduces the
complexity of the post-populate callbacks, and no current in-tree user
passes a non-page-aligned 'src' argument.
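Condensed from the guest_memfd.c changes below, the populate loop
becomes (error handling elided):

    for (i = 0; i < npages; i++) {
            struct page *src_page = NULL;
            void __user *p = src ? src + i * PAGE_SIZE : NULL;

            /* Fault in the source page before taking the invalidate lock. */
            if (p) {
                    ret = get_user_pages_fast((unsigned long)p, 1, 0, &src_page);
                    ...
            }

            /* Acquires/releases filemap_invalidate_lock() internally. */
            ret = __kvm_gmem_populate(kvm, slot, file, start_gfn + i, src_page,
                                      post_populate, opaque);
            if (src_page)
                    put_page(src_page);
            ...
    }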
Suggested-by: Sean Christopherson <seanjc@google.com>
Co-developed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Co-developed-by: Vishal Annapurve <vannapurve@google.com>
Signed-off-by: Vishal Annapurve <vannapurve@google.com>
Tested-by: Vishal Annapurve <vannapurve@google.com>
Tested-by: Kai Huang <kai.huang@intel.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
---
arch/x86/kvm/svm/sev.c | 33 ++++++++--------
arch/x86/kvm/vmx/tdx.c | 16 ++------
include/linux/kvm_host.h | 4 +-
virt/kvm/guest_memfd.c | 84 +++++++++++++++++++++++++++-------------
4 files changed, 79 insertions(+), 58 deletions(-)
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index b4409bc652d1..0ab7c89262fb 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2268,7 +2268,7 @@ struct sev_gmem_populate_args {
};
static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
- void __user *src, void *opaque)
+ struct page *src_page, void *opaque)
{
struct sev_gmem_populate_args *sev_populate_args = opaque;
struct sev_data_snp_launch_update fw_args = {0};
@@ -2277,7 +2277,7 @@ static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
int level;
int ret;
- if (WARN_ON_ONCE(sev_populate_args->type != KVM_SEV_SNP_PAGE_TYPE_ZERO && !src))
+ if (WARN_ON_ONCE(sev_populate_args->type != KVM_SEV_SNP_PAGE_TYPE_ZERO && !src_page))
return -EINVAL;
ret = snp_lookup_rmpentry((u64)pfn, &assigned, &level);
@@ -2288,15 +2288,14 @@ static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
goto out;
}
- if (src) {
- void *vaddr = kmap_local_pfn(pfn);
+ if (src_page) {
+ void *src_vaddr = kmap_local_page(src_page);
+ void *dst_vaddr = kmap_local_pfn(pfn);
- if (copy_from_user(vaddr, src, PAGE_SIZE)) {
- kunmap_local(vaddr);
- ret = -EFAULT;
- goto out;
- }
- kunmap_local(vaddr);
+ memcpy(dst_vaddr, src_vaddr, PAGE_SIZE);
+
+ kunmap_local(src_vaddr);
+ kunmap_local(dst_vaddr);
}
ret = rmp_make_private(pfn, gfn << PAGE_SHIFT, PG_LEVEL_4K,
@@ -2326,17 +2325,19 @@ static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
if (ret && !snp_page_reclaim(kvm, pfn) &&
sev_populate_args->type == KVM_SEV_SNP_PAGE_TYPE_CPUID &&
sev_populate_args->fw_error == SEV_RET_INVALID_PARAM) {
- void *vaddr = kmap_local_pfn(pfn);
+ void *src_vaddr = kmap_local_page(src_page);
+ void *dst_vaddr = kmap_local_pfn(pfn);
- if (copy_to_user(src, vaddr, PAGE_SIZE))
- pr_debug("Failed to write CPUID page back to userspace\n");
+ memcpy(src_vaddr, dst_vaddr, PAGE_SIZE);
- kunmap_local(vaddr);
+ kunmap_local(src_vaddr);
+ kunmap_local(dst_vaddr);
}
out:
- pr_debug("%s: exiting with return code %d (fw_error %d)\n",
- __func__, ret, sev_populate_args->fw_error);
+ if (ret)
+ pr_debug("%s: error updating GFN %llx, return code %d (fw_error %d)\n",
+ __func__, gfn, ret, sev_populate_args->fw_error);
return ret;
}
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 4fb042ce8ed1..5df9d32d2058 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -3118,34 +3118,24 @@ struct tdx_gmem_post_populate_arg {
};
static int tdx_gmem_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
- void __user *src, void *_arg)
+ struct page *src_page, void *_arg)
{
struct tdx_gmem_post_populate_arg *arg = _arg;
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
u64 err, entry, level_state;
gpa_t gpa = gfn_to_gpa(gfn);
- struct page *src_page;
int ret, i;
if (KVM_BUG_ON(kvm_tdx->page_add_src, kvm))
return -EIO;
- /*
- * Get the source page if it has been faulted in. Return failure if the
- * source page has been swapped out or unmapped in primary memory.
- */
- ret = get_user_pages_fast((unsigned long)src, 1, 0, &src_page);
- if (ret < 0)
- return ret;
- if (ret != 1)
- return -ENOMEM;
+ if (!src_page)
+ return -EOPNOTSUPP;
kvm_tdx->page_add_src = src_page;
ret = kvm_tdp_mmu_map_private_pfn(arg->vcpu, gfn, pfn);
kvm_tdx->page_add_src = NULL;
- put_page(src_page);
-
if (ret || !(arg->flags & KVM_TDX_MEASURE_MEMORY_REGION))
return ret;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 1d0cee72e560..49c0cfe24fd8 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2566,7 +2566,7 @@ int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_ord
* @gfn: starting GFN to be populated
* @src: userspace-provided buffer containing data to copy into GFN range
* (passed to @post_populate, and incremented on each iteration
- * if not NULL)
+ * if not NULL). Must be page-aligned.
* @npages: number of pages to copy from userspace-buffer
* @post_populate: callback to issue for each gmem page that backs the GPA
* range
@@ -2581,7 +2581,7 @@ int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_ord
* Returns the number of pages that were populated.
*/
typedef int (*kvm_gmem_populate_cb)(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
- void __user *src, void *opaque);
+ struct page *page, void *opaque);
long kvm_gmem_populate(struct kvm *kvm, gfn_t gfn, void __user *src, long npages,
kvm_gmem_populate_cb post_populate, void *opaque);
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 8b1248f42aae..18ae59b92257 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -820,12 +820,48 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_gmem_get_pfn);
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_POPULATE
+
+static long __kvm_gmem_populate(struct kvm *kvm, struct kvm_memory_slot *slot,
+ struct file *file, gfn_t gfn, struct page *src_page,
+ kvm_gmem_populate_cb post_populate, void *opaque)
+{
+ pgoff_t index = kvm_gmem_get_index(slot, gfn);
+ struct folio *folio;
+ kvm_pfn_t pfn;
+ int ret;
+
+ filemap_invalidate_lock(file->f_mapping);
+
+ folio = __kvm_gmem_get_pfn(file, slot, index, &pfn, NULL);
+ if (IS_ERR(folio)) {
+ ret = PTR_ERR(folio);
+ goto out_unlock;
+ }
+
+ folio_unlock(folio);
+
+ if (!kvm_range_has_memory_attributes(kvm, gfn, gfn + 1,
+ KVM_MEMORY_ATTRIBUTE_PRIVATE,
+ KVM_MEMORY_ATTRIBUTE_PRIVATE)) {
+ ret = -EINVAL;
+ goto out_put_folio;
+ }
+
+ ret = post_populate(kvm, gfn, pfn, src_page, opaque);
+ if (!ret)
+ folio_mark_uptodate(folio);
+
+out_put_folio:
+ folio_put(folio);
+out_unlock:
+ filemap_invalidate_unlock(file->f_mapping);
+ return ret;
+}
+
long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long npages,
kvm_gmem_populate_cb post_populate, void *opaque)
{
struct kvm_memory_slot *slot;
- void __user *p;
-
int ret = 0;
long i;
@@ -834,6 +870,9 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
if (WARN_ON_ONCE(npages <= 0))
return -EINVAL;
+ if (WARN_ON_ONCE(!PAGE_ALIGNED(src)))
+ return -EINVAL;
+
slot = gfn_to_memslot(kvm, start_gfn);
if (!kvm_slot_has_gmem(slot))
return -EINVAL;
@@ -842,47 +881,38 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
if (!file)
return -EFAULT;
- filemap_invalidate_lock(file->f_mapping);
-
npages = min_t(ulong, slot->npages - (start_gfn - slot->base_gfn), npages);
for (i = 0; i < npages; i++) {
- struct folio *folio;
- gfn_t gfn = start_gfn + i;
- pgoff_t index = kvm_gmem_get_index(slot, gfn);
- kvm_pfn_t pfn;
+ struct page *src_page = NULL;
+ void __user *p;
if (signal_pending(current)) {
ret = -EINTR;
break;
}
- folio = __kvm_gmem_get_pfn(file, slot, index, &pfn, NULL);
- if (IS_ERR(folio)) {
- ret = PTR_ERR(folio);
- break;
- }
+ p = src ? src + i * PAGE_SIZE : NULL;
- folio_unlock(folio);
+ if (p) {
+ ret = get_user_pages_fast((unsigned long)p, 1, 0, &src_page);
+ if (ret < 0)
+ break;
+ if (ret != 1) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
- ret = -EINVAL;
- if (!kvm_range_has_memory_attributes(kvm, gfn, gfn + 1,
- KVM_MEMORY_ATTRIBUTE_PRIVATE,
- KVM_MEMORY_ATTRIBUTE_PRIVATE))
- goto put_folio_and_exit;
+ ret = __kvm_gmem_populate(kvm, slot, file, start_gfn + i, src_page,
+ post_populate, opaque);
- p = src ? src + i * PAGE_SIZE : NULL;
- ret = post_populate(kvm, gfn, pfn, p, opaque);
- if (!ret)
- folio_mark_uptodate(folio);
+ if (src_page)
+ put_page(src_page);
-put_folio_and_exit:
- folio_put(folio);
if (ret)
break;
}
- filemap_invalidate_unlock(file->f_mapping);
-
return ret && !i ? ret : i;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_gmem_populate);
--
2.25.1