From: Paolo Bonzini <pbonzini@redhat.com>
To: Michael Roth <michael.roth@amd.com>, kvm@vger.kernel.org
Cc: linux-coco@lists.linux.dev, linux-mm@kvack.org,
linux-crypto@vger.kernel.org, x86@kernel.org,
linux-kernel@vger.kernel.org, tglx@linutronix.de,
mingo@redhat.com, jroedel@suse.de, thomas.lendacky@amd.com,
hpa@zytor.com, ardb@kernel.org, seanjc@google.com,
vkuznets@redhat.com, jmattson@google.com, luto@kernel.org,
dave.hansen@linux.intel.com, slp@redhat.com, pgonda@google.com,
peterz@infradead.org, srinivas.pandruvada@linux.intel.com,
rientjes@google.com, dovmurik@linux.ibm.com, tobin@ibm.com,
bp@alien8.de, vbabka@suse.cz, kirill@shutemov.name,
ak@linux.intel.com, tony.luck@intel.com,
sathyanarayanan.kuppuswamy@linux.intel.com, alpergun@google.com,
jarkko@kernel.org, ashish.kalra@amd.com, nikunj.dadhania@amd.com,
pankaj.gupta@amd.com, liam.merwick@oracle.com
Subject: Re: [PATCH v12 21/29] KVM: SEV: Implement gmem hook for initializing private pages
Date: Sat, 30 Mar 2024 22:05:28 +0100
Message-ID: <a0799504-385b-40d8-a84c-eddb1bae930d@redhat.com>
In-Reply-To: <20240329225835.400662-22-michael.roth@amd.com>
On 3/29/24 23:58, Michael Roth wrote:
> This will handle the RMP table updates needed to put a page into a
> private state before mapping it into an SEV-SNP guest.
>
> Signed-off-by: Michael Roth <michael.roth@amd.com>
> ---
>  arch/x86/kvm/Kconfig   |  1 +
>  arch/x86/kvm/svm/sev.c | 98 ++++++++++++++++++++++++++++++++++++++++++
>  arch/x86/kvm/svm/svm.c |  2 +
>  arch/x86/kvm/svm/svm.h |  5 +++
>  arch/x86/kvm/x86.c     |  5 +++
>  virt/kvm/guest_memfd.c |  4 +-
>  6 files changed, 113 insertions(+), 2 deletions(-)
>
> diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
> index d0bb0e7a4e80..286b40d0b07c 100644
> --- a/arch/x86/kvm/Kconfig
> +++ b/arch/x86/kvm/Kconfig
> @@ -124,6 +124,7 @@ config KVM_AMD_SEV
>         depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m)
>         select ARCH_HAS_CC_PLATFORM
>         select KVM_GENERIC_PRIVATE_MEM
> +       select HAVE_KVM_GMEM_PREPARE
>         help
>           Provides support for launching Encrypted VMs (SEV) and Encrypted VMs
>           with Encrypted State (SEV-ES) on AMD processors.
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index 9ea13c2de668..e1f8be1df219 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -4282,3 +4282,101 @@ void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code)
> out:
>         put_page(pfn_to_page(pfn));
> }
> +
> +static bool is_pfn_range_shared(kvm_pfn_t start, kvm_pfn_t end)
> +{
> +       kvm_pfn_t pfn = start;
> +
> +       while (pfn < end) {
> +               int ret, rmp_level;
> +               bool assigned;
> +
> +               ret = snp_lookup_rmpentry(pfn, &assigned, &rmp_level);
> +               if (ret) {
> +                       pr_warn_ratelimited("SEV: Failed to retrieve RMP entry: PFN 0x%llx GFN start 0x%llx GFN end 0x%llx RMP level %d error %d\n",
> +                                           pfn, start, end, rmp_level, ret);
> +                       return false;
> +               }
> +
> +               if (assigned) {
> +                       pr_debug("%s: overlap detected, PFN 0x%llx start 0x%llx end 0x%llx RMP level %d\n",
> +                                __func__, pfn, start, end, rmp_level);
> +                       return false;
> +               }
> +
> +               pfn++;
> +       }
> +
> +       return true;
> +}
> +
> +static u8 max_level_for_order(int order)
> +{
> +       if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))
> +               return PG_LEVEL_2M;
> +
> +       return PG_LEVEL_4K;
> +}
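
A quick sanity check of the mapping, for readers following along (this
assumes the usual x86-64 4K base page size, where
KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) evaluates to 9):

    max_level_for_order(9)  -> PG_LEVEL_2M   /* 512 x 4K == 2M folio */
    max_level_for_order(4)  -> PG_LEVEL_4K   /* 64K folio, too small */
    max_level_for_order(0)  -> PG_LEVEL_4K   /* single 4K page       */

So only folios of at least 2M are ever candidates for a 2M RMP entry.
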
> +
> +static bool is_large_rmp_possible(struct kvm *kvm, kvm_pfn_t pfn, int order)
> +{
> +       kvm_pfn_t pfn_aligned = ALIGN_DOWN(pfn, PTRS_PER_PMD);
> +
> +       /*
> +        * If this is a large folio, and the entire 2M range containing the
> +        * PFN is currently shared, then the entire 2M-aligned range can be
> +        * set to private via a single 2M RMP entry.
> +        */
> +       if (max_level_for_order(order) > PG_LEVEL_4K &&
> +           is_pfn_range_shared(pfn_aligned, pfn_aligned + PTRS_PER_PMD))
> +               return true;
> +
> +       return false;
> +}
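
To spell out the alignment with a made-up PFN: PTRS_PER_PMD is 512 here,
so for pfn == 0x1234 this ends up checking

    pfn_aligned = ALIGN_DOWN(0x1234, 512) = 0x1200
    is_pfn_range_shared(0x1200, 0x1400)    /* 512 PFNs == one 2M page */

i.e. the whole 2M-aligned neighborhood of the PFN must still be shared,
not just the pages of the folio actually being prepared.
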
> +
> +int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
> +{
> +       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> +       kvm_pfn_t pfn_aligned;
> +       gfn_t gfn_aligned;
> +       int level, rc;
> +       bool assigned;
> +
> +       if (!sev_snp_guest(kvm))
> +               return 0;
> +
> +       rc = snp_lookup_rmpentry(pfn, &assigned, &level);
> +       if (rc) {
> +               pr_err_ratelimited("SEV: Failed to look up RMP entry: GFN %llx PFN %llx error %d\n",
> +                                  gfn, pfn, rc);
> +               return -ENOENT;
> +       }
> +
> +       if (assigned) {
> +               pr_debug("%s: already assigned: gfn %llx pfn %llx max_order %d level %d\n",
> +                        __func__, gfn, pfn, max_order, level);
> +               return 0;
> +       }
> +
> +       if (is_large_rmp_possible(kvm, pfn, max_order)) {
> +               level = PG_LEVEL_2M;
> +               pfn_aligned = ALIGN_DOWN(pfn, PTRS_PER_PMD);
> +               gfn_aligned = ALIGN_DOWN(gfn, PTRS_PER_PMD);
> +       } else {
> +               level = PG_LEVEL_4K;
> +               pfn_aligned = pfn;
> +               gfn_aligned = gfn;
> +       }
> +
> +       rc = rmp_make_private(pfn_aligned, gfn_to_gpa(gfn_aligned), level, sev->asid, false);
> +       if (rc) {
> +               pr_err_ratelimited("SEV: Failed to update RMP entry: GFN %llx PFN %llx level %d error %d\n",
> +                                  gfn, pfn, level, rc);
> +               return -EINVAL;
> +       }
> +
> +       pr_debug("%s: updated: gfn %llx pfn %llx pfn_aligned %llx max_order %d level %d\n",
> +                __func__, gfn, pfn, pfn_aligned, max_order, level);
> +
> +       return 0;
> +}
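
To make the two paths concrete (pfn/gfn values below are hypothetical):

    /* pfn 0x1234, gfn 0x5634, max_order 9, surrounding 2M fully shared */
    rmp_make_private(0x1200, gfn_to_gpa(0x5600), PG_LEVEL_2M, sev->asid, false);

    /* same pfn/gfn, but max_order 0 or a partially-assigned 2M range   */
    rmp_make_private(0x1234, gfn_to_gpa(0x5634), PG_LEVEL_4K, sev->asid, false);

Note that pfn and gfn are aligned down by the same 512, which relies on
gmem handing out folios whose guest and host offsets within the 2M page
agree.
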
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index a895d3f07cb8..c099154e326a 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -5078,6 +5078,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
>         .vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
>         .vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons,
>         .alloc_apic_backing_page = svm_alloc_apic_backing_page,
> +
> +       .gmem_prepare = sev_gmem_prepare,
> };
>
> /*
> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index 0cdcd0759fe0..53618cfc2b89 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -730,6 +730,7 @@ extern unsigned int max_sev_asid;
> void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code);
> void sev_vcpu_unblocking(struct kvm_vcpu *vcpu);
> void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu);
> +int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
> #else
> static inline struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu) {
>         return alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
> @@ -746,6 +747,10 @@ static inline int sev_dev_get_attr(u64 attr, u64 *val) { return -ENXIO; }
> static inline void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code) {}
> static inline void sev_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
> static inline void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu) {}
> +static inline int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
> +{
> +       return 0;
> +}
>
> #endif
>
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 617c38656757..d05922684005 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -13615,6 +13615,11 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
> EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
>
> #ifdef CONFIG_HAVE_KVM_GMEM_PREPARE
> +bool kvm_arch_gmem_prepare_needed(struct kvm *kvm)
> +{
> +       return kvm->arch.vm_type == KVM_X86_SNP_VM;
> +}
> +
> int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order)
> {
>         return static_call(kvm_x86_gmem_prepare)(kvm, pfn, gfn, max_order);
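
For context, a sketch of how I'd expect the new predicate to get used on
the guest_memfd side (hypothetical, the real wiring is in the gmem
patches):

    if (!kvm_arch_gmem_prepare_needed(kvm))
        return 0;    /* non-SNP VMs skip the prepare hook entirely */
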
> diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
> index 3e3c4b7fff3b..11952254ae48 100644
> --- a/virt/kvm/guest_memfd.c
> +++ b/virt/kvm/guest_memfd.c
> @@ -46,8 +46,8 @@ static int kvm_gmem_prepare_folio(struct inode *inode, pgoff_t index, struct fol
>                 gfn = slot->base_gfn + index - slot->gmem.pgoff;
>                 rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, compound_order(compound_head(page)));
>                 if (rc) {
> -                       pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx, error %d.\n",
> -                                           index, rc);
> +                       pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx GFN %llx PFN %llx error %d.\n",
> +                                           index, gfn, pfn, rc);
>                         return rc;
>                 }
>         }
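
For reference, the end-to-end flow as I read this patch:

    kvm_gmem_prepare_folio()                          virt/kvm/guest_memfd.c
      -> kvm_arch_gmem_prepare(kvm, gfn, pfn, order)  arch/x86/kvm/x86.c
        -> static_call(kvm_x86_gmem_prepare)          .gmem_prepare = sev_gmem_prepare
          -> sev_gmem_prepare()                       RMP transition to private
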
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Paolo