From: Fuad Tabba <tabba@google.com>
To: kvm@vger.kernel.org, linux-arm-msm@vger.kernel.org, linux-mm@kvack.org
Cc: pbonzini@redhat.com, chenhuacai@kernel.org, mpe@ellerman.id.au,
anup@brainfault.org, paul.walmsley@sifive.com,
palmer@dabbelt.com, aou@eecs.berkeley.edu, seanjc@google.com,
viro@zeniv.linux.org.uk, brauner@kernel.org,
willy@infradead.org, akpm@linux-foundation.org,
xiaoyao.li@intel.com, yilun.xu@intel.com,
chao.p.peng@linux.intel.com, jarkko@kernel.org,
amoorthy@google.com, dmatlack@google.com,
yu.c.zhang@linux.intel.com, isaku.yamahata@intel.com,
mic@digikod.net, vbabka@suse.cz, vannapurve@google.com,
ackerleytng@google.com, mail@maciej.szmigiero.name,
david@redhat.com, michael.roth@amd.com, wei.w.wang@intel.com,
liam.merwick@oracle.com, isaku.yamahata@gmail.com,
kirill.shutemov@linux.intel.com, suzuki.poulose@arm.com,
steven.price@arm.com, quic_eberman@quicinc.com,
quic_mnalajal@quicinc.com, quic_tsoni@quicinc.com,
quic_svaddagi@quicinc.com, quic_cvanscha@quicinc.com,
quic_pderrin@quicinc.com, quic_pheragu@quicinc.com,
catalin.marinas@arm.com, james.morse@arm.com,
yuzenghui@huawei.com, oliver.upton@linux.dev, maz@kernel.org,
will@kernel.org, qperret@google.com, keirf@google.com,
roypat@amazon.co.uk, shuah@kernel.org, hch@infradead.org,
jgg@nvidia.com, rientjes@google.com, jhubbard@nvidia.com,
fvdl@google.com, hughd@google.com, jthoughton@google.com,
tabba@google.com
Subject: [RFC PATCH v4 08/14] KVM: guest_memfd: Add guest_memfd support to kvm_(read|write)_guest_page()
Date: Fri, 13 Dec 2024 16:48:04 +0000
Message-ID: <20241213164811.2006197-9-tabba@google.com>
In-Reply-To: <20241213164811.2006197-1-tabba@google.com>

Make kvm_(read|write)_guest_page() capable of accessing guest
memory for slots that don't have a userspace address, but only if
the memory is mappable, since being mappable also indicates that
it is accessible by the host.
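
As an illustration only (the helper below is hypothetical and not part
of this patch), a caller could then use the existing helper against a
guest_memfd-only slot, i.e. one created without a userspace address,
provided the target gfn is currently mappable:

static int demo_read_guest_u32(struct kvm *kvm, gfn_t gfn, u32 *val)
{
	/*
	 * Hypothetical caller, for illustration. With this change,
	 * kvm_read_guest_page() copies directly from the guest_memfd
	 * folio when the slot has no userspace address. It returns
	 * -EPERM if the folio is not host-mappable, and -EFAULT on
	 * other failures, so callers need no special casing.
	 */
	return kvm_read_guest_page(kvm, gfn, val, 0, sizeof(*val));
}
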
Signed-off-by: Fuad Tabba <tabba@google.com>
---
virt/kvm/kvm_main.c | 133 +++++++++++++++++++++++++++++++++++++-------
1 file changed, 114 insertions(+), 19 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index fffff01cebe7..53692feb6213 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3184,23 +3184,110 @@ int kvm_gmem_clear_mappable(struct kvm *kvm, gfn_t start, gfn_t end)
 	return r;
 }
 
+static int __kvm_read_guest_memfd_page(struct kvm *kvm,
+				       struct kvm_memory_slot *slot,
+				       gfn_t gfn, void *data, int offset,
+				       int len)
+{
+	struct page *page;
+	u64 pfn;
+	int r;
+
+	/*
+	 * Holds the folio lock until after checking whether it can be faulted
+	 * in, to avoid races with paths that change a folio's mappability.
+	 */
+	r = kvm_gmem_get_pfn_locked(kvm, slot, gfn, &pfn, &page, NULL);
+	if (r)
+		return r;
+
+	if (!kvm_gmem_is_mappable(kvm, gfn, gfn + 1)) {
+		r = -EPERM;
+		goto unlock;
+	}
+	memcpy(data, page_address(page) + offset, len);
+unlock:
+	unlock_page(page);
+	if (r)
+		put_page(page);
+	else
+		kvm_release_page_clean(page);
+
+	return r;
+}
+
+static int __kvm_write_guest_memfd_page(struct kvm *kvm,
+					struct kvm_memory_slot *slot,
+					gfn_t gfn, const void *data,
+					int offset, int len)
+{
+	struct page *page;
+	u64 pfn;
+	int r;
+
+	/*
+	 * Holds the folio lock until after checking whether it can be faulted
+	 * in, to avoid races with paths that change a folio's mappability.
+	 */
+	r = kvm_gmem_get_pfn_locked(kvm, slot, gfn, &pfn, &page, NULL);
+	if (r)
+		return r;
+
+	if (!kvm_gmem_is_mappable(kvm, gfn, gfn + 1)) {
+		r = -EPERM;
+		goto unlock;
+	}
+	memcpy(page_address(page) + offset, data, len);
+unlock:
+	unlock_page(page);
+	if (r)
+		put_page(page);
+	else
+		kvm_release_page_dirty(page);
+
+	return r;
+}
+#else
+static int __kvm_read_guest_memfd_page(struct kvm *kvm,
+				       struct kvm_memory_slot *slot,
+				       gfn_t gfn, void *data, int offset,
+				       int len)
+{
+	WARN_ON_ONCE(1);
+	return -EIO;
+}
+
+static int __kvm_write_guest_memfd_page(struct kvm *kvm,
+					struct kvm_memory_slot *slot,
+					gfn_t gfn, const void *data,
+					int offset, int len)
+{
+	WARN_ON_ONCE(1);
+	return -EIO;
+}
 #endif /* CONFIG_KVM_GMEM_MAPPABLE */
 
 /* Copy @len bytes from guest memory at '(@gfn * PAGE_SIZE) + @offset' to @data */
-static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
-				 void *data, int offset, int len)
+
+static int __kvm_read_guest_page(struct kvm *kvm, struct kvm_memory_slot *slot,
+				 gfn_t gfn, void *data, int offset, int len)
 {
-	int r;
 	unsigned long addr;
 
 	if (WARN_ON_ONCE(offset + len > PAGE_SIZE))
 		return -EFAULT;
 
+	if (IS_ENABLED(CONFIG_KVM_GMEM_MAPPABLE) &&
+	    kvm_slot_can_be_private(slot) &&
+	    !slot->userspace_addr) {
+		return __kvm_read_guest_memfd_page(kvm, slot, gfn, data,
+						   offset, len);
+	}
+
 	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
 	if (kvm_is_error_hva(addr))
 		return -EFAULT;
-	r = __copy_from_user(data, (void __user *)addr + offset, len);
-	if (r)
+	if (__copy_from_user(data, (void __user *)addr + offset, len))
 		return -EFAULT;
 	return 0;
 }
@@ -3210,7 +3297,7 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
 {
 	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
 
-	return __kvm_read_guest_page(slot, gfn, data, offset, len);
+	return __kvm_read_guest_page(kvm, slot, gfn, data, offset, len);
 }
 EXPORT_SYMBOL_GPL(kvm_read_guest_page);
 
@@ -3219,7 +3306,7 @@ int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
 {
 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 
-	return __kvm_read_guest_page(slot, gfn, data, offset, len);
+	return __kvm_read_guest_page(vcpu->kvm, slot, gfn, data, offset, len);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
 
@@ -3296,22 +3383,30 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
 
 /* Copy @len bytes from @data into guest memory at '(@gfn * PAGE_SIZE) + @offset' */
 static int __kvm_write_guest_page(struct kvm *kvm,
-				  struct kvm_memory_slot *memslot, gfn_t gfn,
-				  const void *data, int offset, int len)
+				  struct kvm_memory_slot *slot, gfn_t gfn,
+				  const void *data, int offset, int len)
 {
-	int r;
-	unsigned long addr;
-
 	if (WARN_ON_ONCE(offset + len > PAGE_SIZE))
 		return -EFAULT;
 
-	addr = gfn_to_hva_memslot(memslot, gfn);
-	if (kvm_is_error_hva(addr))
-		return -EFAULT;
-	r = __copy_to_user((void __user *)addr + offset, data, len);
-	if (r)
-		return -EFAULT;
-	mark_page_dirty_in_slot(kvm, memslot, gfn);
+	if (IS_ENABLED(CONFIG_KVM_GMEM_MAPPABLE) &&
+	    kvm_slot_can_be_private(slot) &&
+	    !slot->userspace_addr) {
+		int r = __kvm_write_guest_memfd_page(kvm, slot, gfn, data,
+						     offset, len);
+
+		if (r)
+			return r;
+	} else {
+		unsigned long addr = gfn_to_hva_memslot(slot, gfn);
+
+		if (kvm_is_error_hva(addr))
+			return -EFAULT;
+		if (__copy_to_user((void __user *)addr + offset, data, len))
+			return -EFAULT;
+	}
+
+	mark_page_dirty_in_slot(kvm, slot, gfn);
 	return 0;
 }
 
--
2.47.1.613.gc27f4b7a9f-goog
Thread overview: 26+ messages
2024-12-13 16:47 [RFC PATCH v4 00/14] KVM: Restricted mapping of guest_memfd at the host and arm64 support Fuad Tabba
2024-12-13 16:47 ` [RFC PATCH v4 01/14] mm: Consolidate freeing of typed folios on final folio_put() Fuad Tabba
2024-12-13 16:47 ` [RFC PATCH v4 02/14] KVM: guest_memfd: Make guest mem use guest mem inodes instead of anonymous inodes Fuad Tabba
2024-12-13 16:47 ` [RFC PATCH v4 03/14] KVM: guest_memfd: Introduce kvm_gmem_get_pfn_locked(), which retains the folio lock Fuad Tabba
2024-12-13 16:48 ` [RFC PATCH v4 04/14] KVM: guest_memfd: Track mappability within a struct kvm_gmem_private Fuad Tabba
2024-12-13 16:48 ` [RFC PATCH v4 05/14] KVM: guest_memfd: Folio mappability states and functions that manage their transition Fuad Tabba
2024-12-13 16:48 ` [RFC PATCH v4 06/14] KVM: guest_memfd: Handle final folio_put() of guestmem pages Fuad Tabba
2024-12-13 16:48 ` [RFC PATCH v4 07/14] KVM: guest_memfd: Allow host to mmap guest_memfd() pages when shared Fuad Tabba
2024-12-27 4:21 ` Alexey Kardashevskiy
2025-01-09 10:17 ` Fuad Tabba
2024-12-13 16:48 ` Fuad Tabba [this message]
2024-12-13 16:48 ` [RFC PATCH v4 09/14] KVM: guest_memfd: Add KVM capability to check if guest_memfd is host mappable Fuad Tabba
2024-12-13 16:48 ` [RFC PATCH v4 10/14] KVM: guest_memfd: Add a guest_memfd() flag to initialize it as mappable Fuad Tabba
2024-12-13 16:48 ` [RFC PATCH v4 11/14] KVM: guest_memfd: selftests: guest_memfd mmap() test when mapping is allowed Fuad Tabba
2024-12-13 16:48 ` [RFC PATCH v4 12/14] KVM: arm64: Skip VMA checks for slots without userspace address Fuad Tabba
2024-12-13 16:48 ` [RFC PATCH v4 13/14] KVM: arm64: Handle guest_memfd()-backed guest page faults Fuad Tabba
2025-01-16 14:48 ` Patrick Roy
2025-01-16 15:16 ` Fuad Tabba
2024-12-13 16:48 ` [RFC PATCH v4 14/14] KVM: arm64: Enable guest_memfd private memory when pKVM is enabled Fuad Tabba
2025-01-09 16:34 ` [RFC PATCH v4 00/14] KVM: Restricted mapping of guest_memfd at the host and arm64 support Fuad Tabba
2025-01-16 0:35 ` Ackerley Tng
2025-01-16 9:19 ` Fuad Tabba
2025-01-20 9:26 ` Vlastimil Babka
2025-01-20 9:36 ` David Hildenbrand
2025-01-16 14:48 ` Patrick Roy
2025-01-16 15:02 ` Fuad Tabba