From: "Kirill A. Shutemov" <kirill@shutemov.name>
To: Dave Hansen <dave.hansen@linux.intel.com>,
	Andy Lutomirski <luto@kernel.org>,
	Peter Zijlstra <peterz@infradead.org>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Sean Christopherson <sean.j.christopherson@intel.com>,
	Vitaly Kuznetsov <vkuznets@redhat.com>,
	Wanpeng Li <wanpengli@tencent.com>,
	Jim Mattson <jmattson@google.com>, Joerg Roedel <joro@8bytes.org>
Cc: David Rientjes <rientjes@google.com>,
	Andrea Arcangeli <aarcange@redhat.com>,
	Kees Cook <keescook@chromium.org>, Will Drewry <wad@chromium.org>,
	"Edgecombe, Rick P" <rick.p.edgecombe@intel.com>,
	"Kleen, Andi" <andi.kleen@intel.com>,
	x86@kernel.org, kvm@vger.kernel.org, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org,
	"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [RFC 11/16] KVM: Rework copy_to/from_guest() to avoid direct mapping
Date: Fri, 22 May 2020 15:52:09 +0300	[thread overview]
Message-ID: <20200522125214.31348-12-kirill.shutemov@linux.intel.com> (raw)
In-Reply-To: <20200522125214.31348-1-kirill.shutemov@linux.intel.com>

We are going to unmap guest pages from the direct mapping and can no
longer rely on it for guest memory access. Use temporary
kmap_atomic()-style mappings to access guest memory instead.
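
The pattern mirrors kmap_atomic(): each possible CPU owns one
page-sized slot in a shared vm area, and mapping a page installs its
PTE into the current CPU's slot with preemption disabled. Because the
mapping is only ever used on the CPU that created it, tearing it down
needs only a local TLB flush, not a cross-CPU shootdown. A minimal
usage sketch (illustrative only; map_page_atomic() and
unmap_page_atomic() are the helpers added below, and the page is
assumed to have been pinned with get_user_pages_unlocked() first):

	void *vaddr;

	vaddr = map_page_atomic(page);	/* preempt off, PTE installed */
	memcpy(data, vaddr + offset, seg);
	unmap_page_atomic(vaddr);	/* PTE cleared, local TLB flushed */
	put_page(page);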

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 virt/kvm/kvm_main.c | 57 +++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 55 insertions(+), 2 deletions(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 07d45da5d2aa..63282def3760 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2258,17 +2258,45 @@ static int next_segment(unsigned long len, int offset)
 		return len;
 }
 
+static pte_t **guest_map_ptes;
+static struct vm_struct *guest_map_area;
+
+static void *map_page_atomic(struct page *page)
+{
+	pte_t *pte;
+	void *vaddr;
+
+	preempt_disable();
+	pte = guest_map_ptes[smp_processor_id()];
+	vaddr = guest_map_area->addr + smp_processor_id() * PAGE_SIZE;
+	set_pte(pte, mk_pte(page, PAGE_KERNEL));
+	return vaddr;
+}
+
+static void unmap_page_atomic(void *vaddr)
+{
+	pte_t *pte = guest_map_ptes[smp_processor_id()];
+	set_pte(pte, __pte(0));
+	__flush_tlb_one_kernel((unsigned long)vaddr);
+	preempt_enable();
+}
+
 int copy_from_guest(void *data, unsigned long hva, int len)
 {
 	int offset = offset_in_page(hva);
 	struct page *page;
 	int npages, seg;
+	void *vaddr;
 
 	while ((seg = next_segment(len, offset)) != 0) {
 		npages = get_user_pages_unlocked(hva, 1, &page, FOLL_KVM);
 		if (npages != 1)
 			return -EFAULT;
-		memcpy(data, page_address(page) + offset, seg);
+
+		vaddr = map_page_atomic(page);
+		memcpy(data, vaddr + offset, seg);
+		unmap_page_atomic(vaddr);
+
 		put_page(page);
 		len -= seg;
 		hva += seg;
@@ -2283,13 +2311,18 @@ int copy_to_guest(unsigned long hva, const void *data, int len)
 	int offset = offset_in_page(hva);
 	struct page *page;
 	int npages, seg;
+	void *vaddr;
 
 	while ((seg = next_segment(len, offset)) != 0) {
 		npages = get_user_pages_unlocked(hva, 1, &page,
 						 FOLL_WRITE | FOLL_KVM);
 		if (npages != 1)
 			return -EFAULT;
-		memcpy(page_address(page) + offset, data, seg);
+
+		vaddr = map_page_atomic(page);
+		memcpy(vaddr + offset, data, seg);
+		unmap_page_atomic(vaddr);
+
 		put_page(page);
 		len -= seg;
 		hva += seg;
@@ -4921,6 +4954,18 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 	if (r)
 		goto out_free;
 
+	if (VM_KVM_PROTECTED) {
+		guest_map_ptes = kmalloc_array(num_possible_cpus(),
+					       sizeof(pte_t *), GFP_KERNEL);
+		if (!guest_map_ptes)
+			goto out_unreg;
+
+		guest_map_area = alloc_vm_area(PAGE_SIZE * num_possible_cpus(),
+					       guest_map_ptes);
+		if (!guest_map_area)
+			goto out_unreg;
+	}
+
 	kvm_chardev_ops.owner = module;
 	kvm_vm_fops.owner = module;
 	kvm_vcpu_fops.owner = module;
@@ -4944,6 +4989,10 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 	return 0;
 
 out_unreg:
+	if (guest_map_area)
+		free_vm_area(guest_map_area);
+	if (guest_map_ptes)
+		kfree(guest_map_ptes);
 	kvm_async_pf_deinit();
 out_free:
 	kmem_cache_destroy(kvm_vcpu_cache);
@@ -4965,6 +5014,10 @@ EXPORT_SYMBOL_GPL(kvm_init);
 
 void kvm_exit(void)
 {
+	if (guest_map_area)
+		free_vm_area(guest_map_area);
+	if (guest_map_ptes)
+		kfree(guest_map_ptes);
 	debugfs_remove_recursive(kvm_debugfs_dir);
 	misc_deregister(&kvm_dev);
 	kmem_cache_destroy(kvm_vcpu_cache);
-- 
2.26.2



Thread overview: 62+ messages
2020-05-22 12:51 [RFC 00/16] KVM protected memory extension Kirill A. Shutemov
2020-05-22 12:51 ` [RFC 01/16] x86/mm: Move force_dma_unencrypted() to common code Kirill A. Shutemov
2020-05-22 12:52 ` [RFC 02/16] x86/kvm: Introduce KVM memory protection feature Kirill A. Shutemov
2020-05-25 14:58   ` Vitaly Kuznetsov
2020-05-25 15:15     ` Kirill A. Shutemov
2020-05-27  5:03       ` Sean Christopherson
2020-05-27  8:39         ` Vitaly Kuznetsov
2020-05-27  8:52           ` Sean Christopherson
2020-06-03  2:09           ` Huang, Kai
2020-06-03 11:14             ` Vitaly Kuznetsov
2020-05-22 12:52 ` [RFC 03/16] x86/kvm: Make DMA pages shared Kirill A. Shutemov
2020-05-22 12:52 ` [RFC 04/16] x86/kvm: Use bounce buffers for KVM memory protection Kirill A. Shutemov
2020-05-22 12:52 ` [RFC 05/16] x86/kvm: Make VirtIO use DMA API in KVM guest Kirill A. Shutemov
2020-05-22 12:52 ` [RFC 06/16] KVM: Use GUP instead of copy_from/to_user() to access guest memory Kirill A. Shutemov
2020-05-25 15:08   ` Vitaly Kuznetsov
2020-05-25 15:17     ` Kirill A. Shutemov
2020-06-01 16:35       ` Paolo Bonzini
2020-06-02 13:33         ` Kirill A. Shutemov
2020-05-26  6:14   ` Mike Rapoport
2020-05-26 21:56     ` Kirill A. Shutemov
2020-05-29 15:24   ` Kees Cook
2020-05-22 12:52 ` [RFC 07/16] KVM: mm: Introduce VM_KVM_PROTECTED Kirill A. Shutemov
2020-05-26  6:15   ` Mike Rapoport
2020-05-26 22:01     ` Kirill A. Shutemov
2020-05-26  6:40   ` John Hubbard
2020-05-26 22:04     ` Kirill A. Shutemov
2020-05-22 12:52 ` [RFC 08/16] KVM: x86: Use GUP for page walk instead of __get_user() Kirill A. Shutemov
2020-05-22 12:52 ` [RFC 09/16] KVM: Protected memory extension Kirill A. Shutemov
2020-05-25 15:26   ` Vitaly Kuznetsov
2020-05-25 15:34     ` Kirill A. Shutemov
2020-06-03  1:34       ` Huang, Kai
2020-05-22 12:52 ` [RFC 10/16] KVM: x86: Enabled protected memory extension Kirill A. Shutemov
2020-05-25 15:26   ` Vitaly Kuznetsov
2020-05-26  6:16   ` Mike Rapoport
2020-05-26 21:58     ` Kirill A. Shutemov
2020-05-22 12:52 ` Kirill A. Shutemov [this message]
2020-05-22 12:52 ` [RFC 12/16] x86/kvm: Share steal time page with host Kirill A. Shutemov
2020-05-22 12:52 ` [RFC 13/16] x86/kvmclock: Share hvclock memory with the host Kirill A. Shutemov
2020-05-25 15:22   ` Vitaly Kuznetsov
2020-05-25 15:25     ` Kirill A. Shutemov
2020-05-25 15:42       ` Vitaly Kuznetsov
2020-05-22 12:52 ` [RFC 14/16] KVM: Introduce gfn_to_pfn_memslot_protected() Kirill A. Shutemov
2020-05-22 12:52 ` [RFC 15/16] KVM: Handle protected memory in __kvm_map_gfn()/__kvm_unmap_gfn() Kirill A. Shutemov
2020-05-22 12:52 ` [RFC 16/16] KVM: Unmap protected pages from direct mapping Kirill A. Shutemov
2020-05-26  6:16   ` Mike Rapoport
2020-05-26 22:10     ` Kirill A. Shutemov
2020-05-25  5:27 ` [RFC 00/16] KVM protected memory extension Kirill A. Shutemov
2020-05-25 13:47 ` Liran Alon
2020-05-25 14:46   ` Kirill A. Shutemov
2020-05-25 15:56     ` Liran Alon
2020-05-26  6:17   ` Mike Rapoport
2020-05-26 10:16     ` Liran Alon
2020-05-26 11:38       ` Mike Rapoport
2020-05-27 15:45         ` Dave Hansen
2020-05-27 21:22           ` Mike Rapoport
2020-06-04 15:15 ` Marc Zyngier
2020-06-04 15:48   ` Sean Christopherson
2020-06-04 16:27     ` Marc Zyngier
2020-06-04 16:35     ` Will Deacon
2020-06-04 19:09       ` Nakajima, Jun
2020-06-04 21:03         ` Jim Mattson
2020-06-04 23:29           ` Nakajima, Jun
