From: Atish Patra <atishp@rivosinc.com>
To: linux-kernel@vger.kernel.org
Cc: "Atish Patra" <atishp@rivosinc.com>,
	"Alexandre Ghiti" <alex@ghiti.fr>,
	"Andrew Jones" <ajones@ventanamicro.com>,
	"Andrew Morton" <akpm@linux-foundation.org>,
	"Anup Patel" <anup@brainfault.org>,
	"Atish Patra" <atishp@atishpatra.org>,
	"Björn Töpel" <bjorn@rivosinc.com>,
	"Suzuki K Poulose" <suzuki.poulose@arm.com>,
	"Will Deacon" <will@kernel.org>, "Marc Zyngier" <maz@kernel.org>,
	"Sean Christopherson" <seanjc@google.com>,
	linux-coco@lists.linux.dev, "Dylan Reid" <dylan@rivosinc.com>,
	abrestic@rivosinc.com, "Samuel Ortiz" <sameo@rivosinc.com>,
	"Christoph Hellwig" <hch@infradead.org>,
	"Conor Dooley" <conor.dooley@microchip.com>,
	"Greg Kroah-Hartman" <gregkh@linuxfoundation.org>,
	"Guo Ren" <guoren@kernel.org>, "Heiko Stuebner" <heiko@sntech.de>,
	"Jiri Slaby" <jirislaby@kernel.org>,
	kvm-riscv@lists.infradead.org, kvm@vger.kernel.org,
	linux-mm@kvack.org, linux-riscv@lists.infradead.org,
	"Mayuresh Chitale" <mchitale@ventanamicro.com>,
	"Palmer Dabbelt" <palmer@dabbelt.com>,
	"Paolo Bonzini" <pbonzini@redhat.com>,
	"Paul Walmsley" <paul.walmsley@sifive.com>,
	"Rajnesh Kanwal" <rkanwal@rivosinc.com>,
	"Uladzislau Rezki" <urezki@gmail.com>
Subject: [RFC 28/48] RISC-V: KVM: Add interrupt management functions for TVM
Date: Wed, 19 Apr 2023 15:16:56 -0700	[thread overview]
Message-ID: <20230419221716.3603068-29-atishp@rivosinc.com> (raw)
In-Reply-To: <20230419221716.3603068-1-atishp@rivosinc.com>

The COVI SBI extension defines the functions related to interrupt
management for TVMs. These functions are the glue logic between the
AIA code and the actual CoVE Interrupt SBI extension (COVI).
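
For illustration only (not part of this patch): AIA code is expected to
route TVM interrupt operations through these hooks instead of touching
the IMSIC directly. A minimal sketch of the injection path, assuming the
is_cove_vcpu() helper introduced earlier in this series:

    /*
     * Illustrative sketch: route an external interrupt into a TVM.
     * is_cove_vcpu() is assumed from earlier patches in this series;
     * the non-confidential path is elided.
     */
    static int example_aia_inject(struct kvm_vcpu *vcpu, unsigned long iid)
    {
    	if (is_cove_vcpu(vcpu))
    		/* The TSM injects into the TVM's virtual interrupt file */
    		return kvm_riscv_cove_vcpu_inject_interrupt(vcpu, iid);

    	/* ... regular IMSIC update path for non-TVM guests ... */
    	return 0;
    }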

Signed-off-by: Atish Patra <atishp@rivosinc.com>
---
 arch/riscv/include/asm/kvm_cove.h |  34 ++++
 arch/riscv/kvm/cove.c             | 256 ++++++++++++++++++++++++++++++
 2 files changed, 290 insertions(+)

diff --git a/arch/riscv/include/asm/kvm_cove.h b/arch/riscv/include/asm/kvm_cove.h
index b63682f..74bad2f 100644
--- a/arch/riscv/include/asm/kvm_cove.h
+++ b/arch/riscv/include/asm/kvm_cove.h
@@ -61,10 +61,19 @@ struct kvm_riscv_cove_page {
 	unsigned long gpa;
 };
 
+struct imsic_tee_state {
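+	/* A bind to a vsfile is pending until the TVM is finalized */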
+	bool bind_required;
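+	/* The vCPU is currently bound to a physical guest interrupt file */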
+	bool bound;
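+	/* HGEI index of the guest interrupt file backing this vCPU */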
+	int vsfile_hgei;
+};
+
 struct kvm_cove_tvm_vcpu_context {
 	struct kvm_vcpu *vcpu;
 	/* Pages storing each vcpu state of the TVM in TSM */
 	struct kvm_riscv_cove_page vcpu_state;
+
+	/* Per VCPU imsic state */
+	struct imsic_tee_state imsic;
 };
 
 struct kvm_cove_tvm_context {
@@ -133,6 +142,16 @@ int kvm_riscv_cove_vm_add_memreg(struct kvm *kvm, unsigned long gpa, unsigned lo
 int kvm_riscv_cove_gstage_map(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned long hva);
 /* Fence related function */
 int kvm_riscv_cove_tvm_fence(struct kvm_vcpu *vcpu);
+
+/* AIA related CoVE functions */
+int kvm_riscv_cove_aia_init(struct kvm *kvm);
+int kvm_riscv_cove_vcpu_inject_interrupt(struct kvm_vcpu *vcpu, unsigned long iid);
+int kvm_riscv_cove_vcpu_imsic_unbind(struct kvm_vcpu *vcpu, int old_pcpu);
+int kvm_riscv_cove_vcpu_imsic_bind(struct kvm_vcpu *vcpu, unsigned long imsic_mask);
+int kvm_riscv_cove_vcpu_imsic_rebind(struct kvm_vcpu *vcpu, int old_pcpu);
+int kvm_riscv_cove_aia_claim_imsic(struct kvm_vcpu *vcpu, phys_addr_t imsic_pa);
+int kvm_riscv_cove_aia_convert_imsic(struct kvm_vcpu *vcpu, phys_addr_t imsic_pa);
+int kvm_riscv_cove_vcpu_imsic_addr(struct kvm_vcpu *vcpu);
 #else
 static inline bool kvm_riscv_cove_enabled(void) {return false; };
 static inline int kvm_riscv_cove_init(void) { return -1; }
@@ -162,6 +181,21 @@ static inline int kvm_riscv_cove_vm_measure_pages(struct kvm *kvm,
 }
 static inline int kvm_riscv_cove_gstage_map(struct kvm_vcpu *vcpu,
 					    gpa_t gpa, unsigned long hva) {return -1; }
+/* AIA related CoVE functions */
+static inline int kvm_riscv_cove_aia_init(struct kvm *kvm) { return -1; }
+static inline int kvm_riscv_cove_vcpu_inject_interrupt(struct kvm_vcpu *vcpu,
+						       unsigned long iid) { return -1; }
+static inline int kvm_riscv_cove_vcpu_imsic_unbind(struct kvm_vcpu *vcpu,
+						   int old_pcpu) { return -1; }
+static inline int kvm_riscv_cove_vcpu_imsic_bind(struct kvm_vcpu *vcpu,
+						 unsigned long imsic_mask) { return -1; }
+static inline int kvm_riscv_cove_aia_claim_imsic(struct kvm_vcpu *vcpu,
+						 phys_addr_t imsic_pa) { return -1; }
+static inline int kvm_riscv_cove_aia_convert_imsic(struct kvm_vcpu *vcpu,
+						 phys_addr_t imsic_pa) { return -1; }
+static inline int kvm_riscv_cove_vcpu_imsic_addr(struct kvm_vcpu *vcpu) { return -1; }
+static inline int kvm_riscv_cove_vcpu_imsic_rebind(struct kvm_vcpu *vcpu,
+						   int old_pcpu) { return -1; }
 #endif /* CONFIG_RISCV_COVE_HOST */
 
 #endif /* __KVM_RISCV_COVE_H */
diff --git a/arch/riscv/kvm/cove.c b/arch/riscv/kvm/cove.c
index 4a8a8db..154b01a 100644
--- a/arch/riscv/kvm/cove.c
+++ b/arch/riscv/kvm/cove.c
@@ -8,6 +8,7 @@
  *     Atish Patra <atishp@rivosinc.com>
  */
 
+#include <linux/cpumask.h>
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/kvm_host.h>
@@ -137,6 +138,247 @@ __always_inline bool kvm_riscv_cove_enabled(void)
 	return riscv_cove_enabled;
 }
 
+static void kvm_cove_imsic_clone(void *info)
+{
+	int rc;
+	struct kvm_vcpu *vcpu = info;
+	struct kvm *kvm = vcpu->kvm;
+
+	rc = sbi_covi_rebind_vcpu_imsic_clone(kvm->arch.tvmc->tvm_guest_id, vcpu->vcpu_idx);
+	if (rc)
+		kvm_err("Imsic clone failed guest %ld vcpu %d pcpu %d\n",
+			 kvm->arch.tvmc->tvm_guest_id, vcpu->vcpu_idx, smp_processor_id());
+}
+
+static void kvm_cove_imsic_unbind(void *info)
+{
+	struct kvm_vcpu *vcpu = info;
+	struct kvm_cove_tvm_context *tvmc = vcpu->kvm->arch.tvmc;
+
+	/* TODO: We should propagate the error, but the remote function call cannot return one */
+	if (sbi_covi_unbind_vcpu_imsic_begin(tvmc->tvm_guest_id, vcpu->vcpu_idx))
+		return;
+
+	/* This may issue IPIs to running vcpus. */
+	if (kvm_riscv_cove_tvm_fence(vcpu))
+		return;
+
+	if (sbi_covi_unbind_vcpu_imsic_end(tvmc->tvm_guest_id, vcpu->vcpu_idx))
+		return;
+
+	kvm_info("Unbind success for guest %ld vcpu %d pcpu %d\n",
+		  tvmc->tvm_guest_id, vcpu->vcpu_idx, smp_processor_id());
+}
+
+int kvm_riscv_cove_vcpu_imsic_addr(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cove_tvm_context *tvmc;
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_vcpu_aia *vaia = &vcpu->arch.aia_context;
+	int ret;
+
+	if (!kvm->arch.tvmc)
+		return -EINVAL;
+
+	tvmc = kvm->arch.tvmc;
+
+	ret = sbi_covi_set_vcpu_imsic_addr(tvmc->tvm_guest_id, vcpu->vcpu_idx, vaia->imsic_addr);
+	if (ret)
+		return -EPERM;
+
+	return 0;
+}
+
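+/*
+ * Convert a host IMSIC guest interrupt file page into a confidential
+ * page owned by the TSM, completing the conversion with a fence.
+ */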
+int kvm_riscv_cove_aia_convert_imsic(struct kvm_vcpu *vcpu, phys_addr_t imsic_pa)
+{
+	struct kvm *kvm = vcpu->kvm;
+	int ret;
+
+	if (!kvm->arch.tvmc)
+		return -EINVAL;
+
+	ret = sbi_covi_convert_imsic(imsic_pa);
+	if (ret)
+		return -EPERM;
+
+	ret = kvm_riscv_cove_fence();
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
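+/* Reclaim a previously converted IMSIC interrupt file page back to the host */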
+int kvm_riscv_cove_aia_claim_imsic(struct kvm_vcpu *vcpu, phys_addr_t imsic_pa)
+{
+	int ret;
+	struct kvm *kvm = vcpu->kvm;
+
+	if (!kvm->arch.tvmc)
+		return -EINVAL;
+
+	ret = sbi_covi_reclaim_imsic(imsic_pa);
+	if (ret)
+		return -EPERM;
+
+	return 0;
+}
+
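+/*
+ * Rebind the vCPU's IMSIC to the guest interrupt file of the current
+ * physical CPU: begin with the new vsfile mask, clone the old vsfile
+ * contents on the old physical CPU, then complete the rebind.
+ */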
+int kvm_riscv_cove_vcpu_imsic_rebind(struct kvm_vcpu *vcpu, int old_pcpu)
+{
+	struct kvm_cove_tvm_context *tvmc;
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_cove_tvm_vcpu_context *tvcpu = vcpu->arch.tc;
+	int ret;
+	cpumask_t tmpmask;
+
+	if (!kvm->arch.tvmc)
+		return -EINVAL;
+
+	tvmc = kvm->arch.tvmc;
+
+	ret = sbi_covi_rebind_vcpu_imsic_begin(tvmc->tvm_guest_id, vcpu->vcpu_idx,
+					       BIT(tvcpu->imsic.vsfile_hgei));
+	if (ret) {
+		kvm_err("Imsic rebind begin failed guest %ld vcpu %d pcpu %d\n",
+			 tvmc->tvm_guest_id, vcpu->vcpu_idx, smp_processor_id());
+		return ret;
+	}
+
+	ret = kvm_riscv_cove_tvm_fence(vcpu);
+	if (ret)
+		return ret;
+
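+	/* Clone the old vsfile contents on the old physical CPU via IPI */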
+	cpumask_clear(&tmpmask);
+	cpumask_set_cpu(old_pcpu, &tmpmask);
+	on_each_cpu_mask(&tmpmask, kvm_cove_imsic_clone, vcpu, 1);
+
+	ret = sbi_covi_rebind_vcpu_imsic_end(tvmc->tvm_guest_id, vcpu->vcpu_idx);
+	if (ret) {
+		kvm_err("Imsic rebind end failed guest %ld vcpu %d pcpu %d\n",
+			 tvmc->tvm_guest_id, vcpu->vcpu_idx, smp_processor_id());
+		return ret;
+	}
+
+	tvcpu->imsic.bound = true;
+
+	return 0;
+}
+
+int kvm_riscv_cove_vcpu_imsic_bind(struct kvm_vcpu *vcpu, unsigned long imsic_mask)
+{
+	struct kvm_cove_tvm_context *tvmc;
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_cove_tvm_vcpu_context *tvcpu = vcpu->arch.tc;
+	int ret;
+
+	if (!kvm->arch.tvmc)
+		return -EINVAL;
+
+	tvmc = kvm->arch.tvmc;
+
+	ret = sbi_covi_bind_vcpu_imsic(tvmc->tvm_guest_id, vcpu->vcpu_idx, imsic_mask);
+	if (ret) {
+		kvm_err("Imsic bind failed for imsic %lx guest %ld vcpu %d pcpu %d\n",
+			imsic_mask, tvmc->tvm_guest_id, vcpu->vcpu_idx, smp_processor_id());
+		return ret;
+	}
+	tvcpu->imsic.bound = true;
+	kvm_debug("%s: bind success vcpu %d hgei %d pcpu %d\n", __func__,
+		  vcpu->vcpu_idx, tvcpu->imsic.vsfile_hgei, smp_processor_id());
+
+	return 0;
+}
+
+int kvm_riscv_cove_vcpu_imsic_unbind(struct kvm_vcpu *vcpu, int old_pcpu)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_cove_tvm_vcpu_context *tvcpu = vcpu->arch.tc;
+	cpumask_t tmpmask;
+
+	if (!kvm->arch.tvmc)
+		return -EINVAL;
+
+	/* No need to unbind if it is not already bound */
+	if (!tvcpu->imsic.bound)
+		return 0;
+
+	/* Clear the flag first, even on failure, so the unbind is not retried */
+	tvcpu->imsic.bound = false;
+
+	if (smp_processor_id() == old_pcpu) {
+		kvm_cove_imsic_unbind(vcpu);
+	} else {
+		/* Unbind can be invoked from a different physical cpu */
+		cpumask_clear(&tmpmask);
+		cpumask_set_cpu(old_pcpu, &tmpmask);
+		on_each_cpu_mask(&tmpmask, kvm_cove_imsic_unbind, vcpu, 1);
+	}
+
+	return 0;
+}
+
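+/* Request the TSM to inject an external interrupt (id iid) into the TVM vCPU */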
+int kvm_riscv_cove_vcpu_inject_interrupt(struct kvm_vcpu *vcpu, unsigned long iid)
+{
+	struct kvm_cove_tvm_context *tvmc;
+	struct kvm *kvm = vcpu->kvm;
+	int ret;
+
+	if (!kvm->arch.tvmc)
+		return -EINVAL;
+
+	tvmc = kvm->arch.tvmc;
+
+	ret = sbi_covi_inject_external_interrupt(tvmc->tvm_guest_id, vcpu->vcpu_idx, iid);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
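+/*
+ * Describe the TVM's IMSIC layout (base address and group/hart/guest
+ * index bits) to the TSM at AIA initialization time.
+ */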
+int kvm_riscv_cove_aia_init(struct kvm *kvm)
+{
+	struct kvm_aia *aia = &kvm->arch.aia;
+	struct sbi_cove_tvm_aia_params *tvm_aia;
+	struct kvm_vcpu *vcpu;
+	struct kvm_cove_tvm_context *tvmc;
+	int ret;
+
+	if (!kvm->arch.tvmc)
+		return -EINVAL;
+
+	tvmc = kvm->arch.tvmc;
+
+	/* Sanity Check */
+	if (aia->aplic_addr != KVM_RISCV_AIA_UNDEF_ADDR)
+		return -EINVAL;
+
+	/* TVMs must have a physical guest interrupt file */
+	if (aia->mode != KVM_DEV_RISCV_AIA_MODE_HWACCEL)
+		return -ENODEV;
+
+	tvm_aia = kzalloc(sizeof(*tvm_aia), GFP_KERNEL);
+	if (!tvm_aia)
+		return -ENOMEM;
+
+	/* Base address of the IMSIC with group ID, hart ID & guest ID of 0 */
+	vcpu = kvm_get_vcpu_by_id(kvm, 0);
+	tvm_aia->imsic_base_addr = vcpu->arch.aia_context.imsic_addr;
+
+	tvm_aia->group_index_bits = aia->nr_group_bits;
+	tvm_aia->group_index_shift = aia->nr_group_shift;
+	tvm_aia->hart_index_bits = aia->nr_hart_bits;
+	tvm_aia->guest_index_bits = aia->nr_guest_bits;
+	/* Nested TVMs are not supported yet */
+	tvm_aia->guests_per_hart = 0;
+
+	ret = sbi_covi_tvm_aia_init(tvmc->tvm_guest_id, tvm_aia);
+	if (ret)
+		kvm_err("TVM AIA init failed with rc %d\n", ret);
+
+	return ret;
+}
+
 void kvm_riscv_cove_vcpu_load(struct kvm_vcpu *vcpu)
 {
 	kvm_riscv_vcpu_timer_restore(vcpu);
@@ -283,6 +525,7 @@ void noinstr kvm_riscv_cove_vcpu_switchto(struct kvm_vcpu *vcpu, struct kvm_cpu_
 	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
 	void *nshmem;
 	struct kvm_guest_timer *gt = &kvm->arch.timer;
+	struct kvm_cove_tvm_vcpu_context *tvcpuc = vcpu->arch.tc;
 
 	if (!kvm->arch.tvmc)
 		return;
@@ -301,6 +544,19 @@ void noinstr kvm_riscv_cove_vcpu_switchto(struct kvm_vcpu *vcpu, struct kvm_cpu_
 		tvmc->finalized_done = true;
 	}
 
+	/*
+	 * Bind the vsfile here instead of during new vsfile allocation because
+	 * the COVI bind call requires the TVM to be in the finalized state.
+	 */
+	if (tvcpuc->imsic.bind_required) {
+		tvcpuc->imsic.bind_required = false;
+		rc = kvm_riscv_cove_vcpu_imsic_bind(vcpu, BIT(tvcpuc->imsic.vsfile_hgei));
+		if (rc) {
+			kvm_err("bind failed with rc %d\n", rc);
+			return;
+		}
+	}
+
 	rc = sbi_covh_run_tvm_vcpu(tvmc->tvm_guest_id, vcpu->vcpu_idx);
 	if (rc) {
 		trap->scause = EXC_CUSTOM_KVM_COVE_RUN_FAIL;
-- 
2.25.1
