From: Fares Mehanna <faresx@amazon.de>
Cc: nh-open-source@amazon.com, "Fares Mehanna" <faresx@amazon.de>,
"Marc Zyngier" <maz@kernel.org>,
"Oliver Upton" <oliver.upton@linux.dev>,
"James Morse" <james.morse@arm.com>,
"Suzuki K Poulose" <suzuki.poulose@arm.com>,
"Zenghui Yu" <yuzenghui@huawei.com>,
"Catalin Marinas" <catalin.marinas@arm.com>,
"Will Deacon" <will@kernel.org>,
"Andrew Morton" <akpm@linux-foundation.org>,
"Kemeng Shi" <shikemeng@huaweicloud.com>,
"Pierre-Clément Tosi" <ptosi@google.com>,
"Ard Biesheuvel" <ardb@kernel.org>,
"Mark Rutland" <mark.rutland@arm.com>,
"Javier Martinez Canillas" <javierm@redhat.com>,
"Arnd Bergmann" <arnd@arndb.de>, "Fuad Tabba" <tabba@google.com>,
"Mark Brown" <broonie@kernel.org>,
"Joey Gouly" <joey.gouly@arm.com>,
"Kristina Martsenko" <kristina.martsenko@arm.com>,
"Randy Dunlap" <rdunlap@infradead.org>,
"Bjorn Helgaas" <bhelgaas@google.com>,
"Jean-Philippe Brucker" <jean-philippe@linaro.org>,
"Mike Rapoport (IBM)" <rppt@kernel.org>,
"David Hildenbrand" <david@redhat.com>,
"Roman Kagan" <rkagan@amazon.de>,
"moderated list:KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)"
<linux-arm-kernel@lists.infradead.org>,
"open list:KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)"
<kvmarm@lists.linux.dev>,
"open list" <linux-kernel@vger.kernel.org>,
"open list:MEMORY MANAGEMENT" <linux-mm@kvack.org>
Subject: [RFC PATCH 7/7] arm64: KVM: Allocate vCPU fp-regs dynamically on VHE and KERNEL_SECRETMEM enabled systems
Date: Wed, 11 Sep 2024 14:34:06 +0000
Message-ID: <20240911143421.85612-8-faresx@amazon.de>
In-Reply-To: <20240911143421.85612-1-faresx@amazon.de>
Similar to what was done in the earlier commit in this series,
"arm64: KVM: Allocate vCPU gp-regs dynamically on VHE and KERNEL_SECRETMEM enabled systems",
we're moving the fp-regs to dynamically allocated memory on systems that support VHE and are
compiled with KERNEL_SECRETMEM support. Otherwise, we fall back to the "fp_regs_storage"
struct embedded in the vCPU context.
Accessing the fp-regs embedded in the vCPU context, without a dereference, is done as:
add \regs, \ctxt, #offsetof(struct kvm_cpu_context, fp_regs_storage)
Accessing the dynamically allocated fp-regs, with a dereference, is done as:
ldr \regs, [\ctxt, #offsetof(struct kvm_cpu_context, fp_regs)]
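For illustration only, the net effect of the patched accessor can be sketched in C as below.
This is a minimal sketch: the real ctxt_fp_regs() in the hunk further down encodes this choice
as a single instruction rewritten via ALTERNATIVE_CB/kvm_update_ctxt_fp_regs rather than a
runtime branch, and kvm_use_dynamic_regs() is the helper added earlier in this series:

  /*
   * Sketch of the accessor semantics, not the actual implementation.
   * kvm_use_dynamic_regs() is true only on VHE systems built with
   * KERNEL_SECRETMEM, matching the condition described above.
   */
  static inline struct user_fpsimd_state *ctxt_fp_regs_sketch(struct kvm_cpu_context *ctxt)
  {
          if (kvm_use_dynamic_regs())
                  return ctxt->fp_regs;          /* "ldr": follow the pointer */
          return &ctxt->fp_regs_storage;         /* "add": embedded storage   */
  }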
Signed-off-by: Fares Mehanna <faresx@amazon.de>
---
arch/arm64/include/asm/kvm_host.h | 16 ++++++++++++++--
arch/arm64/kernel/image-vars.h | 1 +
arch/arm64/kvm/arm.c | 29 +++++++++++++++++++++++++++--
arch/arm64/kvm/va_layout.c | 23 +++++++++++++++++++----
4 files changed, 61 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e8ed2c12479f..4132c57d7e69 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -550,7 +550,9 @@ struct kvm_cpu_context {
u64 spsr_irq;
u64 spsr_fiq;
- struct user_fpsimd_state fp_regs;
+ struct user_fpsimd_state *fp_regs;
+ struct user_fpsimd_state fp_regs_storage;
+ struct secretmem_area *fp_regs_area;
u64 sys_regs[NR_SYS_REGS];
@@ -968,7 +970,17 @@ static __always_inline struct user_pt_regs *ctxt_gp_regs(const struct kvm_cpu_co
return regs;
}
#define vcpu_gp_regs(v) (ctxt_gp_regs(&(v)->arch.ctxt))
-#define ctxt_fp_regs(ctxt) (&(ctxt).fp_regs)
+
+static __always_inline struct user_fpsimd_state *ctxt_fp_regs(const struct kvm_cpu_context *ctxt)
+{
+ struct user_fpsimd_state *fp_regs = (void *) ctxt;
+ asm volatile(ALTERNATIVE_CB("add %0, %0, %1\n",
+ ARM64_HAS_VIRT_HOST_EXTN,
+ kvm_update_ctxt_fp_regs)
+ : "+r" (fp_regs)
+ : "I" (offsetof(struct kvm_cpu_context, fp_regs_storage)));
+ return fp_regs;
+}
#define vcpu_fp_regs(v) (ctxt_fp_regs(&(v)->arch.ctxt))
/*
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index e3bb626e299c..904573598e0f 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -87,6 +87,7 @@ KVM_NVHE_ALIAS(kvm_update_va_mask);
KVM_NVHE_ALIAS(kvm_get_kimage_voffset);
KVM_NVHE_ALIAS(kvm_compute_final_ctr_el0);
KVM_NVHE_ALIAS(kvm_update_ctxt_gp_regs);
+KVM_NVHE_ALIAS(kvm_update_ctxt_fp_regs);
KVM_NVHE_ALIAS(spectre_bhb_patch_loop_iter);
KVM_NVHE_ALIAS(spectre_bhb_patch_loop_mitigation_enable);
KVM_NVHE_ALIAS(spectre_bhb_patch_wa3);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 7542af3f766a..17b42e9099c3 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -477,6 +477,14 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
if (!vcpu->arch.ctxt.regs_area)
return -ENOMEM;
vcpu->arch.ctxt.regs = vcpu->arch.ctxt.regs_area->ptr;
+
+ pages_needed = (sizeof(*vcpu_fp_regs(vcpu)) + PAGE_SIZE - 1) / PAGE_SIZE;
+ vcpu->arch.ctxt.fp_regs_area = secretmem_allocate_pages(fls(pages_needed - 1));
+ if (!vcpu->arch.ctxt.fp_regs_area) {
+ err = -ENOMEM;
+ goto free_vcpu_ctxt;
+ }
+ vcpu->arch.ctxt.fp_regs = vcpu->arch.ctxt.fp_regs_area->ptr;
}
/* Set up the timer */
@@ -504,8 +512,10 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
return kvm_share_hyp(vcpu, vcpu + 1);
free_vcpu_ctxt:
- if (kvm_use_dynamic_regs())
+ if (kvm_use_dynamic_regs()) {
secretmem_release_pages(vcpu->arch.ctxt.regs_area);
+ secretmem_release_pages(vcpu->arch.ctxt.fp_regs_area);
+ }
return err;
}
@@ -524,8 +534,10 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
kvm_vgic_vcpu_destroy(vcpu);
kvm_arm_vcpu_destroy(vcpu);
- if (kvm_use_dynamic_regs())
+ if (kvm_use_dynamic_regs()) {
secretmem_release_pages(vcpu->arch.ctxt.regs_area);
+ secretmem_release_pages(vcpu->arch.ctxt.fp_regs_area);
+ }
}
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
@@ -2729,12 +2741,25 @@ static int init_hyp_hve_mode(void)
per_cpu(kvm_host_data, cpu).host_ctxt.regs = kvm_host_data_regs;
}
+ /* Allocate fp-regs */
+ for_each_possible_cpu(cpu) {
+ void *kvm_host_data_regs;
+
+ kvm_host_data_regs = kzalloc(sizeof(struct user_fpsimd_state), GFP_KERNEL);
+ if (!kvm_host_data_regs) {
+ err = -ENOMEM;
+ goto free_regs;
+ }
+ per_cpu(kvm_host_data, cpu).host_ctxt.fp_regs = kvm_host_data_regs;
+ }
+
return 0;
free_regs:
for_each_possible_cpu(cpu) {
kfree(per_cpu(kvm_hyp_ctxt, cpu).regs);
kfree(per_cpu(kvm_host_data, cpu).host_ctxt.regs);
+ kfree(per_cpu(kvm_host_data, cpu).host_ctxt.fp_regs);
}
return err;
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index fcef7e89d042..ba1030fa5b08 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -185,10 +185,12 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
}
}
-void __init kvm_update_ctxt_gp_regs(struct alt_instr *alt,
- __le32 *origptr, __le32 *updptr, int nr_inst)
+static __always_inline void __init kvm_update_ctxt_regs(struct alt_instr *alt,
+ __le32 *origptr,
+ __le32 *updptr,
+ int nr_inst, u32 imm)
{
- u32 rd, rn, imm, insn, oinsn;
+ u32 rd, rn, insn, oinsn;
BUG_ON(nr_inst != 1);
@@ -198,7 +200,6 @@ void __init kvm_update_ctxt_gp_regs(struct alt_instr *alt,
oinsn = le32_to_cpu(origptr[0]);
rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);
rn = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RN, oinsn);
- imm = offsetof(struct kvm_cpu_context, regs);
insn = aarch64_insn_gen_load_store_imm(rd, rn, imm,
AARCH64_INSN_SIZE_64,
@@ -208,6 +209,20 @@ void __init kvm_update_ctxt_gp_regs(struct alt_instr *alt,
updptr[0] = cpu_to_le32(insn);
}
+void __init kvm_update_ctxt_gp_regs(struct alt_instr *alt,
+ __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+ u32 offset = offsetof(struct kvm_cpu_context, regs);
+ kvm_update_ctxt_regs(alt, origptr, updptr, nr_inst, offset);
+}
+
+void __init kvm_update_ctxt_fp_regs(struct alt_instr *alt,
+ __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+ u32 offset = offsetof(struct kvm_cpu_context, fp_regs);
+ kvm_update_ctxt_regs(alt, origptr, updptr, nr_inst, offset);
+}
+
void kvm_patch_vector_branch(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst)
{
--
2.40.1