Skip to content

Commit c776234

Browse files
mrutland-arm authored and gregkh committed
KVM: arm64: Eagerly switch ZCR_EL{1,2}
[ Upstream commit 59419f1 ] In non-protected KVM modes, while the guest FPSIMD/SVE/SME state is live on the CPU, the host's active SVE VL may differ from the guest's maximum SVE VL: * For VHE hosts, when a VM uses NV, ZCR_EL2 contains a value constrained by the guest hypervisor, which may be less than or equal to that guest's maximum VL. Note: in this case the value of ZCR_EL1 is immaterial due to E2H. * For nVHE/hVHE hosts, ZCR_EL1 contains a value written by the guest, which may be less than or greater than the guest's maximum VL. Note: in this case hyp code traps host SVE usage and lazily restores ZCR_EL2 to the host's maximum VL, which may be greater than the guest's maximum VL. This can be the case between exiting a guest and kvm_arch_vcpu_put_fp(). If a softirq is taken during this period and the softirq handler tries to use kernel-mode NEON, then the kernel will fail to save the guest's FPSIMD/SVE state, and will pend a SIGKILL for the current thread. This happens because kvm_arch_vcpu_ctxsync_fp() binds the guest's live FPSIMD/SVE state with the guest's maximum SVE VL, and fpsimd_save_user_state() verifies that the live SVE VL is as expected before attempting to save the register state: | if (WARN_ON(sve_get_vl() != vl)) { | force_signal_inject(SIGKILL, SI_KERNEL, 0, 0); | return; | } Fix this and make this a bit easier to reason about by always eagerly switching ZCR_EL{1,2} at hyp during guest<->host transitions. With this happening, there's no need to trap host SVE usage, and the nVHE/hVHE __deactivate_cptr_traps() logic can be simplified to enable host access to all present FPSIMD/SVE/SME features. In protected nVHE/hVHE modes, the host's state is always saved/restored by hyp, and the guest's state is saved prior to exit to the host, so from the host's PoV the guest never has live FPSIMD/SVE/SME state, and the host's ZCR_EL1 is never clobbered by hyp.
Fixes: 8c8010d ("KVM: arm64: Save/restore SVE state for nVHE") Fixes: 2e3cf82 ("KVM: arm64: nv: Ensure correct VL is loaded before saving SVE state") Signed-off-by: Mark Rutland <mark.rutland@arm.com> Reviewed-by: Mark Brown <broonie@kernel.org> Tested-by: Mark Brown <broonie@kernel.org> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Fuad Tabba <tabba@google.com> Cc: Marc Zyngier <maz@kernel.org> Cc: Oliver Upton <oliver.upton@linux.dev> Cc: Will Deacon <will@kernel.org> Reviewed-by: Oliver Upton <oliver.upton@linux.dev> Link: https://lore.kernel.org/r/20250210195226.1215254-9-mark.rutland@arm.com Signed-off-by: Marc Zyngier <maz@kernel.org> Signed-off-by: Mark Brown <broonie@kernel.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 4a397bf commit c776234

File tree

6 files changed

+100
-40
lines changed

6 files changed

+100
-40
lines changed

arch/arm64/kvm/fpsimd.c

Lines changed: 0 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -136,36 +136,6 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
136136
local_irq_save(flags);
137137

138138
if (guest_owns_fp_regs()) {
139-
if (vcpu_has_sve(vcpu)) {
140-
u64 zcr = read_sysreg_el1(SYS_ZCR);
141-
142-
/*
143-
* If the vCPU is in the hyp context then ZCR_EL1 is
144-
* loaded with its vEL2 counterpart.
145-
*/
146-
__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr;
147-
148-
/*
149-
* Restore the VL that was saved when bound to the CPU,
150-
* which is the maximum VL for the guest. Because the
151-
* layout of the data when saving the sve state depends
152-
* on the VL, we need to use a consistent (i.e., the
153-
* maximum) VL.
154-
* Note that this means that at guest exit ZCR_EL1 is
155-
* not necessarily the same as on guest entry.
156-
*
157-
* ZCR_EL2 holds the guest hypervisor's VL when running
158-
* a nested guest, which could be smaller than the
159-
* max for the vCPU. Similar to above, we first need to
160-
* switch to a VL consistent with the layout of the
161-
* vCPU's SVE state. KVM support for NV implies VHE, so
162-
* using the ZCR_EL1 alias is safe.
163-
*/
164-
if (!has_vhe() || (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)))
165-
sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1,
166-
SYS_ZCR_EL1);
167-
}
168-
169139
/*
170140
* Flush (save and invalidate) the fpsimd/sve state so that if
171141
* the host tries to use fpsimd/sve, it's not using stale data

arch/arm64/kvm/hyp/entry.S

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -44,6 +44,11 @@ alternative_if ARM64_HAS_RAS_EXTN
4444
alternative_else_nop_endif
4545
mrs x1, isr_el1
4646
cbz x1, 1f
47+
48+
// Ensure that __guest_enter() always provides a context
49+
// synchronization event so that callers don't need ISBs for anything
50+
// that would usually be synchronized by the ERET.
51+
isb
4752
mov x0, #ARM_EXCEPTION_IRQ
4853
ret
4954

arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 59 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -344,6 +344,65 @@ static inline void __hyp_sve_save_host(void)
344344
true);
345345
}
346346

347+
static inline void fpsimd_lazy_switch_to_guest(struct kvm_vcpu *vcpu)
348+
{
349+
u64 zcr_el1, zcr_el2;
350+
351+
if (!guest_owns_fp_regs())
352+
return;
353+
354+
if (vcpu_has_sve(vcpu)) {
355+
/* A guest hypervisor may restrict the effective max VL. */
356+
if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))
357+
zcr_el2 = __vcpu_sys_reg(vcpu, ZCR_EL2);
358+
else
359+
zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
360+
361+
write_sysreg_el2(zcr_el2, SYS_ZCR);
362+
363+
zcr_el1 = __vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu));
364+
write_sysreg_el1(zcr_el1, SYS_ZCR);
365+
}
366+
}
367+
368+
static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu)
369+
{
370+
u64 zcr_el1, zcr_el2;
371+
372+
if (!guest_owns_fp_regs())
373+
return;
374+
375+
/*
376+
* When the guest owns the FP regs, we know that guest+hyp traps for
377+
* any FPSIMD/SVE/SME features exposed to the guest have been disabled
378+
* by either fpsimd_lazy_switch_to_guest() or kvm_hyp_handle_fpsimd()
379+
* prior to __guest_entry(). As __guest_entry() guarantees a context
380+
* synchronization event, we don't need an ISB here to avoid taking
381+
* traps for anything that was exposed to the guest.
382+
*/
383+
if (vcpu_has_sve(vcpu)) {
384+
zcr_el1 = read_sysreg_el1(SYS_ZCR);
385+
__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr_el1;
386+
387+
/*
388+
* The guest's state is always saved using the guest's max VL.
389+
* Ensure that the host has the guest's max VL active such that
390+
* the host can save the guest's state lazily, but don't
391+
* artificially restrict the host to the guest's max VL.
392+
*/
393+
if (has_vhe()) {
394+
zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
395+
write_sysreg_el2(zcr_el2, SYS_ZCR);
396+
} else {
397+
zcr_el2 = sve_vq_from_vl(kvm_host_sve_max_vl) - 1;
398+
write_sysreg_el2(zcr_el2, SYS_ZCR);
399+
400+
zcr_el1 = vcpu_sve_max_vq(vcpu) - 1;
401+
write_sysreg_el1(zcr_el1, SYS_ZCR);
402+
}
403+
}
404+
}
405+
347406
static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
348407
{
349408
/*

arch/arm64/kvm/hyp/nvhe/hyp-main.c

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
*/
66

77
#include <hyp/adjust_pc.h>
8+
#include <hyp/switch.h>
89

910
#include <asm/pgtable-types.h>
1011
#include <asm/kvm_asm.h>
@@ -177,7 +178,9 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
177178
pkvm_put_hyp_vcpu(hyp_vcpu);
178179
} else {
179180
/* The host is fully trusted, run its vCPU directly. */
181+
fpsimd_lazy_switch_to_guest(host_vcpu);
180182
ret = __kvm_vcpu_run(host_vcpu);
183+
fpsimd_lazy_switch_to_host(host_vcpu);
181184
}
182185

183186
out:
@@ -486,12 +489,6 @@ void handle_trap(struct kvm_cpu_context *host_ctxt)
486489
case ESR_ELx_EC_SMC64:
487490
handle_host_smc(host_ctxt);
488491
break;
489-
case ESR_ELx_EC_SVE:
490-
cpacr_clear_set(0, CPACR_ELx_ZEN);
491-
isb();
492-
sve_cond_update_zcr_vq(sve_vq_from_vl(kvm_host_sve_max_vl) - 1,
493-
SYS_ZCR_EL2);
494-
break;
495492
case ESR_ELx_EC_IABT_LOW:
496493
case ESR_ELx_EC_DABT_LOW:
497494
handle_host_mem_abort(host_ctxt);

arch/arm64/kvm/hyp/nvhe/switch.c

Lines changed: 29 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,9 @@ static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
4040
{
4141
u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */
4242

43+
if (!guest_owns_fp_regs())
44+
__activate_traps_fpsimd32(vcpu);
45+
4346
if (has_hvhe()) {
4447
val |= CPACR_ELx_TTA;
4548

@@ -48,6 +51,8 @@ static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
4851
if (vcpu_has_sve(vcpu))
4952
val |= CPACR_ELx_ZEN;
5053
}
54+
55+
write_sysreg(val, cpacr_el1);
5156
} else {
5257
val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;
5358

@@ -62,12 +67,32 @@ static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
6267

6368
if (!guest_owns_fp_regs())
6469
val |= CPTR_EL2_TFP;
70+
71+
write_sysreg(val, cptr_el2);
6572
}
73+
}
6674

67-
if (!guest_owns_fp_regs())
68-
__activate_traps_fpsimd32(vcpu);
75+
static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
76+
{
77+
if (has_hvhe()) {
78+
u64 val = CPACR_ELx_FPEN;
79+
80+
if (cpus_have_final_cap(ARM64_SVE))
81+
val |= CPACR_ELx_ZEN;
82+
if (cpus_have_final_cap(ARM64_SME))
83+
val |= CPACR_ELx_SMEN;
84+
85+
write_sysreg(val, cpacr_el1);
86+
} else {
87+
u64 val = CPTR_NVHE_EL2_RES1;
88+
89+
if (!cpus_have_final_cap(ARM64_SVE))
90+
val |= CPTR_EL2_TZ;
91+
if (!cpus_have_final_cap(ARM64_SME))
92+
val |= CPTR_EL2_TSM;
6993

70-
kvm_write_cptr_el2(val);
94+
write_sysreg(val, cptr_el2);
95+
}
7196
}
7297

7398
static void __activate_traps(struct kvm_vcpu *vcpu)
@@ -120,7 +145,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
120145

121146
write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
122147

123-
kvm_reset_cptr_el2(vcpu);
148+
__deactivate_cptr_traps(vcpu);
124149
write_sysreg(__kvm_hyp_host_vector, vbar_el2);
125150
}
126151

arch/arm64/kvm/hyp/vhe/switch.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -462,6 +462,8 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
462462

463463
sysreg_save_host_state_vhe(host_ctxt);
464464

465+
fpsimd_lazy_switch_to_guest(vcpu);
466+
465467
/*
466468
* Note that ARM erratum 1165522 requires us to configure both stage 1
467469
* and stage 2 translation for the guest context before we clear
@@ -486,6 +488,8 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
486488

487489
__deactivate_traps(vcpu);
488490

491+
fpsimd_lazy_switch_to_host(vcpu);
492+
489493
sysreg_restore_host_state_vhe(host_ctxt);
490494

491495
if (guest_owns_fp_regs())

0 commit comments

Comments
 (0)