Commit 70b67ef
Merge: kvm/aarch64: Pick up fixes up to v6.14
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/6574
JIRA: https://issues.redhat.com/browse/RHEL-82298
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=67019166
Upstream Status: v6.14.rc7
Tested: Passed. kselftests, kvm-unit-tests, virt-install and live migration (4KB/64KB)

This series picks up the following fixes, which are no later than v6.14.rc7
and the previous revisions. Note that any features, enhancements and their
followup fixes are ignored.

3855a7b KVM: arm64: Initialize SCTLR_EL1 in __kvm_hyp_init_cpu()
7a68b55 KVM: arm64: Initialize HCR_EL2.E2H early
afa9b48 KVM: arm64: Shave a few bytes from the EL2 idmap code
fa808ed KVM: arm64: Ensure a VMID is allocated before programming VTTBR_EL2
102c51c KVM: arm64: Fix tcr_el2 initialisation in hVHE mode
b3aa928 KVM: arm64: vgic: Hoist SGI/PPI alloc from vgic_init() to kvm_create_vgic()
e6e3e00 KVM: arm64: timer: Drop warning on failed interrupt signalling
b938731 KVM: arm64: Fix alignment of kvm_hyp_memcache allocations
332b7e6 KVM: arm64: Simplify warning in kvm_arch_vcpu_load_fp()
b450dcc KVM: arm64: timer: Always evaluate the need for a soft timer
5417a2e KVM: arm64: Fix nested S2 MMU structures reallocation
32392e0 KVM: arm64: Fail protected mode init if no vgic hardware is present
59419f1 KVM: arm64: Eagerly switch ZCR_EL{1,2}
f9dd00d KVM: arm64: Mark some header functions as inline
9b66195 KVM: arm64: Refactor exit handlers
407a99c KVM: arm64: Remove VHE host restore of CPACR_EL1.SMEN
459f059 KVM: arm64: Remove VHE host restore of CPACR_EL1.ZEN
8eca7f6 KVM: arm64: Remove host FPSIMD saving for non-protected KVM
d4db987 KVM: arm64: Add predicate for FPMR support in a VM
9bcbb61 KVM: arm64: Flush hyp bss section after initialization of variables in bss
9fb4267 KVM: arm64: Fix selftests after sysreg field name update
a7f1fa5 KVM: arm64: Explicitly handle BRBE traps as UNDEFINED
dea8838 KVM: arm64: vgic: Use str_enabled_disabled() in vgic_v3_probe()
6834403 KVM: arm64: Fix nVHE stacktrace VA bits mask
b7f345f KVM: arm64: Fix FEAT_MTE in pKVM
e891432 KVM: arm64: nv: Advertise the lack of AArch32 EL0 support
1eccad3 KVM: arm64: Fix the value of the CPTR_EL2 RES1 bitmask for nVHE
2fd5b4b KVM: arm64: Calculate cptr_el2 traps on activating traps
9df9186 KVM: arm64: Fix RAS trapping in pKVM for protected VMs
8ca19c4 KVM: arm64: Drop MDSCR_EL1_DEBUG_MASK
9d86c3c arm64/kvm: Avoid invalid physical addresses to signal owner updates
f0da169 arm64/kvm: Configure HYP TCR.PS/DS based on host stage1
d3ba35b KVM: arm64: nv: Reload PMU events upon MDCR_EL2.HPME change
adf8623 KVM: arm64: Use KVM_REQ_RELOAD_PMU to handle PMCR_EL0.E change
e22c369 KVM: arm64: Add unified helper for reprogramming counters by mask
985bb51 KVM: arm64: Always check the state from hyp_ack_unshare()

Changes in v4:
  Improved commit logs for the following upstream commits (Eric)
    d3ba35b KVM: arm64: nv: Reload PMU events upon MDCR_EL2.HPME change
    59419f1 KVM: arm64: Eagerly switch ZCR_EL{1,2}
  Added the following upstream commit, as dependency to upstream commits
  7a68b55 and 3855a7b (Eric)
    afa9b48 KVM: arm64: Shave a few bytes from the EL2 idmap code
Changes in v3:
  7 upstream commits added per Eric's comments to include 59419f1.
  All other commits are dependencies.
    59419f1 KVM: arm64: Eagerly switch ZCR_EL{1,2}
    f9dd00d KVM: arm64: Mark some header functions as inline
    9b66195 KVM: arm64: Refactor exit handlers
    407a99c KVM: arm64: Remove VHE host restore of CPACR_EL1.SMEN
    459f059 KVM: arm64: Remove VHE host restore of CPACR_EL1.ZEN
    8eca7f6 KVM: arm64: Remove host FPSIMD saving for non-protected KVM
    d4db987 KVM: arm64: Add predicate for FPMR support in a VM
Changes in v2:
  3 upstream commits added per Eric's comments
    1eccad3 KVM: arm64: Fix the value of the CPTR_EL2 RES1 bitmask for nVHE
    2fd5b4b KVM: arm64: Calculate cptr_el2 traps on activating traps
    9df9186 KVM: arm64: Fix RAS trapping in pKVM for protected VMs
  59419f1 skipped due to too many dependencies

Signed-off-by: Gavin Shan <gshan@redhat.com>
Approved-by: Shaoqin Huang <shahuang@redhat.com>
Approved-by: Eric Auger <eric.auger@redhat.com>
Approved-by: Cornelia Huck <cohuck@redhat.com>
Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com>
Merged-by: Augusto Caringi <acaringi@redhat.com>
2 parents: 2874741 + 75aeb44

33 files changed: +431, -464 lines

arch/arm64/include/asm/el2_setup.h

Lines changed: 26 additions & 5 deletions

@@ -16,6 +16,32 @@
 #include <asm/sysreg.h>
 #include <linux/irqchip/arm-gic-v3.h>
 
+.macro init_el2_hcr	val
+	mov_q	x0, \val
+
+	/*
+	 * Compliant CPUs advertise their VHE-onlyness with
+	 * ID_AA64MMFR4_EL1.E2H0 < 0. On such CPUs HCR_EL2.E2H is RES1, but it
+	 * can reset into an UNKNOWN state and might not read as 1 until it has
+	 * been initialized explicitly.
+	 *
+	 * Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but
+	 * don't advertise it (they predate this relaxation).
+	 *
+	 * Initialize HCR_EL2.E2H so that later code can rely upon HCR_EL2.E2H
+	 * indicating whether the CPU is running in E2H mode.
+	 */
+	mrs_s	x1, SYS_ID_AA64MMFR4_EL1
+	sbfx	x1, x1, #ID_AA64MMFR4_EL1_E2H0_SHIFT, #ID_AA64MMFR4_EL1_E2H0_WIDTH
+	cmp	x1, #0
+	b.ge	.LnVHE_\@
+
+	orr	x0, x0, #HCR_E2H
+.LnVHE_\@:
+	msr	hcr_el2, x0
+	isb
+.endm
+
 .macro __init_el2_sctlr
 	mov_q	x0, INIT_SCTLR_EL2_MMU_OFF
 	msr	sctlr_el2, x0
@@ -204,11 +230,6 @@
 .Lskip_fgt_\@:
 .endm
 
-.macro __init_el2_nvhe_prepare_eret
-	mov	x0, #INIT_PSTATE_EL1
-	msr	spsr_el2, x0
-.endm
-
 /**
  * Initialize EL2 registers to sane values. This should be called early on all
 * cores that were booted in EL2. Note that everything gets initialised as
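
The macro's key trick is treating ID_AA64MMFR4_EL1.E2H0 as a signed field: sbfx sign-extends it, and a negative value marks a VHE-only CPU whose HCR_EL2.E2H is RES1. A minimal C sketch of the same check (the E2H0 field position is an assumption for illustration, not taken from the kernel headers):

#include <stdint.h>

#define HCR_E2H		(1ULL << 34)	/* HCR_EL2.E2H */
#define E2H0_SHIFT	24		/* assumed ID_AA64MMFR4_EL1.E2H0 position */
#define E2H0_WIDTH	4

/* mirror of init_el2_hcr: decide whether to OR in E2H before writing HCR_EL2 */
static uint64_t compute_hcr(uint64_t val, uint64_t id_aa64mmfr4)
{
	/* sbfx equivalent: extract E2H0 as a signed value */
	int64_t e2h0 = (int64_t)(id_aa64mmfr4 << (64 - E2H0_SHIFT - E2H0_WIDTH))
			>> (64 - E2H0_WIDTH);

	if (e2h0 < 0)		/* negative E2H0: VHE-only, E2H is RES1 */
		val |= HCR_E2H;

	return val;		/* the macro then does: msr hcr_el2, x0; isb */
}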

arch/arm64/include/asm/kvm_arm.h

Lines changed: 2 additions & 2 deletions

@@ -117,7 +117,7 @@
 #define TCR_EL2_IRGN0_MASK	TCR_IRGN0_MASK
 #define TCR_EL2_T0SZ_MASK	0x3f
 #define TCR_EL2_MASK	(TCR_EL2_TG0_MASK | TCR_EL2_SH0_MASK | \
-			 TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)
+			 TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK)
 
 /* VTCR_EL2 Registers bits */
 #define VTCR_EL2_DS		TCR_EL2_DS
@@ -298,7 +298,7 @@
 #define CPTR_EL2_TSM	(1 << 12)
 #define CPTR_EL2_TFP	(1 << CPTR_EL2_TFP_SHIFT)
 #define CPTR_EL2_TZ	(1 << 8)
-#define CPTR_NVHE_EL2_RES1	0x000032ff /* known RES1 bits in CPTR_EL2 (nVHE) */
+#define CPTR_NVHE_EL2_RES1	(BIT(13) | BIT(9) | GENMASK(7, 0))
 #define CPTR_NVHE_EL2_RES0	(GENMASK(63, 32) |	\
				 GENMASK(29, 21) |	\
				 GENMASK(19, 14) |	\
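
Worth spelling out: the old literal 0x32ff included bit 12, which this same header defines as CPTR_EL2_TSM and which is therefore not unconditionally RES1; the new expression drops it, giving 0x22ff. A quick userspace check, with BIT/GENMASK reimplemented as simplified stand-ins for the kernel macros:

#include <assert.h>
#include <stdint.h>

#define BIT(n)		(1ULL << (n))
#define GENMASK(h, l)	(((~0ULL) >> (63 - (h))) & ~(BIT(l) - 1))

int main(void)
{
	uint64_t old_res1 = 0x000032ffULL;	/* previous literal */
	uint64_t new_res1 = BIT(13) | BIT(9) | GENMASK(7, 0);

	assert(new_res1 == 0x22ffULL);
	assert((old_res1 & ~new_res1) == BIT(12));	/* only TSM was dropped */
	return 0;
}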

arch/arm64/include/asm/kvm_asm.h

Lines changed: 1 addition & 0 deletions

@@ -176,6 +176,7 @@ struct kvm_nvhe_init_params {
 	unsigned long hcr_el2;
 	unsigned long vttbr;
 	unsigned long vtcr;
+	unsigned long tmp;
 };
 
 /*

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 2 additions & 2 deletions

@@ -634,8 +634,8 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
 
 		if (vcpu_has_sve(vcpu) && guest_owns_fp_regs())
			val |= CPTR_EL2_TZ;
-		if (cpus_have_final_cap(ARM64_SME))
-			val &= ~CPTR_EL2_TSM;
+		if (!cpus_have_final_cap(ARM64_SME))
+			val |= CPTR_EL2_TSM;
 	}
 
 	return val;
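
This inversion pairs with the CPTR_NVHE_EL2_RES1 change above: TSM is no longer baked into the reset value through the RES1 mask, so it must be set explicitly when SME is absent rather than cleared when present. A small equivalence check under that assumption (the two mask constants come straight from the diffs above):

#include <assert.h>
#include <stdint.h>

#define CPTR_EL2_TSM	(1ULL << 12)

/* old scheme: RES1 contains TSM, clear it when SME exists */
static uint64_t old_reset(uint64_t res1, int have_sme)
{
	uint64_t val = res1;
	if (have_sme)
		val &= ~CPTR_EL2_TSM;
	return val;
}

/* new scheme: RES1 no longer contains TSM, set it when SME is absent */
static uint64_t new_reset(uint64_t res1, int have_sme)
{
	uint64_t val = res1;
	if (!have_sme)
		val |= CPTR_EL2_TSM;
	return val;
}

int main(void)
{
	uint64_t old_res1 = 0x32ff, new_res1 = 0x22ff;

	assert(old_reset(old_res1, 0) == new_reset(new_res1, 0));
	assert(old_reset(old_res1, 1) == new_reset(new_res1, 1));
	return 0;
}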

arch/arm64/include/asm/kvm_host.h

Lines changed: 11 additions & 8 deletions

@@ -98,7 +98,7 @@ static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
 static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
				     void *(*to_va)(phys_addr_t phys))
 {
-	phys_addr_t *p = to_va(mc->head);
+	phys_addr_t *p = to_va(mc->head & PAGE_MASK);
 
 	if (!mc->nr_pages)
		return NULL;
@@ -595,13 +595,13 @@ struct kvm_host_data {
 	struct kvm_cpu_context host_ctxt;
 
 	/*
-	 * All pointers in this union are hyp VA.
+	 * Hyp VA.
	 * sve_state is only used in pKVM and if system_supports_sve().
	 */
-	union {
-		struct user_fpsimd_state *fpsimd_state;
-		struct cpu_sve_state *sve_state;
-	};
+	struct cpu_sve_state *sve_state;
+
+	/* Used by pKVM only. */
+	u64 fpmr;
 
 	/* Ownership of the FP regs */
	enum {
@@ -682,7 +682,6 @@ struct kvm_vcpu_arch {
	u64 hcr_el2;
	u64 hcrx_el2;
	u64 mdcr_el2;
-	u64 cptr_el2;
 
	/* Exception Information */
	struct kvm_vcpu_fault_info fault;
@@ -1234,7 +1233,7 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
 extern unsigned int __ro_after_init kvm_arm_vmid_bits;
 int __init kvm_arm_vmid_alloc_init(void);
 void __init kvm_arm_vmid_alloc_free(void);
-bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
+void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
 void kvm_arm_vmid_clear_active(void);
 
 static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
@@ -1489,4 +1488,8 @@ void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
	 (pa + pi + pa3) == 1;					\
 })
 
+#define kvm_has_fpmr(k)					\
+	(system_supports_fpmr() &&			\
+	 kvm_has_feat((k), ID_AA64PFR2_EL1, FPMR, IMP))
+
 #endif /* __ARM64_KVM_HOST_H__ */
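
The pop_hyp_memcache() hunk only changes which address gets translated: the head may now carry non-zero low-order bits, so it is masked down to the page base before the phys-to-VA conversion. A trivial illustration, assuming 4K pages:

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT	12			/* assumed 4K pages */
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t head = 0x40001234ULL;		/* head with low bits set */

	/* what to_va() now receives: the page base, not the raw head */
	assert((head & PAGE_MASK) == 0x40001000ULL);
	return 0;
}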

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 2 additions & 0 deletions

@@ -139,6 +139,8 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 
 #define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
 
+extern u32 __hyp_va_bits;
+
 /*
  * We currently support using a VM-specified IPA size. For backward
 * compatibility, the default IPA size is fixed to 40bits.

arch/arm64/kernel/asm-offsets.c

Lines changed: 1 addition & 0 deletions

@@ -130,6 +130,7 @@ int main(void)
 	DEFINE(NVHE_INIT_HCR_EL2,	offsetof(struct kvm_nvhe_init_params, hcr_el2));
 	DEFINE(NVHE_INIT_VTTBR,	offsetof(struct kvm_nvhe_init_params, vttbr));
 	DEFINE(NVHE_INIT_VTCR,	offsetof(struct kvm_nvhe_init_params, vtcr));
+	DEFINE(NVHE_INIT_TMP,	offsetof(struct kvm_nvhe_init_params, tmp));
 #endif
 #ifdef CONFIG_CPU_PM
 	DEFINE(CPU_CTX_SP,	offsetof(struct cpu_suspend_ctx, sp));
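
For readers unfamiliar with asm-offsets.c: the file is compiled but never linked; the build scrapes its assembly output to generate header constants, so hand-written assembly can address struct members by name (here, the new tmp member of kvm_nvhe_init_params). A compilable sketch of the mechanism, with the struct layout trimmed and the macro simplified from the kernel's version:

#include <stddef.h>

struct kvm_nvhe_init_params_sketch {		/* trimmed stand-in */
	unsigned long hcr_el2;
	unsigned long vttbr;
	unsigned long vtcr;
	unsigned long tmp;			/* member added by this series */
};

/* emits "->NVHE_INIT_TMP <offset>" into the .s output for the build to scrape */
#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0\"" : : "i" (val))

int main(void)
{
	DEFINE(NVHE_INIT_TMP,
	       offsetof(struct kvm_nvhe_init_params_sketch, tmp));
	return 0;
}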

arch/arm64/kernel/head.S

Lines changed: 3 additions & 19 deletions

@@ -295,25 +295,8 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
 	msr	sctlr_el2, x0
 	isb
 0:
-	mov_q	x0, HCR_HOST_NVHE_FLAGS
-
-	/*
-	 * Compliant CPUs advertise their VHE-onlyness with
-	 * ID_AA64MMFR4_EL1.E2H0 < 0. HCR_EL2.E2H can be
-	 * RES1 in that case. Publish the E2H bit early so that
-	 * it can be picked up by the init_el2_state macro.
-	 *
-	 * Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but
-	 * don't advertise it (they predate this relaxation).
-	 */
-	mrs_s	x1, SYS_ID_AA64MMFR4_EL1
-	tbz	x1, #(ID_AA64MMFR4_EL1_E2H0_SHIFT + ID_AA64MMFR4_EL1_E2H0_WIDTH - 1), 1f
-
-	orr	x0, x0, #HCR_E2H
-1:
-	msr	hcr_el2, x0
-	isb
 
+	init_el2_hcr	HCR_HOST_NVHE_FLAGS
 	init_el2_state
 
 	/* Hypervisor stub */
@@ -336,7 +319,8 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
 	msr	sctlr_el1, x1
 	mov	x2, xzr
 3:
-	__init_el2_nvhe_prepare_eret
+	mov	x0, #INIT_PSTATE_EL1
+	msr	spsr_el2, x0
 
 	mov	w0, #BOOT_CPU_MODE_EL2
 	orr	x0, x0, x2

arch/arm64/kvm/arch_timer.c

Lines changed: 8 additions & 12 deletions

@@ -444,19 +444,17 @@ void kvm_timer_update_run(struct kvm_vcpu *vcpu)
 static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx)
 {
-	int ret;
-
 	timer_ctx->irq.level = new_level;
 	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_irq(timer_ctx),
				   timer_ctx->irq.level);
 
-	if (!userspace_irqchip(vcpu->kvm)) {
-		ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu,
-					  timer_irq(timer_ctx),
-					  timer_ctx->irq.level,
-					  timer_ctx);
-		WARN_ON(ret);
-	}
+	if (userspace_irqchip(vcpu->kvm))
+		return;
+
+	kvm_vgic_inject_irq(vcpu->kvm, vcpu,
+			    timer_irq(timer_ctx),
+			    timer_ctx->irq.level,
+			    timer_ctx);
 }
 
 /* Only called for a fully emulated timer */
@@ -466,10 +464,8 @@ static void timer_emulate(struct arch_timer_context *ctx)
 
 	trace_kvm_timer_emulate(ctx, should_fire);
 
-	if (should_fire != ctx->irq.level) {
+	if (should_fire != ctx->irq.level)
		kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
-		return;
-	}
 
 	/*
	 * If the timer can fire now, we don't need to have a soft timer
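
The timer_emulate() hunk is a behavior fix, not just brace cleanup: the early return skipped the soft-timer evaluation whenever the line level changed. A minimal sketch of the corrected flow; the types and helpers are hypothetical stand-ins, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct tctx { bool level; bool enabled; };

static bool should_fire_now(struct tctx *ctx) { (void)ctx; return false; }
static void update_irq(struct tctx *ctx, bool level) { ctx->level = level; }
static void arm_soft_timer(struct tctx *ctx) { (void)ctx; puts("soft timer armed"); }

static void timer_emulate_sketch(struct tctx *ctx)
{
	bool should_fire = should_fire_now(ctx);

	if (should_fire != ctx->level)
		update_irq(ctx, should_fire);
	/* pre-fix code returned here on a level change, skipping the below */

	if (!should_fire && ctx->enabled)
		arm_soft_timer(ctx);	/* schedule a wakeup for the expiry */
}

int main(void)
{
	struct tctx ctx = { .level = true, .enabled = true };

	timer_emulate_sketch(&ctx);	/* now arms the soft timer even here */
	return 0;
}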

arch/arm64/kvm/arm.c

Lines changed: 36 additions & 28 deletions

@@ -577,6 +577,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	mmu = vcpu->arch.hw_mmu;
 	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
 
+	/*
+	 * Ensure a VMID is allocated for the MMU before programming VTTBR_EL2,
+	 * which happens eagerly in VHE.
+	 *
+	 * Also, the VMID allocator only preserves VMIDs that are active at the
+	 * time of rollover, so KVM might need to grab a new VMID for the MMU if
+	 * this is called from kvm_sched_in().
+	 */
+	kvm_arm_vmid_update(&mmu->vmid);
+
 	/*
	 * We guarantee that both TLBs and I-cache are private to each
	 * vcpu. If detecting that a vcpu from the same VM has
@@ -1144,18 +1154,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
	 */
	preempt_disable();
 
-	/*
-	 * The VMID allocator only tracks active VMIDs per
-	 * physical CPU, and therefore the VMID allocated may not be
-	 * preserved on VMID roll-over if the task was preempted,
-	 * making a thread's VMID inactive. So we need to call
-	 * kvm_arm_vmid_update() in non-premptible context.
-	 */
-	if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) &&
-	    has_vhe())
-		__load_stage2(vcpu->arch.hw_mmu,
-			      vcpu->arch.hw_mmu->arch);
-
	kvm_pmu_flush_hwstate(vcpu);
 
	local_irq_disable();
@@ -1568,7 +1566,6 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
 	}
 
 	vcpu_reset_hcr(vcpu);
-	vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
 
 	/*
	 * Handle the "start in power-off" case.
@@ -1987,7 +1984,6 @@ static int kvm_init_vector_slots(void)
 static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
 {
 	struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
-	u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
 	unsigned long tcr;
 
 	/*
@@ -2003,17 +1999,17 @@ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
 
 	tcr = read_sysreg(tcr_el1);
 	if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
+		tcr &= ~(TCR_HD | TCR_HA | TCR_A1 | TCR_T0SZ_MASK);
		tcr |= TCR_EPD1_MASK;
 	} else {
+		unsigned long ips = FIELD_GET(TCR_IPS_MASK, tcr);
+
		tcr &= TCR_EL2_MASK;
-		tcr |= TCR_EL2_RES1;
+		tcr |= TCR_EL2_RES1 | FIELD_PREP(TCR_EL2_PS_MASK, ips);
+		if (lpa2_is_enabled())
+			tcr |= TCR_EL2_DS;
 	}
-	tcr &= ~TCR_T0SZ_MASK;
 	tcr |= TCR_T0SZ(hyp_va_bits);
-	tcr &= ~TCR_EL2_PS_MASK;
-	tcr |= FIELD_PREP(TCR_EL2_PS_MASK, kvm_get_parange(mmfr0));
-	if (kvm_lpa2_is_enabled())
-		tcr |= TCR_EL2_DS;
 	params->tcr_el2 = tcr;
 
 	params->pgd_pa = kvm_mmu_get_httbr();
@@ -2297,6 +2293,19 @@ static int __init init_subsystems(void)
		break;
	case -ENODEV:
	case -ENXIO:
+		/*
+		 * No VGIC? No pKVM for you.
+		 *
+		 * Protected mode assumes that VGICv3 is present, so no point
+		 * in trying to hobble along if vgic initialization fails.
+		 */
+		if (is_protected_kvm_enabled())
+			goto out;
+
+		/*
+		 * Otherwise, userspace could choose to implement a GIC for its
+		 * guest on non-cooperative hardware.
+		 */
		vgic_present = false;
		err = 0;
		break;
@@ -2407,6 +2416,13 @@ static void kvm_hyp_init_symbols(void)
 	kvm_nvhe_sym(id_aa64smfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64SMFR0_EL1);
 	kvm_nvhe_sym(__icache_flags) = __icache_flags;
 	kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits;
+
+	/*
+	 * Flush entire BSS since part of its data containing init symbols is read
+	 * while the MMU is off.
+	 */
+	kvm_flush_dcache_to_poc(kvm_ksym_ref(__hyp_bss_start),
+				kvm_ksym_ref(__hyp_bss_end) - kvm_ksym_ref(__hyp_bss_start));
 }
 
 static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
@@ -2468,14 +2484,6 @@ static void finalize_init_hyp_mode(void)
			per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state =
				kern_hyp_va(sve_state);
		}
-	} else {
-		for_each_possible_cpu(cpu) {
-			struct user_fpsimd_state *fpsimd_state;
-
-			fpsimd_state = &per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->host_ctxt.fp_regs;
-			per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->fpsimd_state =
-				kern_hyp_va(fpsimd_state);
-		}
 	}
 }
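
In the cpu_prepare_hyp_mode() hunk, the hypervisor's TCR_EL2.PS is now derived from the host's TCR_EL1.IPS instead of ID_AA64MMFR0_EL1, via FIELD_GET/FIELD_PREP. A userspace sketch of that bitfield plumbing; the FIELD_* macros below are simplified stand-ins for the kernel's bitfield.h versions (contiguous masks assumed), and the mask positions follow the architected field layouts:

#include <assert.h>
#include <stdint.h>

/* simplified stand-ins for the kernel's FIELD_GET/FIELD_PREP */
#define FIELD_GET(mask, reg)	(((reg) & (mask)) / ((mask) & -(mask)))
#define FIELD_PREP(mask, val)	(((val) * ((mask) & -(mask))) & (mask))

#define TCR_IPS_MASK		(7ULL << 32)	/* TCR_EL1.IPS, bits [34:32] */
#define TCR_EL2_PS_MASK		(7ULL << 16)	/* TCR_EL2.PS, bits [18:16] */

int main(void)
{
	uint64_t tcr_el1 = FIELD_PREP(TCR_IPS_MASK, 5);	/* e.g. 48-bit PA range */

	/* what the hunk does: copy the host's PA range field into TCR_EL2 */
	uint64_t ips = FIELD_GET(TCR_IPS_MASK, tcr_el1);
	uint64_t tcr_el2 = FIELD_PREP(TCR_EL2_PS_MASK, ips);

	assert(FIELD_GET(TCR_EL2_PS_MASK, tcr_el2) == 5);
	return 0;
}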
