Skip to content

Commit fb10ddf

Browse files
oupton (Oliver Upton) authored and Marc Zyngier committed
KVM: arm64: Compute per-vCPU FGTs at vcpu_load()
To date KVM has used the fine-grained traps for the sake of UNDEF enforcement (so-called FGUs), meaning the constituent parts could be computed on a per-VM basis and folded into the effective value when programmed.

Prepare for traps changing based on the vCPU context by computing the whole mess of them at vcpu_load(). Aggressively inline all the helpers to preserve the build-time checks that were there before.

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
1 parent 5c7cf1e commit fb10ddf

File tree

5 files changed

+151
-131
lines changed

5 files changed

+151
-131
lines changed

arch/arm64/include/asm/kvm_host.h

Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -816,6 +816,11 @@ struct kvm_vcpu_arch {
816816
u64 hcrx_el2;
817817
u64 mdcr_el2;
818818

819+
struct {
820+
u64 r;
821+
u64 w;
822+
} fgt[__NR_FGT_GROUP_IDS__];
823+
819824
/* Exception Information */
820825
struct kvm_vcpu_fault_info fault;
821826

@@ -1600,6 +1605,51 @@ static inline bool kvm_arch_has_irq_bypass(void)
16001605
void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt);
16011606
void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *res1);
16021607
void check_feature_map(void);
1608+
void kvm_vcpu_load_fgt(struct kvm_vcpu *vcpu);
1609+
1610+
static __always_inline enum fgt_group_id __fgt_reg_to_group_id(enum vcpu_sysreg reg)
1611+
{
1612+
switch (reg) {
1613+
case HFGRTR_EL2:
1614+
case HFGWTR_EL2:
1615+
return HFGRTR_GROUP;
1616+
case HFGITR_EL2:
1617+
return HFGITR_GROUP;
1618+
case HDFGRTR_EL2:
1619+
case HDFGWTR_EL2:
1620+
return HDFGRTR_GROUP;
1621+
case HAFGRTR_EL2:
1622+
return HAFGRTR_GROUP;
1623+
case HFGRTR2_EL2:
1624+
case HFGWTR2_EL2:
1625+
return HFGRTR2_GROUP;
1626+
case HFGITR2_EL2:
1627+
return HFGITR2_GROUP;
1628+
case HDFGRTR2_EL2:
1629+
case HDFGWTR2_EL2:
1630+
return HDFGRTR2_GROUP;
1631+
default:
1632+
BUILD_BUG_ON(1);
1633+
}
1634+
}
16031635

1636+
#define vcpu_fgt(vcpu, reg) \
1637+
({ \
1638+
enum fgt_group_id id = __fgt_reg_to_group_id(reg); \
1639+
u64 *p; \
1640+
switch (reg) { \
1641+
case HFGWTR_EL2: \
1642+
case HDFGWTR_EL2: \
1643+
case HFGWTR2_EL2: \
1644+
case HDFGWTR2_EL2: \
1645+
p = &(vcpu)->arch.fgt[id].w; \
1646+
break; \
1647+
default: \
1648+
p = &(vcpu)->arch.fgt[id].r; \
1649+
break; \
1650+
} \
1651+
\
1652+
p; \
1653+
})
16041654

16051655
#endif /* __ARM64_KVM_HOST_H__ */

arch/arm64/kvm/arm.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -642,6 +642,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
642642
vcpu->arch.hcr_el2 |= HCR_TWI;
643643

644644
vcpu_set_pauth_traps(vcpu);
645+
kvm_vcpu_load_fgt(vcpu);
645646

646647
if (is_protected_kvm_enabled()) {
647648
kvm_call_hyp_nvhe(__pkvm_vcpu_load,

arch/arm64/kvm/config.c

Lines changed: 82 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,8 @@
55
*/
66

77
#include <linux/kvm_host.h>
8+
#include <asm/kvm_emulate.h>
9+
#include <asm/kvm_nested.h>
810
#include <asm/sysreg.h>
911

1012
/*
@@ -1428,3 +1430,83 @@ void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *r
14281430
break;
14291431
}
14301432
}
1433+
1434+
static __always_inline struct fgt_masks *__fgt_reg_to_masks(enum vcpu_sysreg reg)
1435+
{
1436+
switch (reg) {
1437+
case HFGRTR_EL2:
1438+
return &hfgrtr_masks;
1439+
case HFGWTR_EL2:
1440+
return &hfgwtr_masks;
1441+
case HFGITR_EL2:
1442+
return &hfgitr_masks;
1443+
case HDFGRTR_EL2:
1444+
return &hdfgrtr_masks;
1445+
case HDFGWTR_EL2:
1446+
return &hdfgwtr_masks;
1447+
case HAFGRTR_EL2:
1448+
return &hafgrtr_masks;
1449+
case HFGRTR2_EL2:
1450+
return &hfgrtr2_masks;
1451+
case HFGWTR2_EL2:
1452+
return &hfgwtr2_masks;
1453+
case HFGITR2_EL2:
1454+
return &hfgitr2_masks;
1455+
case HDFGRTR2_EL2:
1456+
return &hdfgrtr2_masks;
1457+
case HDFGWTR2_EL2:
1458+
return &hdfgwtr2_masks;
1459+
default:
1460+
BUILD_BUG_ON(1);
1461+
}
1462+
}
1463+
1464+
static __always_inline void __compute_fgt(struct kvm_vcpu *vcpu, enum vcpu_sysreg reg)
1465+
{
1466+
u64 fgu = vcpu->kvm->arch.fgu[__fgt_reg_to_group_id(reg)];
1467+
struct fgt_masks *m = __fgt_reg_to_masks(reg);
1468+
u64 clear = 0, set = 0, val = m->nmask;
1469+
1470+
set |= fgu & m->mask;
1471+
clear |= fgu & m->nmask;
1472+
1473+
if (is_nested_ctxt(vcpu)) {
1474+
u64 nested = __vcpu_sys_reg(vcpu, reg);
1475+
set |= nested & m->mask;
1476+
clear |= ~nested & m->nmask;
1477+
}
1478+
1479+
val |= set;
1480+
val &= ~clear;
1481+
*vcpu_fgt(vcpu, reg) = val;
1482+
}
1483+
1484+
static void __compute_hfgwtr(struct kvm_vcpu *vcpu)
1485+
{
1486+
__compute_fgt(vcpu, HFGWTR_EL2);
1487+
1488+
if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
1489+
*vcpu_fgt(vcpu, HFGWTR_EL2) |= HFGWTR_EL2_TCR_EL1;
1490+
}
1491+
1492+
void kvm_vcpu_load_fgt(struct kvm_vcpu *vcpu)
1493+
{
1494+
if (!cpus_have_final_cap(ARM64_HAS_FGT))
1495+
return;
1496+
1497+
__compute_fgt(vcpu, HFGRTR_EL2);
1498+
__compute_hfgwtr(vcpu);
1499+
__compute_fgt(vcpu, HFGITR_EL2);
1500+
__compute_fgt(vcpu, HDFGRTR_EL2);
1501+
__compute_fgt(vcpu, HDFGWTR_EL2);
1502+
__compute_fgt(vcpu, HAFGRTR_EL2);
1503+
1504+
if (!cpus_have_final_cap(ARM64_HAS_FGT2))
1505+
return;
1506+
1507+
__compute_fgt(vcpu, HFGRTR2_EL2);
1508+
__compute_fgt(vcpu, HFGWTR2_EL2);
1509+
__compute_fgt(vcpu, HFGITR2_EL2);
1510+
__compute_fgt(vcpu, HDFGRTR2_EL2);
1511+
__compute_fgt(vcpu, HDFGWTR2_EL2);
1512+
}

arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 17 additions & 131 deletions
Original file line numberDiff line numberDiff line change
@@ -195,123 +195,6 @@ static inline void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
195195
__deactivate_cptr_traps_nvhe(vcpu);
196196
}
197197

198-
#define reg_to_fgt_masks(reg) \
199-
({ \
200-
struct fgt_masks *m; \
201-
switch(reg) { \
202-
case HFGRTR_EL2: \
203-
m = &hfgrtr_masks; \
204-
break; \
205-
case HFGWTR_EL2: \
206-
m = &hfgwtr_masks; \
207-
break; \
208-
case HFGITR_EL2: \
209-
m = &hfgitr_masks; \
210-
break; \
211-
case HDFGRTR_EL2: \
212-
m = &hdfgrtr_masks; \
213-
break; \
214-
case HDFGWTR_EL2: \
215-
m = &hdfgwtr_masks; \
216-
break; \
217-
case HAFGRTR_EL2: \
218-
m = &hafgrtr_masks; \
219-
break; \
220-
case HFGRTR2_EL2: \
221-
m = &hfgrtr2_masks; \
222-
break; \
223-
case HFGWTR2_EL2: \
224-
m = &hfgwtr2_masks; \
225-
break; \
226-
case HFGITR2_EL2: \
227-
m = &hfgitr2_masks; \
228-
break; \
229-
case HDFGRTR2_EL2: \
230-
m = &hdfgrtr2_masks; \
231-
break; \
232-
case HDFGWTR2_EL2: \
233-
m = &hdfgwtr2_masks; \
234-
break; \
235-
default: \
236-
BUILD_BUG_ON(1); \
237-
} \
238-
\
239-
m; \
240-
})
241-
242-
#define compute_clr_set(vcpu, reg, clr, set) \
243-
do { \
244-
u64 hfg = __vcpu_sys_reg(vcpu, reg); \
245-
struct fgt_masks *m = reg_to_fgt_masks(reg); \
246-
set |= hfg & m->mask; \
247-
clr |= ~hfg & m->nmask; \
248-
} while(0)
249-
250-
#define reg_to_fgt_group_id(reg) \
251-
({ \
252-
enum fgt_group_id id; \
253-
switch(reg) { \
254-
case HFGRTR_EL2: \
255-
case HFGWTR_EL2: \
256-
id = HFGRTR_GROUP; \
257-
break; \
258-
case HFGITR_EL2: \
259-
id = HFGITR_GROUP; \
260-
break; \
261-
case HDFGRTR_EL2: \
262-
case HDFGWTR_EL2: \
263-
id = HDFGRTR_GROUP; \
264-
break; \
265-
case HAFGRTR_EL2: \
266-
id = HAFGRTR_GROUP; \
267-
break; \
268-
case HFGRTR2_EL2: \
269-
case HFGWTR2_EL2: \
270-
id = HFGRTR2_GROUP; \
271-
break; \
272-
case HFGITR2_EL2: \
273-
id = HFGITR2_GROUP; \
274-
break; \
275-
case HDFGRTR2_EL2: \
276-
case HDFGWTR2_EL2: \
277-
id = HDFGRTR2_GROUP; \
278-
break; \
279-
default: \
280-
BUILD_BUG_ON(1); \
281-
} \
282-
\
283-
id; \
284-
})
285-
286-
#define compute_undef_clr_set(vcpu, kvm, reg, clr, set) \
287-
do { \
288-
u64 hfg = kvm->arch.fgu[reg_to_fgt_group_id(reg)]; \
289-
struct fgt_masks *m = reg_to_fgt_masks(reg); \
290-
set |= hfg & m->mask; \
291-
clr |= hfg & m->nmask; \
292-
} while(0)
293-
294-
#define update_fgt_traps_cs(hctxt, vcpu, kvm, reg, clr, set) \
295-
do { \
296-
struct fgt_masks *m = reg_to_fgt_masks(reg); \
297-
u64 c = clr, s = set; \
298-
u64 val; \
299-
\
300-
ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg); \
301-
if (is_nested_ctxt(vcpu)) \
302-
compute_clr_set(vcpu, reg, c, s); \
303-
\
304-
compute_undef_clr_set(vcpu, kvm, reg, c, s); \
305-
\
306-
val = m->nmask; \
307-
val |= s; \
308-
val &= ~c; \
309-
write_sysreg_s(val, SYS_ ## reg); \
310-
} while(0)
311-
312-
#define update_fgt_traps(hctxt, vcpu, kvm, reg) \
313-
update_fgt_traps_cs(hctxt, vcpu, kvm, reg, 0, 0)
314-
315198
static inline bool cpu_has_amu(void)
316199
{
317200
u64 pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
@@ -320,33 +203,36 @@ static inline bool cpu_has_amu(void)
320203
ID_AA64PFR0_EL1_AMU_SHIFT);
321204
}
322205

206+
#define __activate_fgt(hctxt, vcpu, reg) \
207+
do { \
208+
ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg); \
209+
write_sysreg_s(*vcpu_fgt(vcpu, reg), SYS_ ## reg); \
210+
} while (0)
211+
323212
static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
324213
{
325214
struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
326-
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
327215

328216
if (!cpus_have_final_cap(ARM64_HAS_FGT))
329217
return;
330218

331-
update_fgt_traps(hctxt, vcpu, kvm, HFGRTR_EL2);
332-
update_fgt_traps_cs(hctxt, vcpu, kvm, HFGWTR_EL2, 0,
333-
cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38) ?
334-
HFGWTR_EL2_TCR_EL1_MASK : 0);
335-
update_fgt_traps(hctxt, vcpu, kvm, HFGITR_EL2);
336-
update_fgt_traps(hctxt, vcpu, kvm, HDFGRTR_EL2);
337-
update_fgt_traps(hctxt, vcpu, kvm, HDFGWTR_EL2);
219+
__activate_fgt(hctxt, vcpu, HFGRTR_EL2);
220+
__activate_fgt(hctxt, vcpu, HFGWTR_EL2);
221+
__activate_fgt(hctxt, vcpu, HFGITR_EL2);
222+
__activate_fgt(hctxt, vcpu, HDFGRTR_EL2);
223+
__activate_fgt(hctxt, vcpu, HDFGWTR_EL2);
338224

339225
if (cpu_has_amu())
340-
update_fgt_traps(hctxt, vcpu, kvm, HAFGRTR_EL2);
226+
__activate_fgt(hctxt, vcpu, HAFGRTR_EL2);
341227

342228
if (!cpus_have_final_cap(ARM64_HAS_FGT2))
343229
return;
344230

345-
update_fgt_traps(hctxt, vcpu, kvm, HFGRTR2_EL2);
346-
update_fgt_traps(hctxt, vcpu, kvm, HFGWTR2_EL2);
347-
update_fgt_traps(hctxt, vcpu, kvm, HFGITR2_EL2);
348-
update_fgt_traps(hctxt, vcpu, kvm, HDFGRTR2_EL2);
349-
update_fgt_traps(hctxt, vcpu, kvm, HDFGWTR2_EL2);
231+
__activate_fgt(hctxt, vcpu, HFGRTR2_EL2);
232+
__activate_fgt(hctxt, vcpu, HFGWTR2_EL2);
233+
__activate_fgt(hctxt, vcpu, HFGITR2_EL2);
234+
__activate_fgt(hctxt, vcpu, HDFGRTR2_EL2);
235+
__activate_fgt(hctxt, vcpu, HDFGWTR2_EL2);
350236
}
351237

352238
#define __deactivate_fgt(htcxt, vcpu, reg) \

arch/arm64/kvm/hyp/nvhe/pkvm.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -172,6 +172,7 @@ static int pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu)
172172

173173
/* Trust the host for non-protected vcpu features. */
174174
vcpu->arch.hcrx_el2 = host_vcpu->arch.hcrx_el2;
175+
memcpy(vcpu->arch.fgt, host_vcpu->arch.fgt, sizeof(vcpu->arch.fgt));
175176
return 0;
176177
}
177178

0 commit comments

Comments
 (0)