@@ -65,6 +65,136 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
 	}
 }
 
+static inline void __activate_cptr_traps_nvhe(struct kvm_vcpu *vcpu)
+{
+	u64 val = CPTR_NVHE_EL2_RES1 | CPTR_EL2_TAM | CPTR_EL2_TTA;
+
+	/*
+	 * Always trap SME since it's not supported in KVM.
+	 * TSM is RES1 if SME isn't implemented.
+	 */
+	val |= CPTR_EL2_TSM;
+
+	if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
+		val |= CPTR_EL2_TZ;
+
+	if (!guest_owns_fp_regs())
+		val |= CPTR_EL2_TFP;
+
+	write_sysreg(val, cptr_el2);
+}
+
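As a worked example, here is a minimal user-space sketch of the trap word this helper computes for the two FP-ownership states. The bit positions and the RES1 mask below are restated from the Arm ARM and the kernel headers as assumptions of the sketch, not taken from this patch:

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed CPTR_EL2 (E2H == 0) bit positions; the RES1 mask mirrors the
 * kernel's CPTR_NVHE_EL2_RES1 (bits [7:0], 9 and 13). */
#define CPTR_EL2_TZ		(1ULL << 8)
#define CPTR_EL2_TFP		(1ULL << 10)
#define CPTR_EL2_TSM		(1ULL << 12)
#define CPTR_EL2_TTA		(1ULL << 20)
#define CPTR_EL2_TAM		(1ULL << 30)
#define CPTR_NVHE_EL2_RES1	(0xffULL | (1ULL << 9) | (1ULL << 13))

static uint64_t nvhe_cptr(int vcpu_has_sve, int guest_owns_fp_regs)
{
	uint64_t val = CPTR_NVHE_EL2_RES1 | CPTR_EL2_TAM | CPTR_EL2_TTA;

	val |= CPTR_EL2_TSM;		/* SME: always trapped */
	if (!vcpu_has_sve || !guest_owns_fp_regs)
		val |= CPTR_EL2_TZ;	/* SVE: trap */
	if (!guest_owns_fp_regs)
		val |= CPTR_EL2_TFP;	/* FP/SIMD: trap */
	return val;
}

int main(void)
{
	/* SVE guest that owns the FP regs: only SME remains trapped. */
	printf("owns FP: %#llx\n", (unsigned long long)nvhe_cptr(1, 1));
	/* Guest that lost the FP regs: FP, SVE and SME all trap. */
	printf("lost FP: %#llx\n", (unsigned long long)nvhe_cptr(1, 0));
	return 0;
}
```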
+static inline void __activate_cptr_traps_vhe(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
+	 * CPTR_EL2. In general, CPACR_EL1 has the same layout as CPTR_EL2,
+	 * except for some missing controls, such as TAM. CPTR_EL2.TAM,
+	 * however, sits at the same bit position with or without VHE
+	 * (HCR.E2H == 1), so we can use the CPTR_EL2.TAM shift value here
+	 * to trap AMU accesses.
+	 */
+	u64 val = CPTR_EL2_TAM | CPACR_EL1_TTA;
+	u64 cptr;
+
+	if (guest_owns_fp_regs()) {
+		val |= CPACR_EL1_FPEN;
+		if (vcpu_has_sve(vcpu))
+			val |= CPACR_EL1_ZEN;
+	}
+
+	if (!vcpu_has_nv(vcpu))
+		goto write;
+
+	/*
+	 * The architecture is a bit crap (what a surprise): an EL2 guest
+	 * writing to CPTR_EL2 via CPACR_EL1 can't set any of TCPAC or TTA,
+	 * as they are RES0 in the guest's view. To work around it, trap the
+	 * sucker using the very same bit it can't set...
+	 */
+	if (vcpu_el2_e2h_is_set(vcpu) && is_hyp_ctxt(vcpu))
+		val |= CPTR_EL2_TCPAC;
+
+	/*
+	 * Layer the guest hypervisor's trap configuration on top of our own if
+	 * we're in a nested context.
+	 */
+	if (is_hyp_ctxt(vcpu))
+		goto write;
+
+	cptr = vcpu_sanitised_cptr_el2(vcpu);
+
+	/*
+	 * Pay attention, there's some interesting detail here.
+	 *
+	 * The CPTR_EL2.xEN fields are 2 bits wide, although there are only two
+	 * meaningful trap states when HCR_EL2.TGE = 0 (running a nested guest):
+	 *
+	 *  - CPTR_EL2.xEN == x0: traps are enabled
+	 *  - CPTR_EL2.xEN == x1: traps are disabled
+	 *
+	 * In other words, bit[0] determines if guest accesses trap or not. In
+	 * the interest of simplicity, clear the entire field if the guest
+	 * hypervisor has traps enabled to dispel any illusion of something more
+	 * complicated taking place.
+	 */
+	if (!(SYS_FIELD_GET(CPACR_EL1, FPEN, cptr) & BIT(0)))
+		val &= ~CPACR_EL1_FPEN;
+	if (!(SYS_FIELD_GET(CPACR_EL1, ZEN, cptr) & BIT(0)))
+		val &= ~CPACR_EL1_ZEN;
+
+	if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, S2POE, IMP))
+		val |= cptr & CPACR_EL1_E0POE;
+
+	val |= cptr & CPTR_EL2_TCPAC;
+
+write:
+	write_sysreg(val, cpacr_el1);
+}
+
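The bit[0] rule from that last comment can be checked with a small stand-alone sketch. The CPACR_EL1 field positions (FPEN at bits [21:20], ZEN at bits [17:16]) are assumptions here, quoted from the Arm ARM rather than this patch:

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed CPACR_EL1 field positions: FPEN = bits [21:20], ZEN = bits [17:16]. */
#define FPEN_SHIFT	20
#define ZEN_SHIFT	16

/* With HCR_EL2.TGE == 0, only bit[0] of a 2-bit xEN field matters:
 * x0 -> accesses trap, x1 -> accesses don't trap. */
static int xen_traps(uint64_t cpacr, unsigned int shift)
{
	return !((cpacr >> shift) & 1);
}

int main(void)
{
	uint64_t fpen;

	for (fpen = 0; fpen < 4; fpen++)
		printf("FPEN=0b%llu%llu -> traps %s\n",
		       (unsigned long long)(fpen >> 1),
		       (unsigned long long)(fpen & 1),
		       xen_traps(fpen << FPEN_SHIFT, FPEN_SHIFT) ?
		       "enabled" : "disabled");
	return 0;
}
```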
+static inline void __activate_cptr_traps(struct kvm_vcpu *vcpu)
+{
+	if (!guest_owns_fp_regs())
+		__activate_traps_fpsimd32(vcpu);
+
+	if (has_vhe() || has_hvhe())
+		__activate_cptr_traps_vhe(vcpu);
+	else
+		__activate_cptr_traps_nvhe(vcpu);
+}
+
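Note that both branches of the `has_vhe() || has_hvhe()` test run EL2 with HCR_EL2.E2H set, so writes to CPACR_EL1 are redirected to CPTR_EL2 and the CPACR_EL1 bit layout applies; only classic nVHE programs CPTR_EL2 in its native format.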
+static inline void __deactivate_cptr_traps_nvhe(struct kvm_vcpu *vcpu)
+{
+	u64 val = CPTR_NVHE_EL2_RES1;
+
+	if (!cpus_have_final_cap(ARM64_SVE))
+		val |= CPTR_EL2_TZ;
+	if (!cpus_have_final_cap(ARM64_SME))
+		val |= CPTR_EL2_TSM;
+
+	write_sysreg(val, cptr_el2);
+}
+
+static inline void __deactivate_cptr_traps_vhe(struct kvm_vcpu *vcpu)
+{
+	u64 val = CPACR_EL1_FPEN;
+
+	if (cpus_have_final_cap(ARM64_SVE))
+		val |= CPACR_EL1_ZEN;
+	if (cpus_have_final_cap(ARM64_SME))
+		val |= CPACR_EL1_SMEN;
+
+	write_sysreg(val, cpacr_el1);
+}
+
+static inline void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
+{
+	if (has_vhe() || has_hvhe())
+		__deactivate_cptr_traps_vhe(vcpu);
+	else
+		__deactivate_cptr_traps_nvhe(vcpu);
+}
+
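Note the asymmetry with the activate path: deactivation opens every trap the host may need (FP unconditionally, SVE/SME whenever the CPU implements them) regardless of the vCPU's feature set. On the nVHE side, TZ and TSM must still be written as 1 when the corresponding feature is absent, since those bits are RES1 there, as the "TSM is RES1" comment in __activate_cptr_traps_nvhe() already notes.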
 #define reg_to_fgt_masks(reg)					\
 	({							\
 		struct fgt_masks *m;				\
@@ -486,11 +616,6 @@ static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
 	 */
 	if (system_supports_sve()) {
 		__hyp_sve_save_host();
-
-		/* Re-enable SVE traps if not supported for the guest vcpu. */
-		if (!vcpu_has_sve(vcpu))
-			cpacr_clear_set(CPACR_EL1_ZEN, 0);
-
 	} else {
 		__fpsimd_save_state(host_data_ptr(host_ctxt.fp_regs));
 	}
@@ -541,10 +666,7 @@ static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
 	/* Valid trap. Switch the context: */
 
 	/* First disable enough traps to allow us to update the registers */
-	if (sve_guest || (is_protected_kvm_enabled() && system_supports_sve()))
-		cpacr_clear_set(0, CPACR_EL1_FPEN | CPACR_EL1_ZEN);
-	else
-		cpacr_clear_set(0, CPACR_EL1_FPEN);
+	__deactivate_cptr_traps(vcpu);
 	isb();
 
 	/* Write out the host state if it's in the registers */
@@ -566,6 +688,13 @@ static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
 
 	*host_data_ptr(fp_owner) = FP_STATE_GUEST_OWNED;
 
+	/*
+	 * Re-enable traps necessary for the current state of the guest, e.g.
+	 * those enabled by a guest hypervisor. The ERET to the guest will
+	 * provide the necessary context synchronization.
+	 */
+	__activate_cptr_traps(vcpu);
+
 	return true;
 }
 
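Taken together, the handler now brackets the FP/SVE context switch with a full trap flip. Below is a schematic sketch of the resulting control flow, with hypothetical stubs standing in for the hypervisor primitives; only the two __*_cptr_traps() names come from the patch, everything else is illustrative:

```c
#include <stdio.h>

struct kvm_vcpu { int dummy; };

/* Hypothetical stubs: each prints the step it stands in for. */
static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu) { puts("open FP/SVE/SME traps"); }
static void isb(void)                                      { puts("isb: synchronize"); }
static void save_host_state(void)                          { puts("save host FP/SVE state"); }
static void restore_guest_state(void)                      { puts("restore guest FP/SVE state"); }
static void __activate_cptr_traps(struct kvm_vcpu *vcpu)   { puts("re-arm traps for guest state"); }

static int handle_fpsimd_trap(struct kvm_vcpu *vcpu)
{
	__deactivate_cptr_traps(vcpu);	/* disable enough traps to touch the regs */
	isb();				/* ...with explicit synchronization */
	save_host_state();
	restore_guest_state();
	__activate_cptr_traps(vcpu);	/* the ERET to the guest synchronizes here */
	return 1;
}

int main(void)
{
	struct kvm_vcpu vcpu = { 0 };
	return !handle_fpsimd_trap(&vcpu);
}
```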