@@ -274,12 +274,23 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
 	irq_work_sync(&vcpu->arch.pmu.overflow_work);
 }
 
-bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
+static u64 kvm_pmu_hyp_counter_mask(struct kvm_vcpu *vcpu)
 {
-	unsigned int hpmn;
+	unsigned int hpmn, n;
 
-	if (!vcpu_has_nv(vcpu) || idx == ARMV8_PMU_CYCLE_IDX)
-		return false;
+	if (!vcpu_has_nv(vcpu))
+		return 0;
+
+	hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
+	n = vcpu->kvm->arch.pmcr_n;
+
+	/*
+	 * Programming HPMN to a value greater than PMCR_EL0.N is
+	 * CONSTRAINED UNPREDICTABLE. Make the implementation choice that an
+	 * UNKNOWN number of counters (in our case, zero) are reserved for EL2.
+	 */
+	if (hpmn >= n)
+		return 0;
 
 	/*
 	 * Programming HPMN=0 is CONSTRAINED UNPREDICTABLE if FEAT_HPMN0 isn't
@@ -288,20 +299,22 @@ bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
 	 * implementation choice that all counters are included in the second
 	 * range reserved for EL2/EL3.
 	 */
-	hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
-	return idx >= hpmn;
+	return GENMASK(n - 1, hpmn);
+}
+
+bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
+{
+	return kvm_pmu_hyp_counter_mask(vcpu) & BIT(idx);
 }
 
 u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
 {
 	u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
-	u64 hpmn;
 
 	if (!vcpu_has_nv(vcpu) || vcpu_is_el2(vcpu))
 		return mask;
 
-	hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
-	return mask & ~GENMASK(vcpu->kvm->arch.pmcr_n - 1, hpmn);
+	return mask & ~kvm_pmu_hyp_counter_mask(vcpu);
 }
 
 u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
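For reference, the mask returned above selects the counter indices reserved for EL2: with MDCR_EL2.HPMN partitioning the PMCR_EL0.N event counters, counters [HPMN .. N-1] fall in the EL2 range and everything below HPMN stays with the guest. A minimal userspace sketch of that arithmetic (not part of the patch; GENMASK_ULL() is redefined locally so it compiles outside the kernel, and the sample values n = 8, hpmn = 6 are invented):

#include <stdio.h>
#include <stdint.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

static uint64_t hyp_counter_mask(unsigned int hpmn, unsigned int n)
{
	/* HPMN >= N is CONSTRAINED UNPREDICTABLE: reserve no counters for EL2. */
	if (hpmn >= n)
		return 0;

	/* Counters [HPMN .. N-1] belong to the EL2 range. */
	return GENMASK_ULL(n - 1, hpmn);
}

int main(void)
{
	/* 8 event counters, HPMN = 6: counters 6 and 7 are reserved for EL2. */
	printf("mask = 0x%llx\n",
	       (unsigned long long)hyp_counter_mask(6, 8));	/* 0xc0 */
	return 0;
}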
@@ -375,14 +388,30 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 	}
 }
 
-static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
+/*
+ * Returns the PMU overflow state, which is true if there exists an event
+ * counter where the values of the global enable control, PMOVSSET_EL0[n], and
+ * PMINTENSET_EL1[n] are all 1.
+ */
+static bool kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
 {
-	u64 reg = 0;
+	u64 reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
 
-	if ((kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E)) {
-		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
-		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
-	}
+	reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
+
+	/*
+	 * PMCR_EL0.E is the global enable control for event counters available
+	 * to EL0 and EL1.
+	 */
+	if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E))
+		reg &= kvm_pmu_hyp_counter_mask(vcpu);
+
+	/*
+	 * Otherwise, MDCR_EL2.HPME is the global enable control for event
+	 * counters reserved for EL2.
+	 */
+	if (!(vcpu_read_sys_reg(vcpu, MDCR_EL2) & MDCR_EL2_HPME))
+		reg &= ~kvm_pmu_hyp_counter_mask(vcpu);
 
 	return reg;
 }
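The reworked overflow check gates the pending-overflow bits with two global enables: PMCR_EL0.E qualifies the guest-range counters below HPMN, while MDCR_EL2.HPME qualifies the EL2-range counters at and above it. A standalone sketch of that gating (illustrative only; the helper name and parameters are invented, and the EL2-range mask is passed in rather than derived from MDCR_EL2):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * ovs      - pending overflow bits (as in PMOVSSET_EL0)
 * inten    - interrupt enable bits (as in PMINTENSET_EL1)
 * hyp_mask - counters reserved for EL2
 */
static bool overflow_pending(uint64_t ovs, uint64_t inten,
			     uint64_t hyp_mask, bool pmcr_e, bool hpme)
{
	uint64_t reg = ovs & inten;

	/* PMCR_EL0.E clear: ignore overflows outside the EL2 range. */
	if (!pmcr_e)
		reg &= hyp_mask;

	/* MDCR_EL2.HPME clear: ignore overflows inside the EL2 range. */
	if (!hpme)
		reg &= ~hyp_mask;

	return reg;
}

int main(void)
{
	/* Counter 2 overflowed with its interrupt enabled; counters 6-7 belong to EL2. */
	printf("%d\n", overflow_pending(1ULL << 2, 1ULL << 2, 0xc0, true, false));	/* 1 */
	printf("%d\n", overflow_pending(1ULL << 2, 1ULL << 2, 0xc0, false, true));	/* 0 */
	return 0;
}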
@@ -395,7 +424,7 @@ static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
 	if (!kvm_vcpu_has_pmu(vcpu))
 		return;
 
-	overflow = !!kvm_pmu_overflow_status(vcpu);
+	overflow = kvm_pmu_overflow_status(vcpu);
 	if (pmu->irq_level == overflow)
 		return;
 