@@ -33,6 +33,53 @@
 static uint8_t kvm_pmu_version;
 static bool kvm_has_perf_caps;
 
+#define X86_PMU_FEATURE_NULL						\
+({									\
+	struct kvm_x86_pmu_feature feature = {};			\
+									\
+	feature;							\
+})
+
+static bool pmu_is_null_feature(struct kvm_x86_pmu_feature event)
+{
+	return !(*(u64 *)&event);
+}
+
+struct kvm_intel_pmu_event {
+	struct kvm_x86_pmu_feature gp_event;
+	struct kvm_x86_pmu_feature fixed_event;
+};
+
+/*
+ * Wrap the array to appease the compiler, as the macros used to construct each
+ * kvm_x86_pmu_feature use syntax that's only valid in function scope, and the
+ * compiler often thinks the feature definitions aren't compile-time constants.
+ */
+static struct kvm_intel_pmu_event intel_event_to_feature(uint8_t idx)
+{
+	const struct kvm_intel_pmu_event __intel_event_to_feature[] = {
+		[INTEL_ARCH_CPU_CYCLES_INDEX]		 = { X86_PMU_FEATURE_CPU_CYCLES, X86_PMU_FEATURE_CPU_CYCLES_FIXED },
+		[INTEL_ARCH_INSTRUCTIONS_RETIRED_INDEX]	 = { X86_PMU_FEATURE_INSNS_RETIRED, X86_PMU_FEATURE_INSNS_RETIRED_FIXED },
+		/*
+		 * Note, the fixed counter for reference cycles is NOT the same as the
+		 * general purpose architectural event.  The fixed counter explicitly
+		 * counts at the same frequency as the TSC, whereas the GP event counts
+		 * at a fixed, but uarch specific, frequency.  Bundle them here for
+		 * simplicity.
+		 */
+		[INTEL_ARCH_REFERENCE_CYCLES_INDEX]	 = { X86_PMU_FEATURE_REFERENCE_CYCLES, X86_PMU_FEATURE_REFERENCE_TSC_CYCLES_FIXED },
+		[INTEL_ARCH_LLC_REFERENCES_INDEX]	 = { X86_PMU_FEATURE_LLC_REFERENCES, X86_PMU_FEATURE_NULL },
+		[INTEL_ARCH_LLC_MISSES_INDEX]		 = { X86_PMU_FEATURE_LLC_MISSES, X86_PMU_FEATURE_NULL },
+		[INTEL_ARCH_BRANCHES_RETIRED_INDEX]	 = { X86_PMU_FEATURE_BRANCH_INSNS_RETIRED, X86_PMU_FEATURE_NULL },
+		[INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX] = { X86_PMU_FEATURE_BRANCHES_MISPREDICTED, X86_PMU_FEATURE_NULL },
+		[INTEL_ARCH_TOPDOWN_SLOTS_INDEX]	 = { X86_PMU_FEATURE_TOPDOWN_SLOTS, X86_PMU_FEATURE_TOPDOWN_SLOTS_FIXED },
+	};
+
+	kvm_static_assert(ARRAY_SIZE(__intel_event_to_feature) == NR_INTEL_ARCH_EVENTS);
+
+	return __intel_event_to_feature[idx];
+}
+
 static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
						   void *guest_code,
						   uint8_t pmu_version,
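A note on the "function scope" comment in the hunk above: the X86_PMU_FEATURE_* initializers are built from GCC statement expressions (the `({ ... })` blocks), and GCC rejects statement expressions outside of a function body, which is why the lookup table has to live inside intel_event_to_feature() rather than at file scope. Below is a minimal standalone sketch of that failure mode; the names are illustrative, not taken from the selftest:

#include <stdio.h>

struct feature { int bit; };

/* Same shape as X86_PMU_FEATURE_NULL: a GNU C statement expression. */
#define MAKE_FEATURE(n) ({ struct feature f = { .bit = (n) }; f; })

/*
 * This fails to compile at file scope with "braced-group within expression
 * allowed only inside a function":
 *
 *	static struct feature table[] = { MAKE_FEATURE(0) };
 */

static struct feature get_feature(int n)
{
	/* Inside a function body, the same initializer is accepted. */
	struct feature table[] = { MAKE_FEATURE(0), MAKE_FEATURE(1) };

	return table[n];
}

int main(void)
{
	printf("bit = %d\n", get_feature(1).bit);
	return 0;
}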
@@ -197,41 +244,8 @@ static void __guest_test_arch_event(uint8_t idx, struct kvm_x86_pmu_feature even
 	GUEST_TEST_EVENT(idx, event, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, KVM_FEP);
 }
 
-#define X86_PMU_FEATURE_NULL						\
-({									\
-	struct kvm_x86_pmu_feature feature = {};			\
-									\
-	feature;							\
-})
-
-static bool pmu_is_null_feature(struct kvm_x86_pmu_feature event)
-{
-	return !(*(u64 *)&event);
-}
-
 static void guest_test_arch_event(uint8_t idx)
 {
-	const struct {
-		struct kvm_x86_pmu_feature gp_event;
-		struct kvm_x86_pmu_feature fixed_event;
-	} intel_event_to_feature[] = {
-		[INTEL_ARCH_CPU_CYCLES_INDEX]		 = { X86_PMU_FEATURE_CPU_CYCLES, X86_PMU_FEATURE_CPU_CYCLES_FIXED },
-		[INTEL_ARCH_INSTRUCTIONS_RETIRED_INDEX]	 = { X86_PMU_FEATURE_INSNS_RETIRED, X86_PMU_FEATURE_INSNS_RETIRED_FIXED },
-		/*
-		 * Note, the fixed counter for reference cycles is NOT the same
-		 * as the general purpose architectural event.  The fixed counter
-		 * explicitly counts at the same frequency as the TSC, whereas
-		 * the GP event counts at a fixed, but uarch specific, frequency.
-		 * Bundle them here for simplicity.
-		 */
-		[INTEL_ARCH_REFERENCE_CYCLES_INDEX]	 = { X86_PMU_FEATURE_REFERENCE_CYCLES, X86_PMU_FEATURE_REFERENCE_TSC_CYCLES_FIXED },
-		[INTEL_ARCH_LLC_REFERENCES_INDEX]	 = { X86_PMU_FEATURE_LLC_REFERENCES, X86_PMU_FEATURE_NULL },
-		[INTEL_ARCH_LLC_MISSES_INDEX]		 = { X86_PMU_FEATURE_LLC_MISSES, X86_PMU_FEATURE_NULL },
-		[INTEL_ARCH_BRANCHES_RETIRED_INDEX]	 = { X86_PMU_FEATURE_BRANCH_INSNS_RETIRED, X86_PMU_FEATURE_NULL },
-		[INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX] = { X86_PMU_FEATURE_BRANCHES_MISPREDICTED, X86_PMU_FEATURE_NULL },
-		[INTEL_ARCH_TOPDOWN_SLOTS_INDEX]	 = { X86_PMU_FEATURE_TOPDOWN_SLOTS, X86_PMU_FEATURE_TOPDOWN_SLOTS_FIXED },
-	};
-
 	uint32_t nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
 	uint32_t pmu_version = guest_get_pmu_version();
 	/* PERF_GLOBAL_CTRL exists only for Architectural PMU Version 2+. */
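One detail worth calling out in pmu_is_null_feature() now that it has moved next to the accessor: the `!(*(u64 *)&event)` check type-puns the struct through a u64, which quietly assumes the struct is exactly u64-sized. A hardening sketch, my addition rather than anything in this patch, could pin that assumption with the same kvm_static_assert helper the patch already uses:

/* Sketch only: make the type pun's size dependency explicit. */
static bool pmu_is_null_feature(struct kvm_x86_pmu_feature event)
{
	kvm_static_assert(sizeof(event) == sizeof(u64));

	return !(*(u64 *)&event);
}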
@@ -249,7 +263,7 @@ static void guest_test_arch_event(uint8_t idx)
 	else
 		base_pmc_msr = MSR_IA32_PERFCTR0;
 
-	gp_event = intel_event_to_feature[idx].gp_event;
+	gp_event = intel_event_to_feature(idx).gp_event;
 	GUEST_ASSERT_EQ(idx, gp_event.f.bit);
 
 	GUEST_ASSERT(nr_gp_counters);
@@ -270,7 +284,7 @@ static void guest_test_arch_event(uint8_t idx)
 	if (!guest_has_perf_global_ctrl)
 		return;
 
-	fixed_event = intel_event_to_feature[idx].fixed_event;
+	fixed_event = intel_event_to_feature(idx).fixed_event;
 	if (pmu_is_null_feature(fixed_event) || !this_pmu_has(fixed_event))
 		return;
 
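Finally, note that both converted call sites pass idx straight into intel_event_to_feature(), which indexes the local array without a range check; the kvm_static_assert in the accessor only pins the array's size. A hypothetical bounds-checked wrapper (illustrative only, not part of this patch) could assert the bound with the GUEST_ASSERT machinery the test already uses in guest code:

/* Hypothetical bounds-checked lookup; not in the patch. */
static struct kvm_intel_pmu_event intel_event_to_feature_checked(uint8_t idx)
{
	GUEST_ASSERT(idx < NR_INTEL_ARCH_EVENTS);

	return intel_event_to_feature(idx);
}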