@@ -694,45 +694,23 @@ void kvm_host_pmu_init(struct arm_pmu *pmu)
 
 static struct arm_pmu *kvm_pmu_probe_armpmu(void)
 {
-	struct perf_event_attr attr = { };
-	struct perf_event *event;
-	struct arm_pmu *pmu = NULL;
-
-	/*
-	 * Create a dummy event that only counts user cycles. As we'll never
-	 * leave this function with the event being live, it will never
-	 * count anything. But it allows us to probe some of the PMU
-	 * details. Yes, this is terrible.
-	 */
-	attr.type = PERF_TYPE_RAW;
-	attr.size = sizeof(attr);
-	attr.pinned = 1;
-	attr.disabled = 0;
-	attr.exclude_user = 0;
-	attr.exclude_kernel = 1;
-	attr.exclude_hv = 1;
-	attr.exclude_host = 1;
-	attr.config = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
-	attr.sample_period = GENMASK(63, 0);
+	struct arm_pmu *tmp, *pmu = NULL;
+	struct arm_pmu_entry *entry;
+	int cpu;
 
-	event = perf_event_create_kernel_counter(&attr, -1, current,
-						 kvm_pmu_perf_overflow, &attr);
+	mutex_lock(&arm_pmus_lock);
 
-	if (IS_ERR(event)) {
-		pr_err_once("kvm: pmu event creation failed %ld\n",
-			    PTR_ERR(event));
-		return NULL;
-	}
+	cpu = smp_processor_id();
+	list_for_each_entry(entry, &arm_pmus, entry) {
+		tmp = entry->arm_pmu;
 
-	if (event->pmu) {
-		pmu = to_arm_pmu(event->pmu);
-		if (pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_NI ||
-		    pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
-			pmu = NULL;
+		if (cpumask_test_cpu(cpu, &tmp->supported_cpus)) {
+			pmu = tmp;
+			break;
+		}
 	}
 
-	perf_event_disable(event);
-	perf_event_release_kernel(event);
+	mutex_unlock(&arm_pmus_lock);
 
 	return pmu;
 }
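
For context, the list the new probe walks is populated by kvm_host_pmu_init() (the function named in the hunk header above) as each host PMU driver registers with perf. The sketch below is illustrative only and not part of this commit: the allocation, error handling, and the NI/IMP_DEF filtering are assumptions about how an arm_pmu_entry might be added to arm_pmus under arm_pmus_lock.

```c
/*
 * Illustrative sketch (not part of this diff): one plausible way
 * kvm_host_pmu_init() could populate the arm_pmus list that the new
 * kvm_pmu_probe_armpmu() iterates.
 */
void kvm_host_pmu_init(struct arm_pmu *pmu)
{
	struct arm_pmu_entry *entry;

	/* Skip PMUs the emulation cannot use, as the old probe did. */
	if (pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_NI ||
	    pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
		return;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return;

	entry->arm_pmu = pmu;

	/* Same lock the new kvm_pmu_probe_armpmu() takes while iterating. */
	mutex_lock(&arm_pmus_lock);
	list_add_tail(&entry->entry, &arm_pmus);
	mutex_unlock(&arm_pmus_lock);
}
```

With registration handled that way, the probe above simply picks whichever registered PMU claims the CPU it happens to run on, instead of creating and tearing down a throwaway perf event just to discover the PMU.
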
@@ -912,7 +890,17 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 		return -EBUSY;
 
 	if (!kvm->arch.arm_pmu) {
-		/* No PMU set, get the default one */
+		/*
+		 * No PMU set, get the default one.
+		 *
+		 * The observant among you will notice that the supported_cpus
+		 * mask does not get updated for the default PMU even though it
+		 * is quite possible the selected instance supports only a
+		 * subset of cores in the system. This is intentional, and
+		 * upholds the preexisting behavior on heterogeneous systems
+		 * where vCPUs can be scheduled on any core but the guest
+		 * counters could stop working.
+		 */
 		kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
 		if (!kvm->arch.arm_pmu)
 			return -ENODEV;