@@ -61,6 +61,8 @@ static struct cppc_workaround_oem_info wa_info[] = {
 	}
 };
 
+static struct cpufreq_driver cppc_cpufreq_driver;
+
 #ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
 
 /* Frequency invariance support */
@@ -75,7 +77,6 @@ struct cppc_freq_invariance {
 static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
 static struct kthread_worker *kworker_fie;
 
-static struct cpufreq_driver cppc_cpufreq_driver;
 static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu);
 static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
 				 struct cppc_perf_fb_ctrs *fb_ctrs_t0,
@@ -440,15 +441,199 @@ static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
 	}
 	return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
 }
-
 #else
-
 static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
 {
 	return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
 }
 #endif
 
+#if defined(CONFIG_ARM64) && defined(CONFIG_ENERGY_MODEL)
+
+static DEFINE_PER_CPU(unsigned int, efficiency_class);
+static void cppc_cpufreq_register_em(struct cpufreq_policy *policy);
+
+/* Create an artificial performance state every CPPC_EM_CAP_STEP capacity unit. */
+#define CPPC_EM_CAP_STEP	(20)
+/* Increase the cost value by CPPC_EM_COST_STEP every performance state. */
+#define CPPC_EM_COST_STEP	(1)
+/* Add a cost gap corresponding to the energy of 4 CPUs. */
+#define CPPC_EM_COST_GAP	(4 * SCHED_CAPACITY_SCALE * CPPC_EM_COST_STEP \
+				/ CPPC_EM_CAP_STEP)
+
+static unsigned int get_perf_level_count(struct cpufreq_policy *policy)
+{
+	struct cppc_perf_caps *perf_caps;
+	unsigned int min_cap, max_cap;
+	struct cppc_cpudata *cpu_data;
+	int cpu = policy->cpu;
+
+	cpu_data = policy->driver_data;
+	perf_caps = &cpu_data->perf_caps;
+	max_cap = arch_scale_cpu_capacity(cpu);
+	min_cap = div_u64(max_cap * perf_caps->lowest_perf, perf_caps->highest_perf);
+	if ((min_cap == 0) || (max_cap < min_cap))
+		return 0;
+	return 1 + max_cap / CPPC_EM_CAP_STEP - min_cap / CPPC_EM_CAP_STEP;
+}
+
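A worked example with assumed values (not taken from the patch): if arch_scale_cpu_capacity() returns 1024 and lowest_perf/highest_perf is 200/1000, then min_cap = 1024 * 200 / 1000 = 204, and the function reports 1 + 1024/20 - 204/20 = 1 + 51 - 10 = 42 artificial performance states, one per CPPC_EM_CAP_STEP units of capacity between min_cap and max_cap.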
+/*
+ * The cost is defined as:
+ *   cost = power * max_frequency / frequency
+ */
+static inline unsigned long compute_cost(int cpu, int step)
+{
+	return CPPC_EM_COST_GAP * per_cpu(efficiency_class, cpu) +
+			step * CPPC_EM_COST_STEP;
+}
+
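With SCHED_CAPACITY_SCALE at its usual value of 1024, CPPC_EM_COST_GAP works out to 4 * 1024 * 1 / 20 = 204. A class-0 CPU at its top step (51, for a capacity of 1024) then costs 51, while a class-1 CPU costs at least 204, so every state of a lower (more efficient) class stays cheaper than any state of a higher one.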
+static int cppc_get_cpu_power(struct device *cpu_dev,
+			      unsigned long *power, unsigned long *KHz)
+{
+	unsigned long perf_step, perf_prev, perf, perf_check;
+	unsigned int min_step, max_step, step, step_check;
+	unsigned long prev_freq = *KHz;
+	unsigned int min_cap, max_cap;
+	struct cpufreq_policy *policy;
+
+	struct cppc_perf_caps *perf_caps;
+	struct cppc_cpudata *cpu_data;
+
+	policy = cpufreq_cpu_get_raw(cpu_dev->id);
+	cpu_data = policy->driver_data;
+	perf_caps = &cpu_data->perf_caps;
+	max_cap = arch_scale_cpu_capacity(cpu_dev->id);
+	min_cap = div_u64(max_cap * perf_caps->lowest_perf,
+			  perf_caps->highest_perf);
+
+	perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
+	min_step = min_cap / CPPC_EM_CAP_STEP;
+	max_step = max_cap / CPPC_EM_CAP_STEP;
+
+	perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
+	step = perf_prev / perf_step;
+
+	if (step > max_step)
+		return -EINVAL;
+
+	if (min_step == max_step) {
+		step = max_step;
+		perf = perf_caps->highest_perf;
+	} else if (step < min_step) {
+		step = min_step;
+		perf = perf_caps->lowest_perf;
+	} else {
+		step++;
+		if (step == max_step)
+			perf = perf_caps->highest_perf;
+		else
+			perf = step * perf_step;
+	}
+
+	*KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
+	perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
+	step_check = perf_check / perf_step;
+
+	/*
+	 * To avoid bad integer approximation, check that the new frequency
+	 * value increased and that it converts back to the desired step value.
+	 */
+	while ((*KHz == prev_freq) || (step_check != step)) {
+		perf++;
+		*KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
+		perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
+		step_check = perf_check / perf_step;
+	}
+
+	/*
+	 * With an artificial EM, only the cost value is used. Still, the
+	 * power is populated so that 0 < power < EM_MAX_POWER, which gives
+	 * the artificial performance states more meaning.
+	 */
+	*power = compute_cost(cpu_dev->id, step);
+
+	return 0;
+}
+
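The while loop above guards against integer truncation in the kHz<->perf round trip. Below is a minimal standalone sketch of the same correction; the 999999/300 conversion ratio and the PERF_STEP value are invented for illustration, standing in for the driver's real cppc_cpufreq_perf_to_khz()/cppc_cpufreq_khz_to_perf() helpers and computed perf_step:

#include <stdio.h>

#define PERF_STEP 19	/* stand-in for the driver's computed perf_step */

/* Toy integer conversions chosen so that truncation loses precision. */
static unsigned long perf_to_khz(unsigned long perf)
{
	return perf * 999999 / 300;
}

static unsigned long khz_to_perf(unsigned long khz)
{
	return khz * 300 / 999999;
}

int main(void)
{
	unsigned int step = 12;			/* desired EM step */
	unsigned long perf = step * PERF_STEP;	/* 228 */
	unsigned long khz = perf_to_khz(perf);

	/* The naive round trip lands in the wrong bucket: 227 / 19 = 11. */
	printf("step_check = %lu\n", khz_to_perf(khz) / PERF_STEP);

	/* Same correction as the step_check part of cppc_get_cpu_power(). */
	while (khz_to_perf(khz) / PERF_STEP != step)
		khz = perf_to_khz(++perf);

	/* perf = 229, khz = 763332: converts back into step 12. */
	printf("perf = %lu, khz = %lu\n", perf, khz);
	return 0;
}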
+static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz,
+			     unsigned long *cost)
+{
+	unsigned long perf_step, perf_prev;
+	struct cppc_perf_caps *perf_caps;
+	struct cpufreq_policy *policy;
+	struct cppc_cpudata *cpu_data;
+	unsigned int max_cap;
+	int step;
+
+	policy = cpufreq_cpu_get_raw(cpu_dev->id);
+	cpu_data = policy->driver_data;
+	perf_caps = &cpu_data->perf_caps;
+	max_cap = arch_scale_cpu_capacity(cpu_dev->id);
+
+	perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, KHz);
+	perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
+	step = perf_prev / perf_step;
+
+	*cost = compute_cost(cpu_dev->id, step);
+
+	return 0;
+}
+
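By default the EM core derives each state's cost from the power values it samples; the get_cost() callback supplied through EM_ADV_DATA_CB below lets this driver return compute_cost() directly instead, so the inter-class cost gap is carried into the EM table as-is.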
+static int populate_efficiency_class(void)
+{
+	struct acpi_madt_generic_interrupt *gicc;
+	DECLARE_BITMAP(used_classes, 256) = {};
+	int class, cpu, index;
+
+	for_each_possible_cpu(cpu) {
+		gicc = acpi_cpu_get_madt_gicc(cpu);
+		class = gicc->efficiency_class;
+		bitmap_set(used_classes, class, 1);
+	}
+
+	if (bitmap_weight(used_classes, 256) <= 1) {
+		pr_debug("Efficiency classes are all equal (=%d). "
+			"No EM registered", class);
+		return -EINVAL;
+	}
+
+	/*
+	 * Squeeze efficiency class values on [0:#efficiency_class-1].
+	 * Values are per spec in [0:255].
+	 */
+	index = 0;
+	for_each_set_bit(class, used_classes, 256) {
+		for_each_possible_cpu(cpu) {
+			gicc = acpi_cpu_get_madt_gicc(cpu);
+			if (gicc->efficiency_class == class)
+				per_cpu(efficiency_class, cpu) = index;
+		}
+		index++;
+	}
+	cppc_cpufreq_driver.register_em = cppc_cpufreq_register_em;
+
+	return 0;
+}
+
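Concretely: if the MADT reports efficiency classes {0, 2, 2, 5} for four CPUs, used_classes ends up with bits 0, 2 and 5 set, and the second pass assigns per-CPU values 0, 1, 1 and 2, giving compute_cost() dense class indices whatever the raw firmware values were.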
+static void cppc_cpufreq_register_em(struct cpufreq_policy *policy)
+{
+	struct cppc_cpudata *cpu_data;
+	struct em_data_callback em_cb =
+		EM_ADV_DATA_CB(cppc_get_cpu_power, cppc_get_cpu_cost);
+
+	cpu_data = policy->driver_data;
+	em_dev_register_perf_domain(get_cpu_device(policy->cpu),
+			get_perf_level_count(policy), &em_cb,
+			cpu_data->shared_cpu_map, 0);
+}
+
+#else
+static int populate_efficiency_class(void)
+{
+	return 0;
+}
+#endif
 
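The last argument of em_dev_register_perf_domain() is 0 here, telling the EM core that the reported power numbers are abstract units rather than real (milli)watt readings, consistent with the artificial states. The !CONFIG_ENERGY_MODEL stub keeps the populate_efficiency_class() call site in cppc_cpufreq_init() unconditional.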
 static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
 {
@@ -558,6 +743,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	}
 
 	policy->fast_switch_possible = cppc_allow_fast_switch();
+	policy->dvfs_possible_from_any_cpu = true;
 
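dvfs_possible_from_any_cpu allows frequency updates for this policy to be issued from any CPU, not just one in policy->cpus; this suits CPPC, where performance requests typically go through a shared channel (e.g. PCC) rather than CPU-local registers.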
 	/*
 	 * If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost
@@ -766,6 +952,7 @@ static int __init cppc_cpufreq_init(void)
 
 	cppc_check_hisi_workaround();
 	cppc_freq_invariance_init();
+	populate_efficiency_class();
 
 	ret = cpufreq_register_driver(&cppc_cpufreq_driver);
 	if (ret)