@@ -36,33 +36,15 @@ static LIST_HEAD(cpu_data_list);
3636
3737static bool boost_supported ;
3838
/*
 * OEM identification of an ACPI PCCT table for which the HiSilicon
 * get-rate workaround applies; matched field-by-field against the
 * live PCCT header in cppc_check_hisi_workaround().
 */
struct cppc_workaround_oem_info {
	char oem_id[ACPI_OEM_ID_SIZE + 1];		/* +1 for NUL; memcmp uses ACPI_OEM_ID_SIZE bytes */
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
	u32 oem_revision;
};
44-
/*
 * Platforms whose PCCT OEM fields mark them as needing the HiSilicon
 * workaround (no usable delivered/reference counters).
 *
 * NOTE(review): the comparisons in cppc_check_hisi_workaround() read
 * ACPI_OEM_ID_SIZE / ACPI_OEM_TABLE_ID_SIZE raw bytes, so these
 * literals must be space-padded exactly to those widths — confirm the
 * trailing-space padding survived copy/paste.
 */
static struct cppc_workaround_oem_info wa_info[] = {
	{
		.oem_id		= "HISI ",
		.oem_table_id	= "HIP07 ",
		.oem_revision	= 0,
	}, {
		.oem_id		= "HISI ",
		.oem_table_id	= "HIP08 ",
		.oem_revision	= 0,
	}
};
56-
5739static struct cpufreq_driver cppc_cpufreq_driver ;
5840
41+ #ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
/*
 * Tri-state switch for the Frequency Invariance Engine, exposed as an
 * int module parameter: FIE_UNSET (-1) means "not forced by the user",
 * FIE_ENABLED (0) and FIE_DISABLED (1) force it on/off respectively.
 */
static enum {
	FIE_UNSET = -1,
	FIE_ENABLED,
	FIE_DISABLED
} fie_disabled = FIE_UNSET;
6447
65- #ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
6648module_param (fie_disabled , int , 0444 );
6749MODULE_PARM_DESC (fie_disabled , "Disable Frequency Invariance Engine (FIE)" );
6850
@@ -78,7 +60,6 @@ struct cppc_freq_invariance {
7860static DEFINE_PER_CPU (struct cppc_freq_invariance , cppc_freq_inv ) ;
7961static struct kthread_worker * kworker_fie ;
8062
81- static unsigned int hisi_cppc_cpufreq_get_rate (unsigned int cpu );
8263static int cppc_perf_from_fbctrs (struct cppc_cpudata * cpu_data ,
8364 struct cppc_perf_fb_ctrs * fb_ctrs_t0 ,
8465 struct cppc_perf_fb_ctrs * fb_ctrs_t1 );
@@ -118,6 +99,9 @@ static void cppc_scale_freq_workfn(struct kthread_work *work)
11899
119100 perf = cppc_perf_from_fbctrs (cpu_data , & cppc_fi -> prev_perf_fb_ctrs ,
120101 & fb_ctrs );
102+ if (!perf )
103+ return ;
104+
121105 cppc_fi -> prev_perf_fb_ctrs = fb_ctrs ;
122106
123107 perf <<= SCHED_CAPACITY_SHIFT ;
@@ -420,6 +404,9 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
420404 struct cppc_cpudata * cpu_data ;
421405
422406 policy = cpufreq_cpu_get_raw (cpu_dev -> id );
407+ if (!policy )
408+ return - EINVAL ;
409+
423410 cpu_data = policy -> driver_data ;
424411 perf_caps = & cpu_data -> perf_caps ;
425412 max_cap = arch_scale_cpu_capacity (cpu_dev -> id );
@@ -487,6 +474,9 @@ static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz,
487474 int step ;
488475
489476 policy = cpufreq_cpu_get_raw (cpu_dev -> id );
477+ if (!policy )
478+ return - EINVAL ;
479+
490480 cpu_data = policy -> driver_data ;
491481 perf_caps = & cpu_data -> perf_caps ;
492482 max_cap = arch_scale_cpu_capacity (cpu_dev -> id );
@@ -724,13 +714,31 @@ static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
724714 delta_delivered = get_delta (fb_ctrs_t1 -> delivered ,
725715 fb_ctrs_t0 -> delivered );
726716
727- /* Check to avoid divide-by zero and invalid delivered_perf */
717+ /*
718+ * Avoid divide-by zero and unchanged feedback counters.
719+ * Leave it for callers to handle.
720+ */
728721 if (!delta_reference || !delta_delivered )
729- return cpu_data -> perf_ctrls . desired_perf ;
722+ return 0 ;
730723
731724 return (reference_perf * delta_delivered ) / delta_reference ;
732725}
733726
/*
 * Take two CPPC feedback-counter snapshots for @cpu, 2usec apart, so
 * the caller can derive delivered performance from the deltas.
 *
 * Returns 0 on success or the error from cppc_get_perf_ctrs(); on
 * failure the second snapshot is not attempted.
 */
static int cppc_get_perf_ctrs_sample(int cpu,
				     struct cppc_perf_fb_ctrs *fb_ctrs_t0,
				     struct cppc_perf_fb_ctrs *fb_ctrs_t1)
{
	int ret = cppc_get_perf_ctrs(cpu, fb_ctrs_t0);

	if (!ret) {
		udelay(2); /* 2usec delay between sampling */
		ret = cppc_get_perf_ctrs(cpu, fb_ctrs_t1);
	}

	return ret;
}
741+
734742static unsigned int cppc_cpufreq_get_rate (unsigned int cpu )
735743{
736744 struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0 }, fb_ctrs_t1 = {0 };
@@ -746,18 +754,32 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
746754
747755 cpufreq_cpu_put (policy );
748756
749- ret = cppc_get_perf_ctrs (cpu , & fb_ctrs_t0 );
750- if (ret )
751- return 0 ;
752-
753- udelay (2 ); /* 2usec delay between sampling */
754-
755- ret = cppc_get_perf_ctrs (cpu , & fb_ctrs_t1 );
756- if (ret )
757- return 0 ;
757+ ret = cppc_get_perf_ctrs_sample (cpu , & fb_ctrs_t0 , & fb_ctrs_t1 );
758+ if (ret ) {
759+ if (ret == - EFAULT )
760+ /* Any of the associated CPPC regs is 0. */
761+ goto out_invalid_counters ;
762+ else
763+ return 0 ;
764+ }
758765
759766 delivered_perf = cppc_perf_from_fbctrs (cpu_data , & fb_ctrs_t0 ,
760767 & fb_ctrs_t1 );
768+ if (!delivered_perf )
769+ goto out_invalid_counters ;
770+
771+ return cppc_perf_to_khz (& cpu_data -> perf_caps , delivered_perf );
772+
773+ out_invalid_counters :
774+ /*
775+ * Feedback counters could be unchanged or 0 when a cpu enters a
776+ * low-power idle state, e.g. clock-gated or power-gated.
777+ * Use desired perf for reflecting frequency. Get the latest register
778+ * value first as some platforms may update the actual delivered perf
779+ * there; if failed, resort to the cached desired perf.
780+ */
781+ if (cppc_get_desired_perf (cpu , & delivered_perf ))
782+ delivered_perf = cpu_data -> perf_ctrls .desired_perf ;
761783
762784 return cppc_perf_to_khz (& cpu_data -> perf_caps , delivered_perf );
763785}
@@ -812,65 +834,13 @@ static struct cpufreq_driver cppc_cpufreq_driver = {
812834 .name = "cppc_cpufreq" ,
813835};
814836
815- /*
816- * HISI platform does not support delivered performance counter and
817- * reference performance counter. It can calculate the performance using the
818- * platform specific mechanism. We reuse the desired performance register to
819- * store the real performance calculated by the platform.
820- */
821- static unsigned int hisi_cppc_cpufreq_get_rate (unsigned int cpu )
822- {
823- struct cpufreq_policy * policy = cpufreq_cpu_get (cpu );
824- struct cppc_cpudata * cpu_data ;
825- u64 desired_perf ;
826- int ret ;
827-
828- if (!policy )
829- return - ENODEV ;
830-
831- cpu_data = policy -> driver_data ;
832-
833- cpufreq_cpu_put (policy );
834-
835- ret = cppc_get_desired_perf (cpu , & desired_perf );
836- if (ret < 0 )
837- return - EIO ;
838-
839- return cppc_perf_to_khz (& cpu_data -> perf_caps , desired_perf );
840- }
841-
/*
 * Detect HiSilicon platforms that need the get-rate workaround by
 * matching the ACPI PCCT table's OEM id/table-id/revision against
 * wa_info[]. On a match, swap the driver's ->get() callback for
 * hisi_cppc_cpufreq_get_rate() and disable the frequency invariance
 * engine (the feedback counters are unusable on those parts).
 *
 * Must run before cppc_cpufreq_driver is registered, since it patches
 * the driver struct in place.
 */
static void cppc_check_hisi_workaround(void)
{
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;	/* NOTE(review): dead initializer — overwritten below */
	int i;

	status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);
	if (ACPI_FAILURE(status) || !tbl)
		return;

	/* Raw byte compares: OEM fields in the header are not NUL-terminated. */
	for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
		if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
		    !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
		    wa_info[i].oem_revision == tbl->oem_revision) {
			/* Overwrite the get() callback */
			cppc_cpufreq_driver.get = hisi_cppc_cpufreq_get_rate;
			fie_disabled = FIE_DISABLED;
			break;
		}
	}

	/* Balance acpi_get_table(): drop the table reference. */
	acpi_put_table(tbl);
}
865-
866837static int __init cppc_cpufreq_init (void )
867838{
868839 int ret ;
869840
870841 if (!acpi_cpc_valid ())
871842 return - ENODEV ;
872843
873- cppc_check_hisi_workaround ();
874844 cppc_freq_invariance_init ();
875845 populate_efficiency_class ();
876846
0 commit comments