@@ -36,6 +36,7 @@
 #include <linux/delay.h>
 #include <linux/uaccess.h>
 #include <linux/static_call.h>
+#include <linux/amd-pstate.h>
 
 #include <acpi/processor.h>
 #include <acpi/cppc_acpi.h>
@@ -46,8 +47,8 @@
 #include <asm/cpu_device_id.h>
 #include "amd-pstate-trace.h"
 
-#define AMD_PSTATE_TRANSITION_LATENCY	0x20000
-#define AMD_PSTATE_TRANSITION_DELAY	500
+#define AMD_PSTATE_TRANSITION_LATENCY	20000
+#define AMD_PSTATE_TRANSITION_DELAY	1000
 
 /*
  * TODO: We need more time to fine tune processors with shared memory solution
@@ -58,71 +59,8 @@
  * we disable it by default to go acpi-cpufreq on these processors and add a
  * module parameter to be able to enable it manually for debugging.
  */
-static bool shared_mem = false;
-module_param(shared_mem, bool, 0444);
-MODULE_PARM_DESC(shared_mem,
-		 "enable amd-pstate on processors with shared memory solution (false = disabled (default), true = enabled)");
-
 static struct cpufreq_driver amd_pstate_driver;
-
-/**
- * struct amd_aperf_mperf
- * @aperf: actual performance frequency clock count
- * @mperf: maximum performance frequency clock count
- * @tsc: time stamp counter
- */
-struct amd_aperf_mperf {
-	u64 aperf;
-	u64 mperf;
-	u64 tsc;
-};
-
-/**
- * struct amd_cpudata - private CPU data for AMD P-State
- * @cpu: CPU number
- * @req: constraint request to apply
- * @cppc_req_cached: cached performance request hints
- * @highest_perf: the maximum performance an individual processor may reach,
- *		  assuming ideal conditions
- * @nominal_perf: the maximum sustained performance level of the processor,
- *		  assuming ideal operating conditions
- * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power
- *			   savings are achieved
- * @lowest_perf: the absolute lowest performance level of the processor
- * @max_freq: the frequency that mapped to highest_perf
- * @min_freq: the frequency that mapped to lowest_perf
- * @nominal_freq: the frequency that mapped to nominal_perf
- * @lowest_nonlinear_freq: the frequency that mapped to lowest_nonlinear_perf
- * @cur: Difference of Aperf/Mperf/tsc count between last and current sample
- * @prev: Last Aperf/Mperf/tsc count value read from register
- * @freq: current cpu frequency value
- * @boost_supported: check whether the Processor or SBIOS supports boost mode
- *
- * The amd_cpudata is key private data for each CPU thread in AMD P-State, and
- * represents all the attributes and goals that AMD P-State requests at runtime.
- */
-struct amd_cpudata {
-	int cpu;
-
-	struct freq_qos_request req[2];
-	u64 cppc_req_cached;
-
-	u32 highest_perf;
-	u32 nominal_perf;
-	u32 lowest_nonlinear_perf;
-	u32 lowest_perf;
-
-	u32 max_freq;
-	u32 min_freq;
-	u32 nominal_freq;
-	u32 lowest_nonlinear_freq;
-
-	struct amd_aperf_mperf cur;
-	struct amd_aperf_mperf prev;
-
-	u64 freq;
-	bool boost_supported;
-};
+static int cppc_load __initdata;
 
 static inline int pstate_enable(bool enable)
 {
@@ -152,6 +90,7 @@ static inline int amd_pstate_enable(bool enable)
 static int pstate_init_perf(struct amd_cpudata *cpudata)
 {
 	u64 cap1;
+	u32 highest_perf;
 
 	int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
 				     &cap1);
@@ -163,7 +102,11 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
 	 *
 	 * CPPC entry doesn't indicate the highest performance in some ASICs.
 	 */
-	WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf());
+	highest_perf = amd_get_highest_perf();
+	if (highest_perf > AMD_CPPC_HIGHEST_PERF(cap1))
+		highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
+
+	WRITE_ONCE(cpudata->highest_perf, highest_perf);
 
 	WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
 	WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
@@ -175,12 +118,17 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
 static int cppc_init_perf(struct amd_cpudata *cpudata)
 {
 	struct cppc_perf_caps cppc_perf;
+	u32 highest_perf;
 
 	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
 	if (ret)
 		return ret;
 
-	WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf());
+	highest_perf = amd_get_highest_perf();
+	if (highest_perf > cppc_perf.highest_perf)
+		highest_perf = cppc_perf.highest_perf;
+
+	WRITE_ONCE(cpudata->highest_perf, highest_perf);
 
 	WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
 	WRITE_ONCE(cpudata->lowest_nonlinear_perf,
@@ -269,6 +217,7 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
 	u64 prev = READ_ONCE(cpudata->cppc_req_cached);
 	u64 value = prev;
 
+	des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
 	value &= ~AMD_CPPC_MIN_PERF(~0L);
 	value |= AMD_CPPC_MIN_PERF(min_perf);
 
@@ -312,7 +261,7 @@ static int amd_pstate_target(struct cpufreq_policy *policy,
 		return -ENODEV;
 
 	cap_perf = READ_ONCE(cpudata->highest_perf);
-	min_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
+	min_perf = READ_ONCE(cpudata->lowest_perf);
 	max_perf = cap_perf;
 
 	freqs.old = policy->cur;
@@ -357,8 +306,6 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
 	if (max_perf < min_perf)
 		max_perf = min_perf;
 
-	des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
-
 	amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true);
 }
 
@@ -473,12 +420,22 @@ static void amd_pstate_boost_init(struct amd_cpudata *cpudata)
 	amd_pstate_driver.boost_enabled = true;
 }
 
+static void amd_perf_ctl_reset(unsigned int cpu)
+{
+	wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
+}
+
 static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
 {
 	int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
 	struct device *dev;
 	struct amd_cpudata *cpudata;
+	/*
+	 * Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
+	 * which is ideal for initialization process.
+	 */
+	amd_perf_ctl_reset(policy->cpu);
 	dev = get_cpu_device(policy->cpu);
 	if (!dev)
 		return -ENODEV;
@@ -555,9 +512,7 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
 
 static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
 {
-	struct amd_cpudata *cpudata;
-
-	cpudata = policy->driver_data;
+	struct amd_cpudata *cpudata = policy->driver_data;
 
 	freq_qos_remove_request(&cpudata->req[1]);
 	freq_qos_remove_request(&cpudata->req[0]);
@@ -566,6 +521,28 @@ static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
 	return 0;
 }
 
+static int amd_pstate_cpu_resume(struct cpufreq_policy *policy)
+{
+	int ret;
+
+	ret = amd_pstate_enable(true);
+	if (ret)
+		pr_err("failed to enable amd-pstate during resume, return %d\n", ret);
+
+	return ret;
+}
+
+static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
+{
+	int ret;
+
+	ret = amd_pstate_enable(false);
+	if (ret)
+		pr_err("failed to disable amd-pstate during suspend, return %d\n", ret);
+
+	return ret;
+}
+
 /* Sysfs attributes */
 
 /*
@@ -577,9 +554,7 @@ static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
 					char *buf)
 {
 	int max_freq;
-	struct amd_cpudata *cpudata;
-
-	cpudata = policy->driver_data;
+	struct amd_cpudata *cpudata = policy->driver_data;
 
 	max_freq = amd_get_max_freq(cpudata);
 	if (max_freq < 0)
@@ -592,9 +567,7 @@ static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *poli
 						     char *buf)
 {
 	int freq;
-	struct amd_cpudata *cpudata;
-
-	cpudata = policy->driver_data;
+	struct amd_cpudata *cpudata = policy->driver_data;
 
 	freq = amd_get_lowest_nonlinear_freq(cpudata);
 	if (freq < 0)
@@ -636,9 +609,11 @@ static struct cpufreq_driver amd_pstate_driver = {
 	.target		= amd_pstate_target,
 	.init		= amd_pstate_cpu_init,
 	.exit		= amd_pstate_cpu_exit,
+	.suspend	= amd_pstate_cpu_suspend,
+	.resume		= amd_pstate_cpu_resume,
 	.set_boost	= amd_pstate_set_boost,
 	.name		= "amd-pstate",
-	.attr           = amd_pstate_attr,
+	.attr		= amd_pstate_attr,
 };
 
 static int __init amd_pstate_init(void)
@@ -647,9 +622,18 @@ static int __init amd_pstate_init(void)
 
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
 		return -ENODEV;
+	/*
+	 * By default the pstate driver is disabled; enable the amd_pstate
+	 * passive mode driver explicitly with amd_pstate=passive on the
+	 * kernel command line.
+	 */
+	if (!cppc_load) {
+		pr_debug("driver load is disabled, boot with amd_pstate=passive to enable this\n");
+		return -ENODEV;
+	}
 
 	if (!acpi_cpc_valid()) {
-		pr_debug("the _CPC object is not present in SBIOS\n");
+		pr_warn_once("the _CPC object is not present in SBIOS or ACPI disabled\n");
 		return -ENODEV;
 	}
 
@@ -661,13 +645,11 @@ static int __init amd_pstate_init(void)
 	if (boot_cpu_has(X86_FEATURE_CPPC)) {
 		pr_debug("AMD CPPC MSR based functionality is supported\n");
 		amd_pstate_driver.adjust_perf = amd_pstate_adjust_perf;
-	} else if (shared_mem) {
+	} else {
+		pr_debug("AMD CPPC shared memory based functionality is supported\n");
 		static_call_update(amd_pstate_enable, cppc_enable);
 		static_call_update(amd_pstate_init_perf, cppc_init_perf);
 		static_call_update(amd_pstate_update_perf, cppc_update_perf);
-	} else {
-		pr_info("This processor supports shared memory solution, you can enable it with amd_pstate.shared_mem=1\n");
-		return -ENODEV;
 	}
 
 	/* enable amd pstate feature */
@@ -684,16 +666,22 @@ static int __init amd_pstate_init(void)
 
 	return ret;
 }
+device_initcall(amd_pstate_init);
 
-static void __exit amd_pstate_exit(void)
+static int __init amd_pstate_param(char *str)
 {
-	cpufreq_unregister_driver(&amd_pstate_driver);
+	if (!str)
+		return -EINVAL;
 
-	amd_pstate_enable(false);
-}
+	if (!strcmp(str, "disable")) {
+		cppc_load = 0;
+		pr_info("driver is explicitly disabled\n");
+	} else if (!strcmp(str, "passive"))
+		cppc_load = 1;
 
-module_init(amd_pstate_init);
-module_exit(amd_pstate_exit);
+	return 0;
+}
+early_param("amd_pstate", amd_pstate_param);
 
 MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>");
 MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");
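
With module_init()/module_exit() gone, the driver now registers through device_initcall() and stays inactive unless selected at boot. A minimal sketch of the resulting command-line usage, limited to the two options handled by amd_pstate_param() above (bootloader-specific syntax is assumed and not part of this diff):

	amd_pstate=passive	# register the passive-mode driver (sets cppc_load = 1)
	amd_pstate=disable	# explicitly keep the driver off (same effect as passing nothing)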