 #include <linux/cpufreq.h>
 #include <linux/init.h>
 #include <linux/percpu.h>
+#include <linux/sched/isolation.h>
 
 #include <asm/cpu.h>
 #include <asm/cputype.h>
@@ -88,18 +89,28 @@ int __init parse_acpi_topology(void)
  * initialized.
  */
 static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale) = 1UL << (2 * SCHED_CAPACITY_SHIFT);
-static DEFINE_PER_CPU(u64, arch_const_cycles_prev);
-static DEFINE_PER_CPU(u64, arch_core_cycles_prev);
 static cpumask_var_t amu_fie_cpus;
 
+struct amu_cntr_sample {
+	u64		arch_const_cycles_prev;
+	u64		arch_core_cycles_prev;
+	unsigned long	last_scale_update;
+};
+
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct amu_cntr_sample, cpu_amu_samples);
+
 void update_freq_counters_refs(void)
 {
-	this_cpu_write(arch_core_cycles_prev, read_corecnt());
-	this_cpu_write(arch_const_cycles_prev, read_constcnt());
+	struct amu_cntr_sample *amu_sample = this_cpu_ptr(&cpu_amu_samples);
+
+	amu_sample->arch_core_cycles_prev = read_corecnt();
+	amu_sample->arch_const_cycles_prev = read_constcnt();
 }
 
 static inline bool freq_counters_valid(int cpu)
 {
+	struct amu_cntr_sample *amu_sample = per_cpu_ptr(&cpu_amu_samples, cpu);
+
 	if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask))
 		return false;
 
@@ -108,8 +119,8 @@ static inline bool freq_counters_valid(int cpu)
 		return false;
 	}
 
-	if (unlikely(!per_cpu(arch_const_cycles_prev, cpu) ||
-		     !per_cpu(arch_core_cycles_prev, cpu))) {
+	if (unlikely(!amu_sample->arch_const_cycles_prev ||
+		     !amu_sample->arch_core_cycles_prev)) {
 		pr_debug("CPU%d: cycle counters are not enabled.\n", cpu);
 		return false;
 	}
@@ -152,17 +163,22 @@ void freq_inv_set_max_ratio(int cpu, u64 max_rate)
 
 static void amu_scale_freq_tick(void)
 {
+	struct amu_cntr_sample *amu_sample = this_cpu_ptr(&cpu_amu_samples);
 	u64 prev_core_cnt, prev_const_cnt;
 	u64 core_cnt, const_cnt, scale;
 
-	prev_const_cnt = this_cpu_read(arch_const_cycles_prev);
-	prev_core_cnt = this_cpu_read(arch_core_cycles_prev);
+	prev_const_cnt = amu_sample->arch_const_cycles_prev;
+	prev_core_cnt = amu_sample->arch_core_cycles_prev;
 
 	update_freq_counters_refs();
 
-	const_cnt = this_cpu_read(arch_const_cycles_prev);
-	core_cnt = this_cpu_read(arch_core_cycles_prev);
+	const_cnt = amu_sample->arch_const_cycles_prev;
+	core_cnt = amu_sample->arch_core_cycles_prev;
 
+	/*
+	 * This should not happen unless the AMUs have been reset and the
+	 * counter values have not been restored - unlikely
+	 */
 	if (unlikely(core_cnt <= prev_core_cnt ||
 		     const_cnt <= prev_const_cnt))
 		return;
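The unchanged lines elided between this hunk and the next turn the two counter deltas into the per-CPU frequency scale that the following hunk clamps and publishes. As a rough, standalone sketch of that fixed-point arithmetic (illustrative values only, not the kernel code itself; max_freq_ratio here stands in for the pre-scaled arch_max_freq_scale ratio defined earlier in the patch):

#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)

int main(void)
{
	/* Illustrative counter deltas over one tick, not real hardware values. */
	uint64_t delta_core  = 1500000;	/* core cycles: count at the current CPU frequency */
	uint64_t delta_const = 2000000;	/* const cycles: count at the fixed reference rate */

	/*
	 * Ratio of reference rate to maximum CPU frequency, pre-scaled by
	 * 2 * SCHED_CAPACITY_SHIFT; 1UL << (2 * SCHED_CAPACITY_SHIFT) is the
	 * "assume max frequency" default used to initialise arch_max_freq_scale.
	 */
	uint64_t max_freq_ratio = 1UL << (2 * SCHED_CAPACITY_SHIFT);

	/* scale ~= (delta_core / delta_const) * ratio, kept in fixed point. */
	uint64_t scale = (delta_core * max_freq_ratio) >> SCHED_CAPACITY_SHIFT;

	scale /= delta_const;
	if (scale > SCHED_CAPACITY_SCALE)	/* same clamp as in the next hunk */
		scale = SCHED_CAPACITY_SCALE;

	/* 1500000 / 2000000 * 1024 = 768, i.e. roughly 75% of maximum frequency. */
	printf("arch_freq_scale ~= %llu\n", (unsigned long long)scale);
	return 0;
}

With these inputs the core counter advances at 75% of the pace implied by the maximum ratio, so a tick would publish a scale of 768 out of SCHED_CAPACITY_SCALE (1024).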
@@ -182,13 +198,88 @@ static void amu_scale_freq_tick(void)
 
 	scale = min_t(unsigned long, scale, SCHED_CAPACITY_SCALE);
 	this_cpu_write(arch_freq_scale, (unsigned long)scale);
+
+	amu_sample->last_scale_update = jiffies;
 }
 
 static struct scale_freq_data amu_sfd = {
 	.source = SCALE_FREQ_SOURCE_ARCH,
 	.set_freq_scale = amu_scale_freq_tick,
 };
 
+static __always_inline bool amu_fie_cpu_supported(unsigned int cpu)
+{
+	return cpumask_available(amu_fie_cpus) &&
+	       cpumask_test_cpu(cpu, amu_fie_cpus);
+}
+
+#define AMU_SAMPLE_EXP_MS	20
+
+int arch_freq_get_on_cpu(int cpu)
+{
+	struct amu_cntr_sample *amu_sample;
+	unsigned int start_cpu = cpu;
+	unsigned long last_update;
+	unsigned int freq = 0;
+	u64 scale;
+
+	if (!amu_fie_cpu_supported(cpu) || !arch_scale_freq_ref(cpu))
+		return -EOPNOTSUPP;
+
+	while (1) {
+
+		amu_sample = per_cpu_ptr(&cpu_amu_samples, cpu);
+
+		last_update = amu_sample->last_scale_update;
+
+		/*
+		 * For CPUs that are in full dynticks mode, or that have not seen
+		 * a tick for a while, try an alternative source for the counters
+		 * (and thus the freq scale), if available, for the given policy:
+		 * this boils down to identifying an active CPU within the same
+		 * freq domain, if any.
+		 */
+		if (!housekeeping_cpu(cpu, HK_TYPE_TICK) ||
+		    time_is_before_jiffies(last_update + msecs_to_jiffies(AMU_SAMPLE_EXP_MS))) {
+			struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+			int ref_cpu = cpu;
+
+			if (!policy)
+				return -EINVAL;
+
+			if (!cpumask_intersects(policy->related_cpus,
+						housekeeping_cpumask(HK_TYPE_TICK))) {
+				cpufreq_cpu_put(policy);
+				return -EOPNOTSUPP;
+			}
+
+			do {
+				ref_cpu = cpumask_next_wrap(ref_cpu, policy->cpus,
+							    start_cpu, true);
+
+			} while (ref_cpu < nr_cpu_ids && idle_cpu(ref_cpu));
+
+			cpufreq_cpu_put(policy);
+
+			if (ref_cpu >= nr_cpu_ids)
+				/* No alternative to pull info from */
+				return -EAGAIN;
+
+			cpu = ref_cpu;
+		} else {
+			break;
+		}
+	}
+	/*
+	 * Reversed computation to the one used to determine
+	 * the arch_freq_scale value
+	 * (see amu_scale_freq_tick for details)
+	 */
+	scale = arch_scale_freq_capacity(cpu);
+	freq = scale * arch_scale_freq_ref(cpu);
+	freq >>= SCHED_CAPACITY_SHIFT;
+	return freq;
+}
+
 static void amu_fie_setup(const struct cpumask *cpus)
 {
 	int cpu;
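The "reversed computation" at the end of arch_freq_get_on_cpu() simply undoes the scaling done by amu_scale_freq_tick(): the capacity-style scale is multiplied back by the CPU's reference frequency. A minimal standalone sketch of that step, using the scale from the previous example and a hypothetical 2.8 GHz reference frequency (cpufreq works in kHz):

#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10

int main(void)
{
	/*
	 * Illustrative inputs: the per-CPU scale as published by the tick
	 * handler, and a hypothetical reference (maximum) frequency in kHz
	 * standing in for arch_scale_freq_ref(cpu).
	 */
	uint64_t scale    = 768;	/* ~75% of SCHED_CAPACITY_SCALE */
	uint64_t ref_freq = 2800000;	/* kHz */

	/* freq = scale * ref_freq >> SCHED_CAPACITY_SHIFT, as in the hunk above. */
	uint64_t freq = (scale * ref_freq) >> SCHED_CAPACITY_SHIFT;

	/* 768 * 2800000 / 1024 = 2100000 kHz: the CPU ran at roughly 2.1 GHz. */
	printf("estimated frequency: %llu kHz\n", (unsigned long long)freq);
	return 0;
}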