@@ -14,31 +14,85 @@
 #include <asm/kvm_asm.h>
 #include <asm/smp_plat.h>
 
+static u64 target_impl_cpu_num;
+static struct target_impl_cpu *target_impl_cpus;
+
+bool cpu_errata_set_target_impl(u64 num, void *impl_cpus)
+{
+	if (target_impl_cpu_num || !num || !impl_cpus)
+		return false;
+
+	target_impl_cpu_num = num;
+	target_impl_cpus = impl_cpus;
+	return true;
+}
+
+static inline bool is_midr_in_range(struct midr_range const *range)
+{
+	int i;
+
+	if (!target_impl_cpu_num)
+		return midr_is_cpu_model_range(read_cpuid_id(), range->model,
+					       range->rv_min, range->rv_max);
+
+	for (i = 0; i < target_impl_cpu_num; i++) {
+		if (midr_is_cpu_model_range(target_impl_cpus[i].midr,
+					    range->model,
+					    range->rv_min, range->rv_max))
+			return true;
+	}
+	return false;
+}
+
+bool is_midr_in_range_list(struct midr_range const *ranges)
+{
+	while (ranges->model)
+		if (is_midr_in_range(ranges++))
+			return true;
+	return false;
+}
+EXPORT_SYMBOL_GPL(is_midr_in_range_list);
+
 static bool __maybe_unused
-is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
+__is_affected_midr_range(const struct arm64_cpu_capabilities *entry,
+			 u32 midr, u32 revidr)
 {
 	const struct arm64_midr_revidr *fix;
-	u32 midr = read_cpuid_id(), revidr;
-
-	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-	if (!is_midr_in_range(midr, &entry->midr_range))
+	if (!is_midr_in_range(&entry->midr_range))
 		return false;
 
 	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
-	revidr = read_cpuid(REVIDR_EL1);
 	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
 		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
 			return false;
-
 	return true;
 }
 
+static bool __maybe_unused
+is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	int i;
+
+	if (!target_impl_cpu_num) {
+		WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+		return __is_affected_midr_range(entry, read_cpuid_id(),
+						read_cpuid(REVIDR_EL1));
+	}
+
+	for (i = 0; i < target_impl_cpu_num; i++) {
+		if (__is_affected_midr_range(entry, target_impl_cpus[i].midr,
+					     target_impl_cpus[i].revidr))
+			return true;
+	}
+	return false;
+}
+
 static bool __maybe_unused
 is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
 			    int scope)
 {
 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
+	return is_midr_in_range_list(entry->midr_range_list);
 }
 
 static bool __maybe_unused
@@ -186,12 +240,11 @@ static bool __maybe_unused
 has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
 				int scope)
 {
-	u32 midr = read_cpuid_id();
 	bool has_dic = read_cpuid_cachetype() & BIT(CTR_EL0_DIC_SHIFT);
 	const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);
 
 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-	return is_midr_in_range(midr, &range) && has_dic;
+	return is_midr_in_range(&range) && has_dic;
 }
 
 #ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
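
For reference, here is a caller-side sketch of the new registration hook. This is not part of the patch: the init function, the example_impl_cpus table, and the MIDR values are hypothetical, assuming only what the diff itself shows, namely that struct target_impl_cpu carries a midr/revidr pair per target CPU implementation and that cpu_errata_set_target_impl() is a one-shot call that rejects a second registration.

/*
 * Hypothetical caller, e.g. early init code in a guest that may be
 * migrated between hosts with different CPU implementations.
 * The MIDR encodings below are illustrative examples.
 */
static struct target_impl_cpu example_impl_cpus[] = {
	{ .midr = 0x410fd0c0, .revidr = 0 },	/* e.g. Arm Neoverse N1 r0p0 */
	{ .midr = 0x410fd490, .revidr = 0 },	/* e.g. Arm Neoverse N2 r0p0 */
};

static void __init example_register_target_impls(void)
{
	/*
	 * Must run before the errata framework walks the capability
	 * tables, so that is_midr_in_range() and friends match against
	 * this list instead of the boot CPU's own MIDR_EL1/REVIDR_EL1.
	 */
	if (!cpu_errata_set_target_impl(ARRAY_SIZE(example_impl_cpus),
					example_impl_cpus))
		pr_warn("errata: target implementation list already set\n");
}

Once a list is registered, every erratum check becomes a union over the listed implementations: a workaround is enabled if any target CPU is affected, which is the conservative choice when the running system may end up on any of them.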