
Commit cc2fdb8

x86/bugs: Move cpu_bugs_smt_update() down
jira LE-4704
cve CVE-2025-40300
Rebuild_History Non-Buildable kernel-4.18.0-553.83.1.el8_10
commit-author Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
commit 6449f5b
Empty-Commit: Cherry-Pick Conflicts during history rebuild.
Will be included in final tarball splat. Ref for failed cherry-pick at:
ciq/ciq_backports/kernel-4.18.0-553.83.1.el8_10/6449f5ba.failed

cpu_bugs_smt_update() uses global variables from different mitigations. For
SMT updates it can't currently use vmscape_mitigation that is defined after
it.

Since cpu_bugs_smt_update() depends on many other mitigations, move it
after all mitigations are defined. With that, it can use vmscape_mitigation
in a moment.

No functional change.

Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
(cherry picked from commit 6449f5b)
Signed-off-by: Jonathan Maple <jmaple@ciq.com>

# Conflicts:
#	arch/x86/kernel/cpu/bugs.c
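For context, the move is purely about C's define-before-use rule for file-scope identifiers: a function body can only reference a static variable that has been defined (or forward-declared) earlier in the same translation unit. A minimal sketch of that constraint, using hypothetical names that are not in bugs.c:

static int example_mitigation;		/* plays the role of vmscape_mitigation */

static void example_smt_update(void)	/* plays the role of cpu_bugs_smt_update() */
{
	/*
	 * This reference compiles only because example_mitigation is
	 * defined above. If the variable were defined further down the
	 * file, the compiler would reject the use unless a forward
	 * declaration were added, which is why the commit moves
	 * cpu_bugs_smt_update() below the mitigation state it reads.
	 */
	if (example_mitigation)
		example_mitigation = 0;
}

Moving the function, rather than adding a forward declaration, keeps it after all of the mitigation state it depends on, as the commit message notes.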
1 parent bc36aa4 commit cc2fdb8

File tree

1 file changed: +288 -0 lines changed

Lines changed: 288 additions & 0 deletions
@@ -0,0 +1,288 @@
x86/bugs: Move cpu_bugs_smt_update() down

jira LE-4704
cve CVE-2025-40300
Rebuild_History Non-Buildable kernel-4.18.0-553.83.1.el8_10
commit-author Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
commit 6449f5baf9c78a7a442d64f4a61378a21c5db113
Empty-Commit: Cherry-Pick Conflicts during history rebuild.
Will be included in final tarball splat. Ref for failed cherry-pick at:
ciq/ciq_backports/kernel-4.18.0-553.83.1.el8_10/6449f5ba.failed

cpu_bugs_smt_update() uses global variables from different mitigations. For
SMT updates it can't currently use vmscape_mitigation that is defined after
it.

Since cpu_bugs_smt_update() depends on many other mitigations, move it
after all mitigations are defined. With that, it can use vmscape_mitigation
in a moment.

No functional change.

Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
(cherry picked from commit 6449f5baf9c78a7a442d64f4a61378a21c5db113)
Signed-off-by: Jonathan Maple <jmaple@ciq.com>

# Conflicts:
#	arch/x86/kernel/cpu/bugs.c
diff --cc arch/x86/kernel/cpu/bugs.c
index a556e8ade674,1f8c1c51d057..000000000000
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@@ -1877,206 -2492,65 +1877,209 @@@ static void __init spectre_v2_select_mi
		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
		pr_info("Enabling Restricted Speculation for firmware calls\n");
	}
-}

-static void update_stibp_msr(void * __unused)
-{
-	u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
-	update_spec_ctrl(val);
+	/* Set up IBPB and STIBP depending on the general spectre V2 command */
+	spectre_v2_cmd = cmd;
+}
+
+static void update_stibp_msr(void * __unused)
+{
+	u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
+	update_spec_ctrl(val);
+}
+
+/* Update x86_spec_ctrl_base in case SMT state changed. */
+static void update_stibp_strict(void)
+{
+	u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
+
+	if (sched_smt_active())
+		mask |= SPEC_CTRL_STIBP;
+
+	if (mask == x86_spec_ctrl_base)
+		return;
+
+	pr_info("Update user space SMT mitigation: STIBP %s\n",
+		mask & SPEC_CTRL_STIBP ? "always-on" : "off");
+	x86_spec_ctrl_base = mask;
+	on_each_cpu(update_stibp_msr, NULL, 1);
+}
+
+/* Update the static key controlling the evaluation of TIF_SPEC_IB */
+static void update_indir_branch_cond(void)
+{
+	if (sched_smt_active())
+		static_branch_enable(&switch_to_cond_stibp);
+	else
+		static_branch_disable(&switch_to_cond_stibp);
+}
+
+#undef pr_fmt
+#define pr_fmt(fmt) fmt
+
+/* Update the static key controlling the MDS CPU buffer clear in idle */
+static void update_mds_branch_idle(void)
+{
+	/*
+	 * Enable the idle clearing if SMT is active on CPUs which are
+	 * affected only by MSBDS and not any other MDS variant.
+	 *
+	 * The other variants cannot be mitigated when SMT is enabled, so
+	 * clearing the buffers on idle just to prevent the Store Buffer
+	 * repartitioning leak would be a window dressing exercise.
+	 */
+	if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
+		return;
+
+	if (sched_smt_active()) {
+		static_branch_enable(&mds_idle_clear);
+	} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
+		   (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
+		static_branch_disable(&mds_idle_clear);
+	}
+}
+
++<<<<<<< HEAD
+#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
+#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
+#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
+
+void cpu_bugs_smt_update(void)
+{
+	mutex_lock(&spec_ctrl_mutex);
+
+	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
+	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
+		pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
+
+	switch (spectre_v2_user_stibp) {
+	case SPECTRE_V2_USER_NONE:
+		break;
+	case SPECTRE_V2_USER_STRICT:
+	case SPECTRE_V2_USER_STRICT_PREFERRED:
+		update_stibp_strict();
+		break;
+	case SPECTRE_V2_USER_PRCTL:
+	case SPECTRE_V2_USER_SECCOMP:
+		update_indir_branch_cond();
+		break;
+	}
+
+	switch (mds_mitigation) {
+	case MDS_MITIGATION_FULL:
+	case MDS_MITIGATION_VMWERV:
+		if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
+			pr_warn_once(MDS_MSG_SMT);
+		update_mds_branch_idle();
+		break;
+	case MDS_MITIGATION_OFF:
+		break;
+	}
+
+	switch (taa_mitigation) {
+	case TAA_MITIGATION_VERW:
+	case TAA_MITIGATION_UCODE_NEEDED:
+		if (sched_smt_active())
+			pr_warn_once(TAA_MSG_SMT);
+		break;
+	case TAA_MITIGATION_TSX_DISABLED:
+	case TAA_MITIGATION_OFF:
+		break;
+	}
+
+	switch (mmio_mitigation) {
+	case MMIO_MITIGATION_VERW:
+	case MMIO_MITIGATION_UCODE_NEEDED:
+		if (sched_smt_active())
+			pr_warn_once(MMIO_MSG_SMT);
+		break;
+	case MMIO_MITIGATION_OFF:
+		break;
+	}
+
+	mutex_unlock(&spec_ctrl_mutex);
}

-/* Update x86_spec_ctrl_base in case SMT state changed. */
-static void update_stibp_strict(void)
+#ifdef CONFIG_DEBUG_FS
+/*
+ * Provide a debugfs file to dump SPEC_CTRL MSRs of all the CPUs
+ * Consecutive MSR values are collapsed together if they are the same.
+ */
+static ssize_t spec_ctrl_msrs_read(struct file *file, char __user *user_buf,
+				   size_t count, loff_t *ppos)
{
-	u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
+	int bufsiz = min(count, PAGE_SIZE);
+	int cpu, prev_cpu, len, cnt = 0;
+	u64 val, prev_val;
+	char *buf;

-	if (sched_smt_active())
-		mask |= SPEC_CTRL_STIBP;
+	/*
+	 * The MSRs info should be small enough that the whole buffer is
+	 * copied out in one call. However, user space may read it again
+	 * to see if there is any data left. Rereading the cached SPEC_CTRL
+	 * MSR values may produce a different result causing corruption in
+	 * output data. So skipping the call if *ppos is not starting from 0.
+	 */
+	if (*ppos)
+		return 0;

-	if (mask == x86_spec_ctrl_base)
-		return;
+	buf = kmalloc(bufsiz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;

-	pr_info("Update user space SMT mitigation: STIBP %s\n",
-		mask & SPEC_CTRL_STIBP ? "always-on" : "off");
-	x86_spec_ctrl_base = mask;
-	on_each_cpu(update_stibp_msr, NULL, 1);
-}
+	for_each_possible_cpu(cpu) {
+		val = per_cpu(x86_spec_ctrl_current, cpu);

-/* Update the static key controlling the evaluation of TIF_SPEC_IB */
-static void update_indir_branch_cond(void)
-{
-	if (sched_smt_active())
-		static_branch_enable(&switch_to_cond_stibp);
+		if (!cpu)
+			goto next;
+
+		if (val == prev_val)
+			continue;
+
+		if (prev_cpu == cpu - 1)
+			len = snprintf(buf + cnt, bufsiz - cnt, "CPU %d: 0x%llx\n",
+				       prev_cpu, prev_val);
+		else
+			len = snprintf(buf + cnt, bufsiz - cnt, "CPUs %d-%d: 0x%llx\n",
+				       prev_cpu, cpu - 1, prev_val);
+
+		cnt += len;
+		if (!len)
+			break; /* Out of buffer */
+next:
+		prev_cpu = cpu;
+		prev_val = val;
+	}
+
+	if (prev_cpu == cpu - 1)
+		cnt += snprintf(buf + cnt, bufsiz - cnt, "CPU %d: 0x%llx\n",
+				prev_cpu, prev_val);
	else
-		static_branch_disable(&switch_to_cond_stibp);
+		cnt += snprintf(buf + cnt, bufsiz - cnt, "CPUs %d-%d: 0x%llx\n",
+				prev_cpu, cpu - 1, prev_val);
+
+	count = simple_read_from_buffer(user_buf, count, ppos, buf, cnt);
+	kfree(buf);
+	return count;
}

-#undef pr_fmt
-#define pr_fmt(fmt) fmt
+static const struct file_operations fops_spec_ctrl = {
+	.read = spec_ctrl_msrs_read,
+	.llseek = default_llseek,
+};

-/* Update the static key controlling the MDS CPU buffer clear in idle */
-static void update_mds_branch_idle(void)
+static int __init init_spec_ctrl_debugfs(void)
{
-	/*
-	 * Enable the idle clearing if SMT is active on CPUs which are
-	 * affected only by MSBDS and not any other MDS variant.
-	 *
-	 * The other variants cannot be mitigated when SMT is enabled, so
-	 * clearing the buffers on idle just to prevent the Store Buffer
-	 * repartitioning leak would be a window dressing exercise.
-	 */
-	if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
-		return;
-
-	if (sched_smt_active()) {
-		static_branch_enable(&cpu_buf_idle_clear);
-	} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
-		   (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
-		static_branch_disable(&cpu_buf_idle_clear);
-	}
+	if (!debugfs_create_file("spec_ctrl_msrs", 0400, arch_debugfs_dir,
+				 NULL, &fops_spec_ctrl))
+		return -ENOMEM;
+	return 0;
}
+fs_initcall(init_spec_ctrl_debugfs);
+#endif

++=======
++>>>>>>> 6449f5baf9c7 (x86/bugs: Move cpu_bugs_smt_update() down)
#undef pr_fmt
#define pr_fmt(fmt) "Speculative Store Bypass: " fmt
* Unmerged path arch/x86/kernel/cpu/bugs.c
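The commit message says the move lets cpu_bugs_smt_update() "use vmscape_mitigation in a moment"; that follow-up code is not part of this patch. As a rough illustration only, a later change could add an SMT check keyed off that variable in the same style as the existing switches. The enum value and message macro below are assumed names, not taken from this commit:

	/* Illustrative sketch, not from this patch: VMSCAPE_MITIGATION_NONE
	 * and VMSCAPE_MSG_SMT are assumed names. */
	switch (vmscape_mitigation) {
	case VMSCAPE_MITIGATION_NONE:
		break;
	default:
		/* Warn when SMT is on and STIBP is not active, since a
		 * sibling thread could still influence branch predictions. */
		if (sched_smt_active() && !boot_cpu_has(X86_FEATURE_STIBP))
			pr_warn_once(VMSCAPE_MSG_SMT);
		break;
	}

Because cpu_bugs_smt_update() now sits after all mitigation state, such a check can reference vmscape_mitigation directly without any forward declaration.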
