Skip to content

Commit 852fb6b

Browse files
committed
Merge: x86/bugs: Support new SRSO CPUID bits for AMD ZEN 5 CPUs
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-10/-/merge_requests/406 JIRA: https://issues.redhat.com/browse/RHEL-80398 MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-10/-/merge_requests/406 Omitted-fix: 847f140 ("tools headers: Update the x86 headers with the kernel sources") Patches 1, 2 & 4 support the new SRSO_USER_KERNEL_NO and SRSO_MSR_FIX CPUID bits. Patch 3 is a bug fix that addresses a problem with IBPB on VM-Exit. Since Zen5 (Turin) has a new x86 ID of 0x1a, the current RHEL kernel won't enable any SRSO mitigation, which isn't secure enough when dealing with VMs. Signed-off-by: Waiman Long <longman@redhat.com> Approved-by: David Arcari <darcari@redhat.com> Approved-by: Vitaly Kuznetsov <vkuznets@redhat.com> Approved-by: Lenny Szubowicz <lszubowi@redhat.com> Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> Merged-by: Julio Faracco <jfaracco@redhat.com>
2 parents 3eb8e8a + 2923fc7 commit 852fb6b

File tree

9 files changed

+69
-13
lines changed

9 files changed

+69
-13
lines changed

Documentation/admin-guide/hw-vuln/srso.rst

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -104,7 +104,20 @@ The possible values in this file are:
104104

105105
(spec_rstack_overflow=ibpb-vmexit)
106106

107+
* 'Mitigation: Reduced Speculation':
107108

109+
This mitigation gets automatically enabled when the above one "IBPB on
110+
VMEXIT" has been selected and the CPU supports the BpSpecReduce bit.
111+
112+
It gets automatically enabled on machines which have the
113+
SRSO_USER_KERNEL_NO=1 CPUID bit. In that case, the code logic is to switch
114+
to the above =ibpb-vmexit mitigation because the user/kernel boundary is
115+
not affected anymore and thus "safe RET" is not needed.
116+
117+
After enabling the IBPB on VMEXIT mitigation option, the BpSpecReduce bit
118+
is detected (functionality present on all such machines) and that
119+
practically overrides IBPB on VMEXIT as it has a lot less performance
120+
impact and takes care of the guest->host attack vector too.
108121

109122
In order to exploit vulnerability, an attacker needs to:
110123

arch/x86/Kconfig

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2587,7 +2587,8 @@ config MITIGATION_IBPB_ENTRY
25872587
depends on CPU_SUP_AMD && X86_64
25882588
default y
25892589
help
2590-
Compile the kernel with support for the retbleed=ibpb mitigation.
2590+
Compile the kernel with support for the retbleed=ibpb and
2591+
spec_rstack_overflow={ibpb,ibpb-vmexit} mitigations.
25912592

25922593
config MITIGATION_IBRS_ENTRY
25932594
bool "Enable IBRS on kernel entry"

arch/x86/include/asm/cpufeatures.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -464,6 +464,11 @@
464464
#define X86_FEATURE_SBPB (20*32+27) /* Selective Branch Prediction Barrier */
465465
#define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* MSR_PRED_CMD[IBPB] flushes all branch type predictions */
466466
#define X86_FEATURE_SRSO_NO (20*32+29) /* CPU is not affected by SRSO */
467+
#define X86_FEATURE_SRSO_USER_KERNEL_NO (20*32+30) /* CPU is not affected by SRSO across user/kernel boundaries */
468+
#define X86_FEATURE_SRSO_BP_SPEC_REDUCE (20*32+31) /*
469+
* BP_CFG[BpSpecReduce] can be used to mitigate SRSO for VMs.
470+
* (SRSO_MSR_FIX in the official doc).
471+
*/
467472

468473
/*
469474
* Extended auxiliary flags: Linux defined - for features scattered in various

arch/x86/include/asm/msr-index.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -717,6 +717,7 @@
717717

718718
/* Zen4 */
719719
#define MSR_ZEN4_BP_CFG 0xc001102e
720+
#define MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT 4
720721
#define MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT 5
721722

722723
/* Fam 19h MSRs */

arch/x86/kernel/cpu/bugs.c

Lines changed: 38 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1115,6 +1115,8 @@ static void __init retbleed_select_mitigation(void)
11151115

11161116
case RETBLEED_MITIGATION_IBPB:
11171117
setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
1118+
setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
1119+
mitigate_smt = true;
11181120

11191121
/*
11201122
* IBPB on entry already obviates the need for
@@ -1124,9 +1126,6 @@ static void __init retbleed_select_mitigation(void)
11241126
setup_clear_cpu_cap(X86_FEATURE_UNRET);
11251127
setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
11261128

1127-
setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
1128-
mitigate_smt = true;
1129-
11301129
/*
11311130
* There is no need for RSB filling: entry_ibpb() ensures
11321131
* all predictions, including the RSB, are invalidated,
@@ -2523,6 +2522,7 @@ enum srso_mitigation {
25232522
SRSO_MITIGATION_SAFE_RET,
25242523
SRSO_MITIGATION_IBPB,
25252524
SRSO_MITIGATION_IBPB_ON_VMEXIT,
2525+
SRSO_MITIGATION_BP_SPEC_REDUCE,
25262526
};
25272527

25282528
enum srso_mitigation_cmd {
@@ -2540,7 +2540,8 @@ static const char * const srso_strings[] = {
25402540
[SRSO_MITIGATION_MICROCODE] = "Vulnerable: Microcode, no safe RET",
25412541
[SRSO_MITIGATION_SAFE_RET] = "Mitigation: Safe RET",
25422542
[SRSO_MITIGATION_IBPB] = "Mitigation: IBPB",
2543-
[SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only"
2543+
[SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only",
2544+
[SRSO_MITIGATION_BP_SPEC_REDUCE] = "Mitigation: Reduced Speculation"
25442545
};
25452546

25462547
static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
@@ -2579,7 +2580,7 @@ static void __init srso_select_mitigation(void)
25792580
srso_cmd == SRSO_CMD_OFF) {
25802581
if (boot_cpu_has(X86_FEATURE_SBPB))
25812582
x86_pred_cmd = PRED_CMD_SBPB;
2582-
return;
2583+
goto out;
25832584
}
25842585

25852586
if (has_microcode) {
@@ -2591,7 +2592,7 @@ static void __init srso_select_mitigation(void)
25912592
*/
25922593
if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
25932594
setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
2594-
return;
2595+
goto out;
25952596
}
25962597

25972598
if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
@@ -2615,6 +2616,9 @@ static void __init srso_select_mitigation(void)
26152616
break;
26162617

26172618
case SRSO_CMD_SAFE_RET:
2619+
if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO))
2620+
goto ibpb_on_vmexit;
2621+
26182622
if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
26192623
/*
26202624
* Enable the return thunk for generated code
@@ -2643,6 +2647,7 @@ static void __init srso_select_mitigation(void)
26432647
if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
26442648
if (has_microcode) {
26452649
setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
2650+
setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
26462651
srso_mitigation = SRSO_MITIGATION_IBPB;
26472652

26482653
/*
@@ -2652,15 +2657,29 @@ static void __init srso_select_mitigation(void)
26522657
*/
26532658
setup_clear_cpu_cap(X86_FEATURE_UNRET);
26542659
setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
2660+
2661+
/*
2662+
* There is no need for RSB filling: entry_ibpb() ensures
2663+
* all predictions, including the RSB, are invalidated,
2664+
* regardless of IBPB implementation.
2665+
*/
2666+
setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
26552667
}
26562668
} else {
26572669
pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
26582670
}
26592671
break;
26602672

2673+
ibpb_on_vmexit:
26612674
case SRSO_CMD_IBPB_ON_VMEXIT:
2662-
if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
2663-
if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
2675+
if (boot_cpu_has(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) {
2676+
pr_notice("Reducing speculation to address VM/HV SRSO attack vector.\n");
2677+
srso_mitigation = SRSO_MITIGATION_BP_SPEC_REDUCE;
2678+
break;
2679+
}
2680+
2681+
if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
2682+
if (has_microcode) {
26642683
setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
26652684
srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
26662685

@@ -2672,15 +2691,23 @@ static void __init srso_select_mitigation(void)
26722691
setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
26732692
}
26742693
} else {
2675-
pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
2676-
}
2694+
pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
2695+
}
26772696
break;
26782697
default:
26792698
break;
26802699
}
26812700

26822701
out:
2683-
pr_info("%s\n", srso_strings[srso_mitigation]);
2702+
/*
2703+
* Clear the feature flag if this mitigation is not selected as that
2704+
* feature flag controls the BpSpecReduce MSR bit toggling in KVM.
2705+
*/
2706+
if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE)
2707+
setup_clear_cpu_cap(X86_FEATURE_SRSO_BP_SPEC_REDUCE);
2708+
2709+
if (srso_mitigation != SRSO_MITIGATION_NONE)
2710+
pr_info("%s\n", srso_strings[srso_mitigation]);
26842711
}
26852712

26862713
#undef pr_fmt

arch/x86/kernel/cpu/common.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1273,6 +1273,7 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
12731273
VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
12741274
VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO),
12751275
VULNBL_AMD(0x19, SRSO),
1276+
VULNBL_AMD(0x1a, SRSO),
12761277
{}
12771278
};
12781279

arch/x86/kvm/cpuid.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -815,7 +815,7 @@ void kvm_set_cpu_caps(void)
815815
kvm_cpu_cap_mask(CPUID_8000_0021_EAX,
816816
F(NO_NESTED_DATA_BP) | F(LFENCE_RDTSC) | 0 /* SmmPgCfgLock */ |
817817
F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 /* PrefetchCtlMsr */ |
818-
F(WRMSR_XX_BASE_NS)
818+
F(WRMSR_XX_BASE_NS) | F(SRSO_USER_KERNEL_NO)
819819
);
820820

821821
kvm_cpu_cap_check_and_set(X86_FEATURE_SBPB);

arch/x86/kvm/svm/svm.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -608,6 +608,9 @@ static void svm_disable_virtualization_cpu(void)
608608
kvm_cpu_svm_disable();
609609

610610
amd_pmu_disable_virt();
611+
612+
if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
613+
msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
611614
}
612615

613616
static int svm_enable_virtualization_cpu(void)
@@ -685,6 +688,9 @@ static int svm_enable_virtualization_cpu(void)
685688
rdmsr(MSR_TSC_AUX, sev_es_host_save_area(sd)->tsc_aux, msr_hi);
686689
}
687690

691+
if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
692+
msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
693+
688694
return 0;
689695
}
690696

arch/x86/lib/msr.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -103,6 +103,7 @@ int msr_set_bit(u32 msr, u8 bit)
103103
{
104104
return __flip_bit(msr, bit, true);
105105
}
106+
EXPORT_SYMBOL_GPL(msr_set_bit);
106107

107108
/**
108109
* msr_clear_bit - Clear @bit in a MSR @msr.
@@ -118,6 +119,7 @@ int msr_clear_bit(u32 msr, u8 bit)
118119
{
119120
return __flip_bit(msr, bit, false);
120121
}
122+
EXPORT_SYMBOL_GPL(msr_clear_bit);
121123

122124
#ifdef CONFIG_TRACEPOINTS
123125
void do_trace_write_msr(unsigned int msr, u64 val, int failed)

0 commit comments

Comments
 (0)