Commit ae6b4bb

dvdgomez authored and PlaidCat committed
x86/nospec: Unwreck the RSB stuffing
jira LE-958
cve-bugfix CVE-2022-26373
commit 4e3aa92

Commit 2b12993 ("x86/speculation: Add RSB VM Exit protections") made a
right mess of the RSB stuffing, rewrite the whole thing to not suck.

Thanks to Andrew for the enlightening comment about Post-Barrier RSB
things so we can make this code less magical.

Cc: stable@vger.kernel.org
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/YvuNdDWoUZSBjYcm@worktop.programming.kicks-ass.net
(cherry picked from commit 4e3aa92)
Signed-off-by: David Gomez <dgomez@ciq.com>
1 parent 257801c commit ae6b4bb
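
The reworked macro keeps the old two-flag call shape while letting PBRSB-affected
paths request the lightweight single-slot fill through the new optional ftr2
argument. A sketch of a call site; the register, loop count, and the two feature
flags shown here are assumptions for illustration and are not part of this diff:

	/* On VM exit: fully stuff the RSB when the first feature flag is set,
	 * otherwise fall back to the one-CALL PBRSB guard selected by the
	 * second flag (flags illustrative, not taken from this commit). */
	FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT,\
			   X86_FEATURE_RSB_VMEXIT_LITE

Callers that pass only the first flag keep the old behavior, since the default
ftr2 never selects the single-slot alternative.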

File tree

1 file changed (+39, -41)

arch/x86/include/asm/nospec-branch.h

Lines changed: 39 additions & 41 deletions
@@ -42,33 +42,44 @@
 #define RSB_FILL_LOOPS		16	/* To avoid underflow */
 
 /*
+ * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN.
+ */
+#define __FILL_RETURN_SLOT			\
+	ANNOTATE_INTRA_FUNCTION_CALL;		\
+	call	772f;				\
+	int3;					\
+772:
+
+/*
+ * Stuff the entire RSB.
+ *
  * Google experimented with loop-unrolling and this turned out to be
  * the optimal version - two calls, each with their own speculation
  * trap should their return address end up getting used, in a loop.
  */
-#define __FILL_RETURN_BUFFER(reg, nr, sp)	\
-	mov	$(nr/2), reg;			\
-771:						\
-	ANNOTATE_INTRA_FUNCTION_CALL;		\
-	call	772f;				\
-773:	/* speculation trap */			\
-	UNWIND_HINT_EMPTY;			\
-	pause;					\
-	lfence;					\
-	jmp	773b;				\
-772:						\
-	ANNOTATE_INTRA_FUNCTION_CALL;		\
-	call	774f;				\
-775:	/* speculation trap */			\
-	UNWIND_HINT_EMPTY;			\
-	pause;					\
-	lfence;					\
-	jmp	775b;				\
-774:						\
-	add	$(BITS_PER_LONG/8) * 2, sp;	\
-	dec	reg;				\
-	jnz	771b;				\
-	/* barrier for jnz misprediction */	\
+#define __FILL_RETURN_BUFFER(reg, nr)			\
+	mov	$(nr/2), reg;				\
+771:							\
+	__FILL_RETURN_SLOT				\
+	__FILL_RETURN_SLOT				\
+	add	$(BITS_PER_LONG/8) * 2, %_ASM_SP;	\
+	dec	reg;					\
+	jnz	771b;					\
+	/* barrier for jnz misprediction */		\
+	lfence;
+
+/*
+ * Stuff a single RSB slot.
+ *
+ * To mitigate Post-Barrier RSB speculation, one CALL instruction must be
+ * forced to retire before letting a RET instruction execute.
+ *
+ * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed
+ * before this point.
+ */
+#define __FILL_ONE_RETURN				\
+	__FILL_RETURN_SLOT				\
+	add	$(BITS_PER_LONG/8), %_ASM_SP;		\
 	lfence;
 
 #ifdef __ASSEMBLY__
@@ -146,28 +157,15 @@
 #endif
 .endm
 
-.macro ISSUE_UNBALANCED_RET_GUARD
-	ANNOTATE_INTRA_FUNCTION_CALL
-	call .Lunbalanced_ret_guard_\@
-	int3
-.Lunbalanced_ret_guard_\@:
-	add $(BITS_PER_LONG/8), %_ASM_SP
-	lfence
-.endm
-
 /*
  * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
  * monstrosity above, manually.
  */
-.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2
-.ifb \ftr2
-	ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
-.else
-	ALTERNATIVE_2 "jmp .Lskip_rsb_\@", "", \ftr, "jmp .Lunbalanced_\@", \ftr2
-.endif
-	__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
-.Lunbalanced_\@:
-	ISSUE_UNBALANCED_RET_GUARD
+.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS)
+	ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \
+		__stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \
+		__stringify(__FILL_ONE_RETURN), \ftr2
+
 .Lskip_rsb_\@:
 .endm
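
For readers puzzling over the single-slot variant, here is __FILL_ONE_RETURN
expanded by hand. This is only the diff's own macros spliced together, annotated
as a sketch of the PBRSB reasoning:

	ANNOTATE_INTRA_FUNCTION_CALL;
	call	772f;			/* fills one RSB entry; the recorded
					 * return address is the int3 below */
	int3;				/* speculation trap: a RET consuming
					 * the stale RSB entry lands here */
772:
	add	$(BITS_PER_LONG/8), %_ASM_SP;	/* drop the on-stack return
						 * address; the RSB entry stays */
	lfence;				/* the CALL retires before any later
					 * RET may execute */

The ftr2 default of ALT_NOT(X86_FEATURE_ALWAYS) preserves the old two-argument
behavior: X86_FEATURE_ALWAYS is always set, so that alternative never applies,
and __FILL_ONE_RETURN is only patched in when a caller passes a real ftr2 flag.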
