Skip to content

Commit 34a3cae

Browse files
jpoimboe authored and bp3tk0v committed
x86/srso: Disentangle rethunk-dependent options
CONFIG_RETHUNK, CONFIG_CPU_UNRET_ENTRY and CONFIG_CPU_SRSO are all tangled up. De-spaghettify the code a bit. Some of the rethunk-related code has been shuffled around within the '.text..__x86.return_thunk' section, but otherwise there are no functional changes. srso_alias_untrain_ret() and srso_alias_safe_ret() ((which are very address-sensitive) haven't moved. Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org> Signed-off-by: Ingo Molnar <mingo@kernel.org> Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de> Acked-by: Borislav Petkov (AMD) <bp@alien8.de> Link: https://lore.kernel.org/r/2845084ed303d8384905db3b87b77693945302b4.1693889988.git.jpoimboe@kernel.org
1 parent 3512369 commit 34a3cae

4 files changed

Lines changed: 109 additions & 85 deletions

File tree

arch/x86/include/asm/nospec-branch.h

Lines changed: 18 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -289,19 +289,17 @@
289289
* where we have a stack but before any RET instruction.
290290
*/
291291
.macro UNTRAIN_RET
292-
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
293-
defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
292+
#if defined(CONFIG_RETHUNK) || defined(CONFIG_CPU_IBPB_ENTRY)
294293
VALIDATE_UNRET_END
295294
ALTERNATIVE_3 "", \
296295
CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
297296
"call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \
298-
__stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
297+
__stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
299298
#endif
300299
.endm
301300

302301
.macro UNTRAIN_RET_VM
303-
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
304-
defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
302+
#if defined(CONFIG_RETHUNK) || defined(CONFIG_CPU_IBPB_ENTRY)
305303
VALIDATE_UNRET_END
306304
ALTERNATIVE_3 "", \
307305
CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
@@ -311,8 +309,7 @@
311309
.endm
312310

313311
.macro UNTRAIN_RET_FROM_CALL
314-
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
315-
defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
312+
#if defined(CONFIG_RETHUNK) || defined(CONFIG_CPU_IBPB_ENTRY)
316313
VALIDATE_UNRET_END
317314
ALTERNATIVE_3 "", \
318315
CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
@@ -348,6 +345,20 @@ extern void __x86_return_thunk(void);
348345
static inline void __x86_return_thunk(void) {}
349346
#endif
350347

348+
#ifdef CONFIG_CPU_UNRET_ENTRY
349+
extern void retbleed_return_thunk(void);
350+
#else
351+
static inline void retbleed_return_thunk(void) {}
352+
#endif
353+
354+
#ifdef CONFIG_CPU_SRSO
355+
extern void srso_return_thunk(void);
356+
extern void srso_alias_return_thunk(void);
357+
#else
358+
static inline void srso_return_thunk(void) {}
359+
static inline void srso_alias_return_thunk(void) {}
360+
#endif
361+
351362
extern void retbleed_return_thunk(void);
352363
extern void srso_return_thunk(void);
353364
extern void srso_alias_return_thunk(void);

arch/x86/kernel/cpu/bugs.c

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@ EXPORT_SYMBOL_GPL(x86_pred_cmd);
6363

6464
static DEFINE_MUTEX(spec_ctrl_mutex);
6565

66-
void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
66+
void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
6767

6868
/* Update SPEC_CTRL MSR and its cached copy unconditionally */
6969
static void update_spec_ctrl(u64 val)
@@ -1041,8 +1041,7 @@ static void __init retbleed_select_mitigation(void)
10411041
setup_force_cpu_cap(X86_FEATURE_RETHUNK);
10421042
setup_force_cpu_cap(X86_FEATURE_UNRET);
10431043

1044-
if (IS_ENABLED(CONFIG_RETHUNK))
1045-
x86_return_thunk = retbleed_return_thunk;
1044+
x86_return_thunk = retbleed_return_thunk;
10461045

10471046
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
10481047
boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)

arch/x86/kernel/vmlinux.lds.S

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -139,10 +139,7 @@ SECTIONS
139139
STATIC_CALL_TEXT
140140

141141
ALIGN_ENTRY_TEXT_BEGIN
142-
#ifdef CONFIG_CPU_SRSO
143142
*(.text..__x86.rethunk_untrain)
144-
#endif
145-
146143
ENTRY_TEXT
147144

148145
#ifdef CONFIG_CPU_SRSO
@@ -520,12 +517,12 @@ INIT_PER_CPU(irq_stack_backing_store);
520517
"fixed_percpu_data is not at start of per-cpu area");
521518
#endif
522519

523-
#ifdef CONFIG_RETHUNK
520+
#ifdef CONFIG_CPU_UNRET_ENTRY
524521
. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
525-
. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
526522
#endif
527523

528524
#ifdef CONFIG_CPU_SRSO
525+
. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
529526
/*
530527
* GNU ld cannot do XOR until 2.41.
531528
* https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1

arch/x86/lib/retpoline.S

Lines changed: 87 additions & 70 deletions
Original file line numberDiff line numberDiff line change
@@ -126,12 +126,13 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array)
126126
#include <asm/GEN-for-each-reg.h>
127127
#undef GEN
128128
#endif
129-
/*
130-
* This function name is magical and is used by -mfunction-return=thunk-extern
131-
* for the compiler to generate JMPs to it.
132-
*/
129+
133130
#ifdef CONFIG_RETHUNK
134131

132+
.section .text..__x86.return_thunk
133+
134+
#ifdef CONFIG_CPU_SRSO
135+
135136
/*
136137
* srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at
137138
* special addresses:
@@ -147,27 +148,17 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array)
147148
*
148149
* As a result, srso_alias_safe_ret() becomes a safe return.
149150
*/
150-
#ifdef CONFIG_CPU_SRSO
151-
.section .text..__x86.rethunk_untrain
152-
151+
.pushsection .text..__x86.rethunk_untrain
153152
SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
154153
UNWIND_HINT_FUNC
155154
ANNOTATE_NOENDBR
156155
ASM_NOP2
157156
lfence
158157
jmp srso_alias_return_thunk
159158
SYM_FUNC_END(srso_alias_untrain_ret)
159+
.popsection
160160

161-
.section .text..__x86.rethunk_safe
162-
#else
163-
/* dummy definition for alternatives */
164-
SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
165-
ANNOTATE_UNRET_SAFE
166-
ret
167-
int3
168-
SYM_FUNC_END(srso_alias_untrain_ret)
169-
#endif
170-
161+
.pushsection .text..__x86.rethunk_safe
171162
SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
172163
lea 8(%_ASM_SP), %_ASM_SP
173164
UNWIND_HINT_FUNC
@@ -182,8 +173,58 @@ SYM_CODE_START_NOALIGN(srso_alias_return_thunk)
182173
call srso_alias_safe_ret
183174
ud2
184175
SYM_CODE_END(srso_alias_return_thunk)
176+
.popsection
177+
178+
/*
179+
* SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
180+
* above. On kernel entry, srso_untrain_ret() is executed which is a
181+
*
182+
* movabs $0xccccc30824648d48,%rax
183+
*
184+
* and when the return thunk executes the inner label srso_safe_ret()
185+
* later, it is a stack manipulation and a RET which is mispredicted and
186+
* thus a "safe" one to use.
187+
*/
188+
.align 64
189+
.skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc
190+
SYM_START(srso_untrain_ret, SYM_L_LOCAL, SYM_A_NONE)
191+
ANNOTATE_NOENDBR
192+
.byte 0x48, 0xb8
193+
194+
/*
195+
* This forces the function return instruction to speculate into a trap
196+
* (UD2 in srso_return_thunk() below). This RET will then mispredict
197+
* and execution will continue at the return site read from the top of
198+
* the stack.
199+
*/
200+
SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
201+
lea 8(%_ASM_SP), %_ASM_SP
202+
ret
203+
int3
204+
int3
205+
/* end of movabs */
206+
lfence
207+
call srso_safe_ret
208+
ud2
209+
SYM_CODE_END(srso_safe_ret)
210+
SYM_FUNC_END(srso_untrain_ret)
211+
212+
SYM_CODE_START(srso_return_thunk)
213+
UNWIND_HINT_FUNC
214+
ANNOTATE_NOENDBR
215+
call srso_safe_ret
216+
ud2
217+
SYM_CODE_END(srso_return_thunk)
218+
219+
#define JMP_SRSO_UNTRAIN_RET "jmp srso_untrain_ret"
220+
#define JMP_SRSO_ALIAS_UNTRAIN_RET "jmp srso_alias_untrain_ret"
221+
#else /* !CONFIG_CPU_SRSO */
222+
#define JMP_SRSO_UNTRAIN_RET "ud2"
223+
#define JMP_SRSO_ALIAS_UNTRAIN_RET "ud2"
224+
#endif /* CONFIG_CPU_SRSO */
225+
226+
#ifdef CONFIG_CPU_UNRET_ENTRY
185227

186-
.section .text..__x86.return_thunk
187228
/*
188229
* Some generic notes on the untraining sequences:
189230
*
@@ -263,64 +304,21 @@ SYM_CODE_END(retbleed_return_thunk)
263304
int3
264305
SYM_FUNC_END(retbleed_untrain_ret)
265306

266-
/*
267-
* SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
268-
* above. On kernel entry, srso_untrain_ret() is executed which is a
269-
*
270-
* movabs $0xccccc30824648d48,%rax
271-
*
272-
* and when the return thunk executes the inner label srso_safe_ret()
273-
* later, it is a stack manipulation and a RET which is mispredicted and
274-
* thus a "safe" one to use.
275-
*/
276-
.align 64
277-
.skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc
278-
SYM_START(srso_untrain_ret, SYM_L_LOCAL, SYM_A_NONE)
279-
ANNOTATE_NOENDBR
280-
.byte 0x48, 0xb8
307+
#define JMP_RETBLEED_UNTRAIN_RET "jmp retbleed_untrain_ret"
308+
#else /* !CONFIG_CPU_UNRET_ENTRY */
309+
#define JMP_RETBLEED_UNTRAIN_RET "ud2"
310+
#endif /* CONFIG_CPU_UNRET_ENTRY */
281311

282-
/*
283-
* This forces the function return instruction to speculate into a trap
284-
* (UD2 in srso_return_thunk() below). This RET will then mispredict
285-
* and execution will continue at the return site read from the top of
286-
* the stack.
287-
*/
288-
SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
289-
lea 8(%_ASM_SP), %_ASM_SP
290-
ret
291-
int3
292-
int3
293-
/* end of movabs */
294-
lfence
295-
call srso_safe_ret
296-
ud2
297-
SYM_CODE_END(srso_safe_ret)
298-
SYM_FUNC_END(srso_untrain_ret)
299-
300-
SYM_CODE_START(srso_return_thunk)
301-
UNWIND_HINT_FUNC
302-
ANNOTATE_NOENDBR
303-
call srso_safe_ret
304-
ud2
305-
SYM_CODE_END(srso_return_thunk)
312+
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)
306313

307314
SYM_FUNC_START(entry_untrain_ret)
308-
ALTERNATIVE_2 "jmp retbleed_untrain_ret", \
309-
"jmp srso_untrain_ret", X86_FEATURE_SRSO, \
310-
"jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
315+
ALTERNATIVE_2 JMP_RETBLEED_UNTRAIN_RET, \
316+
JMP_SRSO_UNTRAIN_RET, X86_FEATURE_SRSO, \
317+
JMP_SRSO_ALIAS_UNTRAIN_RET, X86_FEATURE_SRSO_ALIAS
311318
SYM_FUNC_END(entry_untrain_ret)
312319
__EXPORT_THUNK(entry_untrain_ret)
313320

314-
SYM_CODE_START(__x86_return_thunk)
315-
UNWIND_HINT_FUNC
316-
ANNOTATE_NOENDBR
317-
ANNOTATE_UNRET_SAFE
318-
ret
319-
int3
320-
SYM_CODE_END(__x86_return_thunk)
321-
EXPORT_SYMBOL(__x86_return_thunk)
322-
323-
#endif /* CONFIG_RETHUNK */
321+
#endif /* CONFIG_CPU_UNRET_ENTRY || CONFIG_CPU_SRSO */
324322

325323
#ifdef CONFIG_CALL_DEPTH_TRACKING
326324

@@ -355,3 +353,22 @@ SYM_FUNC_START(__x86_return_skl)
355353
SYM_FUNC_END(__x86_return_skl)
356354

357355
#endif /* CONFIG_CALL_DEPTH_TRACKING */
356+
357+
/*
358+
* This function name is magical and is used by -mfunction-return=thunk-extern
359+
* for the compiler to generate JMPs to it.
360+
*
361+
* This code is only used during kernel boot or module init. All
362+
* 'JMP __x86_return_thunk' sites are changed to something else by
363+
* apply_returns().
364+
*/
365+
SYM_CODE_START(__x86_return_thunk)
366+
UNWIND_HINT_FUNC
367+
ANNOTATE_NOENDBR
368+
ANNOTATE_UNRET_SAFE
369+
ret
370+
int3
371+
SYM_CODE_END(__x86_return_thunk)
372+
EXPORT_SYMBOL(__x86_return_thunk)
373+
374+
#endif /* CONFIG_RETHUNK */

0 commit comments

Comments
 (0)