@@ -126,12 +126,13 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array)
 #include <asm/GEN-for-each-reg.h>
 #undef GEN
 #endif
-/*
- * This function name is magical and is used by -mfunction-return=thunk-extern
- * for the compiler to generate JMPs to it.
- */
+
 #ifdef CONFIG_RETHUNK
 
+	.section .text..__x86.return_thunk
+
+#ifdef CONFIG_CPU_SRSO
+
 /*
  * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at
  * special addresses:
@@ -147,27 +148,17 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array)
  *
  * As a result, srso_alias_safe_ret() becomes a safe return.
  */
-#ifdef CONFIG_CPU_SRSO
-	.section .text..__x86.rethunk_untrain
-
+	.pushsection .text..__x86.rethunk_untrain
 SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	UNWIND_HINT_FUNC
 	ANNOTATE_NOENDBR
 	ASM_NOP2
 	lfence
 	jmp srso_alias_return_thunk
 SYM_FUNC_END(srso_alias_untrain_ret)
+	.popsection
 
-	.section .text..__x86.rethunk_safe
-#else
-/* dummy definition for alternatives */
-SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
-	ANNOTATE_UNRET_SAFE
-	ret
-	int3
-SYM_FUNC_END(srso_alias_untrain_ret)
-#endif
-
+	.pushsection .text..__x86.rethunk_safe
 SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	lea 8(%_ASM_SP), %_ASM_SP
 	UNWIND_HINT_FUNC
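
These two hunks convert the plain .section switches into paired .pushsection/.popsection, so assembly resumes in .text..__x86.return_thunk afterwards, and they drop the old #else dummy thunk now that the JMP_* macros further down cover the disabled case. The comment spanning the hunks describes the placement trick that makes srso_alias_safe_ret() work. A minimal standalone C sketch of that placement, assuming the upstream scheme in which srso_alias_untrain_ret() is 2M aligned and srso_alias_safe_ret() differs from it only in address bits 2, 8, 14 and 20 (the concrete address below is made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Bits in which the two entry points differ (upstream's stated scheme). */
    #define ALIAS_BITS ((1UL << 2) | (1UL << 8) | (1UL << 14) | (1UL << 20))

    int main(void)
    {
            /* Hypothetical link address; the real one is fixed in vmlinux.lds.S. */
            uint64_t untrain = 0xffffffff82000000ULL;       /* 2M aligned */
            uint64_t safe    = untrain | ALIAS_BITS;

            /* Both land in the same 2M page: ALIAS_BITS stays below bit 21. */
            printf("untrain=%#llx safe=%#llx same_2M_page=%d\n",
                   (unsigned long long)untrain, (unsigned long long)safe,
                   (int)((untrain >> 21) == (safe >> 21)));     /* prints 1 */
            return 0;
    }

Two addresses that collide this way alias in the branch predictor, which is what lets the safe return untrain the prediction installed for the untrain entry point.
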
@@ -182,8 +173,58 @@ SYM_CODE_START_NOALIGN(srso_alias_return_thunk)
 	call srso_alias_safe_ret
 	ud2
 SYM_CODE_END(srso_alias_return_thunk)
+	.popsection
+
+/*
+ * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
+ * above. On kernel entry, srso_untrain_ret() is executed which is a
+ *
+ * movabs $0xccccc30824648d48,%rax
+ *
+ * and when the return thunk executes the inner label srso_safe_ret()
+ * later, it is a stack manipulation and a RET which is mispredicted and
+ * thus a "safe" one to use.
+ */
+	.align 64
+	.skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc
+SYM_START(srso_untrain_ret, SYM_L_LOCAL, SYM_A_NONE)
+	ANNOTATE_NOENDBR
+	.byte 0x48, 0xb8
+
+/*
+ * This forces the function return instruction to speculate into a trap
+ * (UD2 in srso_return_thunk() below). This RET will then mispredict
+ * and execution will continue at the return site read from the top of
+ * the stack.
+ */
+SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
+	lea 8(%_ASM_SP), %_ASM_SP
+	ret
+	int3
+	int3
+	/* end of movabs */
+	lfence
+	call srso_safe_ret
+	ud2
+SYM_CODE_END(srso_safe_ret)
+SYM_FUNC_END(srso_untrain_ret)
+
+SYM_CODE_START(srso_return_thunk)
+	UNWIND_HINT_FUNC
+	ANNOTATE_NOENDBR
+	call srso_safe_ret
+	ud2
+SYM_CODE_END(srso_return_thunk)
+
+#define JMP_SRSO_UNTRAIN_RET "jmp srso_untrain_ret"
+#define JMP_SRSO_ALIAS_UNTRAIN_RET "jmp srso_alias_untrain_ret"
+#else /* !CONFIG_CPU_SRSO */
+#define JMP_SRSO_UNTRAIN_RET "ud2"
+#define JMP_SRSO_ALIAS_UNTRAIN_RET "ud2"
+#endif /* CONFIG_CPU_SRSO */
+
+#ifdef CONFIG_CPU_UNRET_ENTRY
 
-	.section .text..__x86.return_thunk
 /*
  * Some generic notes on the untraining sequences:
  *
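
The comment moved here ("movabs $0xccccc30824648d48,%rax") encodes the whole trick: the two bytes emitted by ".byte 0x48, 0xb8" swallow the next eight bytes, i.e. the body of srso_safe_ret(), as the immediate of a movabs when execution enters at srso_untrain_ret(), two bytes before the inner label. A small C check that reassembles that immediate from the instruction encodings (the byte values are standard x86; everything else is illustration):

    #include <stdio.h>

    /*
     * The ten bytes starting at srso_untrain_ret(): entered at the top they
     * decode as one movabs; entered two bytes in, at srso_safe_ret(), they
     * decode as the stack fixup, the RET and the INT3 padding.
     */
    static const unsigned char seq[] = {
            0x48, 0xb8,                     /* movabs opcode (REX.W + B8) */
            0x48, 0x8d, 0x64, 0x24, 0x08,   /* lea 8(%rsp), %rsp */
            0xc3,                           /* ret */
            0xcc, 0xcc,                     /* int3; int3 */
    };

    int main(void)
    {
            unsigned long long imm = 0;

            /* The eight bytes after the opcode, little-endian, form the immediate. */
            for (int i = 9; i >= 2; i--)
                    imm = (imm << 8) | seq[i];

            printf("movabs $%#llx, %%rax\n", imm);  /* 0xccccc30824648d48 */
            return 0;
    }
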
@@ -263,64 +304,21 @@ SYM_CODE_END(retbleed_return_thunk)
 	int3
 SYM_FUNC_END(retbleed_untrain_ret)
 
-/*
- * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
- * above. On kernel entry, srso_untrain_ret() is executed which is a
- *
- * movabs $0xccccc30824648d48,%rax
- *
- * and when the return thunk executes the inner label srso_safe_ret()
- * later, it is a stack manipulation and a RET which is mispredicted and
- * thus a "safe" one to use.
- */
-	.align 64
-	.skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc
-SYM_START(srso_untrain_ret, SYM_L_LOCAL, SYM_A_NONE)
-	ANNOTATE_NOENDBR
-	.byte 0x48, 0xb8
+#define JMP_RETBLEED_UNTRAIN_RET "jmp retbleed_untrain_ret"
+#else /* !CONFIG_CPU_UNRET_ENTRY */
+#define JMP_RETBLEED_UNTRAIN_RET "ud2"
+#endif /* CONFIG_CPU_UNRET_ENTRY */
 
-/*
- * This forces the function return instruction to speculate into a trap
- * (UD2 in srso_return_thunk() below). This RET will then mispredict
- * and execution will continue at the return site read from the top of
- * the stack.
- */
-SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
-	lea 8(%_ASM_SP), %_ASM_SP
-	ret
-	int3
-	int3
-	/* end of movabs */
-	lfence
-	call srso_safe_ret
-	ud2
-SYM_CODE_END(srso_safe_ret)
-SYM_FUNC_END(srso_untrain_ret)
-
-SYM_CODE_START(srso_return_thunk)
-	UNWIND_HINT_FUNC
-	ANNOTATE_NOENDBR
-	call srso_safe_ret
-	ud2
-SYM_CODE_END(srso_return_thunk)
+#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)
 
 SYM_FUNC_START(entry_untrain_ret)
-	ALTERNATIVE_2 "jmp retbleed_untrain_ret",			\
-		      "jmp srso_untrain_ret", X86_FEATURE_SRSO,		\
-		      "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
+	ALTERNATIVE_2 JMP_RETBLEED_UNTRAIN_RET,				\
+		      JMP_SRSO_UNTRAIN_RET, X86_FEATURE_SRSO,		\
+		      JMP_SRSO_ALIAS_UNTRAIN_RET, X86_FEATURE_SRSO_ALIAS
 SYM_FUNC_END(entry_untrain_ret)
 __EXPORT_THUNK(entry_untrain_ret)
 
-SYM_CODE_START(__x86_return_thunk)
-	UNWIND_HINT_FUNC
-	ANNOTATE_NOENDBR
-	ANNOTATE_UNRET_SAFE
-	ret
-	int3
-SYM_CODE_END(__x86_return_thunk)
-EXPORT_SYMBOL(__x86_return_thunk)
-
-#endif /* CONFIG_RETHUNK */
+#endif /* CONFIG_CPU_UNRET_ENTRY || CONFIG_CPU_SRSO */
 
 #ifdef CONFIG_CALL_DEPTH_TRACKING
 
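
With the new JMP_* macros, entry_untrain_ret() assembles under any combination of CONFIG_CPU_UNRET_ENTRY and CONFIG_CPU_SRSO, degrading compiled-out targets to "ud2". A rough C model of the boot-time ALTERNATIVE_2 selection, where later alternatives take precedence (function bodies and flag names are stand-ins, not kernel code):

    #include <stdio.h>

    /* Stand-in feature flags; the real ones are X86_FEATURE_* bits. */
    enum {
            FEATURE_SRSO            = 1 << 0,
            FEATURE_SRSO_ALIAS      = 1 << 1,
    };

    static void retbleed_untrain(void)      { puts("retbleed_untrain_ret"); }
    static void srso_untrain(void)          { puts("srso_untrain_ret"); }
    static void srso_alias_untrain(void)    { puts("srso_alias_untrain_ret"); }

    /* Later alternatives win, mirroring ALTERNATIVE_2's precedence. */
    static void entry_untrain(unsigned int features)
    {
            if (features & FEATURE_SRSO_ALIAS)
                    srso_alias_untrain();
            else if (features & FEATURE_SRSO)
                    srso_untrain();
            else
                    retbleed_untrain();
    }

    int main(void)
    {
            entry_untrain(0);                       /* default: retbleed path */
            entry_untrain(FEATURE_SRSO);            /* Zen1/2 SRSO */
            entry_untrain(FEATURE_SRSO_ALIAS);      /* Zen3/4 SRSO */
            return 0;
    }

The real mechanism patches the instruction bytes once at boot rather than branching at runtime; the model only mirrors which target ends up selected.
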
@@ -355,3 +353,22 @@ SYM_FUNC_START(__x86_return_skl)
 SYM_FUNC_END(__x86_return_skl)
 
 #endif /* CONFIG_CALL_DEPTH_TRACKING */
+
+/*
+ * This function name is magical and is used by -mfunction-return=thunk-extern
+ * for the compiler to generate JMPs to it.
+ *
+ * This code is only used during kernel boot or module init. All
+ * 'JMP __x86_return_thunk' sites are changed to something else by
+ * apply_returns().
+ */
+SYM_CODE_START(__x86_return_thunk)
+	UNWIND_HINT_FUNC
+	ANNOTATE_NOENDBR
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
+SYM_CODE_END(__x86_return_thunk)
+EXPORT_SYMBOL(__x86_return_thunk)
+
+#endif /* CONFIG_RETHUNK */
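
Moving __x86_return_thunk out of the CONFIG_CPU_* conditionals is safe because, as the new comment says, it only runs until apply_returns() has rewritten every compiler-generated 'jmp __x86_return_thunk' site at boot. A loose C sketch of that one patching decision (the real logic lives in arch/x86/kernel/alternative.c; names and byte handling here are simplified assumptions):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define JMP32   0xe9    /* jmp rel32 */
    #define RET     0xc3
    #define INT3    0xcc

    /*
     * Hypothetical model: either retarget the 5-byte JMP at the selected
     * return thunk, or, when no thunk is needed, replace it with a bare
     * RET padded by INT3.
     */
    static void patch_return_site(uint8_t insn[5], int need_thunk, int32_t rel32)
    {
            if (need_thunk) {
                    insn[0] = JMP32;
                    memcpy(&insn[1], &rel32, sizeof(rel32));
            } else {
                    insn[0] = RET;
                    memset(&insn[1], INT3, 4);
            }
    }

    int main(void)
    {
            uint8_t site[5] = { JMP32, 0, 0, 0, 0 };  /* 'jmp __x86_return_thunk' */

            patch_return_site(site, 0, 0);
            printf("%02x %02x %02x %02x %02x\n",      /* c3 cc cc cc cc */
                   site[0], site[1], site[2], site[3], site[4]);
            return 0;
    }
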