/* SPDX-License-Identifier: GPL-2.0 */

#include <linux/stringify.h>
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/unwind_hints.h>
#include <asm/percpu.h>
#include <asm/frame.h>
#include <asm/nops.h>

	.section .text..__x86.indirect_thunk


.macro POLINE reg
	ANNOTATE_INTRA_FUNCTION_CALL
	call    .Ldo_rop_\@
	int3
.Ldo_rop_\@:
	mov     %\reg, (%_ASM_SP)
	UNWIND_HINT_FUNC
.endm
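/*
 * The POLINE gadget above replaces an indirect JMP: the CALL pushes the
 * address of .Ldo_rop_\@, the MOV overwrites that return slot with the
 * branch target held in \reg, and a subsequent RET then transfers
 * control to \reg without executing an indirect branch.  Speculation of
 * that RET resumes right after the CALL, where the INT3 traps it.
 */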

.macro RETPOLINE reg
	POLINE \reg
	RET
.endm

.macro THUNK reg

	.align RETPOLINE_THUNK_SIZE
SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)
	UNWIND_HINT_UNDEFINED
	ANNOTATE_NOENDBR

	ALTERNATIVE_2 __stringify(RETPOLINE \reg), \
		      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg; int3), X86_FEATURE_RETPOLINE_LFENCE, \
		      __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), ALT_NOT(X86_FEATURE_RETPOLINE)

.endm
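/*
 * Each thunk body is one of three alternatives, selected at patch time:
 *
 *  - default:                      the full RETPOLINE sequence above
 *  - X86_FEATURE_RETPOLINE_LFENCE: an LFENCE-serialized indirect JMP
 *  - !X86_FEATURE_RETPOLINE:       a plain indirect JMP
 */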

/*
 * Despite being an assembler file we can't just use .irp here
 * because __KSYM_DEPS__ only uses the C preprocessor and would
 * only see one instance of "__x86_indirect_thunk_\reg" rather
 * than one per register with the correct names. So we do it
 * the simple and nasty way...
 *
 * Worse, you can only have a single EXPORT_SYMBOL per line,
 * and CPP can't insert newlines, so we have to repeat everything
 * at least twice.
 */

#define __EXPORT_THUNK(sym)	_ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym)
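/*
 * For example, __EXPORT_THUNK(__x86_indirect_thunk_rax) expands to
 * _ASM_NOKPROBE(__x86_indirect_thunk_rax); EXPORT_SYMBOL(__x86_indirect_thunk_rax),
 * i.e. each thunk is both exported to modules and blacklisted for kprobes.
 */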

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_START(__x86_indirect_thunk_array)

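/*
 * GEN-for-each-reg.h expands GEN() once for each register it lists, so
 * this emits one RETPOLINE_THUNK_SIZE aligned thunk per listed register.
 */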
#define GEN(reg) THUNK reg
#include <asm/GEN-for-each-reg.h>
#undef GEN

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_END(__x86_indirect_thunk_array)

#define GEN(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg)
#include <asm/GEN-for-each-reg.h>
#undef GEN

#ifdef CONFIG_CALL_DEPTH_TRACKING
.macro CALL_THUNK reg
	.align RETPOLINE_THUNK_SIZE

SYM_INNER_LABEL(__x86_indirect_call_thunk_\reg, SYM_L_GLOBAL)
	UNWIND_HINT_UNDEFINED
	ANNOTATE_NOENDBR

	CALL_DEPTH_ACCOUNT
	POLINE \reg
	ANNOTATE_UNRET_SAFE
	ret
	int3
.endm
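/*
 * Used when call depth tracking is enabled: CALL_DEPTH_ACCOUNT charges
 * the call against the per-CPU depth counter before the POLINE gadget
 * redirects to the target; the trailing INT3 stops straight-line
 * speculation past the RET.
 */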

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_START(__x86_indirect_call_thunk_array)

#define GEN(reg) CALL_THUNK reg
#include <asm/GEN-for-each-reg.h>
#undef GEN

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_END(__x86_indirect_call_thunk_array)

#define GEN(reg) __EXPORT_THUNK(__x86_indirect_call_thunk_ ## reg)
#include <asm/GEN-for-each-reg.h>
#undef GEN

.macro JUMP_THUNK reg
	.align RETPOLINE_THUNK_SIZE

SYM_INNER_LABEL(__x86_indirect_jump_thunk_\reg, SYM_L_GLOBAL)
	UNWIND_HINT_UNDEFINED
	ANNOTATE_NOENDBR
	POLINE \reg
	ANNOTATE_UNRET_SAFE
	ret
	int3
.endm
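/*
 * JUMP_THUNK is the tail-call variant: the same as CALL_THUNK but
 * without CALL_DEPTH_ACCOUNT, since a JMP replaces the current frame
 * instead of adding one to the tracked call depth.
 */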

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_START(__x86_indirect_jump_thunk_array)

#define GEN(reg) JUMP_THUNK reg
#include <asm/GEN-for-each-reg.h>
#undef GEN

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_END(__x86_indirect_jump_thunk_array)

#define GEN(reg) __EXPORT_THUNK(__x86_indirect_jump_thunk_ ## reg)
#include <asm/GEN-for-each-reg.h>
#undef GEN
#endif /* CONFIG_CALL_DEPTH_TRACKING */

#ifdef CONFIG_RETHUNK

	.section .text..__x86.return_thunk

#ifdef CONFIG_CPU_SRSO

/*
 * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at
 * special addresses:
 *
 * - srso_alias_untrain_ret() is 2M aligned
 * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14
 * and 20 in its virtual address are set (while those bits in the
 * srso_alias_untrain_ret() function are cleared).
 *
 * This guarantees that those two addresses will alias in the branch
 * target buffer of Zen3/4 generations, causing any potentially
 * poisoned entries at that BTB slot to be evicted.
 *
 * As a result, srso_alias_safe_ret() becomes a safe return.
 */
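/*
 * Note: the 2M alignment and the address-bit placement described above
 * are established at link time (the sections below are laid out by the
 * kernel linker script), not by anything in this file.
 */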
	.pushsection .text..__x86.rethunk_untrain
SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
	UNWIND_HINT_FUNC
	ANNOTATE_NOENDBR
	ASM_NOP2
	lfence
	jmp srso_alias_return_thunk
SYM_FUNC_END(srso_alias_untrain_ret)
__EXPORT_THUNK(srso_alias_untrain_ret)
	.popsection

	.pushsection .text..__x86.rethunk_safe
SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
	lea 8(%_ASM_SP), %_ASM_SP
	UNWIND_HINT_FUNC
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_FUNC_END(srso_alias_safe_ret)

SYM_CODE_START_NOALIGN(srso_alias_return_thunk)
	UNWIND_HINT_FUNC
	ANNOTATE_NOENDBR
	call srso_alias_safe_ret
	ud2
SYM_CODE_END(srso_alias_return_thunk)
	.popsection

/*
 * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
 * below. On kernel entry, srso_untrain_ret() is executed which is a
 *
 * movabs $0xccccc30824648d48,%rax
 *
 * and when the return thunk executes the inner label srso_safe_ret()
 * later, it is a stack manipulation and a RET which is mispredicted and
 * thus a "safe" one to use.
 */
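/*
 * For illustration: stored little-endian, the immediate
 * 0xccccc30824648d48 is the byte sequence 48 8d 64 24 08 c3 cc cc,
 * which, decoded starting at srso_safe_ret below, reads as:
 *
 *	lea 8(%rsp), %rsp
 *	ret
 *	int3; int3
 */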
	.align 64
	.skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc
SYM_START(srso_untrain_ret, SYM_L_LOCAL, SYM_A_NONE)
	ANNOTATE_NOENDBR
	.byte 0x48, 0xb8

/*
 * This forces the function return instruction to speculate into a trap
 * (UD2 in srso_return_thunk() below).  This RET will then mispredict
 * and execution will continue at the return site read from the top of
 * the stack.
 */
SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
	lea 8(%_ASM_SP), %_ASM_SP
	ret
	int3
	int3
	/* end of movabs */
	lfence
	call srso_safe_ret
	ud2
SYM_CODE_END(srso_safe_ret)
SYM_FUNC_END(srso_untrain_ret)
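/*
 * In srso_return_thunk below, the CALL pushes a return address which
 * the LEA in srso_safe_ret immediately discards, so the architectural
 * RET returns to the original call site; the UD2 after the CALL only
 * catches the mispredicted speculation described above.
 */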

SYM_CODE_START(srso_return_thunk)
	UNWIND_HINT_FUNC
	ANNOTATE_NOENDBR
	call srso_safe_ret
	ud2
SYM_CODE_END(srso_return_thunk)

#define JMP_SRSO_UNTRAIN_RET "jmp srso_untrain_ret"
#else /* !CONFIG_CPU_SRSO */
#define JMP_SRSO_UNTRAIN_RET "ud2"
/* Dummy for the alternative in CALL_UNTRAIN_RET. */
SYM_CODE_START(srso_alias_untrain_ret)
	ANNOTATE_UNRET_SAFE
	ANNOTATE_NOENDBR
	ret
	int3
SYM_FUNC_END(srso_alias_untrain_ret)
__EXPORT_THUNK(srso_alias_untrain_ret)
#endif /* CONFIG_CPU_SRSO */

#ifdef CONFIG_CPU_UNRET_ENTRY

/*
 * Some generic notes on the untraining sequences:
 *
 * They are interchangeable when it comes to flushing potentially wrong
 * RET predictions from the BTB.
 *
 * The SRSO Zen1/2 (MOVABS) untraining sequence is longer than the
 * Retbleed sequence because the return sequence done there
 * (srso_safe_ret()) is longer and the return sequence must fully nest
 * (end before) the untraining sequence. Therefore, the untraining
 * sequence must fully overlap the return sequence.
 *
 * Regarding alignment - the instructions which need to be untrained
 * must all start at a cacheline boundary for Zen1/2 generations. That
 * is, instruction sequences starting at srso_safe_ret() and
 * the respective instruction sequences at retbleed_return_thunk()
 * must start at a cacheline boundary.
 */

/*
 * Safety details here pertain to the AMD Zen{1,2} microarchitecture:
 * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for
 *    alignment within the BTB.
 * 2) The instruction at retbleed_untrain_ret must contain, and not
 *    end with, the 0xc3 byte of the RET.
 * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread
 *    from re-poisoning the BTB prediction.
 */
	.align 64
	.skip 64 - (retbleed_return_thunk - retbleed_untrain_ret), 0xcc
SYM_START(retbleed_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
	ANNOTATE_NOENDBR
	/*
	 * As executed from retbleed_untrain_ret, this is:
	 *
	 *   TEST $0xcc, %bl
	 *   LFENCE
	 *   JMP retbleed_return_thunk
	 *
	 * Executing the TEST instruction has a side effect of evicting any BTB
	 * prediction (potentially attacker controlled) attached to the RET, as
	 * retbleed_return_thunk + 1 isn't an instruction boundary at the moment.
	 */
	.byte	0xf6

	/*
	 * As executed from retbleed_return_thunk, this is a plain RET.
	 *
	 * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8.
	 *
	 * We subsequently jump backwards and architecturally execute the RET.
	 * This creates a correct BTB prediction (type=ret), but in the
	 * meantime we suffer Straight Line Speculation (because the type was
	 * not a branch) which is halted by the INT3.
	 *
	 * With SMT enabled and STIBP active, a sibling thread cannot poison
	 * RET's prediction to a type of its choice, but can evict the
	 * prediction due to competitive sharing. If the prediction is
	 * evicted, retbleed_return_thunk will suffer Straight Line Speculation
	 * which will be contained safely by the INT3.
	 */
SYM_INNER_LABEL(retbleed_return_thunk, SYM_L_GLOBAL)
	ret
	int3
SYM_CODE_END(retbleed_return_thunk)

	/*
	 * Ensure the TEST decoding / BTB invalidation is complete.
	 */
	lfence

	/*
	 * Jump back and execute the RET in the middle of the TEST instruction.
	 * INT3 is for SLS protection.
	 */
	jmp retbleed_return_thunk
	int3
SYM_FUNC_END(retbleed_untrain_ret)
__EXPORT_THUNK(retbleed_untrain_ret)

#define JMP_RETBLEED_UNTRAIN_RET "jmp retbleed_untrain_ret"
#else /* !CONFIG_CPU_UNRET_ENTRY */
#define JMP_RETBLEED_UNTRAIN_RET "ud2"
#endif /* CONFIG_CPU_UNRET_ENTRY */

#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)

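/*
 * entry_untrain_ret is the common target used by CALL_UNTRAIN_RET on
 * kernel entry: the default body is the Retbleed untraining jump, and
 * on parts with X86_FEATURE_SRSO the alternative patches in the SRSO
 * untraining jump instead.  The JMP_* macros above degrade to UD2 when
 * the respective mitigation is not built in.
 */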
SYM_FUNC_START(entry_untrain_ret)
	ALTERNATIVE JMP_RETBLEED_UNTRAIN_RET, JMP_SRSO_UNTRAIN_RET, X86_FEATURE_SRSO
SYM_FUNC_END(entry_untrain_ret)
__EXPORT_THUNK(entry_untrain_ret)

#endif /* CONFIG_CPU_UNRET_ENTRY || CONFIG_CPU_SRSO */

#ifdef CONFIG_CALL_DEPTH_TRACKING

	.align 64
SYM_FUNC_START(__x86_return_skl)
	ANNOTATE_NOENDBR
	/*
	 * Keep the hotpath in a 16-byte I-fetch for the non-debug
	 * case.
	 */
	CALL_THUNKS_DEBUG_INC_RETS
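	/*
	 * Account one return by shifting the per-CPU call depth counter.
	 * A zero result means the tracked call depth is used up and the
	 * RSB must be refilled below; otherwise just return.
	 */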
	shlq	$5, PER_CPU_VAR(pcpu_hot + X86_call_depth)
	jz	1f
	ANNOTATE_UNRET_SAFE
	ret
	int3
1:
	CALL_THUNKS_DEBUG_INC_STUFFS
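	/*
	 * Stuff the RSB: each of the 16 intra-function calls pushes a
	 * return address onto both the stack and the hardware return
	 * stack buffer; the ADD below then drops those 16*8 bytes from
	 * the stack again while the benign RSB entries stay in place.
	 */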
	.rept	16
	ANNOTATE_INTRA_FUNCTION_CALL
	call	2f
	int3
2:
	.endr
	add	$(8*16), %rsp

	CREDIT_CALL_DEPTH

	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_FUNC_END(__x86_return_skl)

#endif /* CONFIG_CALL_DEPTH_TRACKING */

/*
 * This function name is magical and is used by -mfunction-return=thunk-extern
 * for the compiler to generate JMPs to it.
 *
 * This code is only used during kernel boot or module init.  All
 * 'JMP __x86_return_thunk' sites are changed to something else by
 * apply_returns().
 */
SYM_CODE_START(__x86_return_thunk)
	UNWIND_HINT_FUNC
	ANNOTATE_NOENDBR
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_CODE_END(__x86_return_thunk)
EXPORT_SYMBOL(__x86_return_thunk)

#endif /* CONFIG_RETHUNK */