/* SPDX-License-Identifier: GPL-2.0 */

#include <linux/stringify.h>
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/unwind_hints.h>
#include <asm/percpu.h>
#include <asm/frame.h>
#include <asm/nops.h>

	.section .text..__x86.indirect_thunk


.macro POLINE reg
	ANNOTATE_INTRA_FUNCTION_CALL
	call    .Ldo_rop_\@
	int3
.Ldo_rop_\@:
	mov     %\reg, (%_ASM_SP)
	UNWIND_HINT_FUNC
.endm

.macro RETPOLINE reg
	POLINE \reg
	RET
.endm
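/*
 * For illustration: instantiated for e.g. rax on 64-bit (and reading RET as
 * a plain return), RETPOLINE rax is the classic retpoline sequence:
 *
 *	call 1f			# push the address of the MOV below
 *	int3			# speculation via the RSB lands here and stops
 * 1:	mov %rax, (%rsp)	# overwrite that return address with the target
 *	ret			# architecturally "return" to *%rax
 */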

.macro THUNK reg

	.align RETPOLINE_THUNK_SIZE
SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)
	UNWIND_HINT_UNDEFINED
	ANNOTATE_NOENDBR

	ALTERNATIVE_2 __stringify(RETPOLINE \reg), \
		      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg; int3), X86_FEATURE_RETPOLINE_LFENCE, \
		      __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), ALT_NOT(X86_FEATURE_RETPOLINE)

.endm
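/*
 * A sketch of what the ALTERNATIVE_2 above selects at patch time:
 *
 *   - default:				the RETPOLINE sequence above
 *   - X86_FEATURE_RETPOLINE_LFENCE:	lfence; jmp *%\reg; int3
 *   - !X86_FEATURE_RETPOLINE:		jmp *%\reg	(plain indirect jump)
 */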

/*
 * Despite being an assembler file we can't just use .irp here
 * because __KSYM_DEPS__ only uses the C preprocessor and would
 * only see one instance of "__x86_indirect_thunk_\reg" rather
 * than one per register with the correct names. So we do it
 * the simple and nasty way...
 *
 * Worse, you can only have a single EXPORT_SYMBOL per line,
 * and CPP can't insert newlines, so we have to repeat everything
 * at least twice.
 */

#define __EXPORT_THUNK(sym)	_ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym)

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_START(__x86_indirect_thunk_array)

#define GEN(reg) THUNK reg
#include <asm/GEN-for-each-reg.h>
#undef GEN

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_END(__x86_indirect_thunk_array)

#define GEN(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg)
#include <asm/GEN-for-each-reg.h>
#undef GEN
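/*
 * For illustration, for a single register (rax) the two GEN passes above
 * expand roughly to:
 *
 *	THUNK rax	-> the __x86_indirect_thunk_rax thunk body
 *	__EXPORT_THUNK(__x86_indirect_thunk_rax)
 *		-> _ASM_NOKPROBE(__x86_indirect_thunk_rax);
 *		   EXPORT_SYMBOL(__x86_indirect_thunk_rax)
 *
 * with one such pair for every register listed in asm/GEN-for-each-reg.h.
 */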

#ifdef CONFIG_CALL_DEPTH_TRACKING
.macro CALL_THUNK reg
	.align RETPOLINE_THUNK_SIZE

SYM_INNER_LABEL(__x86_indirect_call_thunk_\reg, SYM_L_GLOBAL)
	UNWIND_HINT_UNDEFINED
	ANNOTATE_NOENDBR

	CALL_DEPTH_ACCOUNT
	POLINE \reg
	ANNOTATE_UNRET_SAFE
	ret
	int3
.endm

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_START(__x86_indirect_call_thunk_array)

#define GEN(reg) CALL_THUNK reg
#include <asm/GEN-for-each-reg.h>
#undef GEN

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_END(__x86_indirect_call_thunk_array)

#define GEN(reg) __EXPORT_THUNK(__x86_indirect_call_thunk_ ## reg)
#include <asm/GEN-for-each-reg.h>
#undef GEN

.macro JUMP_THUNK reg
	.align RETPOLINE_THUNK_SIZE

SYM_INNER_LABEL(__x86_indirect_jump_thunk_\reg, SYM_L_GLOBAL)
	UNWIND_HINT_UNDEFINED
	ANNOTATE_NOENDBR
	POLINE \reg
	ANNOTATE_UNRET_SAFE
	ret
	int3
.endm

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_START(__x86_indirect_jump_thunk_array)

#define GEN(reg) JUMP_THUNK reg
#include <asm/GEN-for-each-reg.h>
#undef GEN

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_END(__x86_indirect_jump_thunk_array)

#define GEN(reg) __EXPORT_THUNK(__x86_indirect_jump_thunk_ ## reg)
#include <asm/GEN-for-each-reg.h>
#undef GEN
#endif
/*
 * This function name is magical and is used by -mfunction-return=thunk-extern
 * for the compiler to generate JMPs to it.
 */
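/*
 * I.e., with -mfunction-return=thunk-extern the compiler emits a
 * "jmp __x86_return_thunk" instead of each RET it would normally generate;
 * those sites can then be patched at boot depending on the selected
 * return-thunk mitigation.
 */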
#ifdef CONFIG_RETHUNK

/*
 * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at
 * special addresses:
 *
 * - srso_alias_untrain_ret() is 2M aligned
 * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14
 * and 20 in its virtual address are set (while those bits in the
 * srso_alias_untrain_ret() function are cleared).
 *
 * This guarantees that those two addresses will alias in the branch
 * target buffer of Zen3/4 generations, causing any potentially
 * poisoned entries at that BTB slot to be evicted.
 *
 * As a result, srso_alias_safe_ret() becomes a safe return.
 */
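/*
 * For illustration (mask derived from the bit list above, not taken from
 * the linker script): bits 2, 8, 14 and 20 form the mask 0x104104, and
 * since srso_alias_untrain_ret() is 2M aligned,
 *
 *	srso_alias_safe_ret == srso_alias_untrain_ret | 0x104104
 *
 * i.e. the two entry points differ in exactly those four address bits
 * within one 2M page, which is what makes them collide in the Zen3/4 BTB.
 */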
#ifdef CONFIG_CPU_SRSO
	.section .text..__x86.rethunk_untrain

SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
	UNWIND_HINT_FUNC
	ANNOTATE_NOENDBR
	ASM_NOP2
	lfence
	jmp srso_alias_return_thunk
SYM_FUNC_END(srso_alias_untrain_ret)
__EXPORT_THUNK(srso_alias_untrain_ret)

	.section .text..__x86.rethunk_safe
#else
/* dummy definition for alternatives */
SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_FUNC_END(srso_alias_untrain_ret)
#endif

SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
	lea 8(%_ASM_SP), %_ASM_SP
	UNWIND_HINT_FUNC
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_FUNC_END(srso_alias_safe_ret)

	.section .text..__x86.return_thunk

SYM_CODE_START(srso_alias_return_thunk)
	UNWIND_HINT_FUNC
	ANNOTATE_NOENDBR
	call srso_alias_safe_ret
	ud2
SYM_CODE_END(srso_alias_return_thunk)

/*
 * Some generic notes on the untraining sequences:
 *
 * They are interchangeable when it comes to flushing potentially wrong
 * RET predictions from the BTB.
 *
 * The SRSO Zen1/2 (MOVABS) untraining sequence is longer than the
 * Retbleed sequence because the return sequence done there
 * (srso_safe_ret()) is longer and the return sequence must fully nest
 * (end before) the untraining sequence. Therefore, the untraining
 * sequence must fully overlap the return sequence.
 *
 * Regarding alignment - the instructions which need to be untrained
 * must all start at a cacheline boundary for Zen1/2 generations. That
 * is, instruction sequences starting at srso_safe_ret() and
 * the respective instruction sequences at retbleed_return_thunk()
 * must start at a cacheline boundary.
 */

/*
 * Safety details here pertain to the AMD Zen{1,2} microarchitecture:
 * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for
 *    alignment within the BTB.
 * 2) The instruction at retbleed_untrain_ret must contain, and not
 *    end with, the 0xc3 byte of the RET.
 * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread
 *    from re-poisoning the BTB prediction.
 */
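/*
 * The .align/.skip pair below implements 1): after aligning to 64 bytes,
 * padding with 64 - (retbleed_return_thunk - retbleed_untrain_ret) bytes of
 * INT3 places retbleed_return_thunk, and thus its RET byte, exactly on the
 * next 64 byte boundary.
 */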
	.align 64
	.skip 64 - (retbleed_return_thunk - retbleed_untrain_ret), 0xcc
SYM_START(retbleed_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
	ANNOTATE_NOENDBR
	/*
	 * As executed from retbleed_untrain_ret, this is:
	 *
	 *   TEST $0xcc, %bl
	 *   LFENCE
	 *   JMP retbleed_return_thunk
	 *
	 * Executing the TEST instruction has a side effect of evicting any BTB
	 * prediction (potentially attacker controlled) attached to the RET, as
	 * retbleed_return_thunk + 1 isn't an instruction boundary at the moment.
	 */
	.byte	0xf6

	/*
	 * As executed from retbleed_return_thunk, this is a plain RET.
	 *
	 * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8.
	 *
	 * We subsequently jump backwards and architecturally execute the RET.
	 * This creates a correct BTB prediction (type=ret), but in the
	 * meantime we suffer Straight Line Speculation (because the type was
	 * no branch) which is halted by the INT3.
	 *
	 * With SMT enabled and STIBP active, a sibling thread cannot poison
	 * RET's prediction to a type of its choice, but can evict the
	 * prediction due to competitive sharing. If the prediction is
	 * evicted, retbleed_return_thunk will suffer Straight Line Speculation
	 * which will be contained safely by the INT3.
	 */
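	/*
	 * For illustration, the overlapping byte layout is:
	 *
	 *   retbleed_untrain_ret:	0xf6	TEST opcode
	 *   retbleed_return_thunk:	0xc3	ModRM byte of the TEST / RET
	 *				0xcc	imm8 of the TEST / INT3
	 */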
SYM_INNER_LABEL(retbleed_return_thunk, SYM_L_GLOBAL)
	ret
	int3
SYM_CODE_END(retbleed_return_thunk)

	/*
	 * Ensure the TEST decoding / BTB invalidation is complete.
	 */
	lfence

	/*
	 * Jump back and execute the RET in the middle of the TEST instruction.
	 * INT3 is for SLS protection.
	 */
	jmp retbleed_return_thunk
	int3
SYM_FUNC_END(retbleed_untrain_ret)
__EXPORT_THUNK(retbleed_untrain_ret)

/*
 * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
 * above. On kernel entry, srso_untrain_ret() is executed, which is a
 *
 * movabs $0xccccc30824648d48,%rax
 *
 * and when the return thunk later executes the inner label srso_safe_ret(),
 * it is a stack adjustment plus a RET which is mispredicted and thus a
 * "safe" one to use.
 */
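/*
 * For illustration: the imm64 above, stored little-endian, is exactly the
 * byte sequence starting at srso_safe_ret() below:
 *
 *	48 8d 64 24 08		lea 0x8(%rsp),%rsp
 *	c3			ret
 *	cc cc			int3; int3
 *
 * while the 0x48, 0xb8 bytes emitted at srso_untrain_ret() are the REX.W +
 * MOVABS opcode that turns those eight bytes into the immediate.
 */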
	.align 64
	.skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc
SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
	ANNOTATE_NOENDBR
	.byte 0x48, 0xb8

/*
 * This forces the function return instruction to speculate into a trap
 * (UD2 in srso_return_thunk() below).  This RET will then mispredict
 * and execution will continue at the return site read from the top of
 * the stack.
 */
SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
	lea 8(%_ASM_SP), %_ASM_SP
	ret
	int3
	int3
	/* end of movabs */
	lfence
	call srso_safe_ret
	ud2
SYM_CODE_END(srso_safe_ret)
SYM_FUNC_END(srso_untrain_ret)
__EXPORT_THUNK(srso_untrain_ret)

SYM_CODE_START(srso_return_thunk)
	UNWIND_HINT_FUNC
	ANNOTATE_NOENDBR
	call srso_safe_ret
	ud2
SYM_CODE_END(srso_return_thunk)

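/*
 * A rough sketch of the selection below (not an exhaustive description of
 * alternatives patching): the Retbleed untraining sequence is the default,
 * X86_FEATURE_SRSO selects the Zen1/2 (MOVABS) sequence and
 * X86_FEATURE_SRSO_ALIAS the Zen3/4 aliasing one; when several feature bits
 * are set, the later alternative is expected to win since alternatives are
 * patched in order.
 */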
SYM_FUNC_START(entry_untrain_ret)
	ALTERNATIVE_2 "jmp retbleed_untrain_ret", \
		      "jmp srso_untrain_ret", X86_FEATURE_SRSO, \
		      "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
SYM_FUNC_END(entry_untrain_ret)
__EXPORT_THUNK(entry_untrain_ret)

SYM_CODE_START(__x86_return_thunk)
	UNWIND_HINT_FUNC
	ANNOTATE_NOENDBR
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_CODE_END(__x86_return_thunk)
EXPORT_SYMBOL(__x86_return_thunk)

#endif /* CONFIG_RETHUNK */

#ifdef CONFIG_CALL_DEPTH_TRACKING

	.align 64
SYM_FUNC_START(__x86_return_skl)
	ANNOTATE_NOENDBR
	/*
	 * Keep the hotpath in a 16-byte I-fetch for the non-debug
	 * case.
	 */
	CALL_THUNKS_DEBUG_INC_RETS
	shlq	$5, PER_CPU_VAR(pcpu_hot + X86_call_depth)
	jz	1f
	ANNOTATE_UNRET_SAFE
	ret
	int3
1:
	CALL_THUNKS_DEBUG_INC_STUFFS
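	/*
	 * The loop below performs 16 intra-function calls; each pushes a
	 * return address onto both the stack and the RSB, refilling the RSB
	 * with benign entries. The stack pointer adjustment afterwards drops
	 * the 16 stacked return addresses (8 bytes each) again.
	 */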
	.rept	16
	ANNOTATE_INTRA_FUNCTION_CALL
	call	2f
	int3
2:
	.endr
	add	$(8*16), %rsp

	CREDIT_CALL_DEPTH

	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_FUNC_END(__x86_return_skl)

#endif /* CONFIG_CALL_DEPTH_TRACKING */