xref: /openbmc/linux/arch/x86/entry/calling.h (revision eaad9812)
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/jump_label.h>
#include <asm/unwind_hints.h>
#include <asm/cpufeatures.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/inst.h>

/*

 x86 function call convention, 64-bit:
 -------------------------------------
  arguments           |  callee-saved      | extra caller-saved | return
 [callee-clobbered]   |                    | [callee-clobbered] |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]

 ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
   functions when it sees tail-call optimization possibilities) rflags is
   clobbered. Leftover arguments are passed over the stack frame.)

 [*]  In the frame-pointers case rbp is fixed to the stack frame.

 [**] for struct return values wider than 64 bits the return convention is a
      bit more complex: up to 128 bits width we return small structures
      straight in rax, rdx. For structures larger than that (3 words or
      larger) the caller puts a pointer to an on-stack return struct
      [allocated in the caller's stack frame] into the first argument - i.e.
      into rdi. All other arguments shift up by one in this case.
      Fortunately this case is rare in the kernel.

For 32-bit we have the following conventions - kernel is built with
-mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
  arguments         | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

 ( here too esp is obviously invariant across normal function calls. eflags
   is clobbered. Leftover arguments are passed over the stack frame. )

 [*]  In the frame-pointers case ebp is fixed to the stack frame.

 [**] We build with -freg-struct-return, which on 32-bit means similar
      semantics as on 64-bit: edx can be used for a second return value
      (i.e. covering integer and structure sizes up to 64 bits) - after that
      it gets more complex and more expensive: 3-word or larger struct returns
      get done in the caller's frame and the pointer to the return struct goes
      into regparm0, i.e. eax - the other arguments shift up and the
      function's register parameters degenerate to regparm=2 in essence.

*/
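
/*
 * Illustrative sketch (editorial note, not part of the original header):
 * for a C call such as
 *
 *	long f(long a, long b, long c, long d, long e, long g, long h);
 *
 * the 64-bit convention above places a..g in rdi, rsi, rdx, rcx, r8, r9,
 * passes h on the stack, and returns the result in rax.  r10/r11 and the
 * argument registers may be clobbered by f(); rbx, rbp and r12-r15 must
 * come back unchanged.
 */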

#ifdef CONFIG_X86_64

/*
 * 64-bit system call stack frame layout defines and helpers,
 * for assembly code:
 */

/* The layout forms the "struct pt_regs" on the stack: */
/*
 * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
 * unless syscall needs a complete, fully filled "struct pt_regs".
 */
#define R15		0*8
#define R14		1*8
#define R13		2*8
#define R12		3*8
#define RBP		4*8
#define RBX		5*8
/* These regs are callee-clobbered. Always saved on kernel entry. */
#define R11		6*8
#define R10		7*8
#define R9		8*8
#define R8		9*8
#define RAX		10*8
#define RCX		11*8
#define RDX		12*8
#define RSI		13*8
#define RDI		14*8
/*
 * On syscall entry, this is syscall#. On CPU exception, this is error code.
 * On hw interrupt, it's IRQ number:
 */
#define ORIG_RAX	15*8
/* Return frame for iretq */
#define RIP		16*8
#define CS		17*8
#define EFLAGS		18*8
#define RSP		19*8
#define SS		20*8

#define SIZEOF_PTREGS	21*8
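
/*
 * Illustrative sketch (editorial note, not part of the original header):
 * with a full pt_regs frame at the top of the stack, entry assembly can
 * address individual saved registers through these offsets, e.g.
 *
 *	movq	ORIG_RAX(%rsp), %rax	# syscall nr / error code / IRQ nr
 *	movq	RDI(%rsp), %rdi		# reload the saved first argument
 *
 * and the frame as a whole is SIZEOF_PTREGS (21*8 = 168) bytes.
 */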

.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
	.if \save_ret
	pushq	%rsi		/* pt_regs->si */
	movq	8(%rsp), %rsi	/* temporarily store the return address in %rsi */
	movq	%rdi, 8(%rsp)	/* pt_regs->di (overwriting original return address) */
	.else
	pushq   %rdi		/* pt_regs->di */
	pushq   %rsi		/* pt_regs->si */
	.endif
	pushq	\rdx		/* pt_regs->dx */
	pushq   %rcx		/* pt_regs->cx */
	pushq   \rax		/* pt_regs->ax */
	pushq   %r8		/* pt_regs->r8 */
	pushq   %r9		/* pt_regs->r9 */
	pushq   %r10		/* pt_regs->r10 */
	pushq   %r11		/* pt_regs->r11 */
	pushq	%rbx		/* pt_regs->rbx */
	pushq	%rbp		/* pt_regs->rbp */
	pushq	%r12		/* pt_regs->r12 */
	pushq	%r13		/* pt_regs->r13 */
	pushq	%r14		/* pt_regs->r14 */
	pushq	%r15		/* pt_regs->r15 */
	UNWIND_HINT_REGS

	.if \save_ret
	pushq	%rsi		/* return address on top of stack */
	.endif

	/*
	 * Sanitize registers of values that a speculation attack might
	 * otherwise want to exploit. The lower registers are likely clobbered
	 * well before they could be put to use in a speculative execution
	 * gadget.
	 */
	xorl	%edx,  %edx	/* nospec dx  */
	xorl	%ecx,  %ecx	/* nospec cx  */
	xorl	%r8d,  %r8d	/* nospec r8  */
	xorl	%r9d,  %r9d	/* nospec r9  */
	xorl	%r10d, %r10d	/* nospec r10 */
	xorl	%r11d, %r11d	/* nospec r11 */
	xorl	%ebx,  %ebx	/* nospec rbx */
	xorl	%ebp,  %ebp	/* nospec rbp */
	xorl	%r12d, %r12d	/* nospec r12 */
	xorl	%r13d, %r13d	/* nospec r13 */
	xorl	%r14d, %r14d	/* nospec r14 */
	xorl	%r15d, %r15d	/* nospec r15 */

.endm
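
/*
 * Illustrative usage (editorial note, not from this file): entry code
 * typically builds the pt_regs frame right after switching stacks, e.g.
 *
 *	PUSH_AND_CLEAR_REGS rax=$-ENOSYS	# syscall path: preload
 *						# pt_regs->ax with -ENOSYS
 *	PUSH_AND_CLEAR_REGS save_ret=1		# when invoked via call, with
 *						# the return address on the stack
 *
 * The exact call sites live in entry_64.S and may differ between kernel
 * versions.
 */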

.macro POP_REGS pop_rdi=1 skip_r11rcx=0
	popq %r15
	popq %r14
	popq %r13
	popq %r12
	popq %rbp
	popq %rbx
	.if \skip_r11rcx
	popq %rsi
	.else
	popq %r11
	.endif
	popq %r10
	popq %r9
	popq %r8
	popq %rax
	.if \skip_r11rcx
	popq %rsi
	.else
	popq %rcx
	.endif
	popq %rdx
	popq %rsi
	.if \pop_rdi
	popq %rdi
	.endif
.endm
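
/*
 * Illustrative note (editorial, not from this file): the sysret return path
 * restores rcx and r11 from pt_regs->ip and pt_regs->flags itself, so it can
 * invoke this as
 *
 *	POP_REGS pop_rdi=0 skip_r11rcx=1
 *
 * discarding the saved r11/rcx slots into %rsi and leaving rdi on the stack
 * for a later pop, once the CR3 switch no longer needs it as scratch.
 */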

#ifdef CONFIG_PAGE_TABLE_ISOLATION

/*
 * PAGE_TABLE_ISOLATION PGDs are 8k.  Flip bit 12 to switch between the two
 * halves:
 */
#define PTI_USER_PGTABLE_BIT		PAGE_SHIFT
#define PTI_USER_PGTABLE_MASK		(1 << PTI_USER_PGTABLE_BIT)
#define PTI_USER_PCID_BIT		X86_CR3_PTI_PCID_USER_BIT
#define PTI_USER_PCID_MASK		(1 << PTI_USER_PCID_BIT)
#define PTI_USER_PGTABLE_AND_PCID_MASK  (PTI_USER_PCID_MASK | PTI_USER_PGTABLE_MASK)
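
/*
 * Worked example (editorial note, not from this file): with the kernel PGD
 * at physical address 0x1000000, the user half of the 8k PGD pair sits at
 * 0x1001000.  Setting PTI_USER_PGTABLE_BIT (bit 12, i.e. PAGE_SHIFT) in CR3
 * selects the user half, and setting PTI_USER_PCID_BIT selects the user
 * ASID, so the kernel <-> user switch is a couple of bit operations on CR3
 * rather than a page table walk.
 */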

.macro SET_NOFLUSH_BIT	reg:req
	bts	$X86_CR3_PCID_NOFLUSH_BIT, \reg
.endm

.macro ADJUST_KERNEL_CR3 reg:req
	ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID
	/* Clear PCID and "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */
	andq    $(~PTI_USER_PGTABLE_AND_PCID_MASK), \reg
.endm

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg
	ADJUST_KERNEL_CR3 \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm
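
/*
 * Illustrative usage (editorial note, not from this file): user-mode entry
 * paths switch to the kernel page tables as early as possible, typically
 *
 *	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp	# syscall entry, before the
 *						# kernel stack is usable, or
 *	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi	# once registers are saved
 *
 * When X86_FEATURE_PTI is not set, the ALTERNATIVE leaves the "jmp .Lend_\@"
 * in place and the whole body is skipped.
 */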

#define THIS_CPU_user_pcid_flush_mask   \
	PER_CPU_VAR(cpu_tlbstate) + TLB_STATE_user_pcid_flush_mask

.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * Test if the ASID needs a flush.
	 */
	movq	\scratch_reg, \scratch_reg2
	andq	$(0x7FF), \scratch_reg		/* mask ASID */
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	/* Flush needed, clear the bit */
	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	movq	\scratch_reg2, \scratch_reg
	jmp	.Lwrcr3_pcid_\@

.Lnoflush_\@:
	movq	\scratch_reg2, \scratch_reg
	SET_NOFLUSH_BIT \scratch_reg

.Lwrcr3_pcid_\@:
	/* Flip the ASID to the user version */
	orq	$(PTI_USER_PCID_MASK), \scratch_reg

.Lwrcr3_\@:
	/* Flip the PGD to the user version */
	orq     $(PTI_USER_PGTABLE_MASK), \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm

.macro SWITCH_TO_USER_CR3_STACK	scratch_reg:req
	pushq	%rax
	SWITCH_TO_USER_CR3_NOSTACK scratch_reg=\scratch_reg scratch_reg2=%rax
	popq	%rax
.endm
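
/*
 * Illustrative note (editorial, not from this file): the TLB code records
 * "this user ASID needs a flush on its next use" by setting a bit in the
 * per-CPU user_pcid_flush_mask.  On the next return to user space the macro
 * above finds the bit set, clears it and writes CR3 without the NOFLUSH bit,
 * so stale user-ASID entries are flushed exactly once.  Exit paths with a
 * free GP register use SWITCH_TO_USER_CR3_NOSTACK; the _STACK variant
 * borrows %rax via push/pop when no second scratch register is available.
 */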

.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
	movq	%cr3, \scratch_reg
	movq	\scratch_reg, \save_reg
	/*
	 * Test the user pagetable bit. If set, then the user page tables
	 * are active. If clear, CR3 already has the kernel page tables
	 * active.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \scratch_reg
	jnc	.Ldone_\@

	ADJUST_KERNEL_CR3 \scratch_reg
	movq	\scratch_reg, %cr3

.Ldone_\@:
.endm

.macro RESTORE_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * KERNEL pages can always resume with NOFLUSH as we do
	 * explicit flushes.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \save_reg
	jnc	.Lnoflush_\@

	/*
	 * Check if there's a pending flush for the user ASID we're
	 * about to set.
	 */
	movq	\save_reg, \scratch_reg
	andq	$(0x7FF), \scratch_reg
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jmp	.Lwrcr3_\@

.Lnoflush_\@:
	SET_NOFLUSH_BIT \save_reg

.Lwrcr3_\@:
	/*
	 * The CR3 write could be avoided when not changing its value,
	 * but would require a CR3 read *and* a scratch register.
	 */
	movq	\save_reg, %cr3
.Lend_\@:
.endm
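
/*
 * Illustrative pairing (editorial note, not from this file): paranoid entry
 * points (NMI, machine check, ...) cannot assume which CR3 was live on
 * entry, so they typically bracket their body with
 *
 *	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 *	...
 *	RESTORE_CR3 scratch_reg=%rax save_reg=%r14
 *
 * restoring whichever CR3 value, kernel or user, was saved on the way in.
 */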

#else /* CONFIG_PAGE_TABLE_ISOLATION=n: */

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
.endm
.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
.endm
.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
.endm
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
.endm
.macro RESTORE_CR3 scratch_reg:req save_reg:req
.endm

#endif

/*
 * Mitigate Spectre v1 for conditional swapgs code paths.
 *
 * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
 * prevent a speculative swapgs when coming from kernel space.
 *
 * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path,
 * to prevent the swapgs from getting speculatively skipped when coming from
 * user space.
 */
.macro FENCE_SWAPGS_USER_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER
.endm
.macro FENCE_SWAPGS_KERNEL_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL
.endm
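
/*
 * Illustrative pattern (editorial sketch, not from this file): a
 * conditional-swapgs entry path looks roughly like
 *
 *	testb	$3, CS(%rsp)		# came from user mode?
 *	jz	1f
 *	swapgs
 *	FENCE_SWAPGS_USER_ENTRY		# user path: fence after the swapgs
 *	jmp	2f
 * 1:
 *	FENCE_SWAPGS_KERNEL_ENTRY	# kernel path: fence the skipped swapgs
 * 2:
 *
 * so neither a speculatively executed swapgs (kernel origin) nor a
 * speculatively skipped one (user origin) can be abused as a Spectre v1
 * gadget.
 */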

.macro STACKLEAK_ERASE_NOCLOBBER
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	PUSH_AND_CLEAR_REGS
	call stackleak_erase
	POP_REGS
#endif
.endm

#else /* CONFIG_X86_64 */
# undef		UNWIND_HINT_IRET_REGS
# define	UNWIND_HINT_IRET_REGS
#endif /* !CONFIG_X86_64 */

.macro STACKLEAK_ERASE
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	call stackleak_erase
#endif
.endm
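
/*
 * Editorial note (not from the original header): STACKLEAK_ERASE simply
 * calls stackleak_erase() and therefore clobbers caller-saved registers per
 * the C ABI, while STACKLEAK_ERASE_NOCLOBBER brackets the call with
 * PUSH_AND_CLEAR_REGS / POP_REGS so it can be used on paths whose registers
 * already hold values that must survive.
 */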

#ifdef CONFIG_SMP

/*
 * CPU/node NR is loaded from the limit (size) field of a special segment
 * descriptor entry in the GDT.
 */
.macro LOAD_CPU_AND_NODE_SEG_LIMIT reg:req
	movq	$__CPUNODE_SEG, \reg
	lsl	\reg, \reg
.endm

/*
 * Fetch the per-CPU GSBASE value for this processor and put it in @reg.
 * We normally use %gs for accessing per-CPU data, but we are setting up
 * %gs here and obviously cannot use %gs itself to access per-CPU data.
 */
.macro GET_PERCPU_BASE reg:req
	ALTERNATIVE \
		"LOAD_CPU_AND_NODE_SEG_LIMIT \reg", \
		"RDPID	\reg", \
		X86_FEATURE_RDPID
	andq	$VDSO_CPUNODE_MASK, \reg
	movq	__per_cpu_offset(, \reg, 8), \reg
.endm
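
/*
 * Illustrative usage (editorial sketch, not from this file): with the
 * per-CPU offset in hand, entry code can reach per-CPU data before GSBASE
 * is set up, e.g.
 *
 *	GET_PERCPU_BASE	%rax
 *	movq	some_percpu_var(%rax), %rdx	# hypothetical variable name
 *
 * On RDPID-capable CPUs the processor id comes from the faster rdpid
 * instruction; otherwise it is recovered from the CPUNODE segment limit.
 */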

#else

.macro GET_PERCPU_BASE reg:req
	movq	pcpu_unit_offsets(%rip), \reg
.endm

#endif /* CONFIG_SMP */