xref: /openbmc/linux/arch/x86/entry/calling.h (revision 30907fd1)
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/jump_label.h>
#include <asm/unwind_hints.h>
#include <asm/cpufeatures.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>

/*

 x86 function call convention, 64-bit:
 -------------------------------------
  arguments           |  callee-saved      | extra caller-saved | return
 [callee-clobbered]   |                    | [callee-clobbered] |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]

 ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
   functions when it sees tail-call optimization possibilities.) rflags is
   clobbered. Leftover arguments are passed on the stack. )

 [*]  In the frame-pointers case rbp is fixed to the stack frame.

 [**] For struct return values wider than 64 bits the return convention is a
      bit more complex: up to 128 bits width we return small structures
      straight in rax, rdx. For structures larger than that (3 words or
      larger) the caller puts a pointer to an on-stack return struct
      [allocated in the caller's stack frame] into the first argument - i.e.
      into rdi. All other arguments shift up by one in this case.
      Fortunately this case is rare in the kernel.

For 32-bit we have the following conventions - kernel is built with
-mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
  arguments         | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

 ( here too esp is obviously invariant across normal function calls. eflags
   is clobbered. Leftover arguments are passed on the stack. )

 [*]  In the frame-pointers case ebp is fixed to the stack frame.

 [**] We build with -freg-struct-return, which on 32-bit means similar
      semantics as on 64-bit: edx can be used for a second return value
      (i.e. covering integer and structure sizes up to 64 bits) - after that
      it gets more complex and more expensive: 3-word or larger struct returns
      get done in the caller's frame and the pointer to the return struct goes
      into regparm0, i.e. eax - the other arguments shift up and the
      function's register parameters degenerate to regparm=2 in essence.

*/
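
/*
 * Illustrative sketch of the [**] struct-return note above (hypothetical
 * C, not part of this file; the types are made up for illustration):
 *
 *	struct two_words   { long a, b;    }	is returned in rax, rdx
 *	struct three_words { long a, b, c; }	makes the caller pass a
 *						hidden pointer to the result
 *						in rdi; other args shift up
 */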

#ifdef CONFIG_X86_64

/*
 * 64-bit system call stack frame layout defines and helpers,
 * for assembly code:
 */

/* The layout forms the "struct pt_regs" on the stack: */
/*
 * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
 * unless syscall needs a complete, fully filled "struct pt_regs".
 */
#define R15		0*8
#define R14		1*8
#define R13		2*8
#define R12		3*8
#define RBP		4*8
#define RBX		5*8
/* These regs are callee-clobbered. Always saved on kernel entry. */
#define R11		6*8
#define R10		7*8
#define R9		8*8
#define R8		9*8
#define RAX		10*8
#define RCX		11*8
#define RDX		12*8
#define RSI		13*8
#define RDI		14*8
/*
 * On syscall entry, this is the syscall number. On CPU exception, this is
 * the error code. On hw interrupt, it's the IRQ number:
 */
#define ORIG_RAX	15*8
/* Return frame for iretq */
#define RIP		16*8
#define CS		17*8
#define EFLAGS		18*8
#define RSP		19*8
#define SS		20*8

#define SIZEOF_PTREGS	21*8

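/*
 * Usage sketch (hypothetical, not taken from the entry code): with a
 * full pt_regs frame at the top of the stack, these offsets index the
 * saved registers directly:
 *
 *	movq	ORIG_RAX(%rsp), %rsi	(load the syscall number)
 *	movq	$-ENOSYS, RAX(%rsp)	(preset the saved return value)
 */
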
	.macro ALLOC_PT_GPREGS_ON_STACK
	addq	$-(15*8), %rsp
	.endm

	.macro SAVE_AND_CLEAR_REGS offset=0
	/*
	 * Save registers and sanitize registers of values that a
	 * speculation attack might otherwise want to exploit. The
	 * lower registers are likely clobbered well before they
	 * could be put to use in a speculative execution gadget.
	 * Interleave XOR with MOV for better uop scheduling:
	 */
	movq %rdi, 14*8+\offset(%rsp)
	movq %rsi, 13*8+\offset(%rsp)
	movq %rdx, 12*8+\offset(%rsp)
	movq %rcx, 11*8+\offset(%rsp)
	movq %rax, 10*8+\offset(%rsp)
	movq %r8,  9*8+\offset(%rsp)
	xorq %r8, %r8				/* nospec r8 */
	movq %r9,  8*8+\offset(%rsp)
	xorq %r9, %r9				/* nospec r9 */
	movq %r10, 7*8+\offset(%rsp)
	xorq %r10, %r10				/* nospec r10 */
	movq %r11, 6*8+\offset(%rsp)
	xorq %r11, %r11				/* nospec r11 */
	movq %rbx, 5*8+\offset(%rsp)
	xorl %ebx, %ebx				/* nospec rbx */
	movq %rbp, 4*8+\offset(%rsp)
	xorl %ebp, %ebp				/* nospec rbp */
	movq %r12, 3*8+\offset(%rsp)
	xorq %r12, %r12				/* nospec r12 */
	movq %r13, 2*8+\offset(%rsp)
	xorq %r13, %r13				/* nospec r13 */
	movq %r14, 1*8+\offset(%rsp)
	xorq %r14, %r14				/* nospec r14 */
	movq %r15, 0*8+\offset(%rsp)
	xorq %r15, %r15				/* nospec r15 */
	UNWIND_HINT_REGS offset=\offset
	.endm

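/*
 * Usage sketch (hypothetical): an entry path that has already pushed the
 * hardware frame and orig_ax can complete pt_regs in two steps:
 *
 *	ALLOC_PT_GPREGS_ON_STACK
 *	SAVE_AND_CLEAR_REGS
 *
 * The offset argument shifts the stores for callers whose %rsp does not
 * point directly at the GP-register area of pt_regs.
 */
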
	.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax
	/*
	 * Push registers and sanitize registers of values that a
	 * speculation attack might otherwise want to exploit. The
	 * lower registers are likely clobbered well before they
	 * could be put to use in a speculative execution gadget.
	 * Interleave XOR with PUSH for better uop scheduling:
	 */
	pushq	%rdi		/* pt_regs->di */
	pushq	%rsi		/* pt_regs->si */
	pushq	\rdx		/* pt_regs->dx */
	pushq	%rcx		/* pt_regs->cx */
	pushq	\rax		/* pt_regs->ax */
	pushq	%r8		/* pt_regs->r8 */
	xorq	%r8, %r8	/* nospec r8 */
	pushq	%r9		/* pt_regs->r9 */
	xorq	%r9, %r9	/* nospec r9 */
	pushq	%r10		/* pt_regs->r10 */
	xorq	%r10, %r10	/* nospec r10 */
	pushq	%r11		/* pt_regs->r11 */
	xorq	%r11, %r11	/* nospec r11 */
	pushq	%rbx		/* pt_regs->bx */
	xorl	%ebx, %ebx	/* nospec rbx */
	pushq	%rbp		/* pt_regs->bp */
	xorl	%ebp, %ebp	/* nospec rbp */
	pushq	%r12		/* pt_regs->r12 */
	xorq	%r12, %r12	/* nospec r12 */
	pushq	%r13		/* pt_regs->r13 */
	xorq	%r13, %r13	/* nospec r13 */
	pushq	%r14		/* pt_regs->r14 */
	xorq	%r14, %r14	/* nospec r14 */
	pushq	%r15		/* pt_regs->r15 */
	xorq	%r15, %r15	/* nospec r15 */
	UNWIND_HINT_REGS
	.endm

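/*
 * Usage sketch (hypothetical): one invocation builds the GP-register
 * part of pt_regs in a single pass. The rdx=/rax= arguments let the
 * caller store a substitute value instead of the live register, e.g. to
 * preset the saved return value:
 *
 *	PUSH_AND_CLEAR_REGS rax=$-ENOSYS
 */
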
	.macro POP_REGS pop_rdi=1 skip_r11rcx=0
	popq %r15
	popq %r14
	popq %r13
	popq %r12
	popq %rbp
	popq %rbx
	.if \skip_r11rcx
	popq %rsi	/* discard the r11 slot: sysret needs r11 = user RFLAGS */
	.else
	popq %r11
	.endif
	popq %r10
	popq %r9
	popq %r8
	popq %rax
	.if \skip_r11rcx
	popq %rsi	/* discard the rcx slot: sysret needs rcx = user RIP */
	.else
	popq %rcx
	.endif
	popq %rdx
	popq %rsi
	.if \pop_rdi
	popq %rdi
	.endif
	.endm
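
/*
 * Usage sketch (hypothetical): the sysret fast path leaves rdi for a
 * later reload and skips rcx/r11 because sysret consumes them as the
 * user RIP and RFLAGS:
 *
 *	POP_REGS pop_rdi=0 skip_r11rcx=1
 */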
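/*
 * 0xf1 is ICEBP (a.k.a. INT1), an undocumented one-byte instruction
 * that raises a debug exception; it is emitted as a raw byte because
 * not all assemblers know the mnemonic.
 */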
	.macro icebp
	.byte 0xf1
	.endm

/*
 * This is a sneaky trick to help the unwinder find pt_regs on the stack.  The
 * frame pointer is replaced with an encoded pointer to pt_regs.  The encoding
 * is just setting the LSB, which makes it an invalid stack address and is also
 * a signal to the unwinder that it's a pt_regs pointer in disguise.
 *
 * NOTE: This macro must be used *after* SAVE_AND_CLEAR_REGS because it corrupts
 * the original rbp.
 */
.macro ENCODE_FRAME_POINTER ptregs_offset=0
#ifdef CONFIG_FRAME_POINTER
	.if \ptregs_offset
		leaq \ptregs_offset(%rsp), %rbp
	.else
		mov %rsp, %rbp
	.endif
	orq	$0x1, %rbp
#endif
.endm

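/*
 * Decoding sketch (hypothetical C, mirroring the encoding above): the
 * unwinder tests and strips the LSB to recover the pt_regs address:
 *
 *	if (bp & 0x1)
 *		regs = (struct pt_regs *)(bp & ~0x1UL);
 */
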
#ifdef CONFIG_PAGE_TABLE_ISOLATION

/*
 * PAGE_TABLE_ISOLATION PGDs are 8k.  Flip bit 12 to switch between the two
 * halves:
 */
#define PTI_USER_PGTABLE_BIT		PAGE_SHIFT
#define PTI_USER_PGTABLE_MASK		(1 << PTI_USER_PGTABLE_BIT)
#define PTI_USER_PCID_BIT		X86_CR3_PTI_PCID_USER_BIT
#define PTI_USER_PCID_MASK		(1 << PTI_USER_PCID_BIT)
#define PTI_USER_PGTABLE_AND_PCID_MASK  (PTI_USER_PCID_MASK | PTI_USER_PGTABLE_MASK)

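/*
 * Resulting CR3 bit-layout sketch (with PTI and PCID enabled; see the
 * definitions above):
 *
 *	bit 63		NOFLUSH			(X86_CR3_PCID_NOFLUSH_BIT)
 *	bit 12		kernel/user PGD half	(PTI_USER_PGTABLE_BIT)
 *	bit 11		kernel/user ASID	(PTI_USER_PCID_BIT)
 *	bits 0-10	low ASID bits, masked with 0x7FF below
 */
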
.macro SET_NOFLUSH_BIT	reg:req
	bts	$X86_CR3_PCID_NOFLUSH_BIT, \reg
.endm

.macro ADJUST_KERNEL_CR3 reg:req
	ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID
	/* Clear PCID and the PAGE_TABLE_ISOLATION PGD bit, point CR3 at the kernel pagetables: */
	andq    $(~PTI_USER_PGTABLE_AND_PCID_MASK), \reg
.endm

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg
	ADJUST_KERNEL_CR3 \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm

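/*
 * Usage sketch (hypothetical): run early on kernel entry, before kernel
 * data that only the kernel mapping provides is touched. Any register
 * that is dead or already saved can serve as scratch:
 *
 *	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
 */
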
#define THIS_CPU_user_pcid_flush_mask   \
	PER_CPU_VAR(cpu_tlbstate) + TLB_STATE_user_pcid_flush_mask

.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * Test if the ASID needs a flush.
	 */
	movq	\scratch_reg, \scratch_reg2
	andq	$(0x7FF), \scratch_reg		/* mask ASID */
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	/* Flush needed, clear the bit */
	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	movq	\scratch_reg2, \scratch_reg
	jmp	.Lwrcr3_pcid_\@

.Lnoflush_\@:
	movq	\scratch_reg2, \scratch_reg
	SET_NOFLUSH_BIT \scratch_reg

.Lwrcr3_pcid_\@:
	/* Flip the ASID to the user version */
	orq	$(PTI_USER_PCID_MASK), \scratch_reg

.Lwrcr3_\@:
	/* Flip the PGD to the user version */
	orq     $(PTI_USER_PGTABLE_MASK), \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm

.macro SWITCH_TO_USER_CR3_STACK	scratch_reg:req
	pushq	%rax
	SWITCH_TO_USER_CR3_NOSTACK scratch_reg=\scratch_reg scratch_reg2=%rax
	popq	%rax
.endm

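/*
 * Usage sketch (hypothetical): on the exit path, after the last access
 * to kernel-only mappings. The _STACK variant trades a push/pop of %rax
 * for not needing a second free register:
 *
 *	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
 */
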
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
	movq	%cr3, \scratch_reg
	movq	\scratch_reg, \save_reg
	/*
	 * Test the user pagetable bit. If set, then the user page tables
	 * are active. If clear, CR3 already has the kernel page tables
	 * active.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \scratch_reg
	jnc	.Ldone_\@

	ADJUST_KERNEL_CR3 \scratch_reg
	movq	\scratch_reg, %cr3

.Ldone_\@:
.endm

.macro RESTORE_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * KERNEL pages can always resume with NOFLUSH as we do
	 * explicit flushes.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \save_reg
	jnc	.Lnoflush_\@

	/*
	 * Check if there's a pending flush for the user ASID we're
	 * about to set.
	 */
	movq	\save_reg, \scratch_reg
	andq	$(0x7FF), \scratch_reg		/* mask ASID */
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jmp	.Lwrcr3_\@

.Lnoflush_\@:
	SET_NOFLUSH_BIT \save_reg

.Lwrcr3_\@:
	/*
	 * The CR3 write could be avoided when not changing its value,
	 * but would require a CR3 read *and* a scratch register.
	 */
	movq	\save_reg, %cr3
.Lend_\@:
.endm

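/*
 * Pairing sketch (hypothetical): paranoid entry paths, which can
 * interrupt the kernel with either CR3 live, save the old value on
 * entry and write it back verbatim on exit:
 *
 *	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 *	... handler body ...
 *	RESTORE_CR3 scratch_reg=%rax save_reg=%r14
 */
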
#else /* CONFIG_PAGE_TABLE_ISOLATION=n: */

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
.endm
.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
.endm
.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
.endm
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
.endm
.macro RESTORE_CR3 scratch_reg:req save_reg:req
.endm

#endif

#endif /* CONFIG_X86_64 */

/*
 * This does 'call enter_from_user_mode' unless we can avoid it based on
 * kernel config or by using the static jump infrastructure.
 */
.macro CALL_enter_from_user_mode
#ifdef CONFIG_CONTEXT_TRACKING
#ifdef HAVE_JUMP_LABEL
	STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0
#endif
	call enter_from_user_mode
.Lafter_call_\@:
#endif
.endm
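
/*
 * Usage sketch (hypothetical): invoked once pt_regs is complete on an
 * entry from user mode. It compiles away without CONFIG_CONTEXT_TRACKING
 * and patches down to a NOP via the static key when context tracking is
 * built in but not enabled:
 *
 *	PUSH_AND_CLEAR_REGS
 *	CALL_enter_from_user_mode
 */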