/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/jump_label.h>
#include <asm/unwind_hints.h>
#include <asm/cpufeatures.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/ptrace-abi.h>
#include <asm/msr.h>
#include <asm/nospec-branch.h>

/*

 x86 function call convention, 64-bit:
 -------------------------------------
  arguments           |  callee-saved      | extra caller-saved | return
 [callee-clobbered]   |                    | [callee-clobbered] |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]

 ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
   functions when it sees tail-call optimization possibilities.) rflags is
   clobbered. Leftover arguments are passed on the stack. )

 [*]  In the frame-pointers case rbp is fixed to the stack frame.

 [**] For struct return values wider than 64 bits the return convention is a
      bit more complex: structures up to 128 bits wide are returned
      straight in rax, rdx. For structures larger than that (3 words or
      more) the caller puts a pointer to an on-stack return struct
      [allocated in the caller's stack frame] into the first argument - i.e.
      into rdi. All other arguments shift up by one in this case.
      Fortunately this case is rare in the kernel.

For 32-bit we have the following conventions - kernel is built with
-mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
  arguments         | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

 ( here too esp is obviously invariant across normal function calls. eflags
   is clobbered. Leftover arguments are passed on the stack. )

 [*]  In the frame-pointers case ebp is fixed to the stack frame.

 [**] We build with -freg-struct-return, which on 32-bit means similar
      semantics as on 64-bit: edx can be used for a second return value
      (i.e. covering integer and structure sizes up to 64 bits) - after that
      it gets more complex and more expensive: 3-word or larger struct returns
      get done in the caller's frame and the pointer to the return struct goes
      into regparm0, i.e. eax - the other arguments shift up and the
      function's register parameters degenerate to regparm=2 in essence.

*/
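
/*
 * Illustrative sketch of the 64-bit convention above; the function name and
 * argument values are hypothetical, not part of this file:
 *
 *	movq	$1, %rdi	# first argument
 *	movq	$2, %rsi	# second argument
 *	call	some_func	# may clobber rax, rcx, rdx, rsi, rdi, r8-r11
 *				# and rflags; rbx, rbp, r12-r15 survive
 *	movq	%rax, %rbx	# 64-bit return value
 */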

#ifdef CONFIG_X86_64

/*
 * 64-bit system call stack frame layout defines and helpers,
 * for assembly code:
 */

.macro PUSH_REGS rdx=%rdx rcx=%rcx rax=%rax save_ret=0
	.if \save_ret
	pushq	%rsi		/* pt_regs->si */
	movq	8(%rsp), %rsi	/* temporarily store the return address in %rsi */
	movq	%rdi, 8(%rsp)	/* pt_regs->di (overwriting original return address) */
	.else
	pushq   %rdi		/* pt_regs->di */
	pushq   %rsi		/* pt_regs->si */
	.endif
	pushq	\rdx		/* pt_regs->dx */
	pushq   \rcx		/* pt_regs->cx */
	pushq   \rax		/* pt_regs->ax */
	pushq   %r8		/* pt_regs->r8 */
	pushq   %r9		/* pt_regs->r9 */
	pushq   %r10		/* pt_regs->r10 */
	pushq   %r11		/* pt_regs->r11 */
	pushq	%rbx		/* pt_regs->bx */
	pushq	%rbp		/* pt_regs->bp */
	pushq	%r12		/* pt_regs->r12 */
	pushq	%r13		/* pt_regs->r13 */
	pushq	%r14		/* pt_regs->r14 */
	pushq	%r15		/* pt_regs->r15 */
	UNWIND_HINT_REGS

	.if \save_ret
	pushq	%rsi		/* return address on top of stack */
	.endif
.endm
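
/*
 * Illustrative PUSH_REGS usage (a sketch, not lifted from an entry point):
 * a syscall-style path that has already pushed the hardware frame and
 * orig_ax can build the rest of pt_regs while pre-setting pt_regs->ax:
 *
 *	PUSH_REGS rax=$-ENOSYS
 *
 * With save_ret=1 the stack instead holds a return address; the macro
 * builds pt_regs underneath it and puts the return address back on top.
 */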

.macro CLEAR_REGS
	/*
	 * Sanitize registers of values that a speculation attack might
	 * otherwise want to exploit. The lower registers are likely clobbered
	 * well before they could be put to use in a speculative execution
	 * gadget.
	 */
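	/*
	 * The 32-bit XORs below suffice for the whole register: writing a
	 * 32-bit register zero-extends into the upper half, and the 32-bit
	 * encoding is shorter than the REX-prefixed 64-bit one.
	 */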
	xorl	%esi,  %esi	/* nospec si  */
	xorl	%edx,  %edx	/* nospec dx  */
	xorl	%ecx,  %ecx	/* nospec cx  */
	xorl	%r8d,  %r8d	/* nospec r8  */
	xorl	%r9d,  %r9d	/* nospec r9  */
	xorl	%r10d, %r10d	/* nospec r10 */
	xorl	%r11d, %r11d	/* nospec r11 */
	xorl	%ebx,  %ebx	/* nospec rbx */
	xorl	%ebp,  %ebp	/* nospec rbp */
	xorl	%r12d, %r12d	/* nospec r12 */
	xorl	%r13d, %r13d	/* nospec r13 */
	xorl	%r14d, %r14d	/* nospec r14 */
	xorl	%r15d, %r15d	/* nospec r15 */

.endm

.macro PUSH_AND_CLEAR_REGS rdx=%rdx rcx=%rcx rax=%rax save_ret=0
	PUSH_REGS rdx=\rdx, rcx=\rcx, rax=\rax, save_ret=\save_ret
	CLEAR_REGS
.endm

.macro POP_REGS pop_rdi=1
	popq %r15
	popq %r14
	popq %r13
	popq %r12
	popq %rbp
	popq %rbx
	popq %r11
	popq %r10
	popq %r9
	popq %r8
	popq %rax
	popq %rcx
	popq %rdx
	popq %rsi
	.if \pop_rdi
	popq %rdi
	.endif
.endm
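
/*
 * Sketch of how the push/pop macros pair around a call into C (the called
 * function name is hypothetical):
 *
 *	PUSH_AND_CLEAR_REGS
 *	movq	%rsp, %rdi		# pt_regs pointer as first argument
 *	call	handle_something
 *	POP_REGS
 *
 * POP_REGS pop_rdi=0 leaves pt_regs->di on the stack, e.g. so an exit path
 * can keep using %rdi as a scratch register and pop it last.
 */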

#ifdef CONFIG_PAGE_TABLE_ISOLATION

/*
 * PAGE_TABLE_ISOLATION PGDs are 8k.  Flip bit 12 to switch between the two
 * halves:
 */
#define PTI_USER_PGTABLE_BIT		PAGE_SHIFT
#define PTI_USER_PGTABLE_MASK		(1 << PTI_USER_PGTABLE_BIT)
#define PTI_USER_PCID_BIT		X86_CR3_PTI_PCID_USER_BIT
#define PTI_USER_PCID_MASK		(1 << PTI_USER_PCID_BIT)
#define PTI_USER_PGTABLE_AND_PCID_MASK  (PTI_USER_PCID_MASK | PTI_USER_PGTABLE_MASK)
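
/*
 * Worked example of the masks above, assuming the usual x86-64 values
 * PAGE_SHIFT == 12 and X86_CR3_PTI_PCID_USER_BIT == 11:
 *
 *	PTI_USER_PGTABLE_MASK		== 1 << 12 == 0x1000
 *	PTI_USER_PCID_MASK		== 1 << 11 == 0x0800
 *	PTI_USER_PGTABLE_AND_PCID_MASK	== 0x1800
 *
 * So switching between the kernel and user views only ever touches bits
 * 11 and 12 of CR3.
 */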

.macro SET_NOFLUSH_BIT	reg:req
	bts	$X86_CR3_PCID_NOFLUSH_BIT, \reg
.endm

.macro ADJUST_KERNEL_CR3 reg:req
	ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID
	/* Clear PCID and the PAGE_TABLE_ISOLATION bit; point CR3 at the kernel pagetables: */
	andq    $(~PTI_USER_PGTABLE_AND_PCID_MASK), \reg
.endm

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg
	ADJUST_KERNEL_CR3 \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm
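
/*
 * Hedged usage sketch (not from this file): an entry path with a free
 * general-purpose register switches to the kernel page tables with:
 *
 *	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
 *
 * The ALTERNATIVE makes this a no-op when X86_FEATURE_PTI is not set, so
 * callers need no feature check of their own.
 */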

#define THIS_CPU_user_pcid_flush_mask   \
	PER_CPU_VAR(cpu_tlbstate) + TLB_STATE_user_pcid_flush_mask

.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * Test if the ASID needs a flush.
	 */
	movq	\scratch_reg, \scratch_reg2
	andq	$(0x7FF), \scratch_reg		/* mask ASID */
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	/* Flush needed, clear the bit */
	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	movq	\scratch_reg2, \scratch_reg
	jmp	.Lwrcr3_pcid_\@

.Lnoflush_\@:
	movq	\scratch_reg2, \scratch_reg
	SET_NOFLUSH_BIT \scratch_reg

.Lwrcr3_pcid_\@:
	/* Flip the ASID to the user version */
	orq	$(PTI_USER_PCID_MASK), \scratch_reg

.Lwrcr3_\@:
	/* Flip the PGD to the user version */
	orq     $(PTI_USER_PGTABLE_MASK), \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm

.macro SWITCH_TO_USER_CR3_STACK	scratch_reg:req
	pushq	%rax
	SWITCH_TO_USER_CR3_NOSTACK scratch_reg=\scratch_reg scratch_reg2=%rax
	popq	%rax
.endm

.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
	movq	%cr3, \scratch_reg
	movq	\scratch_reg, \save_reg
	/*
	 * Test the user pagetable bit. If set, then the user page tables
	 * are active. If clear, CR3 already has the kernel page tables
	 * active.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \scratch_reg
	jnc	.Ldone_\@

	ADJUST_KERNEL_CR3 \scratch_reg
	movq	\scratch_reg, %cr3

.Ldone_\@:
.endm

.macro RESTORE_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * KERNEL pages can always resume with NOFLUSH as we do
	 * explicit flushes.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \save_reg
	jnc	.Lnoflush_\@

	/*
	 * Check if there's a pending flush for the user ASID we're
	 * about to set.
	 */
	movq	\save_reg, \scratch_reg
	andq	$(0x7FF), \scratch_reg
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jmp	.Lwrcr3_\@

.Lnoflush_\@:
	SET_NOFLUSH_BIT \save_reg

.Lwrcr3_\@:
	/*
	 * The CR3 write could be avoided when not changing its value,
	 * but would require a CR3 read *and* a scratch register.
	 */
	movq	\save_reg, %cr3
.Lend_\@:
.endm
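
/*
 * Sketch of the paranoid-path pairing (the register choices are
 * illustrative assumptions): save_reg must be preserved across the body,
 * since it carries the original CR3 from entry to exit:
 *
 *	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 *	...				# work on kernel page tables
 *	RESTORE_CR3 scratch_reg=%rax save_reg=%r14
 */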

#else /* CONFIG_PAGE_TABLE_ISOLATION=n: */

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
.endm
.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
.endm
.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
.endm
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
.endm
.macro RESTORE_CR3 scratch_reg:req save_reg:req
.endm

#endif

/*
 * IBRS kernel mitigation for Spectre_v2.
 *
 * Assumes full context is established (PUSH_REGS, CR3 and GS) and clobbers
 * the regs it uses (AX, CX, DX). Must be called before the first RET
 * instruction (NOTE! UNTRAIN_RET includes a RET instruction).
 *
 * The optional save_reg argument is used to save/restore the current MSR
 * value, which the paranoid paths need.
 *
 * Assumes x86_spec_ctrl_{base,current} have SPEC_CTRL_IBRS set.
 */
.macro IBRS_ENTER save_reg
#ifdef CONFIG_CPU_IBRS_ENTRY
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
	movl	$MSR_IA32_SPEC_CTRL, %ecx

.ifnb \save_reg
	rdmsr
	shl	$32, %rdx
	or	%rdx, %rax
	mov	%rax, \save_reg
	test	$SPEC_CTRL_IBRS, %eax
	jz	.Ldo_wrmsr_\@
	lfence
	jmp	.Lend_\@
.Ldo_wrmsr_\@:
.endif

	movq	PER_CPU_VAR(x86_spec_ctrl_current), %rdx
	movl	%edx, %eax
	shr	$32, %rdx
	wrmsr
.Lend_\@:
#endif
.endm
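
/*
 * Note on the rdmsr/wrmsr sequences above: MSRs are 64 bits wide, but the
 * instructions transfer them through the EDX:EAX pair, so the value is
 * split and reassembled by hand:
 *
 *	rdmsr			# EDX = bits 63:32, EAX = bits 31:0
 *	shl	$32, %rdx
 *	or	%rdx, %rax	# full 64-bit value in RAX
 *
 *	mov	%rax, %rdx
 *	shr	$32, %rdx	# back into EDX:EAX
 *	wrmsr
 */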

/*
 * Similar to IBRS_ENTER, requires KERNEL GS,CR3 and clobbers (AX, CX, DX)
 * regs. Must be called after the last RET.
 */
.macro IBRS_EXIT save_reg
#ifdef CONFIG_CPU_IBRS_ENTRY
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
	movl	$MSR_IA32_SPEC_CTRL, %ecx

.ifnb \save_reg
	mov	\save_reg, %rdx
.else
	movq	PER_CPU_VAR(x86_spec_ctrl_current), %rdx
	andl	$(~SPEC_CTRL_IBRS), %edx
.endif

	movl	%edx, %eax
	shr	$32, %rdx
	wrmsr
.Lend_\@:
#endif
.endm

/*
 * Mitigate Spectre v1 for conditional swapgs code paths.
 *
 * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
 * prevent a speculative swapgs when coming from kernel space.
 *
 * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path,
 * to prevent the swapgs from getting speculatively skipped when coming from
 * user space.
 */
.macro FENCE_SWAPGS_USER_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER
.endm
.macro FENCE_SWAPGS_KERNEL_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL
.endm
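
/*
 * Hedged sketch of the intended placement (labels and flow are
 * illustrative, not from this file): entry code that conditionally
 * executes swapgs fences both outcomes of the branch:
 *
 *	testb	$3, CS(%rsp)		# did we come from user mode?
 *	jz	.Lfrom_kernel
 *	swapgs
 *	FENCE_SWAPGS_USER_ENTRY		# swapgs can't be speculated past this
 *	jmp	.Lcommon
 * .Lfrom_kernel:
 *	FENCE_SWAPGS_KERNEL_ENTRY	# nor can the skipped swapgs be
 * .Lcommon:				#   speculatively executed
 */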

.macro STACKLEAK_ERASE_NOCLOBBER
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	PUSH_AND_CLEAR_REGS
	call stackleak_erase
	POP_REGS
#endif
.endm

.macro SAVE_AND_SET_GSBASE scratch_reg:req save_reg:req
	rdgsbase \save_reg
	GET_PERCPU_BASE \scratch_reg
	wrgsbase \scratch_reg
.endm
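
/*
 * Illustrative pairing for SAVE_AND_SET_GSBASE (a sketch; the register
 * choices are assumptions): FSGSBASE-aware paranoid entry code saves the
 * possibly-user GSBASE and installs the per-CPU base, and the matching
 * exit path restores the saved value:
 *
 *	SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
 *	...
 *	wrgsbase %rbx			# restore the previous GSBASE
 */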

#else /* CONFIG_X86_64 */
# undef		UNWIND_HINT_IRET_REGS
# define	UNWIND_HINT_IRET_REGS
#endif /* !CONFIG_X86_64 */

.macro STACKLEAK_ERASE
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	call stackleak_erase
#endif
.endm

#ifdef CONFIG_SMP

/*
 * The CPU/node NR is loaded from the limit (size) field of a special
 * segment descriptor entry in the GDT.
 */
.macro LOAD_CPU_AND_NODE_SEG_LIMIT reg:req
	movq	$__CPUNODE_SEG, \reg
	lsl	\reg, \reg
.endm

/*
 * Fetch the per-CPU GSBASE value for this processor and put it in @reg.
 * We normally use %gs for accessing per-CPU data, but we are setting up
 * %gs here and obviously cannot use %gs itself to access per-CPU data.
 *
 * Do not use RDPID, because KVM loads the guest's TSC_AUX on vm-entry and
 * may not restore the host's value until the CPU returns to userspace.
 * Thus the kernel would consume a guest's TSC_AUX if an NMI arrives
 * while running KVM's run loop.
 */
.macro GET_PERCPU_BASE reg:req
	LOAD_CPU_AND_NODE_SEG_LIMIT \reg
	andq	$VDSO_CPUNODE_MASK, \reg
	movq	__per_cpu_offset(, \reg, 8), \reg
.endm
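
/*
 * Worked example of the lookup above (the CPU number is hypothetical):
 * if the segment limit yields CPU 3 after masking with VDSO_CPUNODE_MASK,
 * the final instruction loads __per_cpu_offset[3] via scaled indexing:
 *
 *	movq	__per_cpu_offset(, \reg, 8), \reg	# \reg = __per_cpu_offset[\reg]
 */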

#else

.macro GET_PERCPU_BASE reg:req
	movq	pcpu_unit_offsets(%rip), \reg
.endm

#endif /* CONFIG_SMP */
423