/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.txt
 *
 * A note on terminology:
 * - iret frame:	Architecture-defined interrupt frame from SS to RIP
 *			at the top of the kernel process stack.
 *
 * Some macro usage:
 * - ENTRY/END:		Define functions in the symbol table.
 * - TRACE_IRQ_*:	Trace hardirq state for lock debugging.
 * - idtentry:		Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include "calling.h"
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <asm/export.h>
#include <linux/err.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64			(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT			0x80000000
#define __AUDIT_ARCH_LE				0x40000000
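/*
 * For reference (a fact about the constants above, not new code):
 * EM_X86_64 is 62 (0x3e), so AUDIT_ARCH_X86_64 works out to 0xc000003e,
 * the same value that seccomp/audit userspace compares against.
 */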

.code64
.section .entry.text, "ax"

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
ENDPROC(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */

.macro TRACE_IRQS_IRETQ
#ifdef CONFIG_TRACE_IRQFLAGS
	bt	$9, EFLAGS(%rsp)		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON
1:
#endif
.endm

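/*
 * (Note: bit 9 of the saved EFLAGS is IF.  If the frame we are about to
 * IRETQ to has IF set, the IRETQ will re-enable interrupts, so the macro
 * above tells lockdep about that transition ahead of time.)
 */
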
/*
 * When the dynamic function tracer is enabled it will add a breakpoint
 * to all locations that it is about to modify, sync CPUs, update
 * all the code, sync CPUs, then remove the breakpoints. During this time
 * if lockdep is enabled, it might jump back into the debug handler
 * (via TRACE_IRQS_ON/OFF) outside of the IST-protected update window.
 *
 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
 * make sure the stack pointer does not get reset back to the top
 * of the debug stack, and instead just reuses the current stack.
 */
#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)

.macro TRACE_IRQS_OFF_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_OFF
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_ON_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_ON
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_IRETQ_DEBUG
	bt	$9, EFLAGS(%rsp)		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON_DEBUG
1:
.endm

#else
# define TRACE_IRQS_OFF_DEBUG			TRACE_IRQS_OFF
# define TRACE_IRQS_ON_DEBUG			TRACE_IRQS_ON
# define TRACE_IRQS_IRETQ_DEBUG			TRACE_IRQS_IRETQ
#endif

/*
 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
 * This is the only entry point used for 64-bit system calls.  The
 * hardware interface is reasonably well designed and the register-to-
 * argument mapping Linux uses fits well with the registers that are
 * available when SYSCALL is used.
 *
 * SYSCALL instructions can be found inlined in libc implementations as
 * well as some other programs and libraries.  There are also a handful
 * of SYSCALL instructions in the vDSO used, for example, as a
 * clock_gettime() fallback.
 *
 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Registers on entry:
 * rax  system call number
 * rcx  return address
 * r11  saved rflags (note: r11 is a callee-clobbered register in the C ABI)
 * rdi  arg0
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 (needs to be moved to rcx to conform to the C ABI)
 * r8   arg4
 * r9   arg5
 * (note: r12-r15, rbp, rbx are callee-preserved in the C ABI)
 *
 * Only called from user space.
 *
 * When the user can change pt_regs->foo, always force IRET.  That is
 * because IRET deals with non-canonical addresses better.  SYSRET has
 * trouble with them due to bugs in both AMD and Intel CPUs.
 */
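
/*
 * To illustrate the convention above (user-space code, not part of this
 * file): a minimal write(2) issued directly via SYSCALL might look like
 *
 *	movl	$1, %eax		# __NR_write
 *	movl	$1, %edi		# fd  = stdout
 *	leaq	msg(%rip), %rsi		# buf
 *	movl	$len, %edx		# count
 *	syscall				# rax = result; rcx/r11 clobbered
 *
 * msg/len are placeholders; note that a 4th argument would go in r10,
 * not rcx, precisely because SYSCALL itself overwrites rcx.
 */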

ENTRY(entry_SYSCALL_64)
	/*
	 * Interrupts are off on entry.
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * as it is too small to ever cause noticeable irq latency.
	 */
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
GLOBAL(entry_SYSCALL_64_after_swapgs)

	movq	%rsp, PER_CPU_VAR(rsp_scratch)
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	TRACE_IRQS_OFF

	/* Construct struct pt_regs on stack */
	pushq	$__USER_DS			/* pt_regs->ss */
	pushq	PER_CPU_VAR(rsp_scratch)	/* pt_regs->sp */
	pushq	%r11				/* pt_regs->flags */
	pushq	$__USER_CS			/* pt_regs->cs */
	pushq	%rcx				/* pt_regs->ip */
	pushq	%rax				/* pt_regs->orig_ax */
	pushq	%rdi				/* pt_regs->di */
	pushq	%rsi				/* pt_regs->si */
	pushq	%rdx				/* pt_regs->dx */
	pushq	%rcx				/* pt_regs->cx */
	pushq	$-ENOSYS			/* pt_regs->ax */
	pushq	%r8				/* pt_regs->r8 */
	pushq	%r9				/* pt_regs->r9 */
	pushq	%r10				/* pt_regs->r10 */
	pushq	%r11				/* pt_regs->r11 */
	sub	$(6*8), %rsp			/* pt_regs->bp, bx, r12-15 not saved */
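
	/*
	 * At this point the stack is laid out as a struct pt_regs, with the
	 * iret frame (ss, sp, flags, cs, ip) at the highest addresses,
	 * orig_ax below it, then the GP registers, and 6*8 bytes of
	 * uninitialized space standing in for bp, bx and r12-r15.  The fast
	 * path never reads those six slots; SAVE_EXTRA_REGS fills them in
	 * before we enter any slow path that might look at them.
	 */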

	/*
	 * If we need to do entry work or if we guess we'll need to do
	 * exit work, go straight to the slow path.
	 */
	testl	$_TIF_WORK_SYSCALL_ENTRY|_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	entry_SYSCALL64_slow_path

entry_SYSCALL_64_fastpath:
	/*
	 * Easy case: enable interrupts and issue the syscall.  If the syscall
	 * needs pt_regs, we'll call a stub that disables interrupts again
	 * and jumps to the slow path.
	 */
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
#if __SYSCALL_MASK == ~0
	cmpq	$__NR_syscall_max, %rax
#else
	andl	$__SYSCALL_MASK, %eax
	cmpl	$__NR_syscall_max, %eax
#endif
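	/*
	 * (__SYSCALL_MASK is ~0 unless CONFIG_X86_X32_ABI is enabled, in
	 * which case it strips __X32_SYSCALL_BIT from the syscall number
	 * so that x32 calls index the same table -- see asm/unistd.h.)
	 */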
	ja	1f				/* return -ENOSYS (already in pt_regs->ax) */
	movq	%r10, %rcx

	/*
	 * This call instruction is handled specially in stub_ptregs_64.
	 * It might end up jumping to the slow path.  If it jumps, RAX
	 * and all argument registers are clobbered.
	 */
	call	*sys_call_table(, %rax, 8)
.Lentry_SYSCALL_64_after_fastpath_call:

	movq	%rax, RAX(%rsp)
1:

	/*
	 * If we get here, then we know that pt_regs is clean for SYSRET64.
	 * If we see that no exit work is required (which we are required
	 * to check with IRQs off), then we can go straight to SYSRET64.
	 */
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl	$_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	1f

	LOCKDEP_SYS_EXIT
	TRACE_IRQS_ON		/* user mode is traced as IRQs on */
	movq	RIP(%rsp), %rcx
	movq	EFLAGS(%rsp), %r11
	RESTORE_C_REGS_EXCEPT_RCX_R11
	movq	RSP(%rsp), %rsp
	USERGS_SYSRET64

1:
	/*
	 * The fast path looked good when we started, but something changed
	 * along the way and we need to switch to the slow path.  Calling
	 * raise(3) will trigger this, for example.  IRQs are off.
	 */
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi
	call	syscall_return_slowpath	/* returns with IRQs disabled */
	jmp	return_from_SYSCALL_64

entry_SYSCALL64_slow_path:
	/* IRQs are off. */
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi
	call	do_syscall_64		/* returns with IRQs disabled */

return_from_SYSCALL_64:
	RESTORE_EXTRA_REGS
	TRACE_IRQS_IRETQ		/* we're about to change IF */

	/*
	 * Try to use SYSRET instead of IRET if we're returning to
	 * a completely clean 64-bit userspace context.
	 */
	movq	RCX(%rsp), %rcx
	movq	RIP(%rsp), %r11
	cmpq	%rcx, %r11			/* RCX == RIP */
	jne	opportunistic_sysret_failed

	/*
	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
	 * in kernel space.  This essentially lets the user take over
	 * the kernel, since userspace controls RSP.
	 *
	 * If the width of the "canonical tail" ever becomes variable, this
	 * will need to be updated to remain correct on both old and new CPUs.
	 */
	.ifne __VIRTUAL_MASK_SHIFT - 47
	.error "virtual address width changed -- SYSRET checks need update"
	.endif

	/* Change the top 16 bits to be the sign-extension of the 47th bit */
	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
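
	/*
	 * Worked example of the shl/sar pair (with __VIRTUAL_MASK_SHIFT
	 * == 47, the shift count is 16):
	 *
	 *   canonical:	    0x00007fffffffe000 << 16 >> 16 == itself
	 *   non-canonical: 0x0000800000000000 << 16 >> 16
	 *					== 0xffff800000000000 != itself
	 *
	 * i.e. shifting bit 47 into the sign position and arithmetic-
	 * shifting back reproduces the value only if bits 63:47 were
	 * already a sign-extension of bit 47, which is exactly the
	 * canonical-address condition that the cmpq below tests.
	 */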

	/* If this changed %rcx, it was not canonical */
	cmpq	%rcx, %r11
	jne	opportunistic_sysret_failed

	cmpq	$__USER_CS, CS(%rsp)		/* CS must match SYSRET */
	jne	opportunistic_sysret_failed

	movq	R11(%rsp), %r11
	cmpq	%r11, EFLAGS(%rsp)		/* R11 == RFLAGS */
	jne	opportunistic_sysret_failed

	/*
	 * SYSRET can't restore RF.  SYSRET can restore TF, but unlike IRET,
	 * restoring TF results in a trap from userspace immediately after
	 * SYSRET.  This would cause an infinite loop whenever #DB happens
	 * with register state that satisfies the opportunistic SYSRET
	 * conditions.  For example, single-stepping this user code:
	 *
	 *           movq	$stuck_here, %rcx
	 *           pushfq
	 *           popq %r11
	 *   stuck_here:
	 *
	 * would never get past 'stuck_here'.
	 */
	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
	jnz	opportunistic_sysret_failed

	/* nothing to check for RSP */

	cmpq	$__USER_DS, SS(%rsp)		/* SS must match SYSRET */
	jne	opportunistic_sysret_failed

	/*
	 * We win! This label is here just for ease of understanding
	 * perf profiles. Nothing jumps here.
	 */
syscall_return_via_sysret:
	/* rcx and r11 are already restored (see code above) */
	RESTORE_C_REGS_EXCEPT_RCX_R11
	movq	RSP(%rsp), %rsp
	USERGS_SYSRET64

opportunistic_sysret_failed:
	SWAPGS
	jmp	restore_c_regs_and_iret
END(entry_SYSCALL_64)

ENTRY(stub_ptregs_64)
	/*
	 * Syscalls marked as needing ptregs land here.
	 * If we are on the fast path, we need to save the extra regs,
	 * which we achieve by trying again on the slow path.  If we are on
	 * the slow path, the extra regs are already saved.
	 *
	 * RAX stores a pointer to the C function implementing the syscall.
	 * IRQs are on.
	 */
	cmpq	$.Lentry_SYSCALL_64_after_fastpath_call, (%rsp)
	jne	1f

	/*
	 * Called from fast path -- disable IRQs again, pop return address
	 * and jump to slow path
	 */
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	popq	%rax
	jmp	entry_SYSCALL64_slow_path

1:
	jmp	*%rax				/* called from C */
END(stub_ptregs_64)

.macro ptregs_stub func
ENTRY(ptregs_\func)
	leaq	\func(%rip), %rax
	jmp	stub_ptregs_64
END(ptregs_\func)
.endm

/* Instantiate ptregs_stub for each ptregs-using syscall */
#define __SYSCALL_64_QUAL_(sym)
#define __SYSCALL_64_QUAL_ptregs(sym) ptregs_stub sym
#define __SYSCALL_64(nr, sym, qual) __SYSCALL_64_QUAL_##qual(sym)
#include <asm/syscalls_64.h>
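
/*
 * An illustrative expansion (assuming sys_execve is one of the
 * /ptregs-qualified entries in syscall_64.tbl, slot 59): the generated
 * asm/syscalls_64.h line "__SYSCALL_64(59, sys_execve, ptregs)" expands
 * to "ptregs_stub sys_execve", which in turn emits
 *
 *	ENTRY(ptregs_sys_execve)
 *		leaq	sys_execve(%rip), %rax
 *		jmp	stub_ptregs_64
 *	END(ptregs_sys_execve)
 *
 * and it is ptregs_sys_execve, not sys_execve, that sys_call_table
 * points at for that slot.
 */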

/*
 * A newly forked process directly context switches into this address.
 *
 * rdi: prev task we switched from
 */
ENTRY(ret_from_fork)
	LOCK ; btr $TIF_FORK, TI_flags(%r8)

	call	schedule_tail			/* rdi: 'prev' task parameter */

	testb	$3, CS(%rsp)			/* from kernel_thread? */
	jnz	1f

	/*
	 * We came from kernel_thread.  This code path is quite twisted, and
	 * someone should clean it up.
	 *
	 * copy_thread_tls stashes the function pointer in RBX and the
	 * parameter to be passed in RBP.  The called function is permitted
	 * to call do_execve and thereby jump to user mode.
	 */
	movq	RBP(%rsp), %rdi
	call	*RBX(%rsp)
	movl	$0, RAX(%rsp)

	/*
	 * Fall through as though we're exiting a syscall.  This makes a
	 * twisted sort of sense if we just called do_execve.
	 */

1:
	movq	%rsp, %rdi
	call	syscall_return_slowpath	/* returns with IRQs disabled */
	TRACE_IRQS_ON			/* user mode is traced as IRQS on */
	SWAPGS
	jmp	restore_regs_and_iret
END(ret_from_fork)

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushq	$(~vector+0x80)			/* Note: always in signed byte range */
    vector=vector+1
	jmp	common_interrupt
	.align	8
    .endr
END(irq_entries_start)
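
/*
 * A sketch of what one stub looks like, e.g. for vector 0x21:
 *
 *	pushq	$(~0x21+0x80)	# pushq $0x5e, a 2-byte imm8 push
 *	jmp	common_interrupt
 *
 * The +0x80 bias keeps the immediate within signed-byte range so the
 * pushq encodes in 2 bytes and the whole stub fits its 8-byte slot;
 * common_interrupt undoes the bias with "addq $-0x80, (%rsp)", leaving
 * ~vector on the stack, which do_IRQ inverts to recover the vector.
 */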

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in the
 * fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): ~(interrupt number) */
	.macro interrupt func
	cld
	ALLOC_PT_GPREGS_ON_STACK
	SAVE_C_REGS
	SAVE_EXTRA_REGS

	testb	$3, CS(%rsp)
	jz	1f

	/*
	 * IRQ from user mode.  Switch to kernel gsbase and inform context
	 * tracking that we're in kernel mode.
	 */
	SWAPGS

	/*
	 * We need to tell lockdep that IRQs are off.  We can't do this until
	 * we fix gsbase, and we should do it before enter_from_user_mode
	 * (which can take locks).  Since TRACE_IRQS_OFF is idempotent,
	 * the simplest way to handle it is to just call it twice if
	 * we enter from user mode.  There's no reason to optimize this since
	 * TRACE_IRQS_OFF is a no-op if lockdep is off.
	 */
	TRACE_IRQS_OFF

	CALL_enter_from_user_mode

1:
	/*
	 * Save previous stack pointer, optionally switch to interrupt stack.
	 * irq_count is used to check whether a CPU is already on an interrupt
	 * stack or not. While this is essentially redundant with preempt_count,
	 * it is a little cheaper to use a separate counter in per-cpu data
	 * (short of moving irq_enter into assembly, which would be too
	 * much work)
	 */
	movq	%rsp, %rdi
	incl	PER_CPU_VAR(irq_count)
	cmovzq	PER_CPU_VAR(irq_stack_ptr), %rsp
	pushq	%rdi
	/* We entered an interrupt context - irqs are off: */
	TRACE_IRQS_OFF

	call	\func	/* rdi points to pt_regs */
	.endm

	/*
	 * The interrupt stubs push (~vector+0x80) onto the stack and
	 * then jump to common_interrupt.
	 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addq	$-0x80, (%rsp)			/* Adjust vector to [-256, -1] range */
	interrupt do_IRQ
	/* 0(%rsp): old RSP */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl	PER_CPU_VAR(irq_count)

	/* Restore saved previous stack */
	popq	%rsp

	testb	$3, CS(%rsp)
	jz	retint_kernel

	/* Interrupt came from user space */
GLOBAL(retint_user)
	mov	%rsp, %rdi
	call	prepare_exit_to_usermode
	TRACE_IRQS_IRETQ
	SWAPGS
	jmp	restore_regs_and_iret

/* Returning to kernel space */
retint_kernel:
#ifdef CONFIG_PREEMPT
	/* Interrupts are off */
	/* Check if we need preemption */
	bt	$9, EFLAGS(%rsp)		/* were interrupts off? */
	jnc	1f
0:	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	1f
	call	preempt_schedule_irq
	jmp	0b
1:
#endif
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ

/*
 * At this label, code paths that return to kernel mode and to user mode,
 * coming from interrupts/exceptions and from syscalls, merge.
 */
GLOBAL(restore_regs_and_iret)
	RESTORE_EXTRA_REGS
restore_c_regs_and_iret:
	RESTORE_C_REGS
	REMOVE_PT_GPREGS_FROM_STACK 8
	INTERRUPT_RETURN

ENTRY(native_iret)
	/*
	 * Are we returning to a stack segment from the LDT?  Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
#ifdef CONFIG_X86_ESPFIX64
	testb	$4, (SS-RIP)(%rsp)
	jnz	native_irq_return_ldt
#endif

.global native_irq_return_iret
native_irq_return_iret:
	/*
	 * This may fault.  Non-paranoid faults on return to userspace are
	 * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
	 * Double-faults due to espfix64 are handled in do_double_fault.
	 * Other faults here are fatal.
	 */
	iretq

#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
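	/*
	 * Rough idea of what follows (a summary, not new behavior): IRET to
	 * a 16-bit stack segment restores only the low 16 bits of RSP, which
	 * would both leak and corrupt kernel stack address bits.  So we copy
	 * the iret frame (plus the scratch RAX slot) through the per-CPU
	 * espfix_waddr alias, point RSP at the matching espfix_stack alias
	 * of that copy, and IRET from there instead.
	 */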
	pushq	%rax
	pushq	%rdi
	SWAPGS
	movq	PER_CPU_VAR(espfix_waddr), %rdi
	movq	%rax, (0*8)(%rdi)		/* RAX */
	movq	(2*8)(%rsp), %rax		/* RIP */
	movq	%rax, (1*8)(%rdi)
	movq	(3*8)(%rsp), %rax		/* CS */
	movq	%rax, (2*8)(%rdi)
	movq	(4*8)(%rsp), %rax		/* RFLAGS */
	movq	%rax, (3*8)(%rdi)
	movq	(6*8)(%rsp), %rax		/* SS */
	movq	%rax, (5*8)(%rdi)
	movq	(5*8)(%rsp), %rax		/* RSP */
	movq	%rax, (4*8)(%rdi)
	andl	$0xffff0000, %eax
	popq	%rdi
	orq	PER_CPU_VAR(espfix_stack), %rax
	SWAPGS
	movq	%rax, %rsp
	popq	%rax
	jmp	native_irq_return_iret
#endif
END(common_interrupt)

/*
 * APIC interrupts.
 */
.macro apicinterrupt3 num sym do_sym
ENTRY(\sym)
	ASM_CLAC
	pushq	$~(\num)
.Lcommon_\sym:
	interrupt \do_sym
	jmp	ret_from_intr
END(\sym)
.endm

#ifdef CONFIG_TRACING
#define trace(sym) trace_##sym
#define smp_trace(sym) smp_trace_##sym

.macro trace_apicinterrupt num sym
apicinterrupt3 \num trace(\sym) smp_trace(\sym)
.endm
#else
.macro trace_apicinterrupt num sym do_sym
.endm
#endif

.macro apicinterrupt num sym do_sym
apicinterrupt3 \num \sym \do_sym
trace_apicinterrupt \num \sym
.endm

#ifdef CONFIG_SMP
apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR		irq_move_cleanup_interrupt	smp_irq_move_cleanup_interrupt
apicinterrupt3 REBOOT_VECTOR			reboot_interrupt		smp_reboot_interrupt
#endif

#ifdef CONFIG_X86_UV
apicinterrupt3 UV_BAU_MESSAGE			uv_bau_message_intr1		uv_bau_message_interrupt
#endif

apicinterrupt LOCAL_TIMER_VECTOR		apic_timer_interrupt		smp_apic_timer_interrupt
apicinterrupt X86_PLATFORM_IPI_VECTOR		x86_platform_ipi		smp_x86_platform_ipi

#ifdef CONFIG_HAVE_KVM
apicinterrupt3 POSTED_INTR_VECTOR		kvm_posted_intr_ipi		smp_kvm_posted_intr_ipi
apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR	kvm_posted_intr_wakeup_ipi	smp_kvm_posted_intr_wakeup_ipi
#endif

#ifdef CONFIG_X86_MCE_THRESHOLD
apicinterrupt THRESHOLD_APIC_VECTOR		threshold_interrupt		smp_threshold_interrupt
#endif

#ifdef CONFIG_X86_MCE_AMD
apicinterrupt DEFERRED_ERROR_VECTOR		deferred_error_interrupt	smp_deferred_error_interrupt
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
apicinterrupt THERMAL_APIC_VECTOR		thermal_interrupt		smp_thermal_interrupt
#endif

#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR	call_function_single_interrupt	smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR		call_function_interrupt		smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR			reschedule_interrupt		smp_reschedule_interrupt
#endif

apicinterrupt ERROR_APIC_VECTOR			error_interrupt			smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR		spurious_interrupt		smp_spurious_interrupt

#ifdef CONFIG_IRQ_WORK
apicinterrupt IRQ_WORK_VECTOR			irq_work_interrupt		smp_irq_work_interrupt
#endif

/*
 * Exception entry points.
 */
#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)

.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
ENTRY(\sym)
	/* Sanity check */
	.if \shift_ist != -1 && \paranoid == 0
	.error "using shift_ist requires paranoid=1"
	.endif

	ASM_CLAC
	PARAVIRT_ADJUST_EXCEPTION_FRAME

	.ifeq \has_error_code
	pushq	$-1				/* ORIG_RAX: no syscall to restart */
	.endif

	ALLOC_PT_GPREGS_ON_STACK

	.if \paranoid
	.if \paranoid == 1
	testb	$3, CS(%rsp)			/* If coming from userspace, switch stacks */
	jnz	1f
	.endif
	call	paranoid_entry
	.else
	call	error_entry
	.endif
	/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */

	.if \paranoid
	.if \shift_ist != -1
	TRACE_IRQS_OFF_DEBUG			/* reload IDT in case of recursion */
	.else
	TRACE_IRQS_OFF
	.endif
	.endif

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	.if \shift_ist != -1
	subq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	call	\do_sym

	.if \shift_ist != -1
	addq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	/* these procedures expect "no swapgs" flag in ebx */
	.if \paranoid
	jmp	paranoid_exit
	.else
	jmp	error_exit
	.endif

	.if \paranoid == 1
	/*
	 * Paranoid entry from userspace.  Switch stacks and treat it
	 * as a normal entry.  This means that paranoid handlers
	 * run in real process context if user_mode(regs).
	 */
1:
	call	error_entry

	movq	%rsp, %rdi			/* pt_regs pointer */
	call	sync_regs
	movq	%rax, %rsp			/* switch stack */

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	call	\do_sym

	jmp	error_exit			/* %ebx: no swapgs flag */
	.endif
END(\sym)
.endm
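
/*
 * A sketch of how one expansion reads, using the non-paranoid
 * "idtentry overflow do_overflow has_error_code=0" from the list below:
 *
 *	ENTRY(overflow)
 *		ASM_CLAC
 *		PARAVIRT_ADJUST_EXCEPTION_FRAME
 *		pushq	$-1			# no hardware error code
 *		ALLOC_PT_GPREGS_ON_STACK
 *		call	error_entry		# save regs, maybe SWAPGS
 *		movq	%rsp, %rdi		# pt_regs pointer
 *		xorl	%esi, %esi		# no error code
 *		call	do_overflow
 *		jmp	error_exit
 *	END(overflow)
 */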

#ifdef CONFIG_TRACING
.macro trace_idtentry sym do_sym has_error_code:req
idtentry trace(\sym) trace(\do_sym) has_error_code=\has_error_code
idtentry \sym \do_sym has_error_code=\has_error_code
.endm
#else
.macro trace_idtentry sym do_sym has_error_code:req
idtentry \sym \do_sym has_error_code=\has_error_code
.endm
#endif

idtentry divide_error			do_divide_error			has_error_code=0
idtentry overflow			do_overflow			has_error_code=0
idtentry bounds				do_bounds			has_error_code=0
idtentry invalid_op			do_invalid_op			has_error_code=0
idtentry device_not_available		do_device_not_available		has_error_code=0
idtentry double_fault			do_double_fault			has_error_code=1 paranoid=2
idtentry coprocessor_segment_overrun	do_coprocessor_segment_overrun	has_error_code=0
idtentry invalid_TSS			do_invalid_TSS			has_error_code=1
idtentry segment_not_present		do_segment_not_present		has_error_code=1
idtentry spurious_interrupt_bug		do_spurious_interrupt_bug	has_error_code=0
idtentry coprocessor_error		do_coprocessor_error		has_error_code=0
idtentry alignment_check		do_alignment_check		has_error_code=1
idtentry simd_coprocessor_error		do_simd_coprocessor_error	has_error_code=0


	/*
	 * Reload gs selector with exception handling
	 * edi:  new selector
	 */
ENTRY(native_load_gs_index)
	pushfq
	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
	SWAPGS
.Lgs_change:
	movl	%edi, %gs
2:	ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
	SWAPGS
	popfq
	ret
END(native_load_gs_index)
EXPORT_SYMBOL(native_load_gs_index)

	_ASM_EXTABLE(.Lgs_change, bad_gs)
	.section .fixup, "ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS					/* switch back to user gs */
.macro ZAP_GS
	/* This can't be a string because the preprocessor needs to see it. */
	movl $__USER_DS, %eax
	movl %eax, %gs
.endm
	ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG
	xorl	%eax, %eax
	movl	%eax, %gs
	jmp	2b
	.previous

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(do_softirq_own_stack)
	pushq	%rbp
	mov	%rsp, %rbp
	incl	PER_CPU_VAR(irq_count)
	cmove	PER_CPU_VAR(irq_stack_ptr), %rsp
	push	%rbp				/* frame pointer backlink */
	call	__do_softirq
	leaveq
	decl	PER_CPU_VAR(irq_count)
	ret
END(do_softirq_own_stack)

#ifdef CONFIG_XEN
idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0

/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 */
ENTRY(xen_do_hypervisor_callback)		/* do_hypervisor_callback(struct pt_regs *) */

/*
 * Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *)
 * will see the correct pointer to the pt_regs
 */
	movq	%rdi, %rsp			/* we don't return, adjust the stack frame */
11:	incl	PER_CPU_VAR(irq_count)
	movq	%rsp, %rbp
	cmovzq	PER_CPU_VAR(irq_stack_ptr), %rsp
	pushq	%rbp				/* frame pointer backlink */
	call	xen_evtchn_do_upcall
	popq	%rsp
	decl	PER_CPU_VAR(irq_count)
#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
#endif
	jmp	error_exit
END(xen_do_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
ENTRY(xen_failsafe_callback)
	movl	%ds, %ecx
	cmpw	%cx, 0x10(%rsp)
	jne	1f
	movl	%es, %ecx
	cmpw	%cx, 0x18(%rsp)
	jne	1f
	movl	%fs, %ecx
	cmpw	%cx, 0x20(%rsp)
	jne	1f
	movl	%gs, %ecx
	cmpw	%cx, 0x28(%rsp)
	jne	1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	pushq	$0				/* RIP */
	pushq	%r11
	pushq	%rcx
	jmp	general_protection
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	pushq	$-1 /* orig_ax = -1 => not a system call */
	ALLOC_PT_GPREGS_ON_STACK
	SAVE_C_REGS
	SAVE_EXTRA_REGS
	jmp	error_exit
END(xen_failsafe_callback)

apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	xen_hvm_callback_vector xen_evtchn_do_upcall

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	hyperv_callback_vector hyperv_vector_handler
#endif /* CONFIG_HYPERV */

idtentry debug			do_debug		has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
idtentry int3			do_int3			has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
idtentry stack_segment		do_stack_segment	has_error_code=1

#ifdef CONFIG_XEN
idtentry xen_debug		do_debug		has_error_code=0
idtentry xen_int3		do_int3			has_error_code=0
idtentry xen_stack_segment	do_stack_segment	has_error_code=1
#endif

idtentry general_protection	do_general_protection	has_error_code=1
trace_idtentry page_fault	do_page_fault		has_error_code=1

#ifdef CONFIG_KVM_GUEST
idtentry async_page_fault	do_async_page_fault	has_error_code=1
#endif

#ifdef CONFIG_X86_MCE
idtentry machine_check					has_error_code=0	paranoid=1 do_sym=*machine_check_vector(%rip)
#endif

/*
 * Save all registers in pt_regs, and switch gs if needed.
 * Use a slow but surefire "are we in kernel?" check.
 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
 */
ENTRY(paranoid_entry)
	cld
	SAVE_C_REGS 8
	SAVE_EXTRA_REGS 8
	movl	$1, %ebx
	movl	$MSR_GS_BASE, %ecx
	rdmsr
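	/*
	 * rdmsr returns the 64-bit MSR value split across edx:eax, so the
	 * sign bit of %edx is bit 63 of GSBASE; kernel gsbase points at
	 * percpu space, whose addresses have bit 63 set.
	 */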
	testl	%edx, %edx
	js	1f				/* negative -> in kernel */
	SWAPGS
	xorl	%ebx, %ebx
1:	ret
END(paranoid_entry)

/*
 * "Paranoid" exit path from exception stack.  This is invoked
 * only on return from non-NMI IST interrupts that came
 * from kernel space.
 *
 * We may be returning to very strange contexts (e.g. very early
 * in syscall entry), so checking for preemption here would
 * be complicated.  Fortunately, there's no good reason to try
 * to handle preemption here.
 *
 * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
 */
ENTRY(paranoid_exit)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF_DEBUG
	testl	%ebx, %ebx			/* swapgs needed? */
	jnz	paranoid_exit_no_swapgs
	TRACE_IRQS_IRETQ
	SWAPGS_UNSAFE_STACK
	jmp	paranoid_exit_restore
paranoid_exit_no_swapgs:
	TRACE_IRQS_IRETQ_DEBUG
paranoid_exit_restore:
	RESTORE_EXTRA_REGS
	RESTORE_C_REGS
	REMOVE_PT_GPREGS_FROM_STACK 8
	INTERRUPT_RETURN
END(paranoid_exit)

/*
 * Save all registers in pt_regs, and switch gs if needed.
 * Return: EBX=0: came from user mode; EBX=1: otherwise
 */
ENTRY(error_entry)
	cld
	SAVE_C_REGS 8
	SAVE_EXTRA_REGS 8
	xorl	%ebx, %ebx
	testb	$3, CS+8(%rsp)
	jz	.Lerror_kernelspace

.Lerror_entry_from_usermode_swapgs:
	/*
	 * We entered from user mode or we're pretending to have entered
	 * from user mode due to an IRET fault.
	 */
	SWAPGS

.Lerror_entry_from_usermode_after_swapgs:
	/*
	 * We need to tell lockdep that IRQs are off.  We can't do this until
	 * we fix gsbase, and we should do it before enter_from_user_mode
	 * (which can take locks).
	 */
	TRACE_IRQS_OFF
	CALL_enter_from_user_mode
	ret

.Lerror_entry_done:
	TRACE_IRQS_OFF
	ret

	/*
	 * There are two places in the kernel that can potentially fault with
	 * usergs. Handle them here.  B-stepping K8s sometimes report a
	 * truncated RIP for IRET exceptions returning to compat mode. Check
	 * for these here too.
	 */
.Lerror_kernelspace:
	incl	%ebx
	leaq	native_irq_return_iret(%rip), %rcx
	cmpq	%rcx, RIP+8(%rsp)
	je	.Lerror_bad_iret
	movl	%ecx, %eax			/* zero extend */
	cmpq	%rax, RIP+8(%rsp)
	je	.Lbstep_iret
	cmpq	$.Lgs_change, RIP+8(%rsp)
	jne	.Lerror_entry_done

	/*
	 * hack: .Lgs_change can fail with user gsbase.  If this happens, fix up
	 * gsbase and proceed.  We'll fix up the exception and land in
	 * .Lgs_change's error handler with kernel gsbase.
	 */
	jmp	.Lerror_entry_from_usermode_swapgs

.Lbstep_iret:
	/* Fix truncated RIP */
	movq	%rcx, RIP+8(%rsp)
	/* fall through */

.Lerror_bad_iret:
	/*
	 * We came from an IRET to user mode, so we have user gsbase.
	 * Switch to kernel gsbase:
	 */
	SWAPGS

	/*
	 * Pretend that the exception came from user mode: set up pt_regs
	 * as if we faulted immediately after IRET and clear EBX so that
	 * error_exit knows that we will be returning to user mode.
	 */
	mov	%rsp, %rdi
	call	fixup_bad_iret
	mov	%rax, %rsp
	decl	%ebx
	jmp	.Lerror_entry_from_usermode_after_swapgs
END(error_entry)

/*
 * On entry, EBX is a "return to kernel mode" flag:
 *   1: already in kernel mode, don't need SWAPGS
 *   0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
 */
ENTRY(error_exit)
	movl	%ebx, %eax
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl	%eax, %eax
	jnz	retint_kernel
	jmp	retint_user
END(error_exit)

/* Runs on exception stack */
ENTRY(nmi)
	/*
	 * Fix up the exception frame if we're on Xen.
	 * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
	 * one value to the stack on native, so it may clobber the rdx
	 * scratch slot, but it won't clobber any of the important
	 * slots past it.
	 *
	 * Xen is a different story, because the Xen frame itself overlaps
	 * the "NMI executing" variable.
	 */
	PARAVIRT_ADJUST_EXCEPTION_FRAME

1090905a36a2SIngo Molnar	/*
1091905a36a2SIngo Molnar	 * We allow breakpoints in NMIs. If a breakpoint occurs, then
1092905a36a2SIngo Molnar	 * the iretq it performs will take us out of NMI context.
1093905a36a2SIngo Molnar	 * This means that we can have nested NMIs where the next
1094905a36a2SIngo Molnar	 * NMI is using the top of the stack of the previous NMI. We
1095905a36a2SIngo Molnar	 * can't let it execute because the nested NMI will corrupt the
1096905a36a2SIngo Molnar	 * stack of the previous NMI. NMI handlers are not re-entrant
1097905a36a2SIngo Molnar	 * anyway.
1098905a36a2SIngo Molnar	 *
1099905a36a2SIngo Molnar	 * To handle this case we do the following:
1100905a36a2SIngo Molnar	 *  Check the a special location on the stack that contains
1101905a36a2SIngo Molnar	 *  a variable that is set when NMIs are executing.
1102905a36a2SIngo Molnar	 *  The interrupted task's stack is also checked to see if it
1103905a36a2SIngo Molnar	 *  is an NMI stack.
1104905a36a2SIngo Molnar	 *  If the variable is not set and the stack is not the NMI
1105905a36a2SIngo Molnar	 *  stack then:
1106905a36a2SIngo Molnar	 *    o Set the special variable on the stack
11070b22930eSAndy Lutomirski	 *    o Copy the interrupt frame into an "outermost" location on the
11080b22930eSAndy Lutomirski	 *      stack
11090b22930eSAndy Lutomirski	 *    o Copy the interrupt frame into an "iret" location on the stack
1110905a36a2SIngo Molnar	 *    o Continue processing the NMI
1111905a36a2SIngo Molnar	 *  If the variable is set or the previous stack is the NMI stack:
11120b22930eSAndy Lutomirski	 *    o Modify the "iret" location to jump to repeat_nmi
1113905a36a2SIngo Molnar	 *    o Return to the first NMI
1114905a36a2SIngo Molnar	 *
1115905a36a2SIngo Molnar	 * Now, on exit of the first NMI, we first clear the stack variable.
1116905a36a2SIngo Molnar	 * The NMI stack will tell any nested NMIs at that point that it is
1117905a36a2SIngo Molnar	 * nested. Then we pop the stack normally with iret, and if there was
1118905a36a2SIngo Molnar	 * a nested NMI that updated the copied interrupt stack frame, a
1119905a36a2SIngo Molnar	 * jump will be made to the repeat_nmi code that will handle the second
1120905a36a2SIngo Molnar	 * NMI.
11219b6e6a83SAndy Lutomirski	 *
11229b6e6a83SAndy Lutomirski	 * However, espfix prevents us from directly returning to userspace
11239b6e6a83SAndy Lutomirski	 * with a single IRET instruction.  Similarly, IRET to user mode
11249b6e6a83SAndy Lutomirski	 * can fault.  We therefore handle NMIs from user space like
11259b6e6a83SAndy Lutomirski	 * other IST entries.
1126905a36a2SIngo Molnar	 */
1127905a36a2SIngo Molnar
1128905a36a2SIngo Molnar	/* Use %rdx as our temp variable throughout */
1129905a36a2SIngo Molnar	pushq	%rdx
1130905a36a2SIngo Molnar
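	/*
	 * CS-RIP is the offset of the saved CS relative to the saved RIP
	 * within the iret frame (8 bytes), and the extra +8 skips the rdx
	 * we just pushed.  The low two bits of CS hold the privilege
	 * level, so a nonzero result means we interrupted user mode.
	 */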
11319b6e6a83SAndy Lutomirski	testb	$3, CS-RIP+8(%rsp)
11329b6e6a83SAndy Lutomirski	jz	.Lnmi_from_kernel
1133905a36a2SIngo Molnar
1134905a36a2SIngo Molnar	/*
11359b6e6a83SAndy Lutomirski	 * NMI from user mode.  We need to run on the thread stack, but we
11369b6e6a83SAndy Lutomirski	 * can't go through the normal entry paths: NMIs are masked, and
11379b6e6a83SAndy Lutomirski	 * we don't want to enable interrupts, because then we'll end
11389b6e6a83SAndy Lutomirski	 * up in an awkward situation in which IRQs are on but NMIs
11399b6e6a83SAndy Lutomirski	 * are off.
114083c133cfSAndy Lutomirski	 *
114183c133cfSAndy Lutomirski	 * We also must not push anything to the stack before switching
114283c133cfSAndy Lutomirski	 * stacks lest we corrupt the "NMI executing" variable.
11439b6e6a83SAndy Lutomirski	 */
11449b6e6a83SAndy Lutomirski
114583c133cfSAndy Lutomirski	SWAPGS_UNSAFE_STACK
11469b6e6a83SAndy Lutomirski	cld
11479b6e6a83SAndy Lutomirski	movq	%rsp, %rdx
11489b6e6a83SAndy Lutomirski	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
11499b6e6a83SAndy Lutomirski	pushq	5*8(%rdx)	/* pt_regs->ss */
11509b6e6a83SAndy Lutomirski	pushq	4*8(%rdx)	/* pt_regs->rsp */
11519b6e6a83SAndy Lutomirski	pushq	3*8(%rdx)	/* pt_regs->flags */
11529b6e6a83SAndy Lutomirski	pushq	2*8(%rdx)	/* pt_regs->cs */
11539b6e6a83SAndy Lutomirski	pushq	1*8(%rdx)	/* pt_regs->rip */
11549b6e6a83SAndy Lutomirski	pushq   $-1		/* pt_regs->orig_ax */
11559b6e6a83SAndy Lutomirski	pushq   %rdi		/* pt_regs->di */
11569b6e6a83SAndy Lutomirski	pushq   %rsi		/* pt_regs->si */
11579b6e6a83SAndy Lutomirski	pushq   (%rdx)		/* pt_regs->dx */
11589b6e6a83SAndy Lutomirski	pushq   %rcx		/* pt_regs->cx */
11599b6e6a83SAndy Lutomirski	pushq   %rax		/* pt_regs->ax */
11609b6e6a83SAndy Lutomirski	pushq   %r8		/* pt_regs->r8 */
11619b6e6a83SAndy Lutomirski	pushq   %r9		/* pt_regs->r9 */
11629b6e6a83SAndy Lutomirski	pushq   %r10		/* pt_regs->r10 */
11639b6e6a83SAndy Lutomirski	pushq   %r11		/* pt_regs->r11 */
11649b6e6a83SAndy Lutomirski	pushq	%rbx		/* pt_regs->rbx */
11659b6e6a83SAndy Lutomirski	pushq	%rbp		/* pt_regs->rbp */
11669b6e6a83SAndy Lutomirski	pushq	%r12		/* pt_regs->r12 */
11679b6e6a83SAndy Lutomirski	pushq	%r13		/* pt_regs->r13 */
11689b6e6a83SAndy Lutomirski	pushq	%r14		/* pt_regs->r14 */
11699b6e6a83SAndy Lutomirski	pushq	%r15		/* pt_regs->r15 */
11709b6e6a83SAndy Lutomirski
11719b6e6a83SAndy Lutomirski	/*
11729b6e6a83SAndy Lutomirski	 * At this point we no longer need to worry about stack damage
11739b6e6a83SAndy Lutomirski	 * due to nesting -- we're on the normal thread stack and we're
11749b6e6a83SAndy Lutomirski	 * done with the NMI stack.
11759b6e6a83SAndy Lutomirski	 */
11769b6e6a83SAndy Lutomirski
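	/*
	 * do_nmi(struct pt_regs *regs, long error_code): NMIs carry no
	 * error code, so we pass the conventional dummy value -1.
	 */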
11779b6e6a83SAndy Lutomirski	movq	%rsp, %rdi
11789b6e6a83SAndy Lutomirski	movq	$-1, %rsi
11799b6e6a83SAndy Lutomirski	call	do_nmi
11809b6e6a83SAndy Lutomirski
11819b6e6a83SAndy Lutomirski	/*
11829b6e6a83SAndy Lutomirski	 * Return back to user mode.  We must *not* do the normal exit
11839b6e6a83SAndy Lutomirski	 * work, because we don't want to enable interrupts.  Fortunately,
11849b6e6a83SAndy Lutomirski	 * do_nmi doesn't modify pt_regs.
11859b6e6a83SAndy Lutomirski	 */
11869b6e6a83SAndy Lutomirski	SWAPGS
11879b6e6a83SAndy Lutomirski	jmp	restore_c_regs_and_iret
11889b6e6a83SAndy Lutomirski
11899b6e6a83SAndy Lutomirski.Lnmi_from_kernel:
11909b6e6a83SAndy Lutomirski	/*
11910b22930eSAndy Lutomirski	 * Here's what our stack frame will look like:
11920b22930eSAndy Lutomirski	 * +---------------------------------------------------------+
11930b22930eSAndy Lutomirski	 * | original SS                                             |
11940b22930eSAndy Lutomirski	 * | original Return RSP                                     |
11950b22930eSAndy Lutomirski	 * | original RFLAGS                                         |
11960b22930eSAndy Lutomirski	 * | original CS                                             |
11970b22930eSAndy Lutomirski	 * | original RIP                                            |
11980b22930eSAndy Lutomirski	 * +---------------------------------------------------------+
11990b22930eSAndy Lutomirski	 * | temp storage for rdx                                    |
12000b22930eSAndy Lutomirski	 * +---------------------------------------------------------+
12010b22930eSAndy Lutomirski	 * | "NMI executing" variable                                |
12020b22930eSAndy Lutomirski	 * +---------------------------------------------------------+
12030b22930eSAndy Lutomirski	 * | iret SS          } Copied from "outermost" frame        |
12040b22930eSAndy Lutomirski	 * | iret Return RSP  } on each loop iteration; overwritten  |
12050b22930eSAndy Lutomirski	 * | iret RFLAGS      } by a nested NMI to force another     |
12060b22930eSAndy Lutomirski	 * | iret CS          } iteration if needed.                 |
12070b22930eSAndy Lutomirski	 * | iret RIP         }                                      |
12080b22930eSAndy Lutomirski	 * +---------------------------------------------------------+
12090b22930eSAndy Lutomirski	 * | outermost SS          } initialized in first_nmi;       |
12100b22930eSAndy Lutomirski	 * | outermost Return RSP  } will not be changed before      |
12110b22930eSAndy Lutomirski	 * | outermost RFLAGS      } NMI processing is done.         |
12120b22930eSAndy Lutomirski	 * | outermost CS          } Copied to "iret" frame on each  |
12130b22930eSAndy Lutomirski	 * | outermost RIP         } iteration.                      |
12140b22930eSAndy Lutomirski	 * +---------------------------------------------------------+
12150b22930eSAndy Lutomirski	 * | pt_regs                                                 |
12160b22930eSAndy Lutomirski	 * +---------------------------------------------------------+
12170b22930eSAndy Lutomirski	 *
12180b22930eSAndy Lutomirski	 * The "original" frame is used by hardware.  Before re-enabling
12190b22930eSAndy Lutomirski	 * NMIs, we need to be done with it, and we need to leave enough
12200b22930eSAndy Lutomirski	 * space for the asm code here.
12210b22930eSAndy Lutomirski	 *
12220b22930eSAndy Lutomirski	 * We return by executing IRET while RSP points to the "iret" frame.
12230b22930eSAndy Lutomirski	 * That will either return for real or it will loop back into NMI
12240b22930eSAndy Lutomirski	 * processing.
12250b22930eSAndy Lutomirski	 *
12260b22930eSAndy Lutomirski	 * The "outermost" frame is copied to the "iret" frame on each
12270b22930eSAndy Lutomirski	 * iteration of the loop, so each iteration starts with the "iret"
12280b22930eSAndy Lutomirski	 * frame pointing to the final return target.
12290b22930eSAndy Lutomirski	 */
12300b22930eSAndy Lutomirski
12310b22930eSAndy Lutomirski	/*
12320b22930eSAndy Lutomirski	 * Determine whether we're a nested NMI.
12330b22930eSAndy Lutomirski	 *
1234a27507caSAndy Lutomirski	 * If we interrupted kernel code between repeat_nmi and
1235a27507caSAndy Lutomirski	 * end_repeat_nmi, then we are a nested NMI.  We must not
1236a27507caSAndy Lutomirski	 * modify the "iret" frame because it's being written by
1237a27507caSAndy Lutomirski	 * the outer NMI.  That's okay; the outer NMI handler is
1238a27507caSAndy Lutomirski	 * about to call do_nmi anyway, so we can just
1239a27507caSAndy Lutomirski	 * resume the outer NMI.
1240a27507caSAndy Lutomirski	 */
1241a27507caSAndy Lutomirski
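	/*
	 * In C, the two unsigned compares below amount to:
	 *
	 *	if (repeat_nmi <= rip && rip < end_repeat_nmi)
	 *		goto nested_nmi_out;
	 *
	 * where rip is the interrupted RIP, sitting at 8(%rsp) just above
	 * the rdx we saved.
	 */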
1242a27507caSAndy Lutomirski	movq	$repeat_nmi, %rdx
1243a27507caSAndy Lutomirski	cmpq	8(%rsp), %rdx
1244a27507caSAndy Lutomirski	ja	1f
1245a27507caSAndy Lutomirski	movq	$end_repeat_nmi, %rdx
1246a27507caSAndy Lutomirski	cmpq	8(%rsp), %rdx
1247a27507caSAndy Lutomirski	ja	nested_nmi_out
1248a27507caSAndy Lutomirski1:
1249a27507caSAndy Lutomirski
1250a27507caSAndy Lutomirski	/*
1251a27507caSAndy Lutomirski	 * Now check "NMI executing".  If it's set, then we're nested.
12520b22930eSAndy Lutomirski	 * This will not detect if we interrupted an outer NMI just
12530b22930eSAndy Lutomirski	 * before IRET.
1254905a36a2SIngo Molnar	 */
1255905a36a2SIngo Molnar	cmpl	$1, -8(%rsp)
1256905a36a2SIngo Molnar	je	nested_nmi
1257905a36a2SIngo Molnar
1258905a36a2SIngo Molnar	/*
12590b22930eSAndy Lutomirski	 * Now test if the previous stack was an NMI stack.  This covers
12600b22930eSAndy Lutomirski	 * the case where we interrupt an outer NMI after it clears
1261810bc075SAndy Lutomirski	 * "NMI executing" but before IRET.  We need to be careful, though:
1262810bc075SAndy Lutomirski	 * there is one case in which RSP could point to the NMI stack
1263810bc075SAndy Lutomirski	 * despite there being no NMI active: naughty userspace controls
1264810bc075SAndy Lutomirski	 * RSP at the very beginning of the SYSCALL targets.  We can
1265810bc075SAndy Lutomirski	 * pull a fast one on naughty userspace, though: we program
1266810bc075SAndy Lutomirski	 * SYSCALL to mask DF, so userspace cannot cause DF to be set
1267810bc075SAndy Lutomirski	 * if it controls the kernel's RSP.  We set DF before we clear
1268810bc075SAndy Lutomirski	 * "NMI executing".
1269905a36a2SIngo Molnar	 */
1270905a36a2SIngo Molnar	lea	6*8(%rsp), %rdx
1271905a36a2SIngo Molnar	/* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
1272905a36a2SIngo Molnar	cmpq	%rdx, 4*8(%rsp)
1273905a36a2SIngo Molnar	/* If the stack pointer is above the NMI stack, this is a normal NMI */
1274905a36a2SIngo Molnar	ja	first_nmi
12754d732138SIngo Molnar
1276905a36a2SIngo Molnar	subq	$EXCEPTION_STKSZ, %rdx
1277905a36a2SIngo Molnar	cmpq	%rdx, 4*8(%rsp)
1278905a36a2SIngo Molnar	/* If it is below the NMI stack, it is a normal NMI */
1279905a36a2SIngo Molnar	jb	first_nmi
1280810bc075SAndy Lutomirski
1281810bc075SAndy Lutomirski	/* Ah, it is within the NMI stack. */
1282810bc075SAndy Lutomirski
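	/*
	 * DF is bit 10 of RFLAGS (X86_EFLAGS_DF == 0x400) and the saved
	 * RFLAGS sits at 3*8(%rsp) in this frame, so shifting the mask
	 * right by 8 tests the correct bit of the flags byte at 3*8 + 1.
	 */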
1283810bc075SAndy Lutomirski	testb	$(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
1284810bc075SAndy Lutomirski	jz	first_nmi	/* RSP was user controlled. */
1285810bc075SAndy Lutomirski
1286810bc075SAndy Lutomirski	/* This is a nested NMI. */
1287905a36a2SIngo Molnar
1288905a36a2SIngo Molnarnested_nmi:
1289905a36a2SIngo Molnar	/*
12900b22930eSAndy Lutomirski	 * Modify the "iret" frame to point to repeat_nmi, forcing another
12910b22930eSAndy Lutomirski	 * iteration of NMI handling.
1292905a36a2SIngo Molnar	 */
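	/*
	 * Working from the stack layout above: the subq drops RSP onto
	 * the "NMI executing" slot so that the five pushes below land
	 * exactly on the "iret" frame, and the leaq computes the address
	 * of "outermost RIP", which is the RSP value repeat_nmi expects.
	 */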
129323a781e9SAndy Lutomirski	subq	$8, %rsp
1294905a36a2SIngo Molnar	leaq	-10*8(%rsp), %rdx
1295905a36a2SIngo Molnar	pushq	$__KERNEL_DS
1296905a36a2SIngo Molnar	pushq	%rdx
1297905a36a2SIngo Molnar	pushfq
1298905a36a2SIngo Molnar	pushq	$__KERNEL_CS
1299905a36a2SIngo Molnar	pushq	$repeat_nmi
1300905a36a2SIngo Molnar
1301905a36a2SIngo Molnar	/* Put stack back */
1302905a36a2SIngo Molnar	addq	$(6*8), %rsp
1303905a36a2SIngo Molnar
1304905a36a2SIngo Molnarnested_nmi_out:
1305905a36a2SIngo Molnar	popq	%rdx
1306905a36a2SIngo Molnar
13070b22930eSAndy Lutomirski	/* We are returning to kernel mode, so this cannot result in a fault. */
1308905a36a2SIngo Molnar	INTERRUPT_RETURN
1309905a36a2SIngo Molnar
1310905a36a2SIngo Molnarfirst_nmi:
13110b22930eSAndy Lutomirski	/* Restore rdx. */
1312905a36a2SIngo Molnar	movq	(%rsp), %rdx
1313905a36a2SIngo Molnar
131436f1a77bSAndy Lutomirski	/* Make room for "NMI executing". */
131536f1a77bSAndy Lutomirski	pushq	$0
1316905a36a2SIngo Molnar
13170b22930eSAndy Lutomirski	/* Leave room for the "iret" frame */
1318905a36a2SIngo Molnar	subq	$(5*8), %rsp
1319905a36a2SIngo Molnar
13200b22930eSAndy Lutomirski	/* Copy the "original" frame to the "outermost" frame */
1321905a36a2SIngo Molnar	.rept 5
1322905a36a2SIngo Molnar	pushq	11*8(%rsp)
1323905a36a2SIngo Molnar	.endr
1324905a36a2SIngo Molnar
1325905a36a2SIngo Molnar	/* Everything up to here is safe from nested NMIs */
1326905a36a2SIngo Molnar
1327a97439aaSAndy Lutomirski#ifdef CONFIG_DEBUG_ENTRY
1328a97439aaSAndy Lutomirski	/*
1329a97439aaSAndy Lutomirski	 * For ease of testing, unmask NMIs right away.  Disabled by
1330a97439aaSAndy Lutomirski	 * default because IRET is very expensive.
1331a97439aaSAndy Lutomirski	 */
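	/*
	 * The CPU masks NMIs from delivery of one until the next IRET,
	 * so this IRET to the very next instruction is what re-arms them.
	 */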
1332a97439aaSAndy Lutomirski	pushq	$0		/* SS */
1333a97439aaSAndy Lutomirski	pushq	%rsp		/* RSP (minus 8 because of the previous push) */
1334a97439aaSAndy Lutomirski	addq	$8, (%rsp)	/* Fix up RSP */
1335a97439aaSAndy Lutomirski	pushfq			/* RFLAGS */
1336a97439aaSAndy Lutomirski	pushq	$__KERNEL_CS	/* CS */
1337a97439aaSAndy Lutomirski	pushq	$1f		/* RIP */
1338a97439aaSAndy Lutomirski	INTERRUPT_RETURN	/* continues at repeat_nmi below */
1339a97439aaSAndy Lutomirski1:
1340a97439aaSAndy Lutomirski#endif
1341a97439aaSAndy Lutomirski
13420b22930eSAndy Lutomirskirepeat_nmi:
1343905a36a2SIngo Molnar	/*
1344905a36a2SIngo Molnar	 * If there was a nested NMI, the first NMI's iret will return
1345905a36a2SIngo Molnar	 * here. But NMIs are still enabled and we can take another
1346905a36a2SIngo Molnar	 * nested NMI. The nested NMI checks the interrupted RIP to see
1347905a36a2SIngo Molnar	 * if it is between repeat_nmi and end_repeat_nmi, and if so
1348905a36a2SIngo Molnar	 * it will just return, as we are about to repeat an NMI anyway.
1349905a36a2SIngo Molnar	 * This makes it safe to copy to the stack frame that a nested
1350905a36a2SIngo Molnar	 * NMI will update.
13510b22930eSAndy Lutomirski	 *
13520b22930eSAndy Lutomirski	 * RSP is pointing to "outermost RIP".  gsbase is unknown, but, if
13530b22930eSAndy Lutomirski	 * we're repeating an NMI, gsbase has the same value that it had on
13540b22930eSAndy Lutomirski	 * the first iteration.  paranoid_entry will load the kernel
135536f1a77bSAndy Lutomirski	 * gsbase if needed before we call do_nmi.  "NMI executing"
135636f1a77bSAndy Lutomirski	 * is zero.
1357905a36a2SIngo Molnar	 */
135836f1a77bSAndy Lutomirski	movq	$1, 10*8(%rsp)		/* Set "NMI executing". */
1359905a36a2SIngo Molnar
13600b22930eSAndy Lutomirski	/*
13610b22930eSAndy Lutomirski	 * Copy the "outermost" frame to the "iret" frame.  NMIs that nest
13620b22930eSAndy Lutomirski	 * here must not modify the "iret" frame while we're writing to
13630b22930eSAndy Lutomirski	 * it or it will end up containing garbage.
13640b22930eSAndy Lutomirski	 */
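	/*
	 * After the addq, RSP sits just above the "iret" frame, and each
	 * matching "outermost" word is then a constant six slots below
	 * the current RSP; the final subq drops RSP back onto
	 * "outermost RIP".
	 */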
1365905a36a2SIngo Molnar	addq	$(10*8), %rsp
1366905a36a2SIngo Molnar	.rept 5
1367905a36a2SIngo Molnar	pushq	-6*8(%rsp)
1368905a36a2SIngo Molnar	.endr
1369905a36a2SIngo Molnar	subq	$(5*8), %rsp
1370905a36a2SIngo Molnarend_repeat_nmi:
1371905a36a2SIngo Molnar
1372905a36a2SIngo Molnar	/*
13730b22930eSAndy Lutomirski	 * Everything below this point can be preempted by a nested NMI.
13740b22930eSAndy Lutomirski	 * If this happens, then the inner NMI will change the "iret"
13750b22930eSAndy Lutomirski	 * frame to point back to repeat_nmi.
1376905a36a2SIngo Molnar	 */
1377905a36a2SIngo Molnar	pushq	$-1				/* ORIG_RAX: no syscall to restart */
1378905a36a2SIngo Molnar	ALLOC_PT_GPREGS_ON_STACK
1379905a36a2SIngo Molnar
1380905a36a2SIngo Molnar	/*
1381905a36a2SIngo Molnar	 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit,
1382905a36a2SIngo Molnar	 * as we should not be calling schedule in NMI context,
1383905a36a2SIngo Molnar	 * even with normal interrupts enabled: an NMI should not be
1384905a36a2SIngo Molnar	 * setting NEED_RESCHED or anything else that normal interrupts and
1385905a36a2SIngo Molnar	 * exceptions might do.
1386905a36a2SIngo Molnar	 */
1387905a36a2SIngo Molnar	call	paranoid_entry
1388905a36a2SIngo Molnar
1389905a36a2SIngo Molnar	/* Open-coded "paranoidentry do_nmi, 0", without TRACE_IRQS_OFF */
1390905a36a2SIngo Molnar	movq	%rsp, %rdi
1391905a36a2SIngo Molnar	movq	$-1, %rsi
1392905a36a2SIngo Molnar	call	do_nmi
1393905a36a2SIngo Molnar
1394905a36a2SIngo Molnar	testl	%ebx, %ebx			/* swapgs needed? */
1395905a36a2SIngo Molnar	jnz	nmi_restore
1396905a36a2SIngo Molnarnmi_swapgs:
1397905a36a2SIngo Molnar	SWAPGS_UNSAFE_STACK
1398905a36a2SIngo Molnarnmi_restore:
1399905a36a2SIngo Molnar	RESTORE_EXTRA_REGS
1400905a36a2SIngo Molnar	RESTORE_C_REGS
14010b22930eSAndy Lutomirski
14020b22930eSAndy Lutomirski	/* Point RSP at the "iret" frame. */
1403905a36a2SIngo Molnar	REMOVE_PT_GPREGS_FROM_STACK 6*8
1404905a36a2SIngo Molnar
1405810bc075SAndy Lutomirski	/*
1406810bc075SAndy Lutomirski	 * Clear "NMI executing".  Set DF first so that we can easily
1407810bc075SAndy Lutomirski	 * distinguish the remaining code between here and IRET from
1408810bc075SAndy Lutomirski	 * the SYSCALL entry and exit paths.  On a native kernel, we
1409810bc075SAndy Lutomirski	 * could just inspect RIP, but, on paravirt kernels,
1410810bc075SAndy Lutomirski	 * INTERRUPT_RETURN can translate into a jump into a
1411810bc075SAndy Lutomirski	 * hypercall page.
1412810bc075SAndy Lutomirski	 */
1413810bc075SAndy Lutomirski	std
1414810bc075SAndy Lutomirski	movq	$0, 5*8(%rsp)		/* clear "NMI executing" */
14150b22930eSAndy Lutomirski
14160b22930eSAndy Lutomirski	/*
14170b22930eSAndy Lutomirski	 * INTERRUPT_RETURN reads the "iret" frame and exits the NMI
14180b22930eSAndy Lutomirski	 * stack in a single instruction.  We are returning to kernel
14190b22930eSAndy Lutomirski	 * mode, so this cannot result in a fault.
14200b22930eSAndy Lutomirski	 */
14215ca6f70fSAndy Lutomirski	INTERRUPT_RETURN
1422905a36a2SIngo MolnarEND(nmi)
1423905a36a2SIngo Molnar
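/*
 * ignore_sysret serves as the 32-bit SYSCALL target when IA32 emulation
 * is not built in (see the MSR_CSTAR setup in cpu/common.c): it simply
 * fails the call with -ENOSYS.
 */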
1424905a36a2SIngo MolnarENTRY(ignore_sysret)
1425905a36a2SIngo Molnar	mov	$-ENOSYS, %eax
1426905a36a2SIngo Molnar	sysret
1427905a36a2SIngo MolnarEND(ignore_sysret)
14282deb4be2SAndy Lutomirski
14292deb4be2SAndy LutomirskiENTRY(rewind_stack_do_exit)
14302deb4be2SAndy Lutomirski	/* Prevent any naive code from trying to unwind to our caller. */
14312deb4be2SAndy Lutomirski	xorl	%ebp, %ebp
14322deb4be2SAndy Lutomirski
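	/*
	 * Park RSP just below where pt_regs would sit at the top of the
	 * thread stack, then call do_exit(), which never returns; the
	 * "1: jmp 1b" below is only a safety net.
	 */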
14332deb4be2SAndy Lutomirski	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rax
14342deb4be2SAndy Lutomirski	leaq	-TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%rax), %rsp
14352deb4be2SAndy Lutomirski
14362deb4be2SAndy Lutomirski	call	do_exit
14372deb4be2SAndy Lutomirski1:	jmp 1b
14382deb4be2SAndy LutomirskiEND(rewind_stack_do_exit)
1439