/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.rst
 *
 * A note on terminology:
 * - iret frame:	Architecture defined interrupt frame from SS to RIP
 *			at the top of the kernel process stack.
 *
 * Some macro usage:
 * - SYM_FUNC_START/END: Define functions in the symbol table.
 * - TRACE_IRQS_*:	Trace hardirq state for lock debugging.
 * - idtentry:		Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <asm/export.h>
#include <asm/frame.h>
#include <asm/trapnr.h>
#include <asm/nospec-branch.h>
#include <linux/err.h>

#include "calling.h"

.code64
.section .entry.text, "ax"

#ifdef CONFIG_PARAVIRT
SYM_CODE_START(native_usergs_sysret64)
	UNWIND_HINT_EMPTY
	swapgs
	sysretq
SYM_CODE_END(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */
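
/*
 * Note (explanatory, not upstream): when CONFIG_PARAVIRT is enabled,
 * USERGS_SYSRET64 (used at the end of entry_SYSCALL_64 below) is a
 * patchable paravirt operation; on bare metal it resolves to the
 * swapgs; sysretq sequence above.
 */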

.macro TRACE_IRQS_FLAGS flags:req
#ifdef CONFIG_TRACE_IRQFLAGS
	btl	$9, \flags		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON
1:
#endif
.endm

.macro TRACE_IRQS_IRETQ
	TRACE_IRQS_FLAGS EFLAGS(%rsp)
.endm

/*
 * When the dynamic function tracer is enabled it will add a breakpoint
 * to all locations that it is about to modify, sync CPUs, update
 * all the code, sync CPUs, then remove the breakpoints. During this
 * window, if lockdep is enabled, it might jump back into the debug
 * handler outside of the IST protection around the code being updated
 * (TRACE_IRQS_ON/OFF).
 *
 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
 * make sure the stack pointer does not get reset back to the top
 * of the debug stack, and instead just reuses the current stack.
 */
#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)

.macro TRACE_IRQS_OFF_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_OFF
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_ON_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_ON
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_IRETQ_DEBUG
	btl	$9, EFLAGS(%rsp)		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON_DEBUG
1:
.endm

#else
# define TRACE_IRQS_OFF_DEBUG			TRACE_IRQS_OFF
# define TRACE_IRQS_ON_DEBUG			TRACE_IRQS_ON
# define TRACE_IRQS_IRETQ_DEBUG			TRACE_IRQS_IRETQ
#endif

/*
 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
 * This is the only entry point used for 64-bit system calls.  The
 * hardware interface is reasonably well designed and the register to
 * argument mapping Linux uses fits well with the registers that are
 * available when SYSCALL is used.
 *
 * SYSCALL instructions can be found inlined in libc implementations as
 * well as some other programs and libraries.  There are also a handful
 * of SYSCALL instructions in the vDSO used, for example, as a
 * clock_gettimeofday fallback.
 *
 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Registers on entry:
 * rax  system call number
 * rcx  return address
 * r11  saved rflags (note: r11 is callee-clobbered register in C ABI)
 * rdi  arg0
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 (needs to be moved to rcx to conform to C ABI)
 * r8   arg4
 * r9   arg5
 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
 *
 * Only called from user space.
 *
 * If userspace can change pt_regs->foo, always force a return via IRET,
 * because IRET deals with non-canonical addresses better. SYSRET has
 * trouble with them due to bugs in both AMD and Intel CPUs.
 */

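/*
 * Illustration only, not kernel code: a minimal user-space write(1, ...)
 * using the register convention documented above.  The buffer symbol
 * and the byte count here are hypothetical.
 *
 *	movq	$1, %rax		# system call number (__NR_write)
 *	movq	$1, %rdi		# arg0: fd = stdout
 *	leaq	msg(%rip), %rsi		# arg1: buffer
 *	movq	$14, %rdx		# arg2: count
 *	syscall				# rcx := rip, r11 := rflags
 */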
SYM_CODE_START(entry_SYSCALL_64)
	UNWIND_HINT_EMPTY
	/*
	 * Interrupts are off on entry.
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */

	swapgs
	/* tss.sp2 is scratch space. */
	movq	%rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/* Construct struct pt_regs on stack */
	pushq	$__USER_DS				/* pt_regs->ss */
	pushq	PER_CPU_VAR(cpu_tss_rw + TSS_sp2)	/* pt_regs->sp */
	pushq	%r11					/* pt_regs->flags */
	pushq	$__USER_CS				/* pt_regs->cs */
	pushq	%rcx					/* pt_regs->ip */
SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
	pushq	%rax					/* pt_regs->orig_ax */

	PUSH_AND_CLEAR_REGS rax=$-ENOSYS
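	/*
	 * PUSH_AND_CLEAR_REGS (from calling.h) completed pt_regs above:
	 * it pushed the remaining GPRs and zeroed them so no stale
	 * user-controlled values are live for speculation.  rax=$-ENOSYS
	 * preloaded pt_regs->ax, so an out-of-range syscall number simply
	 * returns -ENOSYS.
	 */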

	/* IRQs are off. */
	movq	%rax, %rdi
	movq	%rsp, %rsi
	call	do_syscall_64		/* returns with IRQs disabled */

	/*
	 * Try to use SYSRET instead of IRET if we're returning to
	 * a completely clean 64-bit userspace context.  If we're not,
	 * go to the slow exit path.
	 */
	movq	RCX(%rsp), %rcx
	movq	RIP(%rsp), %r11

	cmpq	%rcx, %r11	/* SYSRET requires RCX == RIP */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
	 * in kernel space.  This essentially lets the user take over
	 * the kernel, since userspace controls RSP.
	 *
	 * If the width of the "canonical tail" ever becomes variable,
	 * this will need to be updated to remain correct on both old
	 * and new CPUs.
	 *
	 * Change the top bits to match the most significant bit (47th or
	 * 56th bit depending on paging mode) in the address.
	 */
#ifdef CONFIG_X86_5LEVEL
	ALTERNATIVE "shl $(64 - 48), %rcx; sar $(64 - 48), %rcx", \
		"shl $(64 - 57), %rcx; sar $(64 - 57), %rcx", X86_FEATURE_LA57
#else
	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
#endif
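
	/*
	 * Worked example (explanatory, not upstream) for 4-level paging
	 * (__VIRTUAL_MASK_SHIFT == 47, shift count 16): a non-canonical
	 * RCX of 0x0000800000000000 becomes 0x8000000000000000 after the
	 * shl and 0xffff800000000000 after the sar.  The value changed,
	 * so the check below takes the IRET path instead of risking a
	 * #GP from SYSRET.
	 */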

	/* If this changed %rcx, it was not canonical */
	cmpq	%rcx, %r11
	jne	swapgs_restore_regs_and_return_to_usermode

	cmpq	$__USER_CS, CS(%rsp)		/* CS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	movq	R11(%rsp), %r11
	cmpq	%r11, EFLAGS(%rsp)		/* R11 == RFLAGS */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
	 * restore RF properly. If the slowpath sets it for whatever reason, we
	 * need to restore it correctly.
	 *
	 * SYSRET can restore TF, but unlike IRET, restoring TF results in a
	 * trap from userspace immediately after SYSRET.  This would cause an
	 * infinite loop whenever #DB happens with register state that satisfies
	 * the opportunistic SYSRET conditions.  For example, single-stepping
	 * this user code:
	 *
	 *           movq	$stuck_here, %rcx
	 *           pushfq
	 *           popq %r11
	 *   stuck_here:
	 *
	 * would never get past 'stuck_here'.
	 */
	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
	jnz	swapgs_restore_regs_and_return_to_usermode

	/* nothing to check for RSP */

	cmpq	$__USER_DS, SS(%rsp)		/* SS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * We win! This label is here just for ease of understanding
	 * perf profiles. Nothing jumps here.
	 */
syscall_return_via_sysret:
	/* rcx and r11 are already restored (see code above) */
	POP_REGS pop_rdi=0 skip_r11rcx=1

	/*
	 * Now all regs are restored except RSP and RDI.
	 * Save old stack pointer and switch to trampoline stack.
	 */
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
	UNWIND_HINT_EMPTY

	pushq	RSP-RDI(%rdi)	/* RSP */
	pushq	(%rdi)		/* RDI */
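
	/*
	 * Addressing note (explanatory, not upstream): %rdi holds the old
	 * %rsp, which points at pt_regs->di (everything below it was
	 * popped).  RSP-RDI and 0 are the byte offsets of pt_regs->sp and
	 * pt_regs->di relative to pt_regs->di, so the two pushes above
	 * copy the user RSP and RDI values onto the trampoline stack.
	 */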

	/*
	 * We are on the trampoline stack.  All regs except RDI are live.
	 * We can do future final exit work right here.
	 */
	STACKLEAK_ERASE_NOCLOBBER

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi

	popq	%rdi
	popq	%rsp
	USERGS_SYSRET64
SYM_CODE_END(entry_SYSCALL_64)

/*
 * %rdi: prev task
 * %rsi: next task
 */
.pushsection .text, "ax"
SYM_FUNC_START(__switch_to_asm)
	/*
	 * Save callee-saved registers
	 * This must match the order in inactive_task_frame
	 */
	pushq	%rbp
	pushq	%rbx
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15

	/* switch stack */
	movq	%rsp, TASK_threadsp(%rdi)
	movq	TASK_threadsp(%rsi), %rsp

#ifdef CONFIG_STACKPROTECTOR
	movq	TASK_stack_canary(%rsi), %rbx
	movq	%rbx, PER_CPU_VAR(fixed_percpu_data) + stack_canary_offset
#endif

#ifdef CONFIG_RETPOLINE
	/*
	 * When switching from a shallower to a deeper call stack
	 * the RSB may either underflow or use entries populated
	 * with userspace addresses. On CPUs where those concerns
	 * exist, overwrite the RSB with entries which capture
	 * speculative execution to prevent attack.
	 */
	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif

	/* restore callee-saved registers */
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp

	jmp	__switch_to
SYM_FUNC_END(__switch_to_asm)
.popsection

/*
 * A newly forked process directly context switches into this address.
 *
 * rax: prev task we switched from
 * rbx: kernel thread func (NULL for user thread)
 * r12: kernel thread arg
 */
.pushsection .text, "ax"
SYM_CODE_START(ret_from_fork)
	UNWIND_HINT_EMPTY
	movq	%rax, %rdi
	call	schedule_tail			/* rdi: 'prev' task parameter */

	testq	%rbx, %rbx			/* from kernel_thread? */
	jnz	1f				/* kernel threads are uncommon */

2:
	UNWIND_HINT_REGS
	movq	%rsp, %rdi
	call	syscall_return_slowpath	/* returns with IRQs disabled */
	jmp	swapgs_restore_regs_and_return_to_usermode

1:
	/* kernel thread */
	UNWIND_HINT_EMPTY
	movq	%r12, %rdi
	CALL_NOSPEC rbx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve().  Exit to userspace to complete the execve()
	 * syscall.
	 */
	movq	$0, RAX(%rsp)
	jmp	2b
SYM_CODE_END(ret_from_fork)
.popsection

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
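
/*
 * Encoding note (explanatory, not upstream): each stub pushes
 * $(~vector+0x80).  For example, for vector 33, ~33 == -34 and
 * -34+0x80 == 94, which fits in a signed byte, so the pushq encodes
 * in two bytes and the stub fits in its 8-byte slot.
 * common_interrupt/common_spurious undo the +0x80 bias with
 * "addq $-0x80, (%rsp)", recovering ~vector in [-256, -1].
 */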
	.align 8
SYM_CODE_START(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	UNWIND_HINT_IRET_REGS
	pushq	$(~vector+0x80)			/* Note: always in signed byte range */
	jmp	common_interrupt
	.align	8
	vector=vector+1
    .endr
SYM_CODE_END(irq_entries_start)

	.align 8
SYM_CODE_START(spurious_entries_start)
    vector=FIRST_SYSTEM_VECTOR
    .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
	UNWIND_HINT_IRET_REGS
	pushq	$(~vector+0x80)			/* Note: always in signed byte range */
	jmp	common_spurious
	.align	8
	vector=vector+1
    .endr
SYM_CODE_END(spurious_entries_start)

.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
#ifdef CONFIG_DEBUG_ENTRY
	pushq %rax
	SAVE_FLAGS(CLBR_RAX)
	testl $X86_EFLAGS_IF, %eax
	jz .Lokay_\@
	ud2
.Lokay_\@:
	popq %rax
#endif
.endm

/*
 * Enters the IRQ stack if we're not already using it.  NMI-safe.  Clobbers
 * flags and puts old RSP into old_rsp, and leaves all other GPRs alone.
 * Requires kernel GSBASE.
 *
 * The invariant is that, if irq_count != -1, then the IRQ stack is in use.
 */
.macro ENTER_IRQ_STACK regs=1 old_rsp save_ret=0
	DEBUG_ENTRY_ASSERT_IRQS_OFF

	.if \save_ret
	/*
	 * If save_ret is set, the original stack contains one additional
	 * entry -- the return address. Therefore, move the address one
	 * entry below %rsp to \old_rsp.
	 */
	leaq	8(%rsp), \old_rsp
	.else
	movq	%rsp, \old_rsp
	.endif

	.if \regs
	UNWIND_HINT_REGS base=\old_rsp
	.endif

	incl	PER_CPU_VAR(irq_count)
	jnz	.Lirq_stack_push_old_rsp_\@

	/*
	 * Right now, if we just incremented irq_count to zero, we've
	 * claimed the IRQ stack but we haven't switched to it yet.
	 *
	 * If anything is added that can interrupt us here without using IST,
	 * it must be *extremely* careful to limit its stack usage.  This
	 * could include kprobes and a hypothetical future IST-less #DB
	 * handler.
	 *
	 * The OOPS unwinder relies on the word at the top of the IRQ
	 * stack linking back to the previous RSP for the entire time we're
	 * on the IRQ stack.  For this to work reliably, we need to write
	 * it before we actually move ourselves to the IRQ stack.
	 */

	movq	\old_rsp, PER_CPU_VAR(irq_stack_backing_store + IRQ_STACK_SIZE - 8)
	movq	PER_CPU_VAR(hardirq_stack_ptr), %rsp

#ifdef CONFIG_DEBUG_ENTRY
	/*
	 * If the first movq above becomes wrong due to IRQ stack layout
	 * changes, the only way we'll notice is if we try to unwind right
	 * here.  Assert that we set up the stack right to catch this type
	 * of bug quickly.
	 */
	cmpq	-8(%rsp), \old_rsp
	je	.Lirq_stack_okay\@
	ud2
	.Lirq_stack_okay\@:
#endif

.Lirq_stack_push_old_rsp_\@:
	pushq	\old_rsp

	.if \regs
	UNWIND_HINT_REGS indirect=1
	.endif

	.if \save_ret
	/*
	 * Push the return address to the stack. This return address can
	 * be found at the "real" original RSP, which was offset by 8 at
	 * the beginning of this macro.
	 */
	pushq	-8(\old_rsp)
	.endif
.endm
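
/*
 * Typical usage (see interrupt_entry below), where the macro is entered
 * via a call and must preserve the return address:
 *
 *	ENTER_IRQ_STACK old_rsp=%rdi save_ret=1
 *	...
 *	LEAVE_IRQ_STACK
 */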

/*
 * Undoes ENTER_IRQ_STACK.
 */
.macro LEAVE_IRQ_STACK regs=1
	DEBUG_ENTRY_ASSERT_IRQS_OFF
	/* We need to be off the IRQ stack before decrementing irq_count. */
	popq	%rsp

	.if \regs
	UNWIND_HINT_REGS
	.endif

	/*
	 * As in ENTER_IRQ_STACK, while irq_count == 0 we are still
	 * claiming the IRQ stack even though we're not on it.
	 */

	decl	PER_CPU_VAR(irq_count)
.endm

/**
 * idtentry_body - Macro to emit code calling the C function
 * @cfunc:		C function to be called
 * @has_error_code:	Hardware pushed error code on stack
 */
.macro idtentry_body cfunc has_error_code:req

	call	error_entry
	UNWIND_HINT_REGS

	movq	%rsp, %rdi			/* pt_regs pointer into 1st argument */

	.if \has_error_code == 1
		movq	ORIG_RAX(%rsp), %rsi	/* get error code into 2nd argument */
		movq	$-1, ORIG_RAX(%rsp)	/* no syscall to restart */
	.endif

	call	\cfunc

	jmp	error_return
.endm

/**
 * idtentry - Macro to generate entry stubs for simple IDT entries
 * @vector:		Vector number
 * @asmsym:		ASM symbol for the entry point
 * @cfunc:		C function to be called
 * @has_error_code:	Hardware pushed error code on stack
 *
 * The macro emits code to set up the kernel context for straightforward
 * and simple IDT entries. No IST stack, no paranoid entry checks.
 */
.macro idtentry vector asmsym cfunc has_error_code:req
SYM_CODE_START(\asmsym)
	UNWIND_HINT_IRET_REGS offset=\has_error_code*8
	ASM_CLAC

	.if \has_error_code == 0
		pushq	$-1			/* ORIG_RAX: no syscall to restart */
	.endif

	.if \vector == X86_TRAP_BP
		/*
		 * If coming from kernel space, create a 6-word gap to allow the
		 * int3 handler to emulate a call instruction.
		 */
		testb	$3, CS-ORIG_RAX(%rsp)
		jnz	.Lfrom_usermode_no_gap_\@
		.rept	6
		pushq	5*8(%rsp)
		.endr
		UNWIND_HINT_IRET_REGS offset=8
.Lfrom_usermode_no_gap_\@:
	.endif

	idtentry_body \cfunc \has_error_code

_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
.endm
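
/*
 * Example expansion (a sketch; the exact invocation is generated by the
 * DECLARE_IDTENTRY* machinery in asm/idtentry.h):
 *
 *	idtentry X86_TRAP_DE asm_exc_divide_error exc_divide_error has_error_code=0
 *
 * emits asm_exc_divide_error, which builds pt_regs and calls the C
 * handler exc_divide_error(regs).
 */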

/*
 * MCE and DB exceptions
 */
#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + (x) * 8)

/**
 * idtentry_mce_db - Macro to generate entry stubs for #MC and #DB
 * @vector:		Vector number
 * @asmsym:		ASM symbol for the entry point
 * @cfunc:		C function to be called
 *
 * The macro emits code to set up the kernel context for #MC and #DB.
 *
 * If the entry comes from user space it uses the normal entry path
 * including the return to user space work and preemption checks on
 * exit.
 *
 * If it hits in kernel mode then it needs to go through the paranoid
 * entry as the exception can hit any random state. No preemption
 * check on exit to keep the paranoid path simple.
 *
 * If the trap is #DB then the interrupt stack entry in the IST is
 * moved to the second stack, so a potential recursion will have a
 * fresh IST.
 */
.macro idtentry_mce_db vector asmsym cfunc
SYM_CODE_START(\asmsym)
	UNWIND_HINT_IRET_REGS
	ASM_CLAC

	pushq	$-1			/* ORIG_RAX: no syscall to restart */

	/*
	 * If the entry is from userspace, switch stacks and treat it as
	 * a normal entry.
	 */
	testb	$3, CS-ORIG_RAX(%rsp)
	jnz	.Lfrom_usermode_switch_stack_\@

	/*
	 * paranoid_entry returns SWAPGS flag for paranoid_exit in EBX.
	 * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS
	 */
	call	paranoid_entry

	UNWIND_HINT_REGS

	.if \vector == X86_TRAP_DB
		TRACE_IRQS_OFF_DEBUG
	.else
		TRACE_IRQS_OFF
	.endif

	movq	%rsp, %rdi		/* pt_regs pointer */

	.if \vector == X86_TRAP_DB
		subq	$DB_STACK_OFFSET, CPU_TSS_IST(IST_INDEX_DB)
	.endif

	call	\cfunc

	.if \vector == X86_TRAP_DB
		addq	$DB_STACK_OFFSET, CPU_TSS_IST(IST_INDEX_DB)
	.endif

	jmp	paranoid_exit

	/* Switch to the regular task stack and use the noist entry point */
.Lfrom_usermode_switch_stack_\@:
	idtentry_body noist_\cfunc, has_error_code=0

_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
.endm
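
/*
 * Example (a sketch): asm/idtentry.h instantiates this as
 *
 *	idtentry_mce_db X86_TRAP_DB asm_exc_debug exc_debug
 *
 * so #DB from kernel mode goes through paranoid_entry and exc_debug,
 * while #DB from user mode switches stacks and calls noist_exc_debug.
 */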

/*
 * Double fault entry. Straight paranoid. No checks of the context this
 * comes from, because for the espfix-induced #DF such a check would do
 * the wrong thing.
 */
.macro idtentry_df vector asmsym cfunc
SYM_CODE_START(\asmsym)
	UNWIND_HINT_IRET_REGS offset=8
	ASM_CLAC

	/*
	 * paranoid_entry returns SWAPGS flag for paranoid_exit in EBX.
	 * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS
	 */
	call	paranoid_entry
	UNWIND_HINT_REGS

	movq	%rsp, %rdi		/* pt_regs pointer into first argument */
	movq	ORIG_RAX(%rsp), %rsi	/* get error code into 2nd argument */
	movq	$-1, ORIG_RAX(%rsp)	/* no syscall to restart */
	call	\cfunc

	jmp	paranoid_exit

_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
.endm

/*
 * Include the defines which emit the idt entries which are shared
 * between 32 and 64 bit.
 */
#include <asm/idtentry.h>

/*
 * Interrupt entry helper function.
 *
 * Entry runs with interrupts off. Stack layout at entry:
 * +----------------------------------------------------+
 * | regs->ss						|
 * | regs->rsp						|
 * | regs->eflags					|
 * | regs->cs						|
 * | regs->ip						|
 * +----------------------------------------------------+
 * | regs->orig_ax = ~(interrupt number)		|
 * +----------------------------------------------------+
 * | return address					|
 * +----------------------------------------------------+
 */
SYM_CODE_START(interrupt_entry)
	UNWIND_HINT_IRET_REGS offset=16
	ASM_CLAC
	cld

	testb	$3, CS-ORIG_RAX+8(%rsp)
	jz	1f
	SWAPGS
	FENCE_SWAPGS_USER_ENTRY
	/*
	 * Switch to the thread stack. The IRET frame and orig_ax are
	 * on the stack, as well as the return address. RDI..R12 are
	 * not (yet) on the stack and space has not (yet) been
	 * allocated for them.
	 */
	pushq	%rdi

	/* Need to switch before accessing the thread stack. */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	 /*
	  * We have RDI, return address, and orig_ax on the stack on
	  * top of the IRET frame. That means offset=24
	  */
	UNWIND_HINT_IRET_REGS base=%rdi offset=24

	pushq	7*8(%rdi)		/* regs->ss */
	pushq	6*8(%rdi)		/* regs->rsp */
	pushq	5*8(%rdi)		/* regs->eflags */
	pushq	4*8(%rdi)		/* regs->cs */
	pushq	3*8(%rdi)		/* regs->ip */
	UNWIND_HINT_IRET_REGS
	pushq	2*8(%rdi)		/* regs->orig_ax */
	pushq	8(%rdi)			/* return address */

	movq	(%rdi), %rdi
	jmp	2f
1:
	FENCE_SWAPGS_KERNEL_ENTRY
2:
	PUSH_AND_CLEAR_REGS save_ret=1
	ENCODE_FRAME_POINTER 8

	testb	$3, CS+8(%rsp)
	jz	1f

	/*
	 * IRQ from user mode.
	 *
	 * We need to tell lockdep that IRQs are off.  We can't do this until
	 * we fix gsbase, and we should do it before enter_from_user_mode
	 * (which can take locks).  Since TRACE_IRQS_OFF is idempotent,
	 * the simplest way to handle it is to just call it twice if
	 * we enter from user mode.  There's no reason to optimize this since
	 * TRACE_IRQS_OFF is a no-op if lockdep is off.
	 */
	TRACE_IRQS_OFF

	CALL_enter_from_user_mode

1:
	ENTER_IRQ_STACK old_rsp=%rdi save_ret=1
	/* We entered an interrupt context - irqs are off: */
	TRACE_IRQS_OFF

	ret
SYM_CODE_END(interrupt_entry)
_ASM_NOKPROBE(interrupt_entry)


/* Interrupt entry/exit. */

/*
 * The interrupt stubs push (~vector+0x80) onto the stack and
 * then jump to common_spurious/interrupt.
 */
SYM_CODE_START_LOCAL(common_spurious)
	addq	$-0x80, (%rsp)			/* Adjust vector to [-256, -1] range */
	call	interrupt_entry
	UNWIND_HINT_REGS indirect=1
	call	smp_spurious_interrupt		/* rdi points to pt_regs */
	jmp	ret_from_intr
SYM_CODE_END(common_spurious)
_ASM_NOKPROBE(common_spurious)

/* common_interrupt is a hotpath. Align it */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
SYM_CODE_START_LOCAL(common_interrupt)
	addq	$-0x80, (%rsp)			/* Adjust vector to [-256, -1] range */
	call	interrupt_entry
	UNWIND_HINT_REGS indirect=1
	call	do_IRQ	/* rdi points to pt_regs */
	/* 0(%rsp): old RSP */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF

	LEAVE_IRQ_STACK

	testb	$3, CS(%rsp)
	jz	retint_kernel

	/* Interrupt came from user space */
.Lretint_user:
	mov	%rsp,%rdi
	call	prepare_exit_to_usermode

SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates user mode. */
	testb	$3, CS(%rsp)
	jnz	1f
	ud2
1:
#endif
	POP_REGS pop_rdi=0

	/*
	 * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
	 * Save old stack pointer and switch to trampoline stack.
	 */
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
	UNWIND_HINT_EMPTY

	/* Copy the IRET frame to the trampoline stack. */
	pushq	6*8(%rdi)	/* SS */
	pushq	5*8(%rdi)	/* RSP */
	pushq	4*8(%rdi)	/* EFLAGS */
	pushq	3*8(%rdi)	/* CS */
	pushq	2*8(%rdi)	/* RIP */

	/* Push user RDI on the trampoline stack. */
	pushq	(%rdi)

	/*
	 * We are on the trampoline stack.  All regs except RDI are live.
	 * We can do future final exit work right here.
	 */
	STACKLEAK_ERASE_NOCLOBBER

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi

	/* Restore RDI. */
	popq	%rdi
	SWAPGS
	INTERRUPT_RETURN


/* Returning to kernel space */
retint_kernel:
#ifdef CONFIG_PREEMPTION
	/* Interrupts are off */
	/* Check if we need preemption */
	btl	$9, EFLAGS(%rsp)		/* were interrupts off? */
	jnc	1f
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	1f
	call	preempt_schedule_irq
1:
#endif
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ

SYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates kernel mode. */
	testb	$3, CS(%rsp)
	jz	1f
	ud2
1:
#endif
	POP_REGS
	addq	$8, %rsp	/* skip regs->orig_ax */
	/*
	 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
	 * when returning from IPI handler.
	 */
	INTERRUPT_RETURN

SYM_INNER_LABEL_ALIGN(native_iret, SYM_L_GLOBAL)
	UNWIND_HINT_IRET_REGS
	/*
	 * Are we returning to a stack segment from the LDT?  Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
#ifdef CONFIG_X86_ESPFIX64
	testb	$4, (SS-RIP)(%rsp)
	jnz	native_irq_return_ldt
#endif

SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL)
	/*
	 * This may fault.  Non-paranoid faults on return to userspace are
	 * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
	 * Double-faults due to espfix64 are handled in exc_double_fault.
	 * Other faults here are fatal.
	 */
	iretq

#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
	/*
	 * We are running with user GSBASE.  All GPRs contain their user
	 * values.  We have a percpu ESPFIX stack that is eight slots
	 * long (see ESPFIX_STACK_SIZE).  espfix_waddr points to the bottom
	 * of the ESPFIX stack.
	 *
	 * We clobber RAX and RDI in this code.  We stash RDI on the
	 * normal stack and RAX on the ESPFIX stack.
	 *
	 * The ESPFIX stack layout we set up looks like this:
	 *
	 * --- top of ESPFIX stack ---
	 * SS
	 * RSP
	 * RFLAGS
	 * CS
	 * RIP  <-- RSP points here when we're done
	 * RAX  <-- espfix_waddr points here
	 * --- bottom of ESPFIX stack ---
	 */

	pushq	%rdi				/* Stash user RDI */
	SWAPGS					/* to kernel GS */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi	/* to kernel CR3 */

	movq	PER_CPU_VAR(espfix_waddr), %rdi
	movq	%rax, (0*8)(%rdi)		/* user RAX */
	movq	(1*8)(%rsp), %rax		/* user RIP */
	movq	%rax, (1*8)(%rdi)
	movq	(2*8)(%rsp), %rax		/* user CS */
	movq	%rax, (2*8)(%rdi)
	movq	(3*8)(%rsp), %rax		/* user RFLAGS */
	movq	%rax, (3*8)(%rdi)
	movq	(5*8)(%rsp), %rax		/* user SS */
	movq	%rax, (5*8)(%rdi)
	movq	(4*8)(%rsp), %rax		/* user RSP */
	movq	%rax, (4*8)(%rdi)
	/* Now RAX == RSP. */

	andl	$0xffff0000, %eax		/* RAX = (RSP & 0xffff0000) */

	/*
	 * espfix_stack[31:16] == 0.  The page tables are set up such that
	 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
	 * espfix_waddr for any X.  That is, there are 65536 RO aliases of
	 * the same page.  Set up RSP so that RSP[31:16] contains the
	 * respective 16 bits of the /userspace/ RSP and RSP nonetheless
	 * still points to an RO alias of the ESPFIX stack.
	 */
	orq	PER_CPU_VAR(espfix_stack), %rax
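
	/*
	 * Worked example (explanatory, not upstream): if the user RSP was
	 * 0x12345678, RAX now holds espfix_stack | 0x12340000.  Bits
	 * [31:16] equal those of the user RSP, while the page itself is
	 * the RO alias holding the frame copied out above.
	 */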

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
	SWAPGS					/* to user GS */
	popq	%rdi				/* Restore user RDI */

	movq	%rax, %rsp
	UNWIND_HINT_IRET_REGS offset=8

	/*
	 * At this point, we cannot write to the stack any more, but we can
	 * still read.
	 */
	popq	%rax				/* Restore user RAX */

	/*
	 * RSP now points to an ordinary IRET frame, except that the page
	 * is read-only and RSP[31:16] are preloaded with the userspace
	 * values.  We can now IRET back to userspace.
	 */
	jmp	native_irq_return_iret
#endif
SYM_CODE_END(common_interrupt)
_ASM_NOKPROBE(common_interrupt)

/*
 * APIC interrupts.
 */
.macro apicinterrupt3 num sym do_sym
SYM_CODE_START(\sym)
	UNWIND_HINT_IRET_REGS
	pushq	$~(\num)
	call	interrupt_entry
	UNWIND_HINT_REGS indirect=1
	call	\do_sym	/* rdi points to pt_regs */
	jmp	ret_from_intr
SYM_CODE_END(\sym)
_ASM_NOKPROBE(\sym)
.endm

/* Make sure APIC interrupt handlers end up in the irqentry section: */
#define PUSH_SECTION_IRQENTRY	.pushsection .irqentry.text, "ax"
#define POP_SECTION_IRQENTRY	.popsection

.macro apicinterrupt num sym do_sym
PUSH_SECTION_IRQENTRY
apicinterrupt3 \num \sym \do_sym
POP_SECTION_IRQENTRY
.endm

#ifdef CONFIG_SMP
apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR		irq_move_cleanup_interrupt	smp_irq_move_cleanup_interrupt
apicinterrupt3 REBOOT_VECTOR			reboot_interrupt		smp_reboot_interrupt
#endif

#ifdef CONFIG_X86_UV
apicinterrupt3 UV_BAU_MESSAGE			uv_bau_message_intr1		uv_bau_message_interrupt
#endif

apicinterrupt LOCAL_TIMER_VECTOR		apic_timer_interrupt		smp_apic_timer_interrupt
apicinterrupt X86_PLATFORM_IPI_VECTOR		x86_platform_ipi		smp_x86_platform_ipi

#ifdef CONFIG_HAVE_KVM
apicinterrupt3 POSTED_INTR_VECTOR		kvm_posted_intr_ipi		smp_kvm_posted_intr_ipi
apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR	kvm_posted_intr_wakeup_ipi	smp_kvm_posted_intr_wakeup_ipi
apicinterrupt3 POSTED_INTR_NESTED_VECTOR	kvm_posted_intr_nested_ipi	smp_kvm_posted_intr_nested_ipi
#endif

#ifdef CONFIG_X86_MCE_THRESHOLD
apicinterrupt THRESHOLD_APIC_VECTOR		threshold_interrupt		smp_threshold_interrupt
#endif

#ifdef CONFIG_X86_MCE_AMD
apicinterrupt DEFERRED_ERROR_VECTOR		deferred_error_interrupt	smp_deferred_error_interrupt
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
apicinterrupt THERMAL_APIC_VECTOR		thermal_interrupt		smp_thermal_interrupt
#endif

#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR	call_function_single_interrupt	smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR		call_function_interrupt		smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR			reschedule_interrupt		smp_reschedule_interrupt
#endif

apicinterrupt ERROR_APIC_VECTOR			error_interrupt			smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR		spurious_interrupt		smp_spurious_interrupt

#ifdef CONFIG_IRQ_WORK
apicinterrupt IRQ_WORK_VECTOR			irq_work_interrupt		smp_irq_work_interrupt
#endif

/*
 * Reload gs selector with exception handling
 * edi:  new selector
 *
 * Is in entry.text as it shouldn't be instrumented.
 */
SYM_FUNC_START(asm_load_gs_index)
	FRAME_BEGIN
	swapgs
.Lgs_change:
	movl	%edi, %gs
2:	ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
	swapgs
	FRAME_END
	ret
SYM_FUNC_END(asm_load_gs_index)
EXPORT_SYMBOL(asm_load_gs_index)

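/*
 * Explanatory note (not upstream): the extable entry below routes a
 * fault on the "movl %edi, %gs" at .Lgs_change to the .Lbad_gs fixup,
 * which ends up loading selector 0 (after first bouncing through
 * __USER_DS on X86_BUG_NULL_SEG parts) and resumes at label 2.  The
 * C-side prototype is roughly (a sketch, not verified here):
 *
 *	void asm_load_gs_index(unsigned int selector);
 */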
104998ededb6SJiri Slaby	_ASM_EXTABLE(.Lgs_change, .Lbad_gs)
1050905a36a2SIngo Molnar	.section .fixup, "ax"
1051905a36a2SIngo Molnar	/* running with kernelgs */
1052ef77e688SJiri SlabySYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs)
1053c9317202SThomas Gleixner	swapgs					/* switch back to user gs */
1054b038c842SAndy Lutomirski.macro ZAP_GS
1055b038c842SAndy Lutomirski	/* This can't be a string because the preprocessor needs to see it. */
1056b038c842SAndy Lutomirski	movl $__USER_DS, %eax
1057b038c842SAndy Lutomirski	movl %eax, %gs
1058b038c842SAndy Lutomirski.endm
1059b038c842SAndy Lutomirski	ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG
1060905a36a2SIngo Molnar	xorl	%eax, %eax
1061905a36a2SIngo Molnar	movl	%eax, %gs
1062905a36a2SIngo Molnar	jmp	2b
1063ef77e688SJiri SlabySYM_CODE_END(.Lbad_gs)
1064905a36a2SIngo Molnar	.previous
1065905a36a2SIngo Molnar
1066931b9414SThomas Gleixner/*
1067931b9414SThomas Gleixner * rdi: New stack pointer points to the top word of the stack
1068931b9414SThomas Gleixner * rsi: Function pointer
1069931b9414SThomas Gleixner * rdx: Function argument (can be NULL if none)
1070931b9414SThomas Gleixner */
1071931b9414SThomas GleixnerSYM_FUNC_START(asm_call_on_stack)
1072931b9414SThomas Gleixner	/*
1073931b9414SThomas Gleixner	 * Save the frame pointer unconditionally. This allows the ORC
1074931b9414SThomas Gleixner	 * unwinder to handle the stack switch.
1075931b9414SThomas Gleixner	 */
1076931b9414SThomas Gleixner	pushq		%rbp
1077931b9414SThomas Gleixner	mov		%rsp, %rbp
1078931b9414SThomas Gleixner
1079931b9414SThomas Gleixner	/*
1080931b9414SThomas Gleixner	 * The unwinder relies on the word at the top of the new stack
1081931b9414SThomas Gleixner	 * page linking back to the previous RSP.
1082931b9414SThomas Gleixner	 */
1083931b9414SThomas Gleixner	mov		%rsp, (%rdi)
1084931b9414SThomas Gleixner	mov		%rdi, %rsp
1085931b9414SThomas Gleixner	/* Move the argument to the right place */
1086931b9414SThomas Gleixner	mov		%rdx, %rdi
1087931b9414SThomas Gleixner
1088931b9414SThomas Gleixner1:
1089931b9414SThomas Gleixner	.pushsection .discard.instr_begin
1090931b9414SThomas Gleixner	.long 1b - .
1091931b9414SThomas Gleixner	.popsection
1092931b9414SThomas Gleixner
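	/* Indirect call through %rsi; CALL_NOSPEC makes it retpoline-safe. */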
1093931b9414SThomas Gleixner	CALL_NOSPEC	rsi
1094931b9414SThomas Gleixner
1095931b9414SThomas Gleixner2:
1096931b9414SThomas Gleixner	.pushsection .discard.instr_end
1097931b9414SThomas Gleixner	.long 2b - .
1098931b9414SThomas Gleixner	.popsection
1099931b9414SThomas Gleixner
1100931b9414SThomas Gleixner	/* Restore the previous stack pointer from RBP. */
1101931b9414SThomas Gleixner	leaveq
1102931b9414SThomas Gleixner	ret
1103931b9414SThomas GleixnerSYM_FUNC_END(asm_call_on_stack)
1104931b9414SThomas Gleixner
110528c11b0fSJuergen Gross#ifdef CONFIG_XEN_PV
1106905a36a2SIngo Molnar/*
1107905a36a2SIngo Molnar * A note on the "critical region" in our callback handler.
1108905a36a2SIngo Molnar * We want to avoid stacking callback handlers due to events occurring
1109905a36a2SIngo Molnar * during handling of the last event. To do this, we keep events disabled
1110905a36a2SIngo Molnar * until we've done all processing. HOWEVER, we must enable events before
1111905a36a2SIngo Molnar * popping the stack frame (can't be done atomically) and so it would still
1112905a36a2SIngo Molnar * be possible to get enough handler activations to overflow the stack.
1113905a36a2SIngo Molnar * Although unlikely, bugs of that kind are hard to track down, so we'd
1114905a36a2SIngo Molnar * like to avoid the possibility.
1115905a36a2SIngo Molnar * So, on entry to the handler we detect whether we interrupted an
1116905a36a2SIngo Molnar * existing activation in its critical region -- if so, we pop the current
1117905a36a2SIngo Molnar * activation and restart the handler using the previous one.
11182f6474e4SThomas Gleixner *
11192f6474e4SThomas Gleixner * C calling convention: exc_xen_hypervisor_callback(struct pt_regs *)
1120905a36a2SIngo Molnar */
11212f6474e4SThomas GleixnerSYM_CODE_START_LOCAL(exc_xen_hypervisor_callback)
11224d732138SIngo Molnar
1123905a36a2SIngo Molnar/*
1124905a36a2SIngo Molnar * Since we don't modify %rdi, xen_pv_evtchn_do_upcall(struct pt_regs *)
1125905a36a2SIngo Molnar * will see the correct pointer to the pt_regs.
1126905a36a2SIngo Molnar */
11278c1f7558SJosh Poimboeuf	UNWIND_HINT_FUNC
11284d732138SIngo Molnar	movq	%rdi, %rsp			/* we don't return, adjust the stack frame */
11298c1f7558SJosh Poimboeuf	UNWIND_HINT_REGS
11301d3e53e8SAndy Lutomirski
11312f6474e4SThomas Gleixner	call	xen_pv_evtchn_do_upcall
11321d3e53e8SAndy Lutomirski
11332f6474e4SThomas Gleixner	jmp	error_return
11342f6474e4SThomas GleixnerSYM_CODE_END(exc_xen_hypervisor_callback)
1135905a36a2SIngo Molnar
1136905a36a2SIngo Molnar/*
1137905a36a2SIngo Molnar * Hypervisor uses this for application faults while it executes.
1138905a36a2SIngo Molnar * We get here for two reasons:
1139905a36a2SIngo Molnar *  1. Fault while reloading DS, ES, FS or GS
1140905a36a2SIngo Molnar *  2. Fault while executing IRET
1141905a36a2SIngo Molnar * Category 1 we do not need to fix up as Xen has already reloaded all segment
1142905a36a2SIngo Molnar * registers that could be reloaded and zeroed the others.
1143905a36a2SIngo Molnar * Category 2 we fix up by killing the current process. We cannot use the
1144905a36a2SIngo Molnar * normal Linux return path in this case because if we use the IRET hypercall
1145905a36a2SIngo Molnar * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
1146905a36a2SIngo Molnar * We distinguish between categories by comparing each saved segment register
1147905a36a2SIngo Molnar * with its current contents: any discrepancy means we are in category 1.
1148905a36a2SIngo Molnar */
1149bc7b11c0SJiri SlabySYM_CODE_START(xen_failsafe_callback)
11508c1f7558SJosh Poimboeuf	UNWIND_HINT_EMPTY
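	/*
	 * Frame layout on entry, as provided by Xen: RCX at 0(%rsp) and
	 * R11 at 8(%rsp), the saved DS/ES/FS/GS selectors at
	 * 0x10/0x18/0x20/0x28, then the iret frame at 0x30, which is why
	 * 0x30 bytes are dropped below before a new frame is built.
	 */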
1151905a36a2SIngo Molnar	movl	%ds, %ecx
1152905a36a2SIngo Molnar	cmpw	%cx, 0x10(%rsp)
1153905a36a2SIngo Molnar	jne	1f
1154905a36a2SIngo Molnar	movl	%es, %ecx
1155905a36a2SIngo Molnar	cmpw	%cx, 0x18(%rsp)
1156905a36a2SIngo Molnar	jne	1f
1157905a36a2SIngo Molnar	movl	%fs, %ecx
1158905a36a2SIngo Molnar	cmpw	%cx, 0x20(%rsp)
1159905a36a2SIngo Molnar	jne	1f
1160905a36a2SIngo Molnar	movl	%gs, %ecx
1161905a36a2SIngo Molnar	cmpw	%cx, 0x28(%rsp)
1162905a36a2SIngo Molnar	jne	1f
1163905a36a2SIngo Molnar	/* All segments match their saved values => Category 2 (Bad IRET). */
1164905a36a2SIngo Molnar	movq	(%rsp), %rcx
1165905a36a2SIngo Molnar	movq	8(%rsp), %r11
1166905a36a2SIngo Molnar	addq	$0x30, %rsp
1167905a36a2SIngo Molnar	pushq	$0				/* RIP */
11688c1f7558SJosh Poimboeuf	UNWIND_HINT_IRET_REGS offset=8
1169be4c11afSThomas Gleixner	jmp	asm_exc_general_protection
1170905a36a2SIngo Molnar1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
1171905a36a2SIngo Molnar	movq	(%rsp), %rcx
1172905a36a2SIngo Molnar	movq	8(%rsp), %r11
1173905a36a2SIngo Molnar	addq	$0x30, %rsp
11748c1f7558SJosh Poimboeuf	UNWIND_HINT_IRET_REGS
1175905a36a2SIngo Molnar	pushq	$-1 /* orig_ax = -1 => not a system call */
11763f01daecSDominik Brodowski	PUSH_AND_CLEAR_REGS
1177946c1911SJosh Poimboeuf	ENCODE_FRAME_POINTER
1178*e88d9741SThomas Gleixner	jmp	error_return
1179bc7b11c0SJiri SlabySYM_CODE_END(xen_failsafe_callback)
118028c11b0fSJuergen Gross#endif /* CONFIG_XEN_PV */
1181905a36a2SIngo Molnar
118228c11b0fSJuergen Gross#ifdef CONFIG_XEN_PVHVM
1183905a36a2SIngo Molnarapicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
1184905a36a2SIngo Molnar	xen_hvm_callback_vector xen_evtchn_do_upcall
118528c11b0fSJuergen Gross#endif
1186905a36a2SIngo Molnar
1187905a36a2SIngo Molnar
1188905a36a2SIngo Molnar#if IS_ENABLED(CONFIG_HYPERV)
1189905a36a2SIngo Molnarapicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
1190905a36a2SIngo Molnar	hyperv_callback_vector hyperv_vector_handler
119193286261SVitaly Kuznetsov
119293286261SVitaly Kuznetsovapicinterrupt3 HYPERV_REENLIGHTENMENT_VECTOR \
119393286261SVitaly Kuznetsov	hyperv_reenlightenment_vector hyperv_reenlightenment_intr
1194248e742aSMichael Kelley
1195248e742aSMichael Kelleyapicinterrupt3 HYPERV_STIMER0_VECTOR \
1196248e742aSMichael Kelley	hv_stimer0_callback_vector hv_stimer0_vector_handler
1197905a36a2SIngo Molnar#endif /* CONFIG_HYPERV */
1198905a36a2SIngo Molnar
1199498ad393SZhao Yakui#if IS_ENABLED(CONFIG_ACRN_GUEST)
1200498ad393SZhao Yakuiapicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
1201498ad393SZhao Yakui	acrn_hv_callback_vector acrn_hv_vector_handler
1202498ad393SZhao Yakui#endif
1203498ad393SZhao Yakui
1204905a36a2SIngo Molnar/*
12059e809d15SDominik Brodowski * Save all registers in pt_regs, and switch gs if needed.
1206905a36a2SIngo Molnar * Uses a slow but surefire "are we in the kernel?" check.
1207905a36a2SIngo Molnar * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
1208905a36a2SIngo Molnar */
1209ef1e0315SJiri SlabySYM_CODE_START_LOCAL(paranoid_entry)
12108c1f7558SJosh Poimboeuf	UNWIND_HINT_FUNC
1211905a36a2SIngo Molnar	cld
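	/*
	 * save_ret=1: paranoid_entry is reached via call, so the macro
	 * keeps the return address on top of the constructed pt_regs,
	 * which is why ENCODE_FRAME_POINTER gets an offset of 8.
	 */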
12129e809d15SDominik Brodowski	PUSH_AND_CLEAR_REGS save_ret=1
12139e809d15SDominik Brodowski	ENCODE_FRAME_POINTER 8
1214905a36a2SIngo Molnar	movl	$1, %ebx
1215905a36a2SIngo Molnar	movl	$MSR_GS_BASE, %ecx
1216905a36a2SIngo Molnar	rdmsr
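	/*
	 * RDMSR returns MSR_GS_BASE in EDX:EAX.  A kernel GS base is an
	 * address in the kernel half of the address space, so bit 31 of
	 * EDX (the sign bit tested below) is set; a user GS base has it
	 * clear.
	 */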
1217905a36a2SIngo Molnar	testl	%edx, %edx
1218905a36a2SIngo Molnar	js	1f				/* negative -> in kernel */
1219905a36a2SIngo Molnar	SWAPGS
1220905a36a2SIngo Molnar	xorl	%ebx, %ebx
12218a09317bSDave Hansen
12228a09317bSDave Hansen1:
122316561f27SDave Hansen	/*
122416561f27SDave Hansen	 * Always stash CR3 in %r14.  This value will be restored,
1225ae852495SAndy Lutomirski	 * verbatim, at exit.  Needed if paranoid_entry interrupted
1226ae852495SAndy Lutomirski	 * another entry that already switched to the user CR3 value
1227ae852495SAndy Lutomirski	 * but has not yet returned to userspace.
122816561f27SDave Hansen	 *
122916561f27SDave Hansen	 * This is also why CS (stashed in the "iret frame" by the
123016561f27SDave Hansen	 * hardware at entry) cannot be used: this may be a return
1231ae852495SAndy Lutomirski	 * to kernel code, but with a user CR3 value.
123216561f27SDave Hansen	 */
12338a09317bSDave Hansen	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
12348a09317bSDave Hansen
123518ec54fdSJosh Poimboeuf	/*
123618ec54fdSJosh Poimboeuf	 * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an
123718ec54fdSJosh Poimboeuf	 * unconditional CR3 write, even in the PTI case.  So do an lfence
123818ec54fdSJosh Poimboeuf	 * to prevent GS speculation, regardless of whether PTI is enabled.
123918ec54fdSJosh Poimboeuf	 */
124018ec54fdSJosh Poimboeuf	FENCE_SWAPGS_KERNEL_ENTRY
124118ec54fdSJosh Poimboeuf
12428a09317bSDave Hansen	ret
1243ef1e0315SJiri SlabySYM_CODE_END(paranoid_entry)
1244905a36a2SIngo Molnar
1245905a36a2SIngo Molnar/*
1246905a36a2SIngo Molnar * "Paranoid" exit path from exception stack.  This is invoked
1247905a36a2SIngo Molnar * only on return from non-NMI IST interrupts that came
1248905a36a2SIngo Molnar * from kernel space.
1249905a36a2SIngo Molnar *
1250905a36a2SIngo Molnar * We may be returning to very strange contexts (e.g. very early
1251905a36a2SIngo Molnar * in syscall entry), so checking for preemption here would
1252905a36a2SIngo Molnar * be complicated.  Fortunately, there's no good reason
1253905a36a2SIngo Molnar * to try to handle preemption here.
12544d732138SIngo Molnar *
12554d732138SIngo Molnar * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
1256905a36a2SIngo Molnar */
1257ef1e0315SJiri SlabySYM_CODE_START_LOCAL(paranoid_exit)
12588c1f7558SJosh Poimboeuf	UNWIND_HINT_REGS
12592140a994SJan Beulich	DISABLE_INTERRUPTS(CLBR_ANY)
1260905a36a2SIngo Molnar	TRACE_IRQS_OFF_DEBUG
1261905a36a2SIngo Molnar	testl	%ebx, %ebx			/* swapgs needed? */
1262e5317832SAndy Lutomirski	jnz	.Lparanoid_exit_no_swapgs
1263905a36a2SIngo Molnar	TRACE_IRQS_IRETQ
126416561f27SDave Hansen	/* Always restore stashed CR3 value (see paranoid_entry) */
126521e94459SPeter Zijlstra	RESTORE_CR3	scratch_reg=%rbx save_reg=%r14
1266905a36a2SIngo Molnar	SWAPGS_UNSAFE_STACK
126745c08383SThomas Gleixner	jmp	restore_regs_and_return_to_kernel
1268e5317832SAndy Lutomirski.Lparanoid_exit_no_swapgs:
1269905a36a2SIngo Molnar	TRACE_IRQS_IRETQ_DEBUG
127016561f27SDave Hansen	/* Always restore stashed CR3 value (see paranoid_entry) */
1271e4865757SIngo Molnar	RESTORE_CR3	scratch_reg=%rbx save_reg=%r14
1272e5317832SAndy Lutomirski	jmp restore_regs_and_return_to_kernel
1273ef1e0315SJiri SlabySYM_CODE_END(paranoid_exit)
1274905a36a2SIngo Molnar
1275905a36a2SIngo Molnar/*
12769e809d15SDominik Brodowski * Save all registers in pt_regs, and switch GS if needed.
1277905a36a2SIngo Molnar */
1278ef1e0315SJiri SlabySYM_CODE_START_LOCAL(error_entry)
12799e809d15SDominik Brodowski	UNWIND_HINT_FUNC
1280905a36a2SIngo Molnar	cld
12819e809d15SDominik Brodowski	PUSH_AND_CLEAR_REGS save_ret=1
12829e809d15SDominik Brodowski	ENCODE_FRAME_POINTER 8
1283905a36a2SIngo Molnar	testb	$3, CS+8(%rsp)
1284cb6f64edSAndy Lutomirski	jz	.Lerror_kernelspace
1285539f5113SAndy Lutomirski
1286cb6f64edSAndy Lutomirski	/*
1287cb6f64edSAndy Lutomirski	 * We entered from user mode or we're pretending to have entered
1288cb6f64edSAndy Lutomirski	 * from user mode due to an IRET fault.
1289cb6f64edSAndy Lutomirski	 */
1290905a36a2SIngo Molnar	SWAPGS
129118ec54fdSJosh Poimboeuf	FENCE_SWAPGS_USER_ENTRY
12928a09317bSDave Hansen	/* We have user CR3.  Change to kernel CR3. */
12938a09317bSDave Hansen	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
1294539f5113SAndy Lutomirski
1295cb6f64edSAndy Lutomirski.Lerror_entry_from_usermode_after_swapgs:
12967f2590a1SAndy Lutomirski	/* Put us onto the real thread stack. */
12977f2590a1SAndy Lutomirski	popq	%r12				/* save return addr in %r12 */
12987f2590a1SAndy Lutomirski	movq	%rsp, %rdi			/* arg0 = pt_regs pointer */
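	/* sync_regs() copies pt_regs to the thread stack and returns the new pointer. */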
12997f2590a1SAndy Lutomirski	call	sync_regs
13007f2590a1SAndy Lutomirski	movq	%rax, %rsp			/* switch stack */
13017f2590a1SAndy Lutomirski	ENCODE_FRAME_POINTER
13027f2590a1SAndy Lutomirski	pushq	%r12
1303f1075053SAndy Lutomirski	ret
130402bc7768SAndy Lutomirski
130518ec54fdSJosh Poimboeuf.Lerror_entry_done_lfence:
130618ec54fdSJosh Poimboeuf	FENCE_SWAPGS_KERNEL_ENTRY
1307cb6f64edSAndy Lutomirski.Lerror_entry_done:
1308905a36a2SIngo Molnar	ret
1309905a36a2SIngo Molnar
1310905a36a2SIngo Molnar	/*
1311905a36a2SIngo Molnar	 * There are two places in the kernel that can potentially fault with
1312905a36a2SIngo Molnar	 * usergs. Handle them here.  B stepping K8s sometimes report a
1313905a36a2SIngo Molnar	 * truncated RIP for IRET exceptions returning to compat mode. Check
1314905a36a2SIngo Molnar	 * for these here too.
1315905a36a2SIngo Molnar	 */
1316cb6f64edSAndy Lutomirski.Lerror_kernelspace:
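	/* Did the fault come from the IRETQ instruction itself? */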
1317905a36a2SIngo Molnar	leaq	native_irq_return_iret(%rip), %rcx
1318905a36a2SIngo Molnar	cmpq	%rcx, RIP+8(%rsp)
1319cb6f64edSAndy Lutomirski	je	.Lerror_bad_iret
1320905a36a2SIngo Molnar	movl	%ecx, %eax			/* zero extend */
1321905a36a2SIngo Molnar	cmpq	%rax, RIP+8(%rsp)
1322cb6f64edSAndy Lutomirski	je	.Lbstep_iret
132342c748bbSBorislav Petkov	cmpq	$.Lgs_change, RIP+8(%rsp)
132418ec54fdSJosh Poimboeuf	jne	.Lerror_entry_done_lfence
1325539f5113SAndy Lutomirski
1326539f5113SAndy Lutomirski	/*
132742c748bbSBorislav Petkov	 * hack: .Lgs_change can fail with user gsbase.  If this happens, fix up
1328539f5113SAndy Lutomirski	 * gsbase and proceed.  We'll fix up the exception and land in
132942c748bbSBorislav Petkov	 * .Lgs_change's error handler with kernel gsbase.
1330539f5113SAndy Lutomirski	 */
13312fa5f04fSWanpeng Li	SWAPGS
133218ec54fdSJosh Poimboeuf	FENCE_SWAPGS_USER_ENTRY
13332fa5f04fSWanpeng Li	jmp .Lerror_entry_done
1334905a36a2SIngo Molnar
1335cb6f64edSAndy Lutomirski.Lbstep_iret:
1336905a36a2SIngo Molnar	/* Fix truncated RIP */
1337905a36a2SIngo Molnar	movq	%rcx, RIP+8(%rsp)
1338905a36a2SIngo Molnar	/* fall through */
1339905a36a2SIngo Molnar
1340cb6f64edSAndy Lutomirski.Lerror_bad_iret:
1341539f5113SAndy Lutomirski	/*
13428a09317bSDave Hansen	 * We came from an IRET to user mode, so we have user
13438a09317bSDave Hansen	 * gsbase and CR3.  Switch to kernel gsbase and CR3:
1344539f5113SAndy Lutomirski	 */
1345905a36a2SIngo Molnar	SWAPGS
134618ec54fdSJosh Poimboeuf	FENCE_SWAPGS_USER_ENTRY
13478a09317bSDave Hansen	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
1348539f5113SAndy Lutomirski
1349539f5113SAndy Lutomirski	/*
1350539f5113SAndy Lutomirski	 * Pretend that the exception came from user mode: set up pt_regs
1351b3681dd5SAndy Lutomirski	 * as if we faulted immediately after IRET.
1352539f5113SAndy Lutomirski	 */
1353905a36a2SIngo Molnar	mov	%rsp, %rdi
1354905a36a2SIngo Molnar	call	fixup_bad_iret
1355905a36a2SIngo Molnar	mov	%rax, %rsp
1356cb6f64edSAndy Lutomirski	jmp	.Lerror_entry_from_usermode_after_swapgs
1357ef1e0315SJiri SlabySYM_CODE_END(error_entry)
1358905a36a2SIngo Molnar
1359ef1e0315SJiri SlabySYM_CODE_START_LOCAL(error_exit)
13608c1f7558SJosh Poimboeuf	UNWIND_HINT_REGS
13612140a994SJan Beulich	DISABLE_INTERRUPTS(CLBR_ANY)
1362905a36a2SIngo Molnar	TRACE_IRQS_OFF
1363b3681dd5SAndy Lutomirski	testb	$3, CS(%rsp)
1364b3681dd5SAndy Lutomirski	jz	retint_kernel
136530a2441cSJiri Slaby	jmp	.Lretint_user
1366ef1e0315SJiri SlabySYM_CODE_END(error_exit)
1367905a36a2SIngo Molnar
1368424c7d0aSThomas GleixnerSYM_CODE_START_LOCAL(error_return)
1369424c7d0aSThomas Gleixner	UNWIND_HINT_REGS
1370424c7d0aSThomas Gleixner	DEBUG_ENTRY_ASSERT_IRQS_OFF
1371424c7d0aSThomas Gleixner	testb	$3, CS(%rsp)
1372424c7d0aSThomas Gleixner	jz	restore_regs_and_return_to_kernel
1373424c7d0aSThomas Gleixner	jmp	swapgs_restore_regs_and_return_to_usermode
1374424c7d0aSThomas GleixnerSYM_CODE_END(error_return)
1375424c7d0aSThomas Gleixner
1376929bacecSAndy Lutomirski/*
1377929bacecSAndy Lutomirski * Runs on exception stack.  Xen PV does not go through this path at all,
1378929bacecSAndy Lutomirski * so we can use real assembly here.
13798a09317bSDave Hansen *
13808a09317bSDave Hansen * Registers:
13818a09317bSDave Hansen *	%r14: Used to save/restore the CR3 of the interrupted context
13828a09317bSDave Hansen *	      when PAGE_TABLE_ISOLATION is in use.  Do not clobber.
1383929bacecSAndy Lutomirski */
13846271fef0SThomas GleixnerSYM_CODE_START(asm_exc_nmi)
13858c1f7558SJosh Poimboeuf	UNWIND_HINT_IRET_REGS
1386929bacecSAndy Lutomirski
1387fc57a7c6SAndy Lutomirski	/*
1388905a36a2SIngo Molnar	 * We allow breakpoints in NMIs. If a breakpoint occurs, then
1389905a36a2SIngo Molnar	 * the iretq it performs will take us out of NMI context.
1390905a36a2SIngo Molnar	 * This means that we can have nested NMIs where the next
1391905a36a2SIngo Molnar	 * NMI is using the top of the stack of the previous NMI. We
1392905a36a2SIngo Molnar	 * can't let it execute because the nested NMI will corrupt the
1393905a36a2SIngo Molnar	 * stack of the previous NMI. NMI handlers are not re-entrant
1394905a36a2SIngo Molnar	 * anyway.
1395905a36a2SIngo Molnar	 *
1396905a36a2SIngo Molnar	 * To handle this case we do the following:
1397905a36a2SIngo Molnar	 *  Check a special location on the stack that contains
1398905a36a2SIngo Molnar	 *  a variable that is set when NMIs are executing.
1399905a36a2SIngo Molnar	 *  The interrupted task's stack is also checked to see if it
1400905a36a2SIngo Molnar	 *  is an NMI stack.
1401905a36a2SIngo Molnar	 *  If the variable is not set and the stack is not the NMI
1402905a36a2SIngo Molnar	 *  stack then:
1403905a36a2SIngo Molnar	 *    o Set the special variable on the stack
14040b22930eSAndy Lutomirski	 *    o Copy the interrupt frame into an "outermost" location on the
14050b22930eSAndy Lutomirski	 *      stack
14060b22930eSAndy Lutomirski	 *    o Copy the interrupt frame into an "iret" location on the stack
1407905a36a2SIngo Molnar	 *    o Continue processing the NMI
1408905a36a2SIngo Molnar	 *  If the variable is set or the previous stack is the NMI stack:
14090b22930eSAndy Lutomirski	 *    o Modify the "iret" location to jump to repeat_nmi
1410905a36a2SIngo Molnar	 *    o return back to the first NMI
1411905a36a2SIngo Molnar	 *
1412905a36a2SIngo Molnar	 * Now on exit of the first NMI, we first clear the stack variable.
1413905a36a2SIngo Molnar	 * The NMI stack will tell any nested NMIs at that point that it is
1414905a36a2SIngo Molnar	 * nested. Then we pop the stack normally with iret, and if there was
1415905a36a2SIngo Molnar	 * a nested NMI that updated the copied interrupt stack frame, a
1416905a36a2SIngo Molnar	 * jump will be made to the repeat_nmi code that will handle the second
1417905a36a2SIngo Molnar	 * NMI.
14189b6e6a83SAndy Lutomirski	 *
14199b6e6a83SAndy Lutomirski	 * However, espfix prevents us from directly returning to userspace
14209b6e6a83SAndy Lutomirski	 * with a single IRET instruction.  Similarly, IRET to user mode
14219b6e6a83SAndy Lutomirski	 * can fault.  We therefore handle NMIs from user space like
14229b6e6a83SAndy Lutomirski	 * other IST entries.
1423905a36a2SIngo Molnar	 */
1424905a36a2SIngo Molnar
1425e93c1730SAndy Lutomirski	ASM_CLAC
1426e93c1730SAndy Lutomirski
1427905a36a2SIngo Molnar	/* Use %rdx as our temp variable throughout */
1428905a36a2SIngo Molnar	pushq	%rdx
1429905a36a2SIngo Molnar
14309b6e6a83SAndy Lutomirski	testb	$3, CS-RIP+8(%rsp)
14319b6e6a83SAndy Lutomirski	jz	.Lnmi_from_kernel
1432905a36a2SIngo Molnar
1433905a36a2SIngo Molnar	/*
14349b6e6a83SAndy Lutomirski	 * NMI from user mode.  We need to run on the thread stack, but we
14359b6e6a83SAndy Lutomirski	 * can't go through the normal entry paths: NMIs are masked, and
14369b6e6a83SAndy Lutomirski	 * we don't want to enable interrupts, because then we'll end
14379b6e6a83SAndy Lutomirski	 * up in an awkward situation in which IRQs are on but NMIs
14389b6e6a83SAndy Lutomirski	 * are off.
143983c133cfSAndy Lutomirski	 *
144083c133cfSAndy Lutomirski	 * We also must not push anything to the stack before switching
144183c133cfSAndy Lutomirski	 * stacks lest we corrupt the "NMI executing" variable.
14429b6e6a83SAndy Lutomirski	 */
14439b6e6a83SAndy Lutomirski
1444929bacecSAndy Lutomirski	swapgs
14459b6e6a83SAndy Lutomirski	cld
144618ec54fdSJosh Poimboeuf	FENCE_SWAPGS_USER_ENTRY
14478a09317bSDave Hansen	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
14489b6e6a83SAndy Lutomirski	movq	%rsp, %rdx
14499b6e6a83SAndy Lutomirski	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
14508c1f7558SJosh Poimboeuf	UNWIND_HINT_IRET_REGS base=%rdx offset=8
14519b6e6a83SAndy Lutomirski	pushq	5*8(%rdx)	/* pt_regs->ss */
14529b6e6a83SAndy Lutomirski	pushq	4*8(%rdx)	/* pt_regs->rsp */
14539b6e6a83SAndy Lutomirski	pushq	3*8(%rdx)	/* pt_regs->flags */
14549b6e6a83SAndy Lutomirski	pushq	2*8(%rdx)	/* pt_regs->cs */
14559b6e6a83SAndy Lutomirski	pushq	1*8(%rdx)	/* pt_regs->rip */
14568c1f7558SJosh Poimboeuf	UNWIND_HINT_IRET_REGS
14579b6e6a83SAndy Lutomirski	pushq   $-1		/* pt_regs->orig_ax */
145830907fd1SDominik Brodowski	PUSH_AND_CLEAR_REGS rdx=(%rdx)
1459946c1911SJosh Poimboeuf	ENCODE_FRAME_POINTER
14609b6e6a83SAndy Lutomirski
14619b6e6a83SAndy Lutomirski	/*
14629b6e6a83SAndy Lutomirski	 * At this point we no longer need to worry about stack damage
14639b6e6a83SAndy Lutomirski	 * due to nesting -- we're on the normal thread stack and we're
14649b6e6a83SAndy Lutomirski	 * done with the NMI stack.
14659b6e6a83SAndy Lutomirski	 */
14669b6e6a83SAndy Lutomirski
14679b6e6a83SAndy Lutomirski	movq	%rsp, %rdi
14689b6e6a83SAndy Lutomirski	movq	$-1, %rsi
14696271fef0SThomas Gleixner	call	exc_nmi
14709b6e6a83SAndy Lutomirski
14719b6e6a83SAndy Lutomirski	/*
14729b6e6a83SAndy Lutomirski	 * Return back to user mode.  We must *not* do the normal exit
1473946c1911SJosh Poimboeuf	 * work, because we don't want to enable interrupts.
14749b6e6a83SAndy Lutomirski	 */
14758a055d7fSAndy Lutomirski	jmp	swapgs_restore_regs_and_return_to_usermode
14769b6e6a83SAndy Lutomirski
14779b6e6a83SAndy Lutomirski.Lnmi_from_kernel:
14789b6e6a83SAndy Lutomirski	/*
14790b22930eSAndy Lutomirski	 * Here's what our stack frame will look like:
14800b22930eSAndy Lutomirski	 * +---------------------------------------------------------+
14810b22930eSAndy Lutomirski	 * | original SS                                             |
14820b22930eSAndy Lutomirski	 * | original Return RSP                                     |
14830b22930eSAndy Lutomirski	 * | original RFLAGS                                         |
14840b22930eSAndy Lutomirski	 * | original CS                                             |
14850b22930eSAndy Lutomirski	 * | original RIP                                            |
14860b22930eSAndy Lutomirski	 * +---------------------------------------------------------+
14870b22930eSAndy Lutomirski	 * | temp storage for rdx                                    |
14880b22930eSAndy Lutomirski	 * +---------------------------------------------------------+
14890b22930eSAndy Lutomirski	 * | "NMI executing" variable                                |
14900b22930eSAndy Lutomirski	 * +---------------------------------------------------------+
14910b22930eSAndy Lutomirski	 * | iret SS          } Copied from "outermost" frame        |
14920b22930eSAndy Lutomirski	 * | iret Return RSP  } on each loop iteration; overwritten  |
14930b22930eSAndy Lutomirski	 * | iret RFLAGS      } by a nested NMI to force another     |
14940b22930eSAndy Lutomirski	 * | iret CS          } iteration if needed.                 |
14950b22930eSAndy Lutomirski	 * | iret RIP         }                                      |
14960b22930eSAndy Lutomirski	 * +---------------------------------------------------------+
14970b22930eSAndy Lutomirski	 * | outermost SS          } initialized in first_nmi;       |
14980b22930eSAndy Lutomirski	 * | outermost Return RSP  } will not be changed before      |
14990b22930eSAndy Lutomirski	 * | outermost RFLAGS      } NMI processing is done.         |
15000b22930eSAndy Lutomirski	 * | outermost CS          } Copied to "iret" frame on each  |
15010b22930eSAndy Lutomirski	 * | outermost RIP         } iteration.                      |
15020b22930eSAndy Lutomirski	 * +---------------------------------------------------------+
15030b22930eSAndy Lutomirski	 * | pt_regs                                                 |
15040b22930eSAndy Lutomirski	 * +---------------------------------------------------------+
15050b22930eSAndy Lutomirski	 *
15060b22930eSAndy Lutomirski	 * The "original" frame is used by hardware.  Before re-enabling
15070b22930eSAndy Lutomirski	 * NMIs, we need to be done with it, and we need to leave enough
15080b22930eSAndy Lutomirski	 * space for the asm code here.
15090b22930eSAndy Lutomirski	 *
15100b22930eSAndy Lutomirski	 * We return by executing IRET while RSP points to the "iret" frame.
15110b22930eSAndy Lutomirski	 * That will either return for real or it will loop back into NMI
15120b22930eSAndy Lutomirski	 * processing.
15130b22930eSAndy Lutomirski	 *
15140b22930eSAndy Lutomirski	 * The "outermost" frame is copied to the "iret" frame on each
15150b22930eSAndy Lutomirski	 * iteration of the loop, so each iteration starts with the "iret"
15160b22930eSAndy Lutomirski	 * frame pointing to the final return target.
15170b22930eSAndy Lutomirski	 */
15180b22930eSAndy Lutomirski
15190b22930eSAndy Lutomirski	/*
15200b22930eSAndy Lutomirski	 * Determine whether we're a nested NMI.
15210b22930eSAndy Lutomirski	 *
1522a27507caSAndy Lutomirski	 * If we interrupted kernel code between repeat_nmi and
1523a27507caSAndy Lutomirski	 * end_repeat_nmi, then we are a nested NMI.  We must not
1524a27507caSAndy Lutomirski	 * modify the "iret" frame because it's being written by
1525a27507caSAndy Lutomirski	 * the outer NMI.  That's okay; the outer NMI handler is
15266271fef0SThomas Gleixner	 * about to call exc_nmi() anyway, so we can just
1527a27507caSAndy Lutomirski	 * resume the outer NMI.
1528a27507caSAndy Lutomirski	 */
1529a27507caSAndy Lutomirski
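	/*
	 * 8(%rsp) is the interrupted RIP: the hardware iret frame sits
	 * just above the %rdx we pushed on entry.  The two unsigned
	 * compares below test for repeat_nmi <= RIP < end_repeat_nmi.
	 */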
1530a27507caSAndy Lutomirski	movq	$repeat_nmi, %rdx
1531a27507caSAndy Lutomirski	cmpq	8(%rsp), %rdx
1532a27507caSAndy Lutomirski	ja	1f
1533a27507caSAndy Lutomirski	movq	$end_repeat_nmi, %rdx
1534a27507caSAndy Lutomirski	cmpq	8(%rsp), %rdx
1535a27507caSAndy Lutomirski	ja	nested_nmi_out
1536a27507caSAndy Lutomirski1:
1537a27507caSAndy Lutomirski
1538a27507caSAndy Lutomirski	/*
1539a27507caSAndy Lutomirski	 * Now check "NMI executing".  If it's set, then we're nested.
15400b22930eSAndy Lutomirski	 * This will not detect if we interrupted an outer NMI just
15410b22930eSAndy Lutomirski	 * before IRET.
1542905a36a2SIngo Molnar	 */
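	/*
	 * A nested NMI's hardware frame reuses the top of the NMI stack,
	 * so the outer NMI's "NMI executing" slot ends up just below our
	 * pushed %rdx, at -8(%rsp).
	 */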
1543905a36a2SIngo Molnar	cmpl	$1, -8(%rsp)
1544905a36a2SIngo Molnar	je	nested_nmi
1545905a36a2SIngo Molnar
1546905a36a2SIngo Molnar	/*
15470b22930eSAndy Lutomirski	 * Now test if the previous stack was an NMI stack.  This covers
15480b22930eSAndy Lutomirski	 * the case where we interrupt an outer NMI after it clears
1549810bc075SAndy Lutomirski	 * "NMI executing" but before IRET.  We need to be careful, though:
1550810bc075SAndy Lutomirski	 * there is one case in which RSP could point to the NMI stack
1551810bc075SAndy Lutomirski	 * despite there being no NMI active: naughty userspace controls
1552810bc075SAndy Lutomirski	 * RSP at the very beginning of the SYSCALL targets.  We can
1553810bc075SAndy Lutomirski	 * pull a fast one on naughty userspace, though: we program
1554810bc075SAndy Lutomirski	 * SYSCALL to mask DF, so userspace cannot cause DF to be set
1555810bc075SAndy Lutomirski	 * if it controls the kernel's RSP.  We set DF before we clear
1556810bc075SAndy Lutomirski	 * "NMI executing".
1557905a36a2SIngo Molnar	 */
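	/*
	 * 6*8(%rsp) is the top of this NMI's IST stack: the five-word
	 * hardware frame plus the %rdx we pushed sit below it.
	 */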
1558905a36a2SIngo Molnar	lea	6*8(%rsp), %rdx
1559905a36a2SIngo Molnar	/* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
1560905a36a2SIngo Molnar	cmpq	%rdx, 4*8(%rsp)
1561905a36a2SIngo Molnar	/* If the stack pointer is above the NMI stack, this is a normal NMI */
1562905a36a2SIngo Molnar	ja	first_nmi
15634d732138SIngo Molnar
1564905a36a2SIngo Molnar	subq	$EXCEPTION_STKSZ, %rdx
1565905a36a2SIngo Molnar	cmpq	%rdx, 4*8(%rsp)
1566905a36a2SIngo Molnar	/* If it is below the NMI stack, it is a normal NMI */
1567905a36a2SIngo Molnar	jb	first_nmi
1568810bc075SAndy Lutomirski
1569810bc075SAndy Lutomirski	/* Ah, it is within the NMI stack. */
1570810bc075SAndy Lutomirski
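	/*
	 * The saved RFLAGS is at 3*8(%rsp); DF is bit 10, i.e. bit 2 of
	 * its second byte, hence the +1 byte offset and the >> 8 shift.
	 */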
1571810bc075SAndy Lutomirski	testb	$(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
1572810bc075SAndy Lutomirski	jz	first_nmi	/* RSP was user controlled. */
1573810bc075SAndy Lutomirski
1574810bc075SAndy Lutomirski	/* This is a nested NMI. */
1575905a36a2SIngo Molnar
1576905a36a2SIngo Molnarnested_nmi:
1577905a36a2SIngo Molnar	/*
15780b22930eSAndy Lutomirski	 * Modify the "iret" frame to point to repeat_nmi, forcing another
15790b22930eSAndy Lutomirski	 * iteration of NMI handling.
1580905a36a2SIngo Molnar	 */
158123a781e9SAndy Lutomirski	subq	$8, %rsp
1582905a36a2SIngo Molnar	leaq	-10*8(%rsp), %rdx
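	/*
	 * The five pushes below build the new frame: SS, RSP (= %rdx,
	 * computed above), RFLAGS, CS and RIP = repeat_nmi.
	 */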
1583905a36a2SIngo Molnar	pushq	$__KERNEL_DS
1584905a36a2SIngo Molnar	pushq	%rdx
1585905a36a2SIngo Molnar	pushfq
1586905a36a2SIngo Molnar	pushq	$__KERNEL_CS
1587905a36a2SIngo Molnar	pushq	$repeat_nmi
1588905a36a2SIngo Molnar
1589905a36a2SIngo Molnar	/* Put stack back */
1590905a36a2SIngo Molnar	addq	$(6*8), %rsp
1591905a36a2SIngo Molnar
1592905a36a2SIngo Molnarnested_nmi_out:
1593905a36a2SIngo Molnar	popq	%rdx
1594905a36a2SIngo Molnar
15950b22930eSAndy Lutomirski	/* We are returning to kernel mode, so this cannot result in a fault. */
1596929bacecSAndy Lutomirski	iretq
1597905a36a2SIngo Molnar
1598905a36a2SIngo Molnarfirst_nmi:
15990b22930eSAndy Lutomirski	/* Restore rdx. */
1600905a36a2SIngo Molnar	movq	(%rsp), %rdx
1601905a36a2SIngo Molnar
160236f1a77bSAndy Lutomirski	/* Make room for "NMI executing". */
160336f1a77bSAndy Lutomirski	pushq	$0
1604905a36a2SIngo Molnar
16050b22930eSAndy Lutomirski	/* Leave room for the "iret" frame */
1606905a36a2SIngo Molnar	subq	$(5*8), %rsp
1607905a36a2SIngo Molnar
16080b22930eSAndy Lutomirski	/* Copy the "original" frame to the "outermost" frame */
1609905a36a2SIngo Molnar	.rept 5
1610905a36a2SIngo Molnar	pushq	11*8(%rsp)
1611905a36a2SIngo Molnar	.endr
16128c1f7558SJosh Poimboeuf	UNWIND_HINT_IRET_REGS
1613905a36a2SIngo Molnar
1614905a36a2SIngo Molnar	/* Everything up to here is safe from nested NMIs */
1615905a36a2SIngo Molnar
1616a97439aaSAndy Lutomirski#ifdef CONFIG_DEBUG_ENTRY
1617a97439aaSAndy Lutomirski	/*
1618a97439aaSAndy Lutomirski	 * For ease of testing, unmask NMIs right away.  Disabled by
1619a97439aaSAndy Lutomirski	 * default because IRET is very expensive.
1620a97439aaSAndy Lutomirski	 */
1621a97439aaSAndy Lutomirski	pushq	$0		/* SS */
1622a97439aaSAndy Lutomirski	pushq	%rsp		/* RSP (minus 8 because of the previous push) */
1623a97439aaSAndy Lutomirski	addq	$8, (%rsp)	/* Fix up RSP */
1624a97439aaSAndy Lutomirski	pushfq			/* RFLAGS */
1625a97439aaSAndy Lutomirski	pushq	$__KERNEL_CS	/* CS */
1626a97439aaSAndy Lutomirski	pushq	$1f		/* RIP */
1627929bacecSAndy Lutomirski	iretq			/* continues at repeat_nmi below */
16288c1f7558SJosh Poimboeuf	UNWIND_HINT_IRET_REGS
1629a97439aaSAndy Lutomirski1:
1630a97439aaSAndy Lutomirski#endif
1631a97439aaSAndy Lutomirski
16320b22930eSAndy Lutomirskirepeat_nmi:
1633905a36a2SIngo Molnar	/*
1634905a36a2SIngo Molnar	 * If there was a nested NMI, the first NMI's iret will return
1635905a36a2SIngo Molnar	 * here. But NMIs are still enabled and we can take another
1636905a36a2SIngo Molnar	 * nested NMI. The nested NMI checks the interrupted RIP to see
1637905a36a2SIngo Molnar	 * if it is between repeat_nmi and end_repeat_nmi, and if so
1638905a36a2SIngo Molnar	 * it will just return, as we are about to repeat an NMI anyway.
1639905a36a2SIngo Molnar	 * This makes it safe to copy to the stack frame that a nested
1640905a36a2SIngo Molnar	 * NMI will update.
16410b22930eSAndy Lutomirski	 *
16420b22930eSAndy Lutomirski	 * RSP is pointing to "outermost RIP".  gsbase is unknown, but, if
16430b22930eSAndy Lutomirski	 * we're repeating an NMI, gsbase has the same value that it had on
16440b22930eSAndy Lutomirski	 * the first iteration.  paranoid_entry will load the kernel
16456271fef0SThomas Gleixner	 * gsbase if needed before we call exc_nmi().  "NMI executing"
164636f1a77bSAndy Lutomirski	 * is zero.
1647905a36a2SIngo Molnar	 */
164836f1a77bSAndy Lutomirski	movq	$1, 10*8(%rsp)		/* Set "NMI executing". */
1649905a36a2SIngo Molnar
16500b22930eSAndy Lutomirski	/*
16510b22930eSAndy Lutomirski	 * Copy the "outermost" frame to the "iret" frame.  NMIs that nest
16520b22930eSAndy Lutomirski	 * here must not modify the "iret" frame while we're writing to
16530b22930eSAndy Lutomirski	 * it or it will end up containing garbage.
16540b22930eSAndy Lutomirski	 */
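	/*
	 * %rsp points at "outermost RIP"; step it up to the "NMI
	 * executing" slot so that each push below reads an "outermost"
	 * word at -6*8(%rsp) and writes the matching "iret" word, then
	 * drop %rsp back to "outermost RIP".
	 */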
1655905a36a2SIngo Molnar	addq	$(10*8), %rsp
1656905a36a2SIngo Molnar	.rept 5
1657905a36a2SIngo Molnar	pushq	-6*8(%rsp)
1658905a36a2SIngo Molnar	.endr
1659905a36a2SIngo Molnar	subq	$(5*8), %rsp
1660905a36a2SIngo Molnarend_repeat_nmi:
1661905a36a2SIngo Molnar
1662905a36a2SIngo Molnar	/*
16630b22930eSAndy Lutomirski	 * Everything below this point can be preempted by a nested NMI.
16640b22930eSAndy Lutomirski	 * If this happens, then the inner NMI will change the "iret"
16650b22930eSAndy Lutomirski	 * frame to point back to repeat_nmi.
1666905a36a2SIngo Molnar	 */
1667905a36a2SIngo Molnar	pushq	$-1				/* ORIG_RAX: no syscall to restart */
1668905a36a2SIngo Molnar
1669905a36a2SIngo Molnar	/*
1670905a36a2SIngo Molnar	 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
1671905a36a2SIngo Molnar	 * as we should not be calling schedule in NMI context,
1672905a36a2SIngo Molnar	 * even with normal interrupts enabled. An NMI should not be
1673905a36a2SIngo Molnar	 * setting NEED_RESCHED or anything that normal interrupts and
1674905a36a2SIngo Molnar	 * exceptions might do.
1675905a36a2SIngo Molnar	 */
1676905a36a2SIngo Molnar	call	paranoid_entry
16778c1f7558SJosh Poimboeuf	UNWIND_HINT_REGS
1678905a36a2SIngo Molnar
16796271fef0SThomas Gleixner	/* paranoidentry exc_nmi(), 0; without TRACE_IRQS_OFF */
1680905a36a2SIngo Molnar	movq	%rsp, %rdi
1681905a36a2SIngo Molnar	movq	$-1, %rsi
16826271fef0SThomas Gleixner	call	exc_nmi
1683905a36a2SIngo Molnar
168416561f27SDave Hansen	/* Always restore stashed CR3 value (see paranoid_entry) */
168521e94459SPeter Zijlstra	RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
16868a09317bSDave Hansen
1687905a36a2SIngo Molnar	testl	%ebx, %ebx			/* swapgs needed? */
1688905a36a2SIngo Molnar	jnz	nmi_restore
1689905a36a2SIngo Molnarnmi_swapgs:
1690905a36a2SIngo Molnar	SWAPGS_UNSAFE_STACK
1691905a36a2SIngo Molnarnmi_restore:
1692502af0d7SDominik Brodowski	POP_REGS
16930b22930eSAndy Lutomirski
1694471ee483SAndy Lutomirski	/*
1695471ee483SAndy Lutomirski	 * Skip orig_ax and the "outermost" frame to point RSP at the "iret"
1696471ee483SAndy Lutomirski	 * frame.
1697471ee483SAndy Lutomirski	 */
1698471ee483SAndy Lutomirski	addq	$6*8, %rsp
1699905a36a2SIngo Molnar
1700810bc075SAndy Lutomirski	/*
1701810bc075SAndy Lutomirski	 * Clear "NMI executing".  Set DF first so that we can easily
1702810bc075SAndy Lutomirski	 * distinguish the remaining code between here and IRET from
1703929bacecSAndy Lutomirski	 * the SYSCALL entry and exit paths.
1704929bacecSAndy Lutomirski	 *
1705929bacecSAndy Lutomirski	 * We arguably should just inspect RIP instead, but I (Andy) wrote
1706929bacecSAndy Lutomirski	 * this code when I had the misapprehension that Xen PV supported
1707929bacecSAndy Lutomirski	 * NMIs, and Xen PV would break that approach.
1708810bc075SAndy Lutomirski	 */
1709810bc075SAndy Lutomirski	std
1710810bc075SAndy Lutomirski	movq	$0, 5*8(%rsp)		/* clear "NMI executing" */
17110b22930eSAndy Lutomirski
17120b22930eSAndy Lutomirski	/*
1713929bacecSAndy Lutomirski	 * iretq reads the "iret" frame and exits the NMI stack in a
1714929bacecSAndy Lutomirski	 * single instruction.  We are returning to kernel mode, so this
1715929bacecSAndy Lutomirski	 * cannot result in a fault.  Similarly, we don't need to worry
1716929bacecSAndy Lutomirski	 * about espfix64 on the way back to kernel mode.
17170b22930eSAndy Lutomirski	 */
1718929bacecSAndy Lutomirski	iretq
17196271fef0SThomas GleixnerSYM_CODE_END(asm_exc_nmi)
1720905a36a2SIngo Molnar
1721dffb3f9dSAndy Lutomirski#ifndef CONFIG_IA32_EMULATION
1722dffb3f9dSAndy Lutomirski/*
1723dffb3f9dSAndy Lutomirski * This handles SYSCALL from 32-bit code.  There is no way to program
1724dffb3f9dSAndy Lutomirski * MSRs to fully disable 32-bit SYSCALL.
1725dffb3f9dSAndy Lutomirski */
1726bc7b11c0SJiri SlabySYM_CODE_START(ignore_sysret)
17278c1f7558SJosh Poimboeuf	UNWIND_HINT_EMPTY
1728905a36a2SIngo Molnar	mov	$-ENOSYS, %eax
1729b2b1d94cSJan Beulich	sysretl
1730bc7b11c0SJiri SlabySYM_CODE_END(ignore_sysret)
1731dffb3f9dSAndy Lutomirski#endif
17322deb4be2SAndy Lutomirski
1733b9f6976bSThomas Gleixner.pushsection .text, "ax"
1734bc7b11c0SJiri SlabySYM_CODE_START(rewind_stack_do_exit)
17358c1f7558SJosh Poimboeuf	UNWIND_HINT_FUNC
17362deb4be2SAndy Lutomirski	/* Prevent any naive code from trying to unwind to our caller. */
17372deb4be2SAndy Lutomirski	xorl	%ebp, %ebp
17382deb4be2SAndy Lutomirski
17392deb4be2SAndy Lutomirski	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rax
17408c1f7558SJosh Poimboeuf	leaq	-PTREGS_SIZE(%rax), %rsp
1741f977df7bSJann Horn	UNWIND_HINT_REGS
17422deb4be2SAndy Lutomirski
17432deb4be2SAndy Lutomirski	call	do_exit
1744bc7b11c0SJiri SlabySYM_CODE_END(rewind_stack_do_exit)
1745b9f6976bSThomas Gleixner.popsection
1746