xref: /openbmc/linux/arch/x86/entry/entry_64.S (revision 31eeb6b0)
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 *  linux/arch/x86_64/entry.S
4 *
5 *  Copyright (C) 1991, 1992  Linus Torvalds
6 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
7 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
8 *
9 * entry.S contains the system-call and fault low-level handling routines.
10 *
11 * Some of this is documented in Documentation/x86/entry_64.rst
12 *
13 * A note on terminology:
14 * - iret frame:	Architecture defined interrupt frame from SS to RIP
15 *			at the top of the kernel process stack.
16 *
17 * Some macro usage:
18 * - SYM_FUNC_START/END:Define functions in the symbol table.
19 * - idtentry:		Define exception entry points.
20 */
21#include <linux/linkage.h>
22#include <asm/segment.h>
23#include <asm/cache.h>
24#include <asm/errno.h>
25#include <asm/asm-offsets.h>
26#include <asm/msr.h>
27#include <asm/unistd.h>
28#include <asm/thread_info.h>
29#include <asm/hw_irq.h>
30#include <asm/page_types.h>
31#include <asm/irqflags.h>
32#include <asm/paravirt.h>
33#include <asm/percpu.h>
34#include <asm/asm.h>
35#include <asm/smap.h>
36#include <asm/pgtable_types.h>
37#include <asm/export.h>
38#include <asm/frame.h>
39#include <asm/trapnr.h>
40#include <asm/nospec-branch.h>
41#include <asm/fsgsbase.h>
42#include <linux/err.h>
43
44#include "calling.h"
45
46.code64
47.section .entry.text, "ax"
48
49/*
50 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
51 *
52 * This is the only entry point used for 64-bit system calls.  The
53 * hardware interface is reasonably well designed and the register to
54 * argument mapping Linux uses fits well with the registers that are
55 * available when SYSCALL is used.
56 *
57 * SYSCALL instructions can be found inlined in libc implementations as
58 * well as some other programs and libraries.  There are also a handful
59 * of SYSCALL instructions in the vDSO used, for example, as a
60 * clock_gettime()/gettimeofday() fallback.
61 *
62 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
63 * then loads new ss, cs, and rip from previously programmed MSRs.
64 * rflags gets masked by a value from another MSR (so CLD and CLAC
65 * are not needed). SYSCALL does not save anything on the stack
66 * and does not change rsp.
67 *
68 * Registers on entry:
69 * rax  system call number
70 * rcx  return address
71 * r11  saved rflags (note: r11 is callee-clobbered register in C ABI)
72 * rdi  arg0
73 * rsi  arg1
74 * rdx  arg2
75 * r10  arg3 (needs to be moved to rcx to conform to C ABI)
76 * r8   arg4
77 * r9   arg5
78 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
79 *
80 * Only called from user space.
81 *
82 * When the user can change pt_regs->foo, always force IRET. That is
83 * because IRET deals with non-canonical addresses better; SYSRET has
84 * trouble with them due to bugs in both AMD and Intel CPUs.
85 */
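
/*
 * Illustrative user-space view of the ABI above (a hedged sketch, not part
 * of the kernel; "msg" and the length are made up for the example): a
 * libc-style wrapper issuing write(2) might do:
 *
 *	movl	$1, %eax		# rax = __NR_write
 *	movl	$1, %edi		# arg0: fd
 *	leaq	msg(%rip), %rsi		# arg1: buf
 *	movl	$14, %edx		# arg2: count
 *	syscall				# clobbers rcx (RIP) and r11 (RFLAGS)
 *	# return value (or -errno) comes back in %rax
 */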
86
87SYM_CODE_START(entry_SYSCALL_64)
88	UNWIND_HINT_EMPTY
89
90	swapgs
91	/* tss.sp2 is scratch space. */
92	movq	%rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
93	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
94	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
95
96SYM_INNER_LABEL(entry_SYSCALL_64_safe_stack, SYM_L_GLOBAL)
97
98	/* Construct struct pt_regs on stack */
99	pushq	$__USER_DS				/* pt_regs->ss */
100	pushq	PER_CPU_VAR(cpu_tss_rw + TSS_sp2)	/* pt_regs->sp */
101	pushq	%r11					/* pt_regs->flags */
102	pushq	$__USER_CS				/* pt_regs->cs */
103	pushq	%rcx					/* pt_regs->ip */
104SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
105	pushq	%rax					/* pt_regs->orig_ax */
106
107	PUSH_AND_CLEAR_REGS rax=$-ENOSYS
108
109	/* IRQs are off. */
110	movq	%rsp, %rdi
111	/* Sign-extend the lower 32 bits, as syscall numbers are treated as int */
112	movslq	%eax, %rsi
113	call	do_syscall_64		/* returns with IRQs disabled */
114
115	/*
116	 * Try to use SYSRET instead of IRET if we're returning to
117	 * a completely clean 64-bit userspace context.  If we're not,
118	 * go to the slow exit path.
119	 * In the Xen PV case we must use iret anyway.
120	 */
121
122	ALTERNATIVE "", "jmp	swapgs_restore_regs_and_return_to_usermode", \
123		X86_FEATURE_XENPV
124
125	movq	RCX(%rsp), %rcx
126	movq	RIP(%rsp), %r11
127
128	cmpq	%rcx, %r11	/* SYSRET requires RCX == RIP */
129	jne	swapgs_restore_regs_and_return_to_usermode
130
131	/*
132	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
133	 * in kernel space.  This essentially lets the user take over
134	 * the kernel, since userspace controls RSP.
135	 *
136	 * If the width of the "canonical tail" ever becomes variable, this will need
137	 * to be updated to remain correct on both old and new CPUs.
138	 *
139	 * Change top bits to match most significant bit (47th or 56th bit
140	 * depending on paging mode) in the address.
141	 */
142#ifdef CONFIG_X86_5LEVEL
143	ALTERNATIVE "shl $(64 - 48), %rcx; sar $(64 - 48), %rcx", \
144		"shl $(64 - 57), %rcx; sar $(64 - 57), %rcx", X86_FEATURE_LA57
145#else
146	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
147	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
148#endif
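
	/*
	 * Worked example (illustrative, assuming 4-level paging, i.e. a
	 * 48-bit canonical width):
	 *
	 *   canonical:     0x00007fffffffe000 -> shl/sar $16 -> 0x00007fffffffe000
	 *   non-canonical: 0x0000800000000000 -> shl/sar $16 -> 0xffff800000000000
	 *
	 * In the second case %rcx changes, so the compare below sends us down
	 * the IRET path instead of SYSRET.
	 */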
149
150	/* If this changed %rcx, it was not canonical */
151	cmpq	%rcx, %r11
152	jne	swapgs_restore_regs_and_return_to_usermode
153
154	cmpq	$__USER_CS, CS(%rsp)		/* CS must match SYSRET */
155	jne	swapgs_restore_regs_and_return_to_usermode
156
157	movq	R11(%rsp), %r11
158	cmpq	%r11, EFLAGS(%rsp)		/* R11 == RFLAGS */
159	jne	swapgs_restore_regs_and_return_to_usermode
160
161	/*
162	 * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
163	 * restore RF properly. If the slowpath sets it for whatever reason, we
164	 * need to restore it correctly.
165	 *
166	 * SYSRET can restore TF, but unlike IRET, restoring TF results in a
167	 * trap from userspace immediately after SYSRET.  This would cause an
168	 * infinite loop whenever #DB happens with register state that satisfies
169	 * the opportunistic SYSRET conditions.  For example, single-stepping
170	 * this user code:
171	 *
172	 *           movq	$stuck_here, %rcx
173	 *           pushfq
174	 *           popq %r11
175	 *   stuck_here:
176	 *
177	 * would never get past 'stuck_here'.
178	 */
179	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
180	jnz	swapgs_restore_regs_and_return_to_usermode
181
182	/* nothing to check for RSP */
183
184	cmpq	$__USER_DS, SS(%rsp)		/* SS must match SYSRET */
185	jne	swapgs_restore_regs_and_return_to_usermode
186
187	/*
188	 * We win! This label is here just for ease of understanding
189	 * perf profiles. Nothing jumps here.
190	 */
191syscall_return_via_sysret:
192	/* rcx and r11 are already restored (see code above) */
193	POP_REGS pop_rdi=0 skip_r11rcx=1
194
195	/*
196	 * Now all regs are restored except RSP and RDI.
197	 * Save old stack pointer and switch to trampoline stack.
198	 */
199	movq	%rsp, %rdi
200	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
201	UNWIND_HINT_EMPTY
202
203	pushq	RSP-RDI(%rdi)	/* RSP */
204	pushq	(%rdi)		/* RDI */
205
206	/*
207	 * We are on the trampoline stack.  All regs except RDI are live.
208	 * We can do future final exit work right here.
209	 */
210	STACKLEAK_ERASE_NOCLOBBER
211
212	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
213
214	popq	%rdi
215	popq	%rsp
216	swapgs
217	sysretq
218SYM_CODE_END(entry_SYSCALL_64)
219
220/*
221 * %rdi: prev task
222 * %rsi: next task
223 */
224.pushsection .text, "ax"
225SYM_FUNC_START(__switch_to_asm)
226	/*
227	 * Save callee-saved registers
228	 * This must match the order in inactive_task_frame
229	 */
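	/*
	 * Hedged sketch of the resulting layout: the stack grows downwards,
	 * so the pushes below leave the registers at ascending addresses in
	 * the order r15, r14, r13, r12, rbx, rbp, followed by the return
	 * address -- i.e. the field order of struct inactive_task_frame
	 * (see <asm/switch_to.h>).
	 */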
230	pushq	%rbp
231	pushq	%rbx
232	pushq	%r12
233	pushq	%r13
234	pushq	%r14
235	pushq	%r15
236
237	/* switch stack */
238	movq	%rsp, TASK_threadsp(%rdi)
239	movq	TASK_threadsp(%rsi), %rsp
240
241#ifdef CONFIG_STACKPROTECTOR
242	movq	TASK_stack_canary(%rsi), %rbx
243	movq	%rbx, PER_CPU_VAR(fixed_percpu_data) + stack_canary_offset
244#endif
245
246#ifdef CONFIG_RETPOLINE
247	/*
248	 * When switching from a shallower to a deeper call stack
249	 * the RSB may either underflow or use entries populated
250	 * with userspace addresses. On CPUs where those concerns
251	 * exist, overwrite the RSB with entries which capture
252	 * speculative execution to prevent attack.
253	 */
254	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
255#endif
256
257	/* restore callee-saved registers */
258	popq	%r15
259	popq	%r14
260	popq	%r13
261	popq	%r12
262	popq	%rbx
263	popq	%rbp
264
265	jmp	__switch_to
266SYM_FUNC_END(__switch_to_asm)
267.popsection
268
269/*
270 * A newly forked process directly context switches into this address.
271 *
272 * rax: prev task we switched from
273 * rbx: kernel thread func (NULL for user thread)
274 * r12: kernel thread arg
275 */
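/*
 * Hedged note: these registers are seeded at fork time. copy_thread()
 * points the child's inactive_task_frame.ret_addr at ret_from_fork, and
 * for kernel threads kthread_frame_init() (<asm/switch_to.h>) stashes the
 * thread function in bx and its argument in r12; %rax holds the 'prev'
 * task returned by __switch_to().
 */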
276.pushsection .text, "ax"
277SYM_CODE_START(ret_from_fork)
278	UNWIND_HINT_EMPTY
279	movq	%rax, %rdi
280	call	schedule_tail			/* rdi: 'prev' task parameter */
281
282	testq	%rbx, %rbx			/* from kernel_thread? */
283	jnz	1f				/* kernel threads are uncommon */
284
2852:
286	UNWIND_HINT_REGS
287	movq	%rsp, %rdi
288	call	syscall_exit_to_user_mode	/* returns with IRQs disabled */
289	jmp	swapgs_restore_regs_and_return_to_usermode
290
2911:
292	/* kernel thread */
293	UNWIND_HINT_EMPTY
294	movq	%r12, %rdi
295	CALL_NOSPEC rbx
296	/*
297	 * A kernel thread is allowed to return here after successfully
298	 * calling kernel_execve().  Exit to userspace to complete the execve()
299	 * syscall.
300	 */
301	movq	$0, RAX(%rsp)
302	jmp	2b
303SYM_CODE_END(ret_from_fork)
304.popsection
305
306.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
307#ifdef CONFIG_DEBUG_ENTRY
308	pushq %rax
309	SAVE_FLAGS
310	testl $X86_EFLAGS_IF, %eax
311	jz .Lokay_\@
312	ud2
313.Lokay_\@:
314	popq %rax
315#endif
316.endm
317
318/**
319 * idtentry_body - Macro to emit code calling the C function
320 * @cfunc:		C function to be called
321 * @has_error_code:	Hardware pushed error code on stack
322 */
323.macro idtentry_body cfunc has_error_code:req
324
325	call	error_entry
326	UNWIND_HINT_REGS
327
328	movq	%rsp, %rdi			/* pt_regs pointer into 1st argument*/
329
330	.if \has_error_code == 1
331		movq	ORIG_RAX(%rsp), %rsi	/* get error code into 2nd argument*/
332		movq	$-1, ORIG_RAX(%rsp)	/* no syscall to restart */
333	.endif
334
335	call	\cfunc
336
337	jmp	error_return
338.endm
339
340/**
341 * idtentry - Macro to generate entry stubs for simple IDT entries
342 * @vector:		Vector number
343 * @asmsym:		ASM symbol for the entry point
344 * @cfunc:		C function to be called
345 * @has_error_code:	Hardware pushed error code on stack
346 *
347 * The macro emits code to set up the kernel context for straightforward
348 * and simple IDT entries. No IST stack, no paranoid entry checks.
349 */
350.macro idtentry vector asmsym cfunc has_error_code:req
351SYM_CODE_START(\asmsym)
352	UNWIND_HINT_IRET_REGS offset=\has_error_code*8
353	ASM_CLAC
354
355	.if \has_error_code == 0
356		pushq	$-1			/* ORIG_RAX: no syscall to restart */
357	.endif
358
359	.if \vector == X86_TRAP_BP
360		/*
361		 * If coming from kernel space, create a 6-word gap to allow the
362		 * int3 handler to emulate a call instruction.
363		 */
364		testb	$3, CS-ORIG_RAX(%rsp)
365		jnz	.Lfrom_usermode_no_gap_\@
366		.rept	6
367		pushq	5*8(%rsp)
368		.endr
369		UNWIND_HINT_IRET_REGS offset=8
370.Lfrom_usermode_no_gap_\@:
371	.endif
372
373	idtentry_body \cfunc \has_error_code
374
375_ASM_NOKPROBE(\asmsym)
376SYM_CODE_END(\asmsym)
377.endm
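
/*
 * Illustrative instantiations (a sketch; the real invocations are generated
 * from the DECLARE_IDTENTRY*() macros in <asm/idtentry.h>):
 *
 *	idtentry X86_TRAP_DE  asm_exc_divide_error       exc_divide_error       has_error_code=0
 *	idtentry X86_TRAP_GP  asm_exc_general_protection exc_general_protection has_error_code=1
 */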
378
379/*
380 * Interrupt entry/exit.
381 *
382 * The interrupt stubs push (vector) onto the stack, which is the error_code
383 * position of idtentry exceptions, and jump to one of the two idtentry points
384 * (common/spurious).
385 *
386 * common_interrupt is a hotpath, align it to a cache line
387 */
388.macro idtentry_irq vector cfunc
389	.p2align CONFIG_X86_L1_CACHE_SHIFT
390	idtentry \vector asm_\cfunc \cfunc has_error_code=1
391.endm
392
393/*
394 * System vectors which invoke their handlers directly and are not
395 * going through the regular common device interrupt handling code.
396 */
397.macro idtentry_sysvec vector cfunc
398	idtentry \vector asm_\cfunc \cfunc has_error_code=0
399.endm
400
401/**
402 * idtentry_mce_db - Macro to generate entry stubs for #MC and #DB
403 * @vector:		Vector number
404 * @asmsym:		ASM symbol for the entry point
405 * @cfunc:		C function to be called
406 *
407 * The macro emits code to set up the kernel context for #MC and #DB
408 *
409 * If the entry comes from user space it uses the normal entry path
410 * including the return to user space work and preemption checks on
411 * exit.
412 *
413 * If it hits in kernel mode then it needs to go through the paranoid
414 * entry as the exception can hit any random state. No preemption
415 * check on exit to keep the paranoid path simple.
416 */
417.macro idtentry_mce_db vector asmsym cfunc
418SYM_CODE_START(\asmsym)
419	UNWIND_HINT_IRET_REGS
420	ASM_CLAC
421
422	pushq	$-1			/* ORIG_RAX: no syscall to restart */
423
424	/*
425	 * If the entry is from userspace, switch stacks and treat it as
426	 * a normal entry.
427	 */
428	testb	$3, CS-ORIG_RAX(%rsp)
429	jnz	.Lfrom_usermode_switch_stack_\@
430
431	/* paranoid_entry returns GS information for paranoid_exit in EBX. */
432	call	paranoid_entry
433
434	UNWIND_HINT_REGS
435
436	movq	%rsp, %rdi		/* pt_regs pointer */
437
438	call	\cfunc
439
440	jmp	paranoid_exit
441
442	/* Switch to the regular task stack and use the noist entry point */
443.Lfrom_usermode_switch_stack_\@:
444	idtentry_body noist_\cfunc, has_error_code=0
445
446_ASM_NOKPROBE(\asmsym)
447SYM_CODE_END(\asmsym)
448.endm
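
/*
 * Illustrative instantiations (a sketch; in practice generated from
 * DECLARE_IDTENTRY_MCE()/DECLARE_IDTENTRY_DEBUG() in <asm/idtentry.h>):
 *
 *	idtentry_mce_db X86_TRAP_MC asm_exc_machine_check exc_machine_check
 *	idtentry_mce_db X86_TRAP_DB asm_exc_debug         exc_debug
 */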
449
450#ifdef CONFIG_AMD_MEM_ENCRYPT
451/**
452 * idtentry_vc - Macro to generate entry stub for #VC
453 * @vector:		Vector number
454 * @asmsym:		ASM symbol for the entry point
455 * @cfunc:		C function to be called
456 *
457 * The macro emits code to set up the kernel context for #VC. The #VC handler
458 * runs on an IST stack and needs to be able to cause nested #VC exceptions.
459 *
460 * To make this work the #VC entry code tries its best to pretend it doesn't use
461 * an IST stack by switching to the task stack if coming from user-space (which
462 * includes early SYSCALL entry path) or back to the stack in the IRET frame if
463 * entered from kernel-mode.
464 *
465 * If entered from kernel-mode the return stack is validated first, and if it is
466 * not safe to use (e.g. because it points to the entry stack) the #VC handler
467 * will switch to a fall-back stack (VC2) and call a special handler function.
468 *
469 * The macro is only used for one vector, but it is planned to be extended in
470 * the future for the #HV exception.
471 */
472.macro idtentry_vc vector asmsym cfunc
473SYM_CODE_START(\asmsym)
474	UNWIND_HINT_IRET_REGS
475	ASM_CLAC
476
477	/*
478	 * If the entry is from userspace, switch stacks and treat it as
479	 * a normal entry.
480	 */
481	testb	$3, CS-ORIG_RAX(%rsp)
482	jnz	.Lfrom_usermode_switch_stack_\@
483
484	/*
485	 * paranoid_entry returns SWAPGS flag for paranoid_exit in EBX.
486	 * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS
487	 */
488	call	paranoid_entry
489
490	UNWIND_HINT_REGS
491
492	/*
493	 * Switch off the IST stack to make it free for nested exceptions. The
494	 * vc_switch_off_ist() function will switch back to the interrupted
495	 * stack if it is safe to do so. If not it switches to the VC fall-back
496	 * stack.
497	 */
498	movq	%rsp, %rdi		/* pt_regs pointer */
499	call	vc_switch_off_ist
500	movq	%rax, %rsp		/* Switch to new stack */
501
502	UNWIND_HINT_REGS
503
504	/* Update pt_regs */
505	movq	ORIG_RAX(%rsp), %rsi	/* get error code into 2nd argument*/
506	movq	$-1, ORIG_RAX(%rsp)	/* no syscall to restart */
507
508	movq	%rsp, %rdi		/* pt_regs pointer */
509
510	call	kernel_\cfunc
511
512	/*
513	 * No need to switch back to the IST stack. The current stack is either
514	 * identical to the stack in the IRET frame or the VC fall-back stack,
515	 * so it is definitely mapped even with PTI enabled.
516	 */
517	jmp	paranoid_exit
518
519	/* Switch to the regular task stack */
520.Lfrom_usermode_switch_stack_\@:
521	idtentry_body user_\cfunc, has_error_code=1
522
523_ASM_NOKPROBE(\asmsym)
524SYM_CODE_END(\asmsym)
525.endm
526#endif
527
528/*
529 * Double fault entry. Straight paranoid. No checks on which context
530 * this comes from, because for the espfix-induced #DF those would do the
531 * wrong thing.
532 */
533.macro idtentry_df vector asmsym cfunc
534SYM_CODE_START(\asmsym)
535	UNWIND_HINT_IRET_REGS offset=8
536	ASM_CLAC
537
538	/* paranoid_entry returns GS information for paranoid_exit in EBX. */
539	call	paranoid_entry
540	UNWIND_HINT_REGS
541
542	movq	%rsp, %rdi		/* pt_regs pointer into first argument */
543	movq	ORIG_RAX(%rsp), %rsi	/* get error code into 2nd argument*/
544	movq	$-1, ORIG_RAX(%rsp)	/* no syscall to restart */
545	call	\cfunc
546
547	jmp	paranoid_exit
548
549_ASM_NOKPROBE(\asmsym)
550SYM_CODE_END(\asmsym)
551.endm
552
553/*
554 * Include the defines which emit the idt entries which are shared
555 * between 32 and 64 bit and emit the __irqentry_text_* markers
556 * so the stacktrace boundary checks work.
557 */
558	.align 16
559	.globl __irqentry_text_start
560__irqentry_text_start:
561
562#include <asm/idtentry.h>
563
564	.align 16
565	.globl __irqentry_text_end
566__irqentry_text_end:
567
568SYM_CODE_START_LOCAL(common_interrupt_return)
569SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
570#ifdef CONFIG_DEBUG_ENTRY
571	/* Assert that pt_regs indicates user mode. */
572	testb	$3, CS(%rsp)
573	jnz	1f
574	ud2
5751:
576#endif
577#ifdef CONFIG_XEN_PV
578	ALTERNATIVE "", "jmp xenpv_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
579#endif
580
581	POP_REGS pop_rdi=0
582
583	/*
584	 * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
585	 * Save old stack pointer and switch to trampoline stack.
586	 */
587	movq	%rsp, %rdi
588	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
589	UNWIND_HINT_EMPTY
590
591	/* Copy the IRET frame to the trampoline stack. */
592	pushq	6*8(%rdi)	/* SS */
593	pushq	5*8(%rdi)	/* RSP */
594	pushq	4*8(%rdi)	/* EFLAGS */
595	pushq	3*8(%rdi)	/* CS */
596	pushq	2*8(%rdi)	/* RIP */
597
598	/* Push user RDI on the trampoline stack. */
599	pushq	(%rdi)
600
601	/*
602	 * We are on the trampoline stack.  All regs except RDI are live.
603	 * We can do future final exit work right here.
604	 */
605	STACKLEAK_ERASE_NOCLOBBER
606
607	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
608
609	/* Restore RDI. */
610	popq	%rdi
611	SWAPGS
612	INTERRUPT_RETURN
613
614
615SYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL)
616#ifdef CONFIG_DEBUG_ENTRY
617	/* Assert that pt_regs indicates kernel mode. */
618	testb	$3, CS(%rsp)
619	jz	1f
620	ud2
6211:
622#endif
623	POP_REGS
624	addq	$8, %rsp	/* skip regs->orig_ax */
625	/*
626	 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
627	 * when returning from IPI handler.
628	 */
629	INTERRUPT_RETURN
630
631SYM_INNER_LABEL_ALIGN(native_iret, SYM_L_GLOBAL)
632	UNWIND_HINT_IRET_REGS
633	/*
634	 * Are we returning to a stack segment from the LDT?  Note: in
635	 * 64-bit mode SS:RSP on the exception stack is always valid.
636	 */
637#ifdef CONFIG_X86_ESPFIX64
638	testb	$4, (SS-RIP)(%rsp)
639	jnz	native_irq_return_ldt
640#endif
641
642SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL)
643	/*
644	 * This may fault.  Non-paranoid faults on return to userspace are
645	 * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
646	 * Double-faults due to espfix64 are handled in exc_double_fault.
647	 * Other faults here are fatal.
648	 */
649	iretq
650
651#ifdef CONFIG_X86_ESPFIX64
652native_irq_return_ldt:
653	/*
654	 * We are running with user GSBASE.  All GPRs contain their user
655	 * values.  We have a percpu ESPFIX stack that is eight slots
656	 * long (see ESPFIX_STACK_SIZE).  espfix_waddr points to the bottom
657	 * of the ESPFIX stack.
658	 *
659	 * We clobber RAX and RDI in this code.  We stash RDI on the
660	 * normal stack and RAX on the ESPFIX stack.
661	 *
662	 * The ESPFIX stack layout we set up looks like this:
663	 *
664	 * --- top of ESPFIX stack ---
665	 * SS
666	 * RSP
667	 * RFLAGS
668	 * CS
669	 * RIP  <-- RSP points here when we're done
670	 * RAX  <-- espfix_waddr points here
671	 * --- bottom of ESPFIX stack ---
672	 */
673
674	pushq	%rdi				/* Stash user RDI */
675	swapgs					/* to kernel GS */
676	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi	/* to kernel CR3 */
677
678	movq	PER_CPU_VAR(espfix_waddr), %rdi
679	movq	%rax, (0*8)(%rdi)		/* user RAX */
680	movq	(1*8)(%rsp), %rax		/* user RIP */
681	movq	%rax, (1*8)(%rdi)
682	movq	(2*8)(%rsp), %rax		/* user CS */
683	movq	%rax, (2*8)(%rdi)
684	movq	(3*8)(%rsp), %rax		/* user RFLAGS */
685	movq	%rax, (3*8)(%rdi)
686	movq	(5*8)(%rsp), %rax		/* user SS */
687	movq	%rax, (5*8)(%rdi)
688	movq	(4*8)(%rsp), %rax		/* user RSP */
689	movq	%rax, (4*8)(%rdi)
690	/* Now RAX == RSP. */
691
692	andl	$0xffff0000, %eax		/* RAX = (RSP & 0xffff0000) */
693
694	/*
695	 * espfix_stack[31:16] == 0.  The page tables are set up such that
696	 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
697	 * espfix_waddr for any X.  That is, there are 65536 RO aliases of
698	 * the same page.  Set up RSP so that RSP[31:16] contains the
699	 * respective 16 bits of the /userspace/ RSP and RSP nonetheless
700	 * still points to an RO alias of the ESPFIX stack.
701	 */
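	/*
	 * Worked example (illustrative): if the userspace RSP was
	 * 0x00007ffd12345678, the andl above leaves RAX = 0x12340000 (the
	 * 32-bit operation also clears bits 63:32), and the orq below
	 * yields espfix_stack | 0x12340000 -- an RO alias whose bits 31:16
	 * match the userspace RSP.
	 */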
702	orq	PER_CPU_VAR(espfix_stack), %rax
703
704	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
705	swapgs					/* to user GS */
706	popq	%rdi				/* Restore user RDI */
707
708	movq	%rax, %rsp
709	UNWIND_HINT_IRET_REGS offset=8
710
711	/*
712	 * At this point, we cannot write to the stack any more, but we can
713	 * still read.
714	 */
715	popq	%rax				/* Restore user RAX */
716
717	/*
718	 * RSP now points to an ordinary IRET frame, except that the page
719	 * is read-only and RSP[31:16] are preloaded with the userspace
720	 * values.  We can now IRET back to userspace.
721	 */
722	jmp	native_irq_return_iret
723#endif
724SYM_CODE_END(common_interrupt_return)
725_ASM_NOKPROBE(common_interrupt_return)
726
727/*
728 * Reload gs selector with exception handling
729 * edi:  new selector
730 *
731 * Is in entry.text as it shouldn't be instrumented.
732 */
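/*
 * Usage note (hedged): the C-side wrapper native_load_gs_index() in
 * arch/x86/kernel/process_64.c calls this with interrupts disabled around
 * the call (local_irq_save()/local_irq_restore()).
 */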
733SYM_FUNC_START(asm_load_gs_index)
734	FRAME_BEGIN
735	swapgs
736.Lgs_change:
737	movl	%edi, %gs
7382:	ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
739	swapgs
740	FRAME_END
741	RET
742
743	/* running with kernelgs */
744.Lbad_gs:
745	swapgs					/* switch back to user gs */
746.macro ZAP_GS
747	/* This can't be a string because the preprocessor needs to see it. */
748	movl $__USER_DS, %eax
749	movl %eax, %gs
750.endm
751	ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG
752	xorl	%eax, %eax
753	movl	%eax, %gs
754	jmp	2b
755
756	_ASM_EXTABLE(.Lgs_change, .Lbad_gs)
757
758SYM_FUNC_END(asm_load_gs_index)
759EXPORT_SYMBOL(asm_load_gs_index)
760
761#ifdef CONFIG_XEN_PV
762/*
763 * A note on the "critical region" in our callback handler.
764 * We want to avoid stacking callback handlers due to events occurring
765 * during handling of the last event. To do this, we keep events disabled
766 * until we've done all processing. HOWEVER, we must enable events before
767 * popping the stack frame (can't be done atomically) and so it would still
768 * be possible to get enough handler activations to overflow the stack.
769 * Although unlikely, bugs of that kind are hard to track down, so we'd
770 * like to avoid the possibility.
771 * So, on entry to the handler we detect whether we interrupted an
772 * existing activation in its critical region -- if so, we pop the current
773 * activation and restart the handler using the previous one.
774 *
775 * C calling convention: exc_xen_hypervisor_callback(struct pt_regs *)
776 */
777SYM_CODE_START_LOCAL(exc_xen_hypervisor_callback)
778
779/*
780 * Since we don't modify %rdi, xen_pv_evtchn_do_upcall(struct pt_regs *) will
781 * see the correct pointer to the pt_regs
782 */
783	UNWIND_HINT_FUNC
784	movq	%rdi, %rsp			/* we don't return, adjust the stack frame */
785	UNWIND_HINT_REGS
786
787	call	xen_pv_evtchn_do_upcall
788
789	jmp	error_return
790SYM_CODE_END(exc_xen_hypervisor_callback)
791
792/*
793 * Hypervisor uses this for application faults while it executes.
794 * We get here for two reasons:
795 *  1. Fault while reloading DS, ES, FS or GS
796 *  2. Fault while executing IRET
797 * Category 1 we do not need to fix up as Xen has already reloaded all segment
798 * registers that could be reloaded and zeroed the others.
799 * Category 2 we fix up by killing the current process. We cannot use the
800 * normal Linux return path in this case because if we use the IRET hypercall
801 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
802 * We distinguish between categories by comparing each saved segment register
803 * with its current contents: any discrepancy means we are in category 1.
804 */
805SYM_CODE_START(xen_failsafe_callback)
806	UNWIND_HINT_EMPTY
807	movl	%ds, %ecx
808	cmpw	%cx, 0x10(%rsp)
809	jne	1f
810	movl	%es, %ecx
811	cmpw	%cx, 0x18(%rsp)
812	jne	1f
813	movl	%fs, %ecx
814	cmpw	%cx, 0x20(%rsp)
815	jne	1f
816	movl	%gs, %ecx
817	cmpw	%cx, 0x28(%rsp)
818	jne	1f
819	/* All segments match their saved values => Category 2 (Bad IRET). */
820	movq	(%rsp), %rcx
821	movq	8(%rsp), %r11
822	addq	$0x30, %rsp
823	pushq	$0				/* RIP */
824	UNWIND_HINT_IRET_REGS offset=8
825	jmp	asm_exc_general_protection
8261:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
827	movq	(%rsp), %rcx
828	movq	8(%rsp), %r11
829	addq	$0x30, %rsp
830	UNWIND_HINT_IRET_REGS
831	pushq	$-1 /* orig_ax = -1 => not a system call */
832	PUSH_AND_CLEAR_REGS
833	ENCODE_FRAME_POINTER
834	jmp	error_return
835SYM_CODE_END(xen_failsafe_callback)
836#endif /* CONFIG_XEN_PV */
837
838/*
839 * Save all registers in pt_regs. Return GSBASE related information
840 * in EBX depending on the availability of the FSGSBASE instructions:
841 *
842 * FSGSBASE	R/EBX
843 *     N        0 -> SWAPGS on exit
844 *              1 -> no SWAPGS on exit
845 *
846 *     Y        GSBASE value at entry, must be restored in paranoid_exit
847 */
848SYM_CODE_START_LOCAL(paranoid_entry)
849	UNWIND_HINT_FUNC
850	cld
851	PUSH_AND_CLEAR_REGS save_ret=1
852	ENCODE_FRAME_POINTER 8
853
854	/*
855	 * Always stash CR3 in %r14.  This value will be restored,
856	 * verbatim, at exit.  Needed if paranoid_entry interrupted
857	 * another entry that already switched to the user CR3 value
858	 * but has not yet returned to userspace.
859	 *
860	 * This is also why CS (stashed in the "iret frame" by the
861	 * hardware at entry) can not be used: this may be a return
862	 * to kernel code, but with a user CR3 value.
863	 *
864	 * Switching CR3 does not depend on kernel GSBASE so it can
865	 * be done before switching to the kernel GSBASE. This is
866	 * required for FSGSBASE because the kernel GSBASE has to
867	 * be retrieved from a kernel internal table.
868	 */
869	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
870
871	/*
872	 * Handling GSBASE depends on the availability of FSGSBASE.
873	 *
874	 * Without FSGSBASE the kernel enforces that negative GSBASE
875	 * values indicate kernel GSBASE. With FSGSBASE no assumptions
876	 * can be made about the GSBASE value when entering from user
877	 * space.
878	 */
879	ALTERNATIVE "jmp .Lparanoid_entry_checkgs", "", X86_FEATURE_FSGSBASE
880
881	/*
882	 * Read the current GSBASE and store it in %rbx unconditionally,
883	 * retrieve and set the current CPU's kernel GSBASE. The stored value
884	 * has to be restored in paranoid_exit unconditionally.
885	 *
886	 * The unconditional write to GS base below ensures that no subsequent
887	 * loads based on a mispredicted GS base can happen, therefore no LFENCE
888	 * is needed here.
889	 */
890	SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
891	RET
892
893.Lparanoid_entry_checkgs:
894	/* EBX = 1 -> kernel GSBASE active, no restore required */
895	movl	$1, %ebx
896
897	/*
898	 * The kernel-enforced convention is a negative GSBASE indicates
899	 * a kernel value. No SWAPGS needed on entry and exit.
900	 */
901	movl	$MSR_GS_BASE, %ecx
902	rdmsr
903	testl	%edx, %edx
904	js	.Lparanoid_kernel_gsbase
905
906	/* EBX = 0 -> SWAPGS required on exit */
907	xorl	%ebx, %ebx
908	swapgs
909.Lparanoid_kernel_gsbase:
910
911	FENCE_SWAPGS_KERNEL_ENTRY
912	RET
913SYM_CODE_END(paranoid_entry)
914
915/*
916 * "Paranoid" exit path from exception stack.  This is invoked
917 * only on return from non-NMI IST interrupts that came
918 * from kernel space.
919 *
920 * We may be returning to very strange contexts (e.g. very early
921 * in syscall entry), so checking for preemption here would
922 * be complicated.  Fortunately, there's no good reason to try
923 * to handle preemption here.
924 *
925 * R/EBX contains the GSBASE related information depending on the
926 * availability of the FSGSBASE instructions:
927 *
928 * FSGSBASE	R/EBX
929 *     N        0 -> SWAPGS on exit
930 *              1 -> no SWAPGS on exit
931 *
932 *     Y        User space GSBASE, must be restored unconditionally
933 */
934SYM_CODE_START_LOCAL(paranoid_exit)
935	UNWIND_HINT_REGS
936	/*
937	 * The order of operations is important. RESTORE_CR3 requires
938	 * kernel GSBASE.
939	 * NB to anyone tempted to optimize this code: this code does
940	 * NB to anyone to try to optimize this code: this code does
941	 * not execute at all for exceptions from user mode. Those
942	 * exceptions go through error_exit instead.
943	 */
944	RESTORE_CR3	scratch_reg=%rax save_reg=%r14
945
946	/* Handle the three GSBASE cases */
947	ALTERNATIVE "jmp .Lparanoid_exit_checkgs", "", X86_FEATURE_FSGSBASE
948
949	/* With FSGSBASE enabled, unconditionally restore GSBASE */
950	wrgsbase	%rbx
951	jmp		restore_regs_and_return_to_kernel
952
953.Lparanoid_exit_checkgs:
954	/* On non-FSGSBASE systems, conditionally do SWAPGS */
955	testl		%ebx, %ebx
956	jnz		restore_regs_and_return_to_kernel
957
958	/* We are returning to a context with user GSBASE */
959	swapgs
960	jmp		restore_regs_and_return_to_kernel
961SYM_CODE_END(paranoid_exit)
962
963/*
964 * Save all registers in pt_regs, and switch GS if needed.
965 */
966SYM_CODE_START_LOCAL(error_entry)
967	UNWIND_HINT_FUNC
968	cld
969	PUSH_AND_CLEAR_REGS save_ret=1
970	ENCODE_FRAME_POINTER 8
971	testb	$3, CS+8(%rsp)
972	jz	.Lerror_kernelspace
973
974	/*
975	 * We entered from user mode or we're pretending to have entered
976	 * from user mode due to an IRET fault.
977	 */
978	SWAPGS
979	FENCE_SWAPGS_USER_ENTRY
980	/* We have user CR3.  Change to kernel CR3. */
981	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
982
983.Lerror_entry_from_usermode_after_swapgs:
984	/* Put us onto the real thread stack. */
985	popq	%r12				/* save return addr in %r12 */
986	movq	%rsp, %rdi			/* arg0 = pt_regs pointer */
987	call	sync_regs
988	movq	%rax, %rsp			/* switch stack */
989	ENCODE_FRAME_POINTER
990	pushq	%r12
991	RET
992
993	/*
994	 * There are two places in the kernel that can potentially fault with
995	 * usergs. Handle them here.  B stepping K8s sometimes report a
996	 * truncated RIP for IRET exceptions returning to compat mode. Check
997	 * for these here too.
998	 */
999.Lerror_kernelspace:
1000	leaq	native_irq_return_iret(%rip), %rcx
1001	cmpq	%rcx, RIP+8(%rsp)
1002	je	.Lerror_bad_iret
1003	movl	%ecx, %eax			/* zero extend */
1004	cmpq	%rax, RIP+8(%rsp)
1005	je	.Lbstep_iret
1006	cmpq	$.Lgs_change, RIP+8(%rsp)
1007	jne	.Lerror_entry_done_lfence
1008
1009	/*
1010	 * hack: .Lgs_change can fail with user gsbase.  If this happens, fix up
1011	 * gsbase and proceed.  We'll fix up the exception and land in
1012	 * .Lgs_change's error handler with kernel gsbase.
1013	 */
1014	SWAPGS
1015
1016	/*
1017	 * Issue an LFENCE to prevent GS speculation, regardless of whether it is a
1018	 * kernel or user gsbase.
1019	 */
1020.Lerror_entry_done_lfence:
1021	FENCE_SWAPGS_KERNEL_ENTRY
1022	RET
1023
1024.Lbstep_iret:
1025	/* Fix truncated RIP */
1026	movq	%rcx, RIP+8(%rsp)
1027	/* fall through */
1028
1029.Lerror_bad_iret:
1030	/*
1031	 * We came from an IRET to user mode, so we have user
1032	 * gsbase and CR3.  Switch to kernel gsbase and CR3:
1033	 */
1034	SWAPGS
1035	FENCE_SWAPGS_USER_ENTRY
1036	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
1037
1038	/*
1039	 * Pretend that the exception came from user mode: set up pt_regs
1040	 * as if we faulted immediately after IRET.
1041	 */
1042	mov	%rsp, %rdi
1043	call	fixup_bad_iret
1044	mov	%rax, %rsp
1045	jmp	.Lerror_entry_from_usermode_after_swapgs
1046SYM_CODE_END(error_entry)
1047
1048SYM_CODE_START_LOCAL(error_return)
1049	UNWIND_HINT_REGS
1050	DEBUG_ENTRY_ASSERT_IRQS_OFF
1051	testb	$3, CS(%rsp)
1052	jz	restore_regs_and_return_to_kernel
1053	jmp	swapgs_restore_regs_and_return_to_usermode
1054SYM_CODE_END(error_return)
1055
1056/*
1057 * Runs on exception stack.  Xen PV does not go through this path at all,
1058 * so we can use real assembly here.
1059 *
1060 * Registers:
1061 *	%r14: Used to save/restore the CR3 of the interrupted context
1062 *	      when PAGE_TABLE_ISOLATION is in use.  Do not clobber.
1063 */
1064SYM_CODE_START(asm_exc_nmi)
1065	UNWIND_HINT_IRET_REGS
1066
1067	/*
1068	 * We allow breakpoints in NMIs. If a breakpoint occurs, then
1069	 * the iretq it performs will take us out of NMI context.
1070	 * This means that we can have nested NMIs where the next
1071	 * NMI is using the top of the stack of the previous NMI. We
1072	 * can't let it execute because the nested NMI will corrupt the
1073	 * stack of the previous NMI. NMI handlers are not re-entrant
1074	 * anyway.
1075	 *
1076	 * To handle this case we do the following:
1077	 *  Check a special location on the stack that contains
1078	 *  a variable that is set when NMIs are executing.
1079	 *  The interrupted task's stack is also checked to see if it
1080	 *  is an NMI stack.
1081	 *  If the variable is not set and the stack is not the NMI
1082	 *  stack then:
1083	 *    o Set the special variable on the stack
1084	 *    o Copy the interrupt frame into an "outermost" location on the
1085	 *      stack
1086	 *    o Copy the interrupt frame into an "iret" location on the stack
1087	 *    o Continue processing the NMI
1088	 *  If the variable is set or the previous stack is the NMI stack:
1089	 *    o Modify the "iret" location to jump to the repeat_nmi
1090	 *    o return back to the first NMI
1091	 *
1092	 * Now on exit of the first NMI, we first clear the stack variable
1093	 * The NMI stack will tell any nested NMIs at that point that it is
1094	 * nested. Then we pop the stack normally with iret, and if there was
1095	 * a nested NMI that updated the copy interrupt stack frame, a
1096	 * jump will be made to the repeat_nmi code that will handle the second
1097	 * NMI.
1098	 *
1099	 * However, espfix prevents us from directly returning to userspace
1100	 * with a single IRET instruction.  Similarly, IRET to user mode
1101	 * can fault.  We therefore handle NMIs from user space like
1102	 * other IST entries.
1103	 */
1104
1105	ASM_CLAC
1106
1107	/* Use %rdx as our temp variable throughout */
1108	pushq	%rdx
1109
1110	testb	$3, CS-RIP+8(%rsp)
1111	jz	.Lnmi_from_kernel
1112
1113	/*
1114	 * NMI from user mode.  We need to run on the thread stack, but we
1115	 * can't go through the normal entry paths: NMIs are masked, and
1116	 * we don't want to enable interrupts, because then we'll end
1117	 * up in an awkward situation in which IRQs are on but NMIs
1118	 * are off.
1119	 *
1120	 * We also must not push anything to the stack before switching
1121	 * stacks lest we corrupt the "NMI executing" variable.
1122	 */
1123
1124	swapgs
1125	cld
1126	FENCE_SWAPGS_USER_ENTRY
1127	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
1128	movq	%rsp, %rdx
1129	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
1130	UNWIND_HINT_IRET_REGS base=%rdx offset=8
1131	pushq	5*8(%rdx)	/* pt_regs->ss */
1132	pushq	4*8(%rdx)	/* pt_regs->rsp */
1133	pushq	3*8(%rdx)	/* pt_regs->flags */
1134	pushq	2*8(%rdx)	/* pt_regs->cs */
1135	pushq	1*8(%rdx)	/* pt_regs->rip */
1136	UNWIND_HINT_IRET_REGS
1137	pushq   $-1		/* pt_regs->orig_ax */
1138	PUSH_AND_CLEAR_REGS rdx=(%rdx)
1139	ENCODE_FRAME_POINTER
1140
1141	/*
1142	 * At this point we no longer need to worry about stack damage
1143	 * due to nesting -- we're on the normal thread stack and we're
1144	 * done with the NMI stack.
1145	 */
1146
1147	movq	%rsp, %rdi
1148	movq	$-1, %rsi
1149	call	exc_nmi
1150
1151	/*
1152	 * Return back to user mode.  We must *not* do the normal exit
1153	 * work, because we don't want to enable interrupts.
1154	 */
1155	jmp	swapgs_restore_regs_and_return_to_usermode
1156
1157.Lnmi_from_kernel:
1158	/*
1159	 * Here's what our stack frame will look like:
1160	 * +---------------------------------------------------------+
1161	 * | original SS                                             |
1162	 * | original Return RSP                                     |
1163	 * | original RFLAGS                                         |
1164	 * | original CS                                             |
1165	 * | original RIP                                            |
1166	 * +---------------------------------------------------------+
1167	 * | temp storage for rdx                                    |
1168	 * +---------------------------------------------------------+
1169	 * | "NMI executing" variable                                |
1170	 * +---------------------------------------------------------+
1171	 * | iret SS          } Copied from "outermost" frame        |
1172	 * | iret Return RSP  } on each loop iteration; overwritten  |
1173	 * | iret RFLAGS      } by a nested NMI to force another     |
1174	 * | iret CS          } iteration if needed.                 |
1175	 * | iret RIP         }                                      |
1176	 * +---------------------------------------------------------+
1177	 * | outermost SS          } initialized in first_nmi;       |
1178	 * | outermost Return RSP  } will not be changed before      |
1179	 * | outermost RFLAGS      } NMI processing is done.         |
1180	 * | outermost CS          } Copied to "iret" frame on each  |
1181	 * | outermost RIP         } iteration.                      |
1182	 * +---------------------------------------------------------+
1183	 * | pt_regs                                                 |
1184	 * +---------------------------------------------------------+
1185	 *
1186	 * The "original" frame is used by hardware.  Before re-enabling
1187	 * NMIs, we need to be done with it, and we need to leave enough
1188	 * space for the asm code here.
1189	 *
1190	 * We return by executing IRET while RSP points to the "iret" frame.
1191	 * That will either return for real or it will loop back into NMI
1192	 * processing.
1193	 *
1194	 * The "outermost" frame is copied to the "iret" frame on each
1195	 * iteration of the loop, so each iteration starts with the "iret"
1196	 * frame pointing to the final return target.
1197	 */
1198
1199	/*
1200	 * Determine whether we're a nested NMI.
1201	 *
1202	 * If we interrupted kernel code between repeat_nmi and
1203	 * end_repeat_nmi, then we are a nested NMI.  We must not
1204	 * modify the "iret" frame because it's being written by
1205	 * the outer NMI.  That's okay; the outer NMI handler is
1206	 * about to call exc_nmi() anyway, so we can just
1207	 * resume the outer NMI.
1208	 */
1209
1210	movq	$repeat_nmi, %rdx
1211	cmpq	8(%rsp), %rdx
1212	ja	1f
1213	movq	$end_repeat_nmi, %rdx
1214	cmpq	8(%rsp), %rdx
1215	ja	nested_nmi_out
12161:
1217
1218	/*
1219	 * Now check "NMI executing".  If it's set, then we're nested.
1220	 * This will not detect if we interrupted an outer NMI just
1221	 * before IRET.
1222	 */
1223	cmpl	$1, -8(%rsp)
1224	je	nested_nmi
1225
1226	/*
1227	 * Now test if the previous stack was an NMI stack.  This covers
1228	 * the case where we interrupt an outer NMI after it clears
1229	 * "NMI executing" but before IRET.  We need to be careful, though:
1230	 * there is one case in which RSP could point to the NMI stack
1231	 * despite there being no NMI active: naughty userspace controls
1232	 * RSP at the very beginning of the SYSCALL targets.  We can
1233	 * pull a fast one on naughty userspace, though: we program
1234	 * SYSCALL to mask DF, so userspace cannot cause DF to be set
1235	 * if it controls the kernel's RSP.  We set DF before we clear
1236	 * "NMI executing".
1237	 */
1238	lea	6*8(%rsp), %rdx
1239	/* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
1240	cmpq	%rdx, 4*8(%rsp)
1241	/* If the stack pointer is above the NMI stack, this is a normal NMI */
1242	ja	first_nmi
1243
1244	subq	$EXCEPTION_STKSZ, %rdx
1245	cmpq	%rdx, 4*8(%rsp)
1246	/* If it is below the NMI stack, it is a normal NMI */
1247	jb	first_nmi
1248
1249	/* Ah, it is within the NMI stack. */
1250
1251	testb	$(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
1252	jz	first_nmi	/* RSP was user controlled. */
1253
1254	/* This is a nested NMI. */
1255
1256nested_nmi:
1257	/*
1258	 * Modify the "iret" frame to point to repeat_nmi, forcing another
1259	 * iteration of NMI handling.
1260	 */
1261	subq	$8, %rsp
1262	leaq	-10*8(%rsp), %rdx
1263	pushq	$__KERNEL_DS
1264	pushq	%rdx
1265	pushfq
1266	pushq	$__KERNEL_CS
1267	pushq	$repeat_nmi
1268
1269	/* Put stack back */
1270	addq	$(6*8), %rsp
1271
1272nested_nmi_out:
1273	popq	%rdx
1274
1275	/* We are returning to kernel mode, so this cannot result in a fault. */
1276	iretq
1277
1278first_nmi:
1279	/* Restore rdx. */
1280	movq	(%rsp), %rdx
1281
1282	/* Make room for "NMI executing". */
1283	pushq	$0
1284
1285	/* Leave room for the "iret" frame */
1286	subq	$(5*8), %rsp
1287
1288	/* Copy the "original" frame to the "outermost" frame */
1289	.rept 5
1290	pushq	11*8(%rsp)
1291	.endr
1292	UNWIND_HINT_IRET_REGS
1293
1294	/* Everything up to here is safe from nested NMIs */
1295
1296#ifdef CONFIG_DEBUG_ENTRY
1297	/*
1298	 * For ease of testing, unmask NMIs right away.  Disabled by
1299	 * default because IRET is very expensive.
1300	 */
1301	pushq	$0		/* SS */
1302	pushq	%rsp		/* RSP (minus 8 because of the previous push) */
1303	addq	$8, (%rsp)	/* Fix up RSP */
1304	pushfq			/* RFLAGS */
1305	pushq	$__KERNEL_CS	/* CS */
1306	pushq	$1f		/* RIP */
1307	iretq			/* continues at repeat_nmi below */
1308	UNWIND_HINT_IRET_REGS
13091:
1310#endif
1311
1312repeat_nmi:
1313	/*
1314	 * If there was a nested NMI, the first NMI's iret will return
1315	 * here. But NMIs are still enabled and we can take another
1316	 * nested NMI. The nested NMI checks the interrupted RIP to see
1317	 * if it is between repeat_nmi and end_repeat_nmi, and if so
1318	 * it will just return, as we are about to repeat an NMI anyway.
1319	 * This makes it safe to copy to the stack frame that a nested
1320	 * NMI will update.
1321	 *
1322	 * RSP is pointing to "outermost RIP".  gsbase is unknown, but, if
1323	 * we're repeating an NMI, gsbase has the same value that it had on
1324	 * the first iteration.  paranoid_entry will load the kernel
1325	 * gsbase if needed before we call exc_nmi().  "NMI executing"
1326	 * is zero.
1327	 */
1328	movq	$1, 10*8(%rsp)		/* Set "NMI executing". */
1329
1330	/*
1331	 * Copy the "outermost" frame to the "iret" frame.  NMIs that nest
1332	 * here must not modify the "iret" frame while we're writing to
1333	 * it or it will end up containing garbage.
1334	 */
1335	addq	$(10*8), %rsp
1336	.rept 5
1337	pushq	-6*8(%rsp)
1338	.endr
1339	subq	$(5*8), %rsp
1340end_repeat_nmi:
1341
1342	/*
1343	 * Everything below this point can be preempted by a nested NMI.
1344	 * If this happens, then the inner NMI will change the "iret"
1345	 * frame to point back to repeat_nmi.
1346	 */
1347	pushq	$-1				/* ORIG_RAX: no syscall to restart */
1348
1349	/*
1350	 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
1351	 * as we should not be calling schedule in NMI context,
1352	 * even with normal interrupts enabled. An NMI should not be
1353	 * setting NEED_RESCHED or anything that normal interrupts and
1354	 * exceptions might do.
1355	 */
1356	call	paranoid_entry
1357	UNWIND_HINT_REGS
1358
1359	movq	%rsp, %rdi
1360	movq	$-1, %rsi
1361	call	exc_nmi
1362
1363	/* Always restore stashed CR3 value (see paranoid_entry) */
1364	RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
1365
1366	/*
1367	 * The above invocation of paranoid_entry stored the GSBASE
1368	 * related information in R/EBX depending on the availability
1369	 * of FSGSBASE.
1370	 *
1371	 * If FSGSBASE is enabled, restore the saved GSBASE value
1372	 * unconditionally, otherwise take the conditional SWAPGS path.
1373	 */
1374	ALTERNATIVE "jmp nmi_no_fsgsbase", "", X86_FEATURE_FSGSBASE
1375
1376	wrgsbase	%rbx
1377	jmp	nmi_restore
1378
1379nmi_no_fsgsbase:
1380	/* EBX == 0 -> invoke SWAPGS */
1381	testl	%ebx, %ebx
1382	jnz	nmi_restore
1383
1384nmi_swapgs:
1385	swapgs
1386
1387nmi_restore:
1388	POP_REGS
1389
1390	/*
1391	 * Skip orig_ax and the "outermost" frame to point RSP at the "iret"
1392	 * frame.
1393	 */
1394	addq	$6*8, %rsp
1395
1396	/*
1397	 * Clear "NMI executing".  Set DF first so that we can easily
1398	 * distinguish the remaining code between here and IRET from
1399	 * the SYSCALL entry and exit paths.
1400	 *
1401	 * We arguably should just inspect RIP instead, but I (Andy) wrote
1402	 * this code when I had the misapprehension that Xen PV supported
1403	 * NMIs, and Xen PV would break that approach.
1404	 */
1405	std
1406	movq	$0, 5*8(%rsp)		/* clear "NMI executing" */
1407
1408	/*
1409	 * iretq reads the "iret" frame and exits the NMI stack in a
1410	 * single instruction.  We are returning to kernel mode, so this
1411	 * cannot result in a fault.  Similarly, we don't need to worry
1412	 * about espfix64 on the way back to kernel mode.
1413	 */
1414	iretq
1415SYM_CODE_END(asm_exc_nmi)
1416
1417#ifndef CONFIG_IA32_EMULATION
1418/*
1419 * This handles SYSCALL from 32-bit code.  There is no way to program
1420 * MSRs to fully disable 32-bit SYSCALL.
1421 */
1422SYM_CODE_START(ignore_sysret)
1423	UNWIND_HINT_EMPTY
1424	mov	$-ENOSYS, %eax
1425	sysretl
1426SYM_CODE_END(ignore_sysret)
1427#endif
1428
1429.pushsection .text, "ax"
1430SYM_CODE_START(rewind_stack_and_make_dead)
1431	UNWIND_HINT_FUNC
1432	/* Prevent any naive code from trying to unwind to our caller. */
1433	xorl	%ebp, %ebp
1434
1435	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rax
1436	leaq	-PTREGS_SIZE(%rax), %rsp
1437	UNWIND_HINT_REGS
1438
1439	call	make_task_dead
1440SYM_CODE_END(rewind_stack_and_make_dead)
1441.popsection
1442