/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  Copyright (C) 1991,1992  Linus Torvalds
 *
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 *
 * Stack layout while running C code:
 *	ptrace needs to have all registers on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process(), signal.c:do_signal(),
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 */

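/*
 * Note: the layout above mirrors the 32-bit struct pt_regs; the PT_*
 * offsets used throughout this file (PT_GS, PT_EFLAGS, PT_OLDESP, ...)
 * are generated from that struct at build time (asm-offsets), so the
 * two must be kept in sync.
 */
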
#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/frame.h>
#include <asm/trapnr.h>
#include <asm/nospec-branch.h>

#include "calling.h"

	.section .entry.text, "ax"

#define PTI_SWITCH_MASK         (1 << PAGE_SHIFT)

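/*
 * A sketch of what PTI_SWITCH_MASK encodes (assuming 4K pages, so
 * PAGE_SHIFT == 12): with PTI the kernel and user page-table roots
 * are two adjacent pages, so flipping bit 12 of CR3 is all it takes
 * to switch between them:
 *
 *	kernel cr3: ....0 000000000000		(bit 12 clear)
 *	user   cr3: ....1 000000000000		(bit 12 set)
 *
 * The SWITCH_TO_*_CR3 macros below only ever set or clear this bit.
 */
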
/*
 * User gs save/restore
 *
 * %gs is used for userland TLS and the kernel only uses it for the
 * stack canary which is required to be at %gs:20 by gcc.  Read the
 * comment at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-op */
.macro PUSH_GS
	pushl	$0
.endm
.macro POP_GS pop=0
	addl	$(4 + \pop), %esp
.endm
.macro POP_GS_EX
.endm

 /* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl	%gs
.endm

.macro POP_GS pop=0
98:	popl	%gs
  .if \pop <> 0
	add	$\pop, %esp
  .endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, (%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro PTGS_TO_GS
98:	mov	PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, PT_GS(%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro GS_TO_REG reg
	movl	%gs, \reg
.endm
.macro REG_TO_PTGS reg
	movl	\reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
	movl	$(__KERNEL_STACK_CANARY), \reg
	movl	\reg, %gs
.endm

#endif /* CONFIG_X86_32_LAZY_GS */

/* Unconditionally switch to user cr3 */
.macro SWITCH_TO_USER_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI

	movl	%cr3, \scratch_reg
	orl	$PTI_SWITCH_MASK, \scratch_reg
	movl	\scratch_reg, %cr3
.Lend_\@:
.endm

.macro BUG_IF_WRONG_CR3 no_user_check=0
#ifdef CONFIG_DEBUG_ENTRY
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	.if \no_user_check == 0
	/* coming from usermode? */
	testl	$USER_SEGMENT_RPL_MASK, PT_CS(%esp)
	jz	.Lend_\@
	.endif
	/* On user-cr3? */
	movl	%cr3, %eax
	testl	$PTI_SWITCH_MASK, %eax
	jnz	.Lend_\@
	/* From userspace with kernel cr3 - BUG */
	ud2
.Lend_\@:
#endif
.endm

/*
 * Switch to kernel cr3 if not already loaded and return current cr3 in
 * \scratch_reg
 */
.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	movl	%cr3, \scratch_reg
	/* Test if we are already on kernel CR3 */
	testl	$PTI_SWITCH_MASK, \scratch_reg
	jz	.Lend_\@
	andl	$(~PTI_SWITCH_MASK), \scratch_reg
	movl	\scratch_reg, %cr3
	/* Return original CR3 in \scratch_reg */
	orl	$PTI_SWITCH_MASK, \scratch_reg
.Lend_\@:
.endm

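/*
 * Illustrative pairing of the two macros above: an entry path does
 *
 *	SWITCH_TO_KERNEL_CR3 scratch_reg=%eax	# %eax := cr3 at entry
 *
 * and keeps the returned value (or a marker derived from it) around so
 * the matching exit path can decide whether SWITCH_TO_USER_CR3 is
 * needed, as SWITCH_TO_KERNEL_STACK and PARANOID_EXIT_TO_KERNEL_MODE
 * below do via the CS_FROM_USER_CR3 marker.
 */
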
#define CS_FROM_ENTRY_STACK	(1 << 31)
#define CS_FROM_USER_CR3	(1 << 30)
#define CS_FROM_KERNEL		(1 << 29)
#define CS_FROM_ESPFIX		(1 << 28)

.macro FIXUP_FRAME
	/*
	 * The high bits of the CS dword (__csh) are used for CS_FROM_*.
	 * Clear them in case hardware didn't do this for us.
	 */
	andl	$0x0000ffff, 4*4(%esp)

#ifdef CONFIG_VM86
	testl	$X86_EFLAGS_VM, 5*4(%esp)
	jnz	.Lfrom_usermode_no_fixup_\@
#endif
	testl	$USER_SEGMENT_RPL_MASK, 4*4(%esp)
	jnz	.Lfrom_usermode_no_fixup_\@

	orl	$CS_FROM_KERNEL, 4*4(%esp)

	/*
	 * When we're here from kernel mode, the (exception) stack looks like:
	 *
	 *  6*4(%esp) - <previous context>
	 *  5*4(%esp) - flags
	 *  4*4(%esp) - cs
	 *  3*4(%esp) - ip
	 *  2*4(%esp) - orig_eax
	 *  1*4(%esp) - gs / function
	 *  0*4(%esp) - fs
	 *
	 * Let's build a 5 entry IRET frame after that, such that struct pt_regs
	 * is complete and in particular regs->sp is correct. This gives us
	 * the original 6 entries as gap:
	 *
	 * 14*4(%esp) - <previous context>
	 * 13*4(%esp) - gap / flags
	 * 12*4(%esp) - gap / cs
	 * 11*4(%esp) - gap / ip
	 * 10*4(%esp) - gap / orig_eax
	 *  9*4(%esp) - gap / gs / function
	 *  8*4(%esp) - gap / fs
	 *  7*4(%esp) - ss
	 *  6*4(%esp) - sp
	 *  5*4(%esp) - flags
	 *  4*4(%esp) - cs
	 *  3*4(%esp) - ip
	 *  2*4(%esp) - orig_eax
	 *  1*4(%esp) - gs / function
	 *  0*4(%esp) - fs
	 */

	pushl	%ss		# ss
	pushl	%esp		# sp (points at ss)
	addl	$7*4, (%esp)	# point sp back at the previous context
	pushl	7*4(%esp)	# flags
	pushl	7*4(%esp)	# cs
	pushl	7*4(%esp)	# ip
	pushl	7*4(%esp)	# orig_eax
	pushl	7*4(%esp)	# gs / function
	pushl	7*4(%esp)	# fs
.Lfrom_usermode_no_fixup_\@:
.endm

.macro IRET_FRAME
	/*
	 * We're called with %ds, %es, %fs, and %gs from the interrupted
	 * frame, so we shouldn't use them.  Also, we may be in ESPFIX
	 * mode and therefore have a nonzero SS base and an offset ESP,
	 * so any attempt to access the stack needs to use SS (except for
	 * accesses through %esp, which automatically use SS).
	 */
	testl $CS_FROM_KERNEL, 1*4(%esp)
	jz .Lfinished_frame_\@

	/*
	 * Reconstruct the 3 entry IRET frame right after the (modified)
	 * regs->sp without lowering %esp in between, such that an NMI in the
	 * middle doesn't scribble our stack.
	 */
	pushl	%eax
	pushl	%ecx
	movl	5*4(%esp), %eax		# (modified) regs->sp

	movl	4*4(%esp), %ecx		# flags
	movl	%ecx, %ss:-1*4(%eax)

	movl	3*4(%esp), %ecx		# cs
	andl	$0x0000ffff, %ecx
	movl	%ecx, %ss:-2*4(%eax)

	movl	2*4(%esp), %ecx		# ip
	movl	%ecx, %ss:-3*4(%eax)

	movl	1*4(%esp), %ecx		# eax
	movl	%ecx, %ss:-4*4(%eax)

	popl	%ecx
	lea	-4*4(%eax), %esp
	popl	%eax
.Lfinished_frame_\@:
.endm

.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0 unwind_espfix=0
	cld
.if \skip_gs == 0
	PUSH_GS
.endif
	pushl	%fs

	pushl	%eax
	movl	$(__KERNEL_PERCPU), %eax
	movl	%eax, %fs
.if \unwind_espfix > 0
	UNWIND_ESPFIX_STACK
.endif
	popl	%eax

	FIXUP_FRAME
	pushl	%es
	pushl	%ds
	pushl	\pt_regs_ax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	movl	$(__USER_DS), %edx
	movl	%edx, %ds
	movl	%edx, %es
.if \skip_gs == 0
	SET_KERNEL_GS %edx
.endif
	/* Switch to kernel stack if necessary */
.if \switch_stacks > 0
	SWITCH_TO_KERNEL_STACK
.endif
.endm

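/*
 * For reference, the SAVE_ALL invocations used later in this file:
 *
 *	SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1		# INT80 entry
 *	SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1	# handle_exception
 *
 * Syscall entries pass -ENOSYS as pt_regs->ax so that an unknown
 * syscall number returns an error without any extra work.
 */
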
.macro SAVE_ALL_NMI cr3_reg:req unwind_espfix=0
	SAVE_ALL unwind_espfix=\unwind_espfix

	BUG_IF_WRONG_CR3

	/*
	 * Now switch the CR3 when PTI is enabled.
	 *
	 * We can enter with either user or kernel cr3; the code will
	 * store the old cr3 in \cr3_reg and switch to the kernel cr3
	 * if necessary.
	 */
	SWITCH_TO_KERNEL_CR3 scratch_reg=\cr3_reg

.Lend_\@:
.endm

.macro RESTORE_INT_REGS
	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl	%ds
2:	popl	%es
3:	popl	%fs
	POP_GS \pop
	IRET_FRAME
.pushsection .fixup, "ax"
4:	movl	$0, (%esp)
	jmp	1b
5:	movl	$0, (%esp)
	jmp	2b
6:	movl	$0, (%esp)
	jmp	3b
.popsection
	_ASM_EXTABLE(1b, 4b)
	_ASM_EXTABLE(2b, 5b)
	_ASM_EXTABLE(3b, 6b)
	POP_GS_EX
.endm

.macro RESTORE_ALL_NMI cr3_reg:req pop=0
	/*
	 * Now switch the CR3 when PTI is enabled.
	 *
	 * We enter with kernel cr3 and switch the cr3 to the value
	 * stored in \cr3_reg, which is either a user or a kernel cr3.
	 */
	ALTERNATIVE "jmp .Lswitched_\@", "", X86_FEATURE_PTI

	testl	$PTI_SWITCH_MASK, \cr3_reg
	jz	.Lswitched_\@

	/* User cr3 in \cr3_reg - write it to hardware cr3 */
	movl	\cr3_reg, %cr3

.Lswitched_\@:

	BUG_IF_WRONG_CR3

	RESTORE_REGS pop=\pop
.endm

.macro CHECK_AND_APPLY_ESPFIX
#ifdef CONFIG_X86_ESPFIX32
#define GDT_ESPFIX_OFFSET (GDT_ENTRY_ESPFIX_SS * 8)
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + GDT_ESPFIX_OFFSET

	ALTERNATIVE	"jmp .Lend_\@", "", X86_BUG_ESPFIX

	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	/*
	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	 * are returning to the kernel.
	 * See comments in process.c:copy_thread() for details.
	 */
	movb	PT_OLDSS(%esp), %ah
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
	jne	.Lend_\@	# returning to user-space with LDT SS

	/*
	 * Setup and switch to ESPFIX stack
	 *
	 * We're returning to userspace with a 16 bit stack. The CPU will not
	 * restore the high word of ESP for us on executing iret... This is an
	 * "official" bug of all the x86-compatible CPUs, which we can work
	 * around to make dosemu and wine happy. We do this by preloading the
	 * high word of ESP with the high word of the userspace ESP while
	 * compensating for the offset by changing to the ESPFIX segment with
	 * a base address that matches for the difference.
	 */
	mov	%esp, %edx			/* load kernel esp */
	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
	mov	%dx, %ax			/* eax: new kernel esp */
	sub	%eax, %edx			/* offset (low word is 0) */
	shr	$16, %edx
	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
	pushl	$__ESPFIX_SS
	pushl	%eax				/* new kernel esp */
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the IRET:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	lss	(%esp), %esp			/* switch to espfix segment */
.Lend_\@:
#endif /* CONFIG_X86_ESPFIX32 */
.endm

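/*
 * Worked example for the ESPFIX computation above, with illustrative
 * numbers: kernel %esp = 0xc15fe2b0, userspace %esp = 0x0009ffd4.
 * The new %esp combines the userspace high word with the kernel low
 * word: 0x0009e2b0.  The difference 0xc15fe2b0 - 0x0009e2b0 =
 * 0xc1560000 has a zero low word and becomes the ESPFIX segment base,
 * so base + new %esp still addresses the same kernel stack byte while
 * the high word userspace will see in ESP is already the right one.
 */
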
/*
 * Called with pt_regs fully populated and kernel segments loaded,
 * so we can access PER_CPU and use the integer registers.
 *
 * We need to be very careful here with the %esp switch, because an NMI
 * can happen anywhere. If the NMI handler finds itself on the
 * entry-stack, it will overwrite the task-stack and everything we
 * copied there. So allocate the stack-frame on the task-stack and
 * switch to it before we do any copying.
 */

.macro SWITCH_TO_KERNEL_STACK

	ALTERNATIVE     "", "jmp .Lend_\@", X86_FEATURE_XENPV

	BUG_IF_WRONG_CR3

	SWITCH_TO_KERNEL_CR3 scratch_reg=%eax

	/*
	 * %eax now contains the entry cr3 and we carry it forward in
	 * that register for the time this macro runs
	 */

	/* Are we on the entry stack? Bail out if not! */
	movl	PER_CPU_VAR(cpu_entry_area), %ecx
	addl	$CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
	subl	%esp, %ecx	/* ecx = (end of entry_stack) - esp */
	cmpl	$SIZEOF_entry_stack, %ecx
	jae	.Lend_\@

	/* Load stack pointer into %esi and %edi */
	movl	%esp, %esi
	movl	%esi, %edi

	/* Move %edi to the top of the entry stack */
	andl	$(MASK_entry_stack), %edi
	addl	$(SIZEOF_entry_stack), %edi

	/* Load top of task-stack into %edi */
	movl	TSS_entry2task_stack(%edi), %edi

	/* Special case - entry from kernel mode via entry stack */
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %ecx		# mix EFLAGS and CS
	movb	PT_CS(%esp), %cl
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %ecx
#else
	movl	PT_CS(%esp), %ecx
	andl	$SEGMENT_RPL_MASK, %ecx
#endif
	cmpl	$USER_RPL, %ecx
	jb	.Lentry_from_kernel_\@

	/* Bytes to copy */
	movl	$PTREGS_SIZE, %ecx

#ifdef CONFIG_VM86
	testl	$X86_EFLAGS_VM, PT_EFLAGS(%esi)
	jz	.Lcopy_pt_regs_\@

	/*
	 * Stack-frame contains 4 additional segment registers when
	 * coming from VM86 mode
	 */
	addl	$(4 * 4), %ecx

#endif
.Lcopy_pt_regs_\@:

	/* Allocate frame on task-stack */
	subl	%ecx, %edi

	/* Switch to task-stack */
	movl	%edi, %esp

	/*
	 * We are now on the task-stack and can safely copy over the
	 * stack-frame
	 */
	shrl	$2, %ecx
	cld
	rep movsl

	jmp .Lend_\@

.Lentry_from_kernel_\@:

	/*
	 * This handles the case when we enter the kernel from
	 * kernel-mode and %esp points to the entry-stack. When this
	 * happens we need to switch to the task-stack to run C code,
	 * but switch back to the entry-stack again when we approach
	 * iret and return to the interrupted code-path. This usually
	 * happens when we hit an exception while restoring user-space
	 * segment registers on the way back to user-space or when the
	 * sysenter handler runs with eflags.tf set.
	 *
	 * When we switch to the task-stack here, we can't trust the
	 * contents of the entry-stack anymore, as the exception handler
	 * might be scheduled out or moved to another CPU. Therefore we
	 * copy the complete entry-stack to the task-stack and set a
	 * marker in the iret-frame (bit 31 of the CS dword) to detect
	 * what we've done on the iret path.
	 *
	 * On the iret path we copy everything back and switch to the
	 * entry-stack, so that the interrupted kernel code-path
	 * continues on the same stack it was interrupted with.
	 *
	 * Be aware that an NMI can happen anytime in this code.
	 *
	 * %esi: Entry-Stack pointer (same as %esp)
	 * %edi: Top of the task stack
	 * %eax: CR3 on kernel entry
	 */

	/* Calculate number of bytes on the entry stack in %ecx */
	movl	%esi, %ecx

	/* %ecx to the top of entry-stack */
	andl	$(MASK_entry_stack), %ecx
	addl	$(SIZEOF_entry_stack), %ecx

	/* Number of bytes on the entry stack to %ecx */
	sub	%esi, %ecx

	/* Mark stackframe as coming from entry stack */
	orl	$CS_FROM_ENTRY_STACK, PT_CS(%esp)

	/*
	 * Test the cr3 used to enter the kernel and add a marker
	 * so that we can switch back to it before iret.
	 */
	testl	$PTI_SWITCH_MASK, %eax
	jz	.Lcopy_pt_regs_\@
	orl	$CS_FROM_USER_CR3, PT_CS(%esp)

	/*
	 * %esi and %edi are unchanged, %ecx contains the number of
	 * bytes to copy. The code at .Lcopy_pt_regs_\@ will allocate
	 * the stack-frame on task-stack and copy everything over
	 */
	jmp .Lcopy_pt_regs_\@

.Lend_\@:
.endm

/*
 * Switch back from the kernel stack to the entry stack.
 *
 * The %esp register must point to pt_regs on the task stack. It will
 * first calculate the size of the stack-frame to copy, depending on
 * whether we return to VM86 mode or not. With that it uses 'rep movsl'
 * to copy the contents of the stack over to the entry stack.
 *
 * We must be very careful here, as we can't trust the contents of the
 * task-stack once we have switched to the entry-stack. When an NMI
 * happens while on the entry-stack, the NMI handler will switch back
 * to the top of the task stack, overwriting the stack-frame we are
 * about to copy. Therefore we switch the stack only after everything
 * is copied over.
 */
.macro SWITCH_TO_ENTRY_STACK

	ALTERNATIVE     "", "jmp .Lend_\@", X86_FEATURE_XENPV

	/* Bytes to copy */
	movl	$PTREGS_SIZE, %ecx

#ifdef CONFIG_VM86
	testl	$(X86_EFLAGS_VM), PT_EFLAGS(%esp)
	jz	.Lcopy_pt_regs_\@

	/* Additional 4 registers to copy when returning to VM86 mode */
	addl    $(4 * 4), %ecx

.Lcopy_pt_regs_\@:
#endif

	/* Initialize source and destination for movsl */
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
	subl	%ecx, %edi
	movl	%esp, %esi

	/* Save future stack pointer in %ebx */
	movl	%edi, %ebx

	/* Copy over the stack-frame */
	shrl	$2, %ecx
	cld
	rep movsl

	/*
	 * Switch to entry-stack - needs to happen after everything is
	 * copied because the NMI handler will overwrite the task-stack
	 * when on entry-stack
	 */
	movl	%ebx, %esp

.Lend_\@:
.endm

/*
 * This macro handles the case when we return to kernel-mode on the iret
 * path and have to switch back to the entry stack and/or user-cr3
 *
 * See the comments below the .Lentry_from_kernel_\@ label in the
 * SWITCH_TO_KERNEL_STACK macro for more details.
 */
.macro PARANOID_EXIT_TO_KERNEL_MODE

	/*
	 * Test if we entered the kernel with the entry-stack. Most
	 * likely we did not, because this code only runs on the
	 * return-to-kernel path.
	 */
	testl	$CS_FROM_ENTRY_STACK, PT_CS(%esp)
	jz	.Lend_\@

	/* Unlikely slow-path */

	/* Clear marker from stack-frame */
	andl	$(~CS_FROM_ENTRY_STACK), PT_CS(%esp)

	/* Copy the remaining task-stack contents to entry-stack */
	movl	%esp, %esi
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi

	/* Bytes on the task-stack to ecx */
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp1), %ecx
	subl	%esi, %ecx

	/* Allocate stack-frame on entry-stack */
	subl	%ecx, %edi

	/*
	 * Save future stack-pointer, we must not switch until the
	 * copy is done, otherwise the NMI handler could destroy the
	 * contents of the task-stack we are about to copy.
	 */
	movl	%edi, %ebx

	/* Do the copy */
	shrl	$2, %ecx
	cld
	rep movsl

	/* Safe to switch to entry-stack now */
	movl	%ebx, %esp

	/*
	 * We came from entry-stack and need to check if we also need to
	 * switch back to user cr3.
	 */
	testl	$CS_FROM_USER_CR3, PT_CS(%esp)
	jz	.Lend_\@

	/* Clear marker from stack-frame */
	andl	$(~CS_FROM_USER_CR3), PT_CS(%esp)

	SWITCH_TO_USER_CR3 scratch_reg=%eax

.Lend_\@:
.endm

/**
 * idtentry - Macro to generate entry stubs for simple IDT entries
 * @vector:		Vector number
 * @asmsym:		ASM symbol for the entry point
 * @cfunc:		C function to be called
 * @has_error_code:	Hardware pushed error code on stack
 */
.macro idtentry vector asmsym cfunc has_error_code:req
SYM_CODE_START(\asmsym)
	ASM_CLAC
	cld

	.if \has_error_code == 0
		pushl	$0		/* Clear the error code */
	.endif

	/* Push the C-function address into the GS slot */
	pushl	$\cfunc
	/* Invoke the common exception entry */
	jmp	handle_exception
SYM_CODE_END(\asmsym)
.endm

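/*
 * A typical instantiation, emitted via <asm/idtentry.h> (a sketch;
 * the exact expansion lives in that header):
 *
 *	idtentry X86_TRAP_DE asm_exc_divide_error exc_divide_error has_error_code=0
 *
 * i.e. the stub pushes a zero error code plus the address of
 * exc_divide_error and tails into handle_exception.
 */
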
.macro idtentry_irq vector cfunc
	.p2align CONFIG_X86_L1_CACHE_SHIFT
SYM_CODE_START_LOCAL(asm_\cfunc)
	ASM_CLAC
	SAVE_ALL switch_stacks=1
	ENCODE_FRAME_POINTER
	movl	%esp, %eax
	movl	PT_ORIG_EAX(%esp), %edx		/* get the vector from stack */
	movl	$-1, PT_ORIG_EAX(%esp)		/* no syscall to restart */
	call	\cfunc
	jmp	handle_exception_return
SYM_CODE_END(asm_\cfunc)
.endm

.macro idtentry_sysvec vector cfunc
	idtentry \vector asm_\cfunc \cfunc has_error_code=0
.endm

/*
 * Include the defines which emit the idt entries which are shared
 * between 32 and 64 bit and emit the __irqentry_text_* markers
 * so the stacktrace boundary checks work.
 */
	.align 16
	.globl __irqentry_text_start
__irqentry_text_start:

#include <asm/idtentry.h>

	.align 16
	.globl __irqentry_text_end
__irqentry_text_end:

/*
 * %eax: prev task
 * %edx: next task
 */
.pushsection .text, "ax"
SYM_CODE_START(__switch_to_asm)
	/*
	 * Save callee-saved registers
	 * This must match the order in struct inactive_task_frame
	 */
	pushl	%ebp
	pushl	%ebx
	pushl	%edi
	pushl	%esi
	/*
	 * Flags are saved to prevent AC leakage. This could go
	 * away if objtool had 32-bit support to verify
	 * the STAC/CLAC correctness.
	 */
	pushfl

	/* switch stack */
	movl	%esp, TASK_threadsp(%eax)
	movl	TASK_threadsp(%edx), %esp

#ifdef CONFIG_STACKPROTECTOR
	movl	TASK_stack_canary(%edx), %ebx
	movl	%ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
#endif

#ifdef CONFIG_RETPOLINE
	/*
	 * When switching from a shallower to a deeper call stack
	 * the RSB may either underflow or use entries populated
	 * with userspace addresses. On CPUs where those concerns
	 * exist, overwrite the RSB with entries which capture
	 * speculative execution to prevent attack.
	 */
	FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif

	/* Restore the flags of the incoming task to restore AC state. */
	popfl
	/* restore callee-saved registers */
	popl	%esi
	popl	%edi
	popl	%ebx
	popl	%ebp

	jmp	__switch_to
SYM_CODE_END(__switch_to_asm)
.popsection

/*
 * The unwinder expects the last frame on the stack to always be at the same
 * offset from the end of the page, which allows it to validate the stack.
 * Calling schedule_tail() directly would break that convention because it's an
 * asmlinkage function so its argument has to be pushed on the stack.  This
 * wrapper creates a proper "end of stack" frame header before the call.
 */
.pushsection .text, "ax"
SYM_FUNC_START(schedule_tail_wrapper)
	FRAME_BEGIN

	pushl	%eax
	call	schedule_tail
	popl	%eax

	FRAME_END
	ret
SYM_FUNC_END(schedule_tail_wrapper)
.popsection

/*
 * A newly forked process directly context switches into this address.
 *
 * eax: prev task we switched from
 * ebx: kernel thread func (NULL for user thread)
 * edi: kernel thread arg
 */
.pushsection .text, "ax"
SYM_CODE_START(ret_from_fork)
	call	schedule_tail_wrapper

	testl	%ebx, %ebx
	jnz	1f		/* kernel threads are uncommon */

2:
	/* When we fork, we trace the syscall return in the child, too. */
	movl    %esp, %eax
	call    syscall_return_slowpath
	jmp     .Lsyscall_32_done

	/* kernel thread */
1:	movl	%edi, %eax
	CALL_NOSPEC ebx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve().  Exit to userspace to complete the execve()
	 * syscall.
	 */
	movl	$0, PT_EAX(%esp)
	jmp	2b
SYM_CODE_END(ret_from_fork)
.popsection

SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
/*
 * All code from here through __end_SYSENTER_singlestep_region is subject
 * to being single-stepped if a user program sets TF and executes SYSENTER.
 * There is absolutely nothing that we can do to prevent this from happening
 * (thanks Intel!).  To keep our handling of this situation as simple as
 * possible, we handle TF just like AC and NT, except that our #DB handler
 * will ignore all of the single-step traps generated in this range.
 */

#ifdef CONFIG_XEN_PV
/*
 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
 */
SYM_CODE_START(xen_sysenter_target)
	addl	$5*4, %esp			/* remove xen-provided frame */
	jmp	.Lsysenter_past_esp
SYM_CODE_END(xen_sysenter_target)
#endif

/*
 * 32-bit SYSENTER entry.
 *
 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
 * if X86_FEATURE_SEP is available.  This is the preferred system call
 * entry on 32-bit systems.
 *
 * The SYSENTER instruction, in principle, should *only* occur in the
 * vDSO.  In practice, a small number of Android devices were shipped
 * with a copy of Bionic that inlined a SYSENTER instruction.  This
 * never happened in any of Google's Bionic versions -- it only happened
 * in a narrow range of Intel-provided versions.
 *
 * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs.
 * IF and VM in EFLAGS are cleared (IOW: interrupts are off).
 * SYSENTER does not save anything on the stack,
 * and does not save the old EIP (!!!), ESP, or EFLAGS.
 *
 * To avoid losing track of EFLAGS.VM (and thus potentially corrupting
 * user and/or vm86 state), we explicitly disable the SYSENTER
 * instruction in vm86 mode by reprogramming the MSRs.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  user stack
 * 0(%ebp) arg6
 */
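/*
 * For reference, the vDSO side of this handshake (a sketch of
 * __kernel_vsyscall; see arch/x86/entry/vdso/vdso32/system_call.S for
 * the real thing) saves the registers SYSENTER clobbers and parks the
 * user stack pointer in %ebp:
 *
 *	push	%ecx
 *	push	%edx
 *	push	%ebp
 *	movl	%esp, %ebp
 *	sysenter
 *
 * which is why arg6 lives at 0(%ebp) above, and why the return path
 * below leaves it to the vDSO to pop %ecx and %edx.
 */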
SYM_FUNC_START(entry_SYSENTER_32)
	/*
	 * On entry-stack with all userspace-regs live - save and
	 * restore eflags and %eax to use it as scratch-reg for the cr3
	 * switch.
	 */
	pushfl
	pushl	%eax
	BUG_IF_WRONG_CR3 no_user_check=1
	SWITCH_TO_KERNEL_CR3 scratch_reg=%eax
	popl	%eax
	popfl

	/* Stack empty again, switch to task stack */
	movl	TSS_entry2task_stack(%esp), %esp

.Lsysenter_past_esp:
	pushl	$__USER_DS		/* pt_regs->ss */
	pushl	$0			/* pt_regs->sp (placeholder) */
	pushfl				/* pt_regs->flags (except IF = 0) */
	pushl	$__USER_CS		/* pt_regs->cs */
	pushl	$0			/* pt_regs->ip = 0 (placeholder) */
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest, stack already switched */

	/*
	 * SYSENTER doesn't filter flags, so we need to clear NT, AC
	 * and TF ourselves.  To save a few cycles, we can check whether
	 * either was set instead of doing an unconditional popfl.
	 * This needs to happen before enabling interrupts so that
	 * we don't get preempted with NT set.
	 *
	 * If TF is set, we will single-step all the way to here -- do_debug
	 * will ignore all the traps.  (Yes, this is slow, but so is
	 * single-stepping in general.  This allows us to avoid having
	 * a more complicated code to handle the case where a user program
	 * forces us to single-step through the SYSENTER entry code.)
	 *
	 * NB: .Lsysenter_fix_flags is a label with the code under it moved
	 * out-of-line as an optimization: NT is unlikely to be set in the
	 * majority of the cases and instead of polluting the I$ unnecessarily,
	 * we're keeping that code behind a branch which will predict as
	 * not-taken and therefore its instructions won't be fetched.
	 */
	testl	$X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
	jnz	.Lsysenter_fix_flags
.Lsysenter_flags_fixed:

	movl	%esp, %eax
	call	do_SYSENTER_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV

	STACKLEAK_ERASE

	/* Opportunistic SYSEXIT */

	/*
	 * Setup entry stack - we keep the pointer in %eax and do the
	 * switch after almost all user-state is restored.
	 */

	/* Load entry stack pointer and allocate frame for eflags/eax */
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %eax
	subl	$(2*4), %eax

	/* Copy eflags and eax to entry stack */
	movl	PT_EFLAGS(%esp), %edi
	movl	PT_EAX(%esp), %esi
	movl	%edi, (%eax)
	movl	%esi, 4(%eax)

	/* Restore user registers and segments */
	movl	PT_EIP(%esp), %edx	/* pt_regs->ip */
	movl	PT_OLDESP(%esp), %ecx	/* pt_regs->sp */
1:	mov	PT_FS(%esp), %fs
	PTGS_TO_GS

	popl	%ebx			/* pt_regs->bx */
	addl	$2*4, %esp		/* skip pt_regs->cx and pt_regs->dx */
	popl	%esi			/* pt_regs->si */
	popl	%edi			/* pt_regs->di */
	popl	%ebp			/* pt_regs->bp */

	/* Switch to entry stack */
	movl	%eax, %esp

	/* Now ready to switch the cr3 */
	SWITCH_TO_USER_CR3 scratch_reg=%eax

	/*
	 * Restore all flags except IF. (We restore IF separately because
	 * STI gives a one-instruction window in which we won't be interrupted,
	 * whereas POPF does not.)
	 */
	btrl	$X86_EFLAGS_IF_BIT, (%esp)
	BUG_IF_WRONG_CR3 no_user_check=1
	popfl
	popl	%eax

	/*
	 * Return back to the vDSO, which will pop ecx and edx.
	 * Don't bother with DS and ES (they already contain __USER_DS).
	 */
	sti
	sysexit

.pushsection .fixup, "ax"
2:	movl	$0, PT_FS(%esp)
	jmp	1b
.popsection
	_ASM_EXTABLE(1b, 2b)
	PTGS_TO_GS_EX

.Lsysenter_fix_flags:
	pushl	$X86_EFLAGS_FIXED
	popfl
	jmp	.Lsysenter_flags_fixed
SYM_ENTRY(__end_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
SYM_FUNC_END(entry_SYSENTER_32)

/*
 * 32-bit legacy system call entry.
 *
 * 32-bit x86 Linux system calls traditionally used the INT $0x80
 * instruction.  INT $0x80 lands here.
 *
 * This entry point can be used by any 32-bit program to perform
 * system calls.  Instances of INT $0x80 can be found inline in
 * various programs and libraries.  It is also used by the vDSO's
 * __kernel_vsyscall fallback for hardware that doesn't support a
 * faster entry method.  Restarted 32-bit system calls also fall back
 * to INT $0x80 regardless of what instruction was originally used to
 * do the system call.  (64-bit programs can use INT $0x80 as well,
 * but they can only run on 64-bit kernels and therefore land in
 * entry_INT80_compat.)
 *
 * This is considered a slow path.  It is not used by most libc
 * implementations on modern hardware except during process startup.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  arg6
 */
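/*
 * Minimal userspace usage sketch (illustrative): a 32-bit _exit(0)
 * through this gate, using the i386 syscall number __NR_exit == 1:
 *
 *	movl	$1, %eax		# __NR_exit
 *	xorl	%ebx, %ebx		# status = 0
 *	int	$0x80
 */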
SYM_FUNC_START(entry_INT80_32)
	ASM_CLAC
	pushl	%eax			/* pt_regs->orig_ax */

	SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1	/* save rest */

	movl	%esp, %eax
	call	do_int80_syscall_32
.Lsyscall_32_done:
	STACKLEAK_ERASE

restore_all_switch_stack:
	SWITCH_TO_ENTRY_STACK
	CHECK_AND_APPLY_ESPFIX

	/* Switch back to user CR3 */
	SWITCH_TO_USER_CR3 scratch_reg=%eax

	BUG_IF_WRONG_CR3

	/* Restore user state */
	RESTORE_REGS pop=4			# skip orig_eax/error_code
.Lirq_return:
	/*
	 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
	 * when returning from IPI handler and when returning from
	 * scheduler to user-space.
	 */
	INTERRUPT_RETURN

.section .fixup, "ax"
SYM_CODE_START(asm_iret_error)
	pushl	$0				# no error code
	pushl	$iret_error

#ifdef CONFIG_DEBUG_ENTRY
	/*
	 * The stack-frame here is the one that iret faulted on, so it's a
	 * return-to-user frame. We are on kernel-cr3 because we come here from
	 * the fixup code. This confuses the CR3 checker, so switch to user-cr3
	 * as the checker expects it.
	 */
	pushl	%eax
	SWITCH_TO_USER_CR3 scratch_reg=%eax
	popl	%eax
#endif

	jmp	handle_exception
SYM_CODE_END(asm_iret_error)
.previous
	_ASM_EXTABLE(.Lirq_return, asm_iret_error)
SYM_FUNC_END(entry_INT80_32)

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack.
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
 *
 * We might be on user CR3 here, so percpu data is not mapped and we can't
 * access the GDT through the percpu segment.  Instead, use SGDT to find
 * the cpu_entry_area alias of the GDT.
 */
#ifdef CONFIG_X86_ESPFIX32
	/* fixup the stack */
	pushl	%ecx
	subl	$2*4, %esp
	sgdt	(%esp)
	movl	2(%esp), %ecx				/* GDT address */
	/*
	 * Careful: ECX is a linear pointer, so we need to force base
	 * zero.  %cs is the only known-linear segment we have right now.
	 */
	mov	%cs:GDT_ESPFIX_OFFSET + 4(%ecx), %al	/* bits 16..23 */
	mov	%cs:GDT_ESPFIX_OFFSET + 7(%ecx), %ah	/* bits 24..31 */
	shl	$16, %eax
	addl	$2*4, %esp
	popl	%ecx
	addl	%esp, %eax			/* the adjusted stack pointer */
	pushl	$__KERNEL_DS
	pushl	%eax
	lss	(%esp), %esp			/* switch to the normal stack segment */
#endif
.endm

.macro UNWIND_ESPFIX_STACK
	/* It's safe to clobber %eax, all other regs need to be preserved */
#ifdef CONFIG_X86_ESPFIX32
	movl	%ss, %eax
	/* see if on espfix stack */
	cmpw	$__ESPFIX_SS, %ax
	jne	.Lno_fixup_\@
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
.Lno_fixup_\@:
#endif
.endm

#ifdef CONFIG_PARAVIRT
SYM_CODE_START(native_iret)
	iret
	_ASM_EXTABLE(native_iret, asm_iret_error)
SYM_CODE_END(native_iret)
#endif

#ifdef CONFIG_XEN_PV
/*
 * See comment in entry_64.S for further explanation
 *
 * Note: This is not an actual IDT entry point. It's a XEN specific entry
 * point and therefore named to match the 64-bit trampoline counterpart.
 */
SYM_FUNC_START(xen_asm_exc_xen_hypervisor_callback)
	/*
	 * Check to see if we got the event in the critical
	 * region in xen_iret_direct, after we've reenabled
	 * events and checked for pending events.  This simulates the
	 * iret instruction's behaviour where it delivers a
	 * pending interrupt when enabling interrupts:
	 */
	cmpl	$xen_iret_start_crit, (%esp)
	jb	1f
	cmpl	$xen_iret_end_crit, (%esp)
	jae	1f
	call	xen_iret_crit_fixup
1:
	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	ENCODE_FRAME_POINTER

	mov	%esp, %eax
	call	xen_pv_evtchn_do_upcall
	jmp	handle_exception_return
SYM_FUNC_END(xen_asm_exc_xen_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we fix up by reattempting the load, and zeroing the segment
 * register if the load fails.
 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by maintaining a status value in EAX.
 */
SYM_FUNC_START(xen_failsafe_callback)
	pushl	%eax
	movl	$1, %eax
1:	mov	4(%esp), %ds
2:	mov	8(%esp), %es
3:	mov	12(%esp), %fs
4:	mov	16(%esp), %gs
	/* EAX == 0 => Category 1 (Bad segment)
	   EAX != 0 => Category 2 (Bad IRET) */
	testl	%eax, %eax
	popl	%eax
	lea	16(%esp), %esp
	jz	5f
	jmp	asm_iret_error
5:	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	ENCODE_FRAME_POINTER
	jmp	handle_exception_return

.section .fixup, "ax"
6:	xorl	%eax, %eax
	movl	%eax, 4(%esp)
	jmp	1b
7:	xorl	%eax, %eax
	movl	%eax, 8(%esp)
	jmp	2b
8:	xorl	%eax, %eax
	movl	%eax, 12(%esp)
	jmp	3b
9:	xorl	%eax, %eax
	movl	%eax, 16(%esp)
	jmp	4b
.previous
	_ASM_EXTABLE(1b, 6b)
	_ASM_EXTABLE(2b, 7b)
	_ASM_EXTABLE(3b, 8b)
	_ASM_EXTABLE(4b, 9b)
SYM_FUNC_END(xen_failsafe_callback)
#endif /* CONFIG_XEN_PV */

SYM_CODE_START_LOCAL_NOALIGN(handle_exception)
	/* the function address is in %gs's slot on the stack */
	SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
	ENCODE_FRAME_POINTER

	/* fixup %gs */
	GS_TO_REG %ecx
	movl	PT_GS(%esp), %edi		# get the function address
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx

	/* fixup orig %eax */
	movl	PT_ORIG_EAX(%esp), %edx		# get the error code
	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart

	movl	%esp, %eax			# pt_regs pointer
	CALL_NOSPEC edi

handle_exception_return:
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
	/*
	 * We can be coming here from a child spawned by kernel_thread().
	 */
	movl	PT_CS(%esp), %eax
	andl	$SEGMENT_RPL_MASK, %eax
#endif
	cmpl	$USER_RPL, %eax			# returning to v8086 or userspace?
	jnb	ret_to_user

	PARANOID_EXIT_TO_KERNEL_MODE
	BUG_IF_WRONG_CR3
	RESTORE_REGS 4
	jmp	.Lirq_return

ret_to_user:
	movl	%esp, %eax
	jmp	restore_all_switch_stack
SYM_CODE_END(handle_exception)

SYM_CODE_START(asm_exc_double_fault)
1:
	/*
	 * This is a task gate handler, not an interrupt gate handler.
	 * The error code is on the stack, but the stack is otherwise
	 * empty.  Interrupts are off.  Our state is sane with the following
	 * exceptions:
	 *
	 *  - CR0.TS is set.  "TS" literally means "task switched".
	 *  - EFLAGS.NT is set because we're a "nested task".
	 *  - The doublefault TSS has back_link set and has been marked busy.
	 *  - TR points to the doublefault TSS and the normal TSS is busy.
	 *  - CR3 is the normal kernel PGD.  This would be delightful, except
	 *    that the CPU didn't bother to save the old CR3 anywhere.  This
	 *    would make it very awkward to return back to the context we came
	 *    from.
	 *
	 * The rest of EFLAGS is sanitized for us, so we don't need to
	 * worry about AC or DF.
	 *
	 * Don't even bother popping the error code.  It's always zero,
	 * and ignoring it makes us a bit more robust against buggy
	 * hypervisor task gate implementations.
	 *
	 * We will manually undo the task switch instead of doing a
	 * task-switching IRET.
	 */

	clts				/* clear CR0.TS */
	pushl	$X86_EFLAGS_FIXED
	popfl				/* clear EFLAGS.NT */

	call	doublefault_shim

	/* We don't support returning, so we have no IRET here. */
1:
	hlt
	jmp 1b
SYM_CODE_END(asm_exc_double_fault)

/*
 * NMI is doubly nasty.  It can happen on the first instruction of
 * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning
 * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32
 * switched stacks.  We handle both conditions by simply checking whether we
 * interrupted kernel code running on the SYSENTER stack.
 */
SYM_CODE_START(asm_exc_nmi)
	ASM_CLAC

#ifdef CONFIG_X86_ESPFIX32
	/*
	 * ESPFIX_SS is only ever set on the return to user path
	 * after we've switched to the entry stack.
	 */
	pushl	%eax
	movl	%ss, %eax
	cmpw	$__ESPFIX_SS, %ax
	popl	%eax
	je	.Lnmi_espfix_stack
#endif

	pushl	%eax				# pt_regs->orig_ax
	SAVE_ALL_NMI cr3_reg=%edi
	ENCODE_FRAME_POINTER
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer

	/* Are we currently on the SYSENTER stack? */
	movl	PER_CPU_VAR(cpu_entry_area), %ecx
	addl	$CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
	subl	%eax, %ecx	/* ecx = (end of entry_stack) - esp */
	cmpl	$SIZEOF_entry_stack, %ecx
	jb	.Lnmi_from_sysenter_stack

	/* Not on SYSENTER stack. */
	call	exc_nmi
	jmp	.Lnmi_return

.Lnmi_from_sysenter_stack:
	/*
	 * We're on the SYSENTER stack.  Switch off.  No one (not even debug)
	 * is using the thread stack right now, so it's safe for us to use it.
	 */
	movl	%esp, %ebx
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	call	exc_nmi
	movl	%ebx, %esp

.Lnmi_return:
#ifdef CONFIG_X86_ESPFIX32
	testl	$CS_FROM_ESPFIX, PT_CS(%esp)
	jnz	.Lnmi_from_espfix
#endif

	CHECK_AND_APPLY_ESPFIX
	RESTORE_ALL_NMI cr3_reg=%edi pop=4
	jmp	.Lirq_return

#ifdef CONFIG_X86_ESPFIX32
.Lnmi_espfix_stack:
	/*
	 * Build the ss:esp pointer pair that the LSS in
	 * .Lnmi_from_espfix below uses to switch back to the
	 * espfix stack
	 */
	pushl	%ss
	pushl	%esp
	addl	$4, (%esp)

	/* Copy the (short) IRET frame */
	pushl	4*4(%esp)	# flags
	pushl	4*4(%esp)	# cs
	pushl	4*4(%esp)	# ip

	pushl	%eax		# orig_ax

	SAVE_ALL_NMI cr3_reg=%edi unwind_espfix=1
	ENCODE_FRAME_POINTER

	/* clear CS_FROM_KERNEL, set CS_FROM_ESPFIX */
	xorl	$(CS_FROM_ESPFIX | CS_FROM_KERNEL), PT_CS(%esp)

	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	jmp	.Lnmi_from_sysenter_stack

.Lnmi_from_espfix:
	RESTORE_ALL_NMI cr3_reg=%edi
	/*
	 * Because we cleared CS_FROM_KERNEL, IRET_FRAME 'forgot' to
	 * fix up the gap and long frame:
	 *
	 *  3 - original frame	(exception)
	 *  2 - ESPFIX block	(above)
	 *  6 - gap		(FIXUP_FRAME)
	 *  5 - long frame	(FIXUP_FRAME)
	 *  1 - orig_ax
	 */
	lss	(1+5+6)*4(%esp), %esp			# back to espfix stack
	jmp	.Lirq_return
#endif
SYM_CODE_END(asm_exc_nmi)

.pushsection .text, "ax"
SYM_CODE_START(rewind_stack_do_exit)
	/* Prevent any naive code from trying to unwind to our caller. */
	xorl	%ebp, %ebp

	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esi
	leal	-TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp

	call	do_exit
1:	jmp 1b
SYM_CODE_END(rewind_stack_do_exit)
.popsection