1/* SPDX-License-Identifier: GPL-2.0 */ 2/* 3 * Copyright (C) 1991,1992 Linus Torvalds 4 * 5 * entry_32.S contains the system-call and low-level fault and trap handling routines. 6 * 7 * Stack layout while running C code: 8 * ptrace needs to have all registers on the stack. 9 * If the order here is changed, it needs to be 10 * updated in fork.c:copy_process(), signal.c:do_signal(), 11 * ptrace.c and ptrace.h 12 * 13 * 0(%esp) - %ebx 14 * 4(%esp) - %ecx 15 * 8(%esp) - %edx 16 * C(%esp) - %esi 17 * 10(%esp) - %edi 18 * 14(%esp) - %ebp 19 * 18(%esp) - %eax 20 * 1C(%esp) - %ds 21 * 20(%esp) - %es 22 * 24(%esp) - %fs 23 * 28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS 24 * 2C(%esp) - orig_eax 25 * 30(%esp) - %eip 26 * 34(%esp) - %cs 27 * 38(%esp) - %eflags 28 * 3C(%esp) - %oldesp 29 * 40(%esp) - %oldss 30 */ 31 32#include <linux/linkage.h> 33#include <linux/err.h> 34#include <asm/thread_info.h> 35#include <asm/irqflags.h> 36#include <asm/errno.h> 37#include <asm/segment.h> 38#include <asm/smp.h> 39#include <asm/percpu.h> 40#include <asm/processor-flags.h> 41#include <asm/irq_vectors.h> 42#include <asm/cpufeatures.h> 43#include <asm/alternative-asm.h> 44#include <asm/asm.h> 45#include <asm/smap.h> 46#include <asm/frame.h> 47#include <asm/trapnr.h> 48#include <asm/nospec-branch.h> 49 50#include "calling.h" 51 52 .section .entry.text, "ax" 53 54#define PTI_SWITCH_MASK (1 << PAGE_SHIFT) 55 56/* 57 * User gs save/restore 58 * 59 * %gs is used for userland TLS and kernel only uses it for stack 60 * canary which is required to be at %gs:20 by gcc. Read the comment 61 * at the top of stackprotector.h for more info. 62 * 63 * Local labels 98 and 99 are used. 64 */ 65#ifdef CONFIG_X86_32_LAZY_GS 66 67 /* unfortunately push/pop can't be no-op */ 68.macro PUSH_GS 69 pushl $0 70.endm 71.macro POP_GS pop=0 72 addl $(4 + \pop), %esp 73.endm 74.macro POP_GS_EX 75.endm 76 77 /* all the rest are no-op */ 78.macro PTGS_TO_GS 79.endm 80.macro PTGS_TO_GS_EX 81.endm 82.macro GS_TO_REG reg 83.endm 84.macro REG_TO_PTGS reg 85.endm 86.macro SET_KERNEL_GS reg 87.endm 88 89#else /* CONFIG_X86_32_LAZY_GS */ 90 91.macro PUSH_GS 92 pushl %gs 93.endm 94 95.macro POP_GS pop=0 9698: popl %gs 97 .if \pop <> 0 98 add $\pop, %esp 99 .endif 100.endm 101.macro POP_GS_EX 102.pushsection .fixup, "ax" 10399: movl $0, (%esp) 104 jmp 98b 105.popsection 106 _ASM_EXTABLE(98b, 99b) 107.endm 108 109.macro PTGS_TO_GS 11098: mov PT_GS(%esp), %gs 111.endm 112.macro PTGS_TO_GS_EX 113.pushsection .fixup, "ax" 11499: movl $0, PT_GS(%esp) 115 jmp 98b 116.popsection 117 _ASM_EXTABLE(98b, 99b) 118.endm 119 120.macro GS_TO_REG reg 121 movl %gs, \reg 122.endm 123.macro REG_TO_PTGS reg 124 movl \reg, PT_GS(%esp) 125.endm 126.macro SET_KERNEL_GS reg 127 movl $(__KERNEL_STACK_CANARY), \reg 128 movl \reg, %gs 129.endm 130 131#endif /* CONFIG_X86_32_LAZY_GS */ 132 133/* Unconditionally switch to user cr3 */ 134.macro SWITCH_TO_USER_CR3 scratch_reg:req 135 ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI 136 137 movl %cr3, \scratch_reg 138 orl $PTI_SWITCH_MASK, \scratch_reg 139 movl \scratch_reg, %cr3 140.Lend_\@: 141.endm 142 143.macro BUG_IF_WRONG_CR3 no_user_check=0 144#ifdef CONFIG_DEBUG_ENTRY 145 ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI 146 .if \no_user_check == 0 147 /* coming from usermode? */ 148 testl $USER_SEGMENT_RPL_MASK, PT_CS(%esp) 149 jz .Lend_\@ 150 .endif 151 /* On user-cr3? */ 152 movl %cr3, %eax 153 testl $PTI_SWITCH_MASK, %eax 154 jnz .Lend_\@ 155 /* From userspace with kernel cr3 - BUG */ 156 ud2 157.Lend_\@: 158#endif 159.endm 160 161/* 162 * Switch to kernel cr3 if not already loaded and return current cr3 in 163 * \scratch_reg 164 */ 165.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req 166 ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI 167 movl %cr3, \scratch_reg 168 /* Test if we are already on kernel CR3 */ 169 testl $PTI_SWITCH_MASK, \scratch_reg 170 jz .Lend_\@ 171 andl $(~PTI_SWITCH_MASK), \scratch_reg 172 movl \scratch_reg, %cr3 173 /* Return original CR3 in \scratch_reg */ 174 orl $PTI_SWITCH_MASK, \scratch_reg 175.Lend_\@: 176.endm 177 178#define CS_FROM_ENTRY_STACK (1 << 31) 179#define CS_FROM_USER_CR3 (1 << 30) 180#define CS_FROM_KERNEL (1 << 29) 181#define CS_FROM_ESPFIX (1 << 28) 182 183.macro FIXUP_FRAME 184 /* 185 * The high bits of the CS dword (__csh) are used for CS_FROM_*. 186 * Clear them in case hardware didn't do this for us. 187 */ 188 andl $0x0000ffff, 4*4(%esp) 189 190#ifdef CONFIG_VM86 191 testl $X86_EFLAGS_VM, 5*4(%esp) 192 jnz .Lfrom_usermode_no_fixup_\@ 193#endif 194 testl $USER_SEGMENT_RPL_MASK, 4*4(%esp) 195 jnz .Lfrom_usermode_no_fixup_\@ 196 197 orl $CS_FROM_KERNEL, 4*4(%esp) 198 199 /* 200 * When we're here from kernel mode; the (exception) stack looks like: 201 * 202 * 6*4(%esp) - <previous context> 203 * 5*4(%esp) - flags 204 * 4*4(%esp) - cs 205 * 3*4(%esp) - ip 206 * 2*4(%esp) - orig_eax 207 * 1*4(%esp) - gs / function 208 * 0*4(%esp) - fs 209 * 210 * Lets build a 5 entry IRET frame after that, such that struct pt_regs 211 * is complete and in particular regs->sp is correct. This gives us 212 * the original 6 enties as gap: 213 * 214 * 14*4(%esp) - <previous context> 215 * 13*4(%esp) - gap / flags 216 * 12*4(%esp) - gap / cs 217 * 11*4(%esp) - gap / ip 218 * 10*4(%esp) - gap / orig_eax 219 * 9*4(%esp) - gap / gs / function 220 * 8*4(%esp) - gap / fs 221 * 7*4(%esp) - ss 222 * 6*4(%esp) - sp 223 * 5*4(%esp) - flags 224 * 4*4(%esp) - cs 225 * 3*4(%esp) - ip 226 * 2*4(%esp) - orig_eax 227 * 1*4(%esp) - gs / function 228 * 0*4(%esp) - fs 229 */ 230 231 pushl %ss # ss 232 pushl %esp # sp (points at ss) 233 addl $7*4, (%esp) # point sp back at the previous context 234 pushl 7*4(%esp) # flags 235 pushl 7*4(%esp) # cs 236 pushl 7*4(%esp) # ip 237 pushl 7*4(%esp) # orig_eax 238 pushl 7*4(%esp) # gs / function 239 pushl 7*4(%esp) # fs 240.Lfrom_usermode_no_fixup_\@: 241.endm 242 243.macro IRET_FRAME 244 /* 245 * We're called with %ds, %es, %fs, and %gs from the interrupted 246 * frame, so we shouldn't use them. Also, we may be in ESPFIX 247 * mode and therefore have a nonzero SS base and an offset ESP, 248 * so any attempt to access the stack needs to use SS. (except for 249 * accesses through %esp, which automatically use SS.) 250 */ 251 testl $CS_FROM_KERNEL, 1*4(%esp) 252 jz .Lfinished_frame_\@ 253 254 /* 255 * Reconstruct the 3 entry IRET frame right after the (modified) 256 * regs->sp without lowering %esp in between, such that an NMI in the 257 * middle doesn't scribble our stack. 258 */ 259 pushl %eax 260 pushl %ecx 261 movl 5*4(%esp), %eax # (modified) regs->sp 262 263 movl 4*4(%esp), %ecx # flags 264 movl %ecx, %ss:-1*4(%eax) 265 266 movl 3*4(%esp), %ecx # cs 267 andl $0x0000ffff, %ecx 268 movl %ecx, %ss:-2*4(%eax) 269 270 movl 2*4(%esp), %ecx # ip 271 movl %ecx, %ss:-3*4(%eax) 272 273 movl 1*4(%esp), %ecx # eax 274 movl %ecx, %ss:-4*4(%eax) 275 276 popl %ecx 277 lea -4*4(%eax), %esp 278 popl %eax 279.Lfinished_frame_\@: 280.endm 281 282.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0 unwind_espfix=0 283 cld 284.if \skip_gs == 0 285 PUSH_GS 286.endif 287 pushl %fs 288 289 pushl %eax 290 movl $(__KERNEL_PERCPU), %eax 291 movl %eax, %fs 292.if \unwind_espfix > 0 293 UNWIND_ESPFIX_STACK 294.endif 295 popl %eax 296 297 FIXUP_FRAME 298 pushl %es 299 pushl %ds 300 pushl \pt_regs_ax 301 pushl %ebp 302 pushl %edi 303 pushl %esi 304 pushl %edx 305 pushl %ecx 306 pushl %ebx 307 movl $(__USER_DS), %edx 308 movl %edx, %ds 309 movl %edx, %es 310.if \skip_gs == 0 311 SET_KERNEL_GS %edx 312.endif 313 /* Switch to kernel stack if necessary */ 314.if \switch_stacks > 0 315 SWITCH_TO_KERNEL_STACK 316.endif 317.endm 318 319.macro SAVE_ALL_NMI cr3_reg:req unwind_espfix=0 320 SAVE_ALL unwind_espfix=\unwind_espfix 321 322 BUG_IF_WRONG_CR3 323 324 /* 325 * Now switch the CR3 when PTI is enabled. 326 * 327 * We can enter with either user or kernel cr3, the code will 328 * store the old cr3 in \cr3_reg and switches to the kernel cr3 329 * if necessary. 330 */ 331 SWITCH_TO_KERNEL_CR3 scratch_reg=\cr3_reg 332 333.Lend_\@: 334.endm 335 336.macro RESTORE_INT_REGS 337 popl %ebx 338 popl %ecx 339 popl %edx 340 popl %esi 341 popl %edi 342 popl %ebp 343 popl %eax 344.endm 345 346.macro RESTORE_REGS pop=0 347 RESTORE_INT_REGS 3481: popl %ds 3492: popl %es 3503: popl %fs 351 POP_GS \pop 352 IRET_FRAME 353.pushsection .fixup, "ax" 3544: movl $0, (%esp) 355 jmp 1b 3565: movl $0, (%esp) 357 jmp 2b 3586: movl $0, (%esp) 359 jmp 3b 360.popsection 361 _ASM_EXTABLE(1b, 4b) 362 _ASM_EXTABLE(2b, 5b) 363 _ASM_EXTABLE(3b, 6b) 364 POP_GS_EX 365.endm 366 367.macro RESTORE_ALL_NMI cr3_reg:req pop=0 368 /* 369 * Now switch the CR3 when PTI is enabled. 370 * 371 * We enter with kernel cr3 and switch the cr3 to the value 372 * stored on \cr3_reg, which is either a user or a kernel cr3. 373 */ 374 ALTERNATIVE "jmp .Lswitched_\@", "", X86_FEATURE_PTI 375 376 testl $PTI_SWITCH_MASK, \cr3_reg 377 jz .Lswitched_\@ 378 379 /* User cr3 in \cr3_reg - write it to hardware cr3 */ 380 movl \cr3_reg, %cr3 381 382.Lswitched_\@: 383 384 BUG_IF_WRONG_CR3 385 386 RESTORE_REGS pop=\pop 387.endm 388 389.macro CHECK_AND_APPLY_ESPFIX 390#ifdef CONFIG_X86_ESPFIX32 391#define GDT_ESPFIX_OFFSET (GDT_ENTRY_ESPFIX_SS * 8) 392#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + GDT_ESPFIX_OFFSET 393 394 ALTERNATIVE "jmp .Lend_\@", "", X86_BUG_ESPFIX 395 396 movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS 397 /* 398 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we 399 * are returning to the kernel. 400 * See comments in process.c:copy_thread() for details. 401 */ 402 movb PT_OLDSS(%esp), %ah 403 movb PT_CS(%esp), %al 404 andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax 405 cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax 406 jne .Lend_\@ # returning to user-space with LDT SS 407 408 /* 409 * Setup and switch to ESPFIX stack 410 * 411 * We're returning to userspace with a 16 bit stack. The CPU will not 412 * restore the high word of ESP for us on executing iret... This is an 413 * "official" bug of all the x86-compatible CPUs, which we can work 414 * around to make dosemu and wine happy. We do this by preloading the 415 * high word of ESP with the high word of the userspace ESP while 416 * compensating for the offset by changing to the ESPFIX segment with 417 * a base address that matches for the difference. 418 */ 419 mov %esp, %edx /* load kernel esp */ 420 mov PT_OLDESP(%esp), %eax /* load userspace esp */ 421 mov %dx, %ax /* eax: new kernel esp */ 422 sub %eax, %edx /* offset (low word is 0) */ 423 shr $16, %edx 424 mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */ 425 mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */ 426 pushl $__ESPFIX_SS 427 pushl %eax /* new kernel esp */ 428 /* 429 * Disable interrupts, but do not irqtrace this section: we 430 * will soon execute iret and the tracer was already set to 431 * the irqstate after the IRET: 432 */ 433 DISABLE_INTERRUPTS(CLBR_ANY) 434 lss (%esp), %esp /* switch to espfix segment */ 435.Lend_\@: 436#endif /* CONFIG_X86_ESPFIX32 */ 437.endm 438 439/* 440 * Called with pt_regs fully populated and kernel segments loaded, 441 * so we can access PER_CPU and use the integer registers. 442 * 443 * We need to be very careful here with the %esp switch, because an NMI 444 * can happen everywhere. If the NMI handler finds itself on the 445 * entry-stack, it will overwrite the task-stack and everything we 446 * copied there. So allocate the stack-frame on the task-stack and 447 * switch to it before we do any copying. 448 */ 449 450.macro SWITCH_TO_KERNEL_STACK 451 452 ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV 453 454 BUG_IF_WRONG_CR3 455 456 SWITCH_TO_KERNEL_CR3 scratch_reg=%eax 457 458 /* 459 * %eax now contains the entry cr3 and we carry it forward in 460 * that register for the time this macro runs 461 */ 462 463 /* Are we on the entry stack? Bail out if not! */ 464 movl PER_CPU_VAR(cpu_entry_area), %ecx 465 addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx 466 subl %esp, %ecx /* ecx = (end of entry_stack) - esp */ 467 cmpl $SIZEOF_entry_stack, %ecx 468 jae .Lend_\@ 469 470 /* Load stack pointer into %esi and %edi */ 471 movl %esp, %esi 472 movl %esi, %edi 473 474 /* Move %edi to the top of the entry stack */ 475 andl $(MASK_entry_stack), %edi 476 addl $(SIZEOF_entry_stack), %edi 477 478 /* Load top of task-stack into %edi */ 479 movl TSS_entry2task_stack(%edi), %edi 480 481 /* Special case - entry from kernel mode via entry stack */ 482#ifdef CONFIG_VM86 483 movl PT_EFLAGS(%esp), %ecx # mix EFLAGS and CS 484 movb PT_CS(%esp), %cl 485 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %ecx 486#else 487 movl PT_CS(%esp), %ecx 488 andl $SEGMENT_RPL_MASK, %ecx 489#endif 490 cmpl $USER_RPL, %ecx 491 jb .Lentry_from_kernel_\@ 492 493 /* Bytes to copy */ 494 movl $PTREGS_SIZE, %ecx 495 496#ifdef CONFIG_VM86 497 testl $X86_EFLAGS_VM, PT_EFLAGS(%esi) 498 jz .Lcopy_pt_regs_\@ 499 500 /* 501 * Stack-frame contains 4 additional segment registers when 502 * coming from VM86 mode 503 */ 504 addl $(4 * 4), %ecx 505 506#endif 507.Lcopy_pt_regs_\@: 508 509 /* Allocate frame on task-stack */ 510 subl %ecx, %edi 511 512 /* Switch to task-stack */ 513 movl %edi, %esp 514 515 /* 516 * We are now on the task-stack and can safely copy over the 517 * stack-frame 518 */ 519 shrl $2, %ecx 520 cld 521 rep movsl 522 523 jmp .Lend_\@ 524 525.Lentry_from_kernel_\@: 526 527 /* 528 * This handles the case when we enter the kernel from 529 * kernel-mode and %esp points to the entry-stack. When this 530 * happens we need to switch to the task-stack to run C code, 531 * but switch back to the entry-stack again when we approach 532 * iret and return to the interrupted code-path. This usually 533 * happens when we hit an exception while restoring user-space 534 * segment registers on the way back to user-space or when the 535 * sysenter handler runs with eflags.tf set. 536 * 537 * When we switch to the task-stack here, we can't trust the 538 * contents of the entry-stack anymore, as the exception handler 539 * might be scheduled out or moved to another CPU. Therefore we 540 * copy the complete entry-stack to the task-stack and set a 541 * marker in the iret-frame (bit 31 of the CS dword) to detect 542 * what we've done on the iret path. 543 * 544 * On the iret path we copy everything back and switch to the 545 * entry-stack, so that the interrupted kernel code-path 546 * continues on the same stack it was interrupted with. 547 * 548 * Be aware that an NMI can happen anytime in this code. 549 * 550 * %esi: Entry-Stack pointer (same as %esp) 551 * %edi: Top of the task stack 552 * %eax: CR3 on kernel entry 553 */ 554 555 /* Calculate number of bytes on the entry stack in %ecx */ 556 movl %esi, %ecx 557 558 /* %ecx to the top of entry-stack */ 559 andl $(MASK_entry_stack), %ecx 560 addl $(SIZEOF_entry_stack), %ecx 561 562 /* Number of bytes on the entry stack to %ecx */ 563 sub %esi, %ecx 564 565 /* Mark stackframe as coming from entry stack */ 566 orl $CS_FROM_ENTRY_STACK, PT_CS(%esp) 567 568 /* 569 * Test the cr3 used to enter the kernel and add a marker 570 * so that we can switch back to it before iret. 571 */ 572 testl $PTI_SWITCH_MASK, %eax 573 jz .Lcopy_pt_regs_\@ 574 orl $CS_FROM_USER_CR3, PT_CS(%esp) 575 576 /* 577 * %esi and %edi are unchanged, %ecx contains the number of 578 * bytes to copy. The code at .Lcopy_pt_regs_\@ will allocate 579 * the stack-frame on task-stack and copy everything over 580 */ 581 jmp .Lcopy_pt_regs_\@ 582 583.Lend_\@: 584.endm 585 586/* 587 * Switch back from the kernel stack to the entry stack. 588 * 589 * The %esp register must point to pt_regs on the task stack. It will 590 * first calculate the size of the stack-frame to copy, depending on 591 * whether we return to VM86 mode or not. With that it uses 'rep movsl' 592 * to copy the contents of the stack over to the entry stack. 593 * 594 * We must be very careful here, as we can't trust the contents of the 595 * task-stack once we switched to the entry-stack. When an NMI happens 596 * while on the entry-stack, the NMI handler will switch back to the top 597 * of the task stack, overwriting our stack-frame we are about to copy. 598 * Therefore we switch the stack only after everything is copied over. 599 */ 600.macro SWITCH_TO_ENTRY_STACK 601 602 ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV 603 604 /* Bytes to copy */ 605 movl $PTREGS_SIZE, %ecx 606 607#ifdef CONFIG_VM86 608 testl $(X86_EFLAGS_VM), PT_EFLAGS(%esp) 609 jz .Lcopy_pt_regs_\@ 610 611 /* Additional 4 registers to copy when returning to VM86 mode */ 612 addl $(4 * 4), %ecx 613 614.Lcopy_pt_regs_\@: 615#endif 616 617 /* Initialize source and destination for movsl */ 618 movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi 619 subl %ecx, %edi 620 movl %esp, %esi 621 622 /* Save future stack pointer in %ebx */ 623 movl %edi, %ebx 624 625 /* Copy over the stack-frame */ 626 shrl $2, %ecx 627 cld 628 rep movsl 629 630 /* 631 * Switch to entry-stack - needs to happen after everything is 632 * copied because the NMI handler will overwrite the task-stack 633 * when on entry-stack 634 */ 635 movl %ebx, %esp 636 637.Lend_\@: 638.endm 639 640/* 641 * This macro handles the case when we return to kernel-mode on the iret 642 * path and have to switch back to the entry stack and/or user-cr3 643 * 644 * See the comments below the .Lentry_from_kernel_\@ label in the 645 * SWITCH_TO_KERNEL_STACK macro for more details. 646 */ 647.macro PARANOID_EXIT_TO_KERNEL_MODE 648 649 /* 650 * Test if we entered the kernel with the entry-stack. Most 651 * likely we did not, because this code only runs on the 652 * return-to-kernel path. 653 */ 654 testl $CS_FROM_ENTRY_STACK, PT_CS(%esp) 655 jz .Lend_\@ 656 657 /* Unlikely slow-path */ 658 659 /* Clear marker from stack-frame */ 660 andl $(~CS_FROM_ENTRY_STACK), PT_CS(%esp) 661 662 /* Copy the remaining task-stack contents to entry-stack */ 663 movl %esp, %esi 664 movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi 665 666 /* Bytes on the task-stack to ecx */ 667 movl PER_CPU_VAR(cpu_tss_rw + TSS_sp1), %ecx 668 subl %esi, %ecx 669 670 /* Allocate stack-frame on entry-stack */ 671 subl %ecx, %edi 672 673 /* 674 * Save future stack-pointer, we must not switch until the 675 * copy is done, otherwise the NMI handler could destroy the 676 * contents of the task-stack we are about to copy. 677 */ 678 movl %edi, %ebx 679 680 /* Do the copy */ 681 shrl $2, %ecx 682 cld 683 rep movsl 684 685 /* Safe to switch to entry-stack now */ 686 movl %ebx, %esp 687 688 /* 689 * We came from entry-stack and need to check if we also need to 690 * switch back to user cr3. 691 */ 692 testl $CS_FROM_USER_CR3, PT_CS(%esp) 693 jz .Lend_\@ 694 695 /* Clear marker from stack-frame */ 696 andl $(~CS_FROM_USER_CR3), PT_CS(%esp) 697 698 SWITCH_TO_USER_CR3 scratch_reg=%eax 699 700.Lend_\@: 701.endm 702 703/** 704 * idtentry - Macro to generate entry stubs for simple IDT entries 705 * @vector: Vector number 706 * @asmsym: ASM symbol for the entry point 707 * @cfunc: C function to be called 708 * @has_error_code: Hardware pushed error code on stack 709 */ 710.macro idtentry vector asmsym cfunc has_error_code:req 711SYM_CODE_START(\asmsym) 712 ASM_CLAC 713 cld 714 715 .if \has_error_code == 0 716 pushl $0 /* Clear the error code */ 717 .endif 718 719 /* Push the C-function address into the GS slot */ 720 pushl $\cfunc 721 /* Invoke the common exception entry */ 722 jmp handle_exception 723SYM_CODE_END(\asmsym) 724.endm 725 726.macro idtentry_irq vector cfunc 727 .p2align CONFIG_X86_L1_CACHE_SHIFT 728SYM_CODE_START_LOCAL(asm_\cfunc) 729 ASM_CLAC 730 SAVE_ALL switch_stacks=1 731 ENCODE_FRAME_POINTER 732 movl %esp, %eax 733 movl PT_ORIG_EAX(%esp), %edx /* get the vector from stack */ 734 movl $-1, PT_ORIG_EAX(%esp) /* no syscall to restart */ 735 call \cfunc 736 jmp handle_exception_return 737SYM_CODE_END(asm_\cfunc) 738.endm 739 740.macro idtentry_sysvec vector cfunc 741 idtentry \vector asm_\cfunc \cfunc has_error_code=0 742.endm 743 744/* 745 * Include the defines which emit the idt entries which are shared 746 * shared between 32 and 64 bit and emit the __irqentry_text_* markers 747 * so the stacktrace boundary checks work. 748 */ 749 .align 16 750 .globl __irqentry_text_start 751__irqentry_text_start: 752 753#include <asm/idtentry.h> 754 755 .align 16 756 .globl __irqentry_text_end 757__irqentry_text_end: 758 759/* 760 * %eax: prev task 761 * %edx: next task 762 */ 763.pushsection .text, "ax" 764SYM_CODE_START(__switch_to_asm) 765 /* 766 * Save callee-saved registers 767 * This must match the order in struct inactive_task_frame 768 */ 769 pushl %ebp 770 pushl %ebx 771 pushl %edi 772 pushl %esi 773 /* 774 * Flags are saved to prevent AC leakage. This could go 775 * away if objtool would have 32bit support to verify 776 * the STAC/CLAC correctness. 777 */ 778 pushfl 779 780 /* switch stack */ 781 movl %esp, TASK_threadsp(%eax) 782 movl TASK_threadsp(%edx), %esp 783 784#ifdef CONFIG_STACKPROTECTOR 785 movl TASK_stack_canary(%edx), %ebx 786 movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset 787#endif 788 789#ifdef CONFIG_RETPOLINE 790 /* 791 * When switching from a shallower to a deeper call stack 792 * the RSB may either underflow or use entries populated 793 * with userspace addresses. On CPUs where those concerns 794 * exist, overwrite the RSB with entries which capture 795 * speculative execution to prevent attack. 796 */ 797 FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW 798#endif 799 800 /* Restore flags or the incoming task to restore AC state. */ 801 popfl 802 /* restore callee-saved registers */ 803 popl %esi 804 popl %edi 805 popl %ebx 806 popl %ebp 807 808 jmp __switch_to 809SYM_CODE_END(__switch_to_asm) 810.popsection 811 812/* 813 * The unwinder expects the last frame on the stack to always be at the same 814 * offset from the end of the page, which allows it to validate the stack. 815 * Calling schedule_tail() directly would break that convention because its an 816 * asmlinkage function so its argument has to be pushed on the stack. This 817 * wrapper creates a proper "end of stack" frame header before the call. 818 */ 819.pushsection .text, "ax" 820SYM_FUNC_START(schedule_tail_wrapper) 821 FRAME_BEGIN 822 823 pushl %eax 824 call schedule_tail 825 popl %eax 826 827 FRAME_END 828 ret 829SYM_FUNC_END(schedule_tail_wrapper) 830.popsection 831 832/* 833 * A newly forked process directly context switches into this address. 834 * 835 * eax: prev task we switched from 836 * ebx: kernel thread func (NULL for user thread) 837 * edi: kernel thread arg 838 */ 839.pushsection .text, "ax" 840SYM_CODE_START(ret_from_fork) 841 call schedule_tail_wrapper 842 843 testl %ebx, %ebx 844 jnz 1f /* kernel threads are uncommon */ 845 8462: 847 /* When we fork, we trace the syscall return in the child, too. */ 848 movl %esp, %eax 849 call syscall_return_slowpath 850 jmp .Lsyscall_32_done 851 852 /* kernel thread */ 8531: movl %edi, %eax 854 CALL_NOSPEC ebx 855 /* 856 * A kernel thread is allowed to return here after successfully 857 * calling do_execve(). Exit to userspace to complete the execve() 858 * syscall. 859 */ 860 movl $0, PT_EAX(%esp) 861 jmp 2b 862SYM_CODE_END(ret_from_fork) 863.popsection 864 865SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE) 866/* 867 * All code from here through __end_SYSENTER_singlestep_region is subject 868 * to being single-stepped if a user program sets TF and executes SYSENTER. 869 * There is absolutely nothing that we can do to prevent this from happening 870 * (thanks Intel!). To keep our handling of this situation as simple as 871 * possible, we handle TF just like AC and NT, except that our #DB handler 872 * will ignore all of the single-step traps generated in this range. 873 */ 874 875#ifdef CONFIG_XEN_PV 876/* 877 * Xen doesn't set %esp to be precisely what the normal SYSENTER 878 * entry point expects, so fix it up before using the normal path. 879 */ 880SYM_CODE_START(xen_sysenter_target) 881 addl $5*4, %esp /* remove xen-provided frame */ 882 jmp .Lsysenter_past_esp 883SYM_CODE_END(xen_sysenter_target) 884#endif 885 886/* 887 * 32-bit SYSENTER entry. 888 * 889 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here 890 * if X86_FEATURE_SEP is available. This is the preferred system call 891 * entry on 32-bit systems. 892 * 893 * The SYSENTER instruction, in principle, should *only* occur in the 894 * vDSO. In practice, a small number of Android devices were shipped 895 * with a copy of Bionic that inlined a SYSENTER instruction. This 896 * never happened in any of Google's Bionic versions -- it only happened 897 * in a narrow range of Intel-provided versions. 898 * 899 * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs. 900 * IF and VM in RFLAGS are cleared (IOW: interrupts are off). 901 * SYSENTER does not save anything on the stack, 902 * and does not save old EIP (!!!), ESP, or EFLAGS. 903 * 904 * To avoid losing track of EFLAGS.VM (and thus potentially corrupting 905 * user and/or vm86 state), we explicitly disable the SYSENTER 906 * instruction in vm86 mode by reprogramming the MSRs. 907 * 908 * Arguments: 909 * eax system call number 910 * ebx arg1 911 * ecx arg2 912 * edx arg3 913 * esi arg4 914 * edi arg5 915 * ebp user stack 916 * 0(%ebp) arg6 917 */ 918SYM_FUNC_START(entry_SYSENTER_32) 919 /* 920 * On entry-stack with all userspace-regs live - save and 921 * restore eflags and %eax to use it as scratch-reg for the cr3 922 * switch. 923 */ 924 pushfl 925 pushl %eax 926 BUG_IF_WRONG_CR3 no_user_check=1 927 SWITCH_TO_KERNEL_CR3 scratch_reg=%eax 928 popl %eax 929 popfl 930 931 /* Stack empty again, switch to task stack */ 932 movl TSS_entry2task_stack(%esp), %esp 933 934.Lsysenter_past_esp: 935 pushl $__USER_DS /* pt_regs->ss */ 936 pushl %ebp /* pt_regs->sp (stashed in bp) */ 937 pushfl /* pt_regs->flags (except IF = 0) */ 938 orl $X86_EFLAGS_IF, (%esp) /* Fix IF */ 939 pushl $__USER_CS /* pt_regs->cs */ 940 pushl $0 /* pt_regs->ip = 0 (placeholder) */ 941 pushl %eax /* pt_regs->orig_ax */ 942 SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest, stack already switched */ 943 944 /* 945 * SYSENTER doesn't filter flags, so we need to clear NT, AC 946 * and TF ourselves. To save a few cycles, we can check whether 947 * either was set instead of doing an unconditional popfq. 948 * This needs to happen before enabling interrupts so that 949 * we don't get preempted with NT set. 950 * 951 * If TF is set, we will single-step all the way to here -- do_debug 952 * will ignore all the traps. (Yes, this is slow, but so is 953 * single-stepping in general. This allows us to avoid having 954 * a more complicated code to handle the case where a user program 955 * forces us to single-step through the SYSENTER entry code.) 956 * 957 * NB.: .Lsysenter_fix_flags is a label with the code under it moved 958 * out-of-line as an optimization: NT is unlikely to be set in the 959 * majority of the cases and instead of polluting the I$ unnecessarily, 960 * we're keeping that code behind a branch which will predict as 961 * not-taken and therefore its instructions won't be fetched. 962 */ 963 testl $X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp) 964 jnz .Lsysenter_fix_flags 965.Lsysenter_flags_fixed: 966 967 movl %esp, %eax 968 call do_fast_syscall_32 969 /* XEN PV guests always use IRET path */ 970 ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \ 971 "jmp .Lsyscall_32_done", X86_FEATURE_XENPV 972 973 STACKLEAK_ERASE 974 975 /* Opportunistic SYSEXIT */ 976 977 /* 978 * Setup entry stack - we keep the pointer in %eax and do the 979 * switch after almost all user-state is restored. 980 */ 981 982 /* Load entry stack pointer and allocate frame for eflags/eax */ 983 movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %eax 984 subl $(2*4), %eax 985 986 /* Copy eflags and eax to entry stack */ 987 movl PT_EFLAGS(%esp), %edi 988 movl PT_EAX(%esp), %esi 989 movl %edi, (%eax) 990 movl %esi, 4(%eax) 991 992 /* Restore user registers and segments */ 993 movl PT_EIP(%esp), %edx /* pt_regs->ip */ 994 movl PT_OLDESP(%esp), %ecx /* pt_regs->sp */ 9951: mov PT_FS(%esp), %fs 996 PTGS_TO_GS 997 998 popl %ebx /* pt_regs->bx */ 999 addl $2*4, %esp /* skip pt_regs->cx and pt_regs->dx */ 1000 popl %esi /* pt_regs->si */ 1001 popl %edi /* pt_regs->di */ 1002 popl %ebp /* pt_regs->bp */ 1003 1004 /* Switch to entry stack */ 1005 movl %eax, %esp 1006 1007 /* Now ready to switch the cr3 */ 1008 SWITCH_TO_USER_CR3 scratch_reg=%eax 1009 1010 /* 1011 * Restore all flags except IF. (We restore IF separately because 1012 * STI gives a one-instruction window in which we won't be interrupted, 1013 * whereas POPF does not.) 1014 */ 1015 btrl $X86_EFLAGS_IF_BIT, (%esp) 1016 BUG_IF_WRONG_CR3 no_user_check=1 1017 popfl 1018 popl %eax 1019 1020 /* 1021 * Return back to the vDSO, which will pop ecx and edx. 1022 * Don't bother with DS and ES (they already contain __USER_DS). 1023 */ 1024 sti 1025 sysexit 1026 1027.pushsection .fixup, "ax" 10282: movl $0, PT_FS(%esp) 1029 jmp 1b 1030.popsection 1031 _ASM_EXTABLE(1b, 2b) 1032 PTGS_TO_GS_EX 1033 1034.Lsysenter_fix_flags: 1035 pushl $X86_EFLAGS_FIXED 1036 popfl 1037 jmp .Lsysenter_flags_fixed 1038SYM_ENTRY(__end_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE) 1039SYM_FUNC_END(entry_SYSENTER_32) 1040 1041/* 1042 * 32-bit legacy system call entry. 1043 * 1044 * 32-bit x86 Linux system calls traditionally used the INT $0x80 1045 * instruction. INT $0x80 lands here. 1046 * 1047 * This entry point can be used by any 32-bit perform system calls. 1048 * Instances of INT $0x80 can be found inline in various programs and 1049 * libraries. It is also used by the vDSO's __kernel_vsyscall 1050 * fallback for hardware that doesn't support a faster entry method. 1051 * Restarted 32-bit system calls also fall back to INT $0x80 1052 * regardless of what instruction was originally used to do the system 1053 * call. (64-bit programs can use INT $0x80 as well, but they can 1054 * only run on 64-bit kernels and therefore land in 1055 * entry_INT80_compat.) 1056 * 1057 * This is considered a slow path. It is not used by most libc 1058 * implementations on modern hardware except during process startup. 1059 * 1060 * Arguments: 1061 * eax system call number 1062 * ebx arg1 1063 * ecx arg2 1064 * edx arg3 1065 * esi arg4 1066 * edi arg5 1067 * ebp arg6 1068 */ 1069SYM_FUNC_START(entry_INT80_32) 1070 ASM_CLAC 1071 pushl %eax /* pt_regs->orig_ax */ 1072 1073 SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1 /* save rest */ 1074 1075 movl %esp, %eax 1076 call do_int80_syscall_32 1077.Lsyscall_32_done: 1078 STACKLEAK_ERASE 1079 1080restore_all_switch_stack: 1081 SWITCH_TO_ENTRY_STACK 1082 CHECK_AND_APPLY_ESPFIX 1083 1084 /* Switch back to user CR3 */ 1085 SWITCH_TO_USER_CR3 scratch_reg=%eax 1086 1087 BUG_IF_WRONG_CR3 1088 1089 /* Restore user state */ 1090 RESTORE_REGS pop=4 # skip orig_eax/error_code 1091.Lirq_return: 1092 /* 1093 * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization 1094 * when returning from IPI handler and when returning from 1095 * scheduler to user-space. 1096 */ 1097 INTERRUPT_RETURN 1098 1099.section .fixup, "ax" 1100SYM_CODE_START(asm_iret_error) 1101 pushl $0 # no error code 1102 pushl $iret_error 1103 1104#ifdef CONFIG_DEBUG_ENTRY 1105 /* 1106 * The stack-frame here is the one that iret faulted on, so its a 1107 * return-to-user frame. We are on kernel-cr3 because we come here from 1108 * the fixup code. This confuses the CR3 checker, so switch to user-cr3 1109 * as the checker expects it. 1110 */ 1111 pushl %eax 1112 SWITCH_TO_USER_CR3 scratch_reg=%eax 1113 popl %eax 1114#endif 1115 1116 jmp handle_exception 1117SYM_CODE_END(asm_iret_error) 1118.previous 1119 _ASM_EXTABLE(.Lirq_return, asm_iret_error) 1120SYM_FUNC_END(entry_INT80_32) 1121 1122.macro FIXUP_ESPFIX_STACK 1123/* 1124 * Switch back for ESPFIX stack to the normal zerobased stack 1125 * 1126 * We can't call C functions using the ESPFIX stack. This code reads 1127 * the high word of the segment base from the GDT and swiches to the 1128 * normal stack and adjusts ESP with the matching offset. 1129 * 1130 * We might be on user CR3 here, so percpu data is not mapped and we can't 1131 * access the GDT through the percpu segment. Instead, use SGDT to find 1132 * the cpu_entry_area alias of the GDT. 1133 */ 1134#ifdef CONFIG_X86_ESPFIX32 1135 /* fixup the stack */ 1136 pushl %ecx 1137 subl $2*4, %esp 1138 sgdt (%esp) 1139 movl 2(%esp), %ecx /* GDT address */ 1140 /* 1141 * Careful: ECX is a linear pointer, so we need to force base 1142 * zero. %cs is the only known-linear segment we have right now. 1143 */ 1144 mov %cs:GDT_ESPFIX_OFFSET + 4(%ecx), %al /* bits 16..23 */ 1145 mov %cs:GDT_ESPFIX_OFFSET + 7(%ecx), %ah /* bits 24..31 */ 1146 shl $16, %eax 1147 addl $2*4, %esp 1148 popl %ecx 1149 addl %esp, %eax /* the adjusted stack pointer */ 1150 pushl $__KERNEL_DS 1151 pushl %eax 1152 lss (%esp), %esp /* switch to the normal stack segment */ 1153#endif 1154.endm 1155 1156.macro UNWIND_ESPFIX_STACK 1157 /* It's safe to clobber %eax, all other regs need to be preserved */ 1158#ifdef CONFIG_X86_ESPFIX32 1159 movl %ss, %eax 1160 /* see if on espfix stack */ 1161 cmpw $__ESPFIX_SS, %ax 1162 jne .Lno_fixup_\@ 1163 /* switch to normal stack */ 1164 FIXUP_ESPFIX_STACK 1165.Lno_fixup_\@: 1166#endif 1167.endm 1168 1169#ifdef CONFIG_PARAVIRT 1170SYM_CODE_START(native_iret) 1171 iret 1172 _ASM_EXTABLE(native_iret, asm_iret_error) 1173SYM_CODE_END(native_iret) 1174#endif 1175 1176#ifdef CONFIG_XEN_PV 1177/* 1178 * See comment in entry_64.S for further explanation 1179 * 1180 * Note: This is not an actual IDT entry point. It's a XEN specific entry 1181 * point and therefore named to match the 64-bit trampoline counterpart. 1182 */ 1183SYM_FUNC_START(xen_asm_exc_xen_hypervisor_callback) 1184 /* 1185 * Check to see if we got the event in the critical 1186 * region in xen_iret_direct, after we've reenabled 1187 * events and checked for pending events. This simulates 1188 * iret instruction's behaviour where it delivers a 1189 * pending interrupt when enabling interrupts: 1190 */ 1191 cmpl $xen_iret_start_crit, (%esp) 1192 jb 1f 1193 cmpl $xen_iret_end_crit, (%esp) 1194 jae 1f 1195 call xen_iret_crit_fixup 11961: 1197 pushl $-1 /* orig_ax = -1 => not a system call */ 1198 SAVE_ALL 1199 ENCODE_FRAME_POINTER 1200 1201 mov %esp, %eax 1202 call xen_pv_evtchn_do_upcall 1203 jmp handle_exception_return 1204SYM_FUNC_END(xen_asm_exc_xen_hypervisor_callback) 1205 1206/* 1207 * Hypervisor uses this for application faults while it executes. 1208 * We get here for two reasons: 1209 * 1. Fault while reloading DS, ES, FS or GS 1210 * 2. Fault while executing IRET 1211 * Category 1 we fix up by reattempting the load, and zeroing the segment 1212 * register if the load fails. 1213 * Category 2 we fix up by jumping to do_iret_error. We cannot use the 1214 * normal Linux return path in this case because if we use the IRET hypercall 1215 * to pop the stack frame we end up in an infinite loop of failsafe callbacks. 1216 * We distinguish between categories by maintaining a status value in EAX. 1217 */ 1218SYM_FUNC_START(xen_failsafe_callback) 1219 pushl %eax 1220 movl $1, %eax 12211: mov 4(%esp), %ds 12222: mov 8(%esp), %es 12233: mov 12(%esp), %fs 12244: mov 16(%esp), %gs 1225 /* EAX == 0 => Category 1 (Bad segment) 1226 EAX != 0 => Category 2 (Bad IRET) */ 1227 testl %eax, %eax 1228 popl %eax 1229 lea 16(%esp), %esp 1230 jz 5f 1231 jmp asm_iret_error 12325: pushl $-1 /* orig_ax = -1 => not a system call */ 1233 SAVE_ALL 1234 ENCODE_FRAME_POINTER 1235 jmp handle_exception_return 1236 1237.section .fixup, "ax" 12386: xorl %eax, %eax 1239 movl %eax, 4(%esp) 1240 jmp 1b 12417: xorl %eax, %eax 1242 movl %eax, 8(%esp) 1243 jmp 2b 12448: xorl %eax, %eax 1245 movl %eax, 12(%esp) 1246 jmp 3b 12479: xorl %eax, %eax 1248 movl %eax, 16(%esp) 1249 jmp 4b 1250.previous 1251 _ASM_EXTABLE(1b, 6b) 1252 _ASM_EXTABLE(2b, 7b) 1253 _ASM_EXTABLE(3b, 8b) 1254 _ASM_EXTABLE(4b, 9b) 1255SYM_FUNC_END(xen_failsafe_callback) 1256#endif /* CONFIG_XEN_PV */ 1257 1258SYM_CODE_START_LOCAL_NOALIGN(handle_exception) 1259 /* the function address is in %gs's slot on the stack */ 1260 SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1 1261 ENCODE_FRAME_POINTER 1262 1263 /* fixup %gs */ 1264 GS_TO_REG %ecx 1265 movl PT_GS(%esp), %edi # get the function address 1266 REG_TO_PTGS %ecx 1267 SET_KERNEL_GS %ecx 1268 1269 /* fixup orig %eax */ 1270 movl PT_ORIG_EAX(%esp), %edx # get the error code 1271 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart 1272 1273 movl %esp, %eax # pt_regs pointer 1274 CALL_NOSPEC edi 1275 1276handle_exception_return: 1277#ifdef CONFIG_VM86 1278 movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS 1279 movb PT_CS(%esp), %al 1280 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax 1281#else 1282 /* 1283 * We can be coming here from child spawned by kernel_thread(). 1284 */ 1285 movl PT_CS(%esp), %eax 1286 andl $SEGMENT_RPL_MASK, %eax 1287#endif 1288 cmpl $USER_RPL, %eax # returning to v8086 or userspace ? 1289 jnb ret_to_user 1290 1291 PARANOID_EXIT_TO_KERNEL_MODE 1292 BUG_IF_WRONG_CR3 1293 RESTORE_REGS 4 1294 jmp .Lirq_return 1295 1296ret_to_user: 1297 movl %esp, %eax 1298 jmp restore_all_switch_stack 1299SYM_CODE_END(handle_exception) 1300 1301SYM_CODE_START(asm_exc_double_fault) 13021: 1303 /* 1304 * This is a task gate handler, not an interrupt gate handler. 1305 * The error code is on the stack, but the stack is otherwise 1306 * empty. Interrupts are off. Our state is sane with the following 1307 * exceptions: 1308 * 1309 * - CR0.TS is set. "TS" literally means "task switched". 1310 * - EFLAGS.NT is set because we're a "nested task". 1311 * - The doublefault TSS has back_link set and has been marked busy. 1312 * - TR points to the doublefault TSS and the normal TSS is busy. 1313 * - CR3 is the normal kernel PGD. This would be delightful, except 1314 * that the CPU didn't bother to save the old CR3 anywhere. This 1315 * would make it very awkward to return back to the context we came 1316 * from. 1317 * 1318 * The rest of EFLAGS is sanitized for us, so we don't need to 1319 * worry about AC or DF. 1320 * 1321 * Don't even bother popping the error code. It's always zero, 1322 * and ignoring it makes us a bit more robust against buggy 1323 * hypervisor task gate implementations. 1324 * 1325 * We will manually undo the task switch instead of doing a 1326 * task-switching IRET. 1327 */ 1328 1329 clts /* clear CR0.TS */ 1330 pushl $X86_EFLAGS_FIXED 1331 popfl /* clear EFLAGS.NT */ 1332 1333 call doublefault_shim 1334 1335 /* We don't support returning, so we have no IRET here. */ 13361: 1337 hlt 1338 jmp 1b 1339SYM_CODE_END(asm_exc_double_fault) 1340 1341/* 1342 * NMI is doubly nasty. It can happen on the first instruction of 1343 * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning 1344 * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32 1345 * switched stacks. We handle both conditions by simply checking whether we 1346 * interrupted kernel code running on the SYSENTER stack. 1347 */ 1348SYM_CODE_START(asm_exc_nmi) 1349 ASM_CLAC 1350 1351#ifdef CONFIG_X86_ESPFIX32 1352 /* 1353 * ESPFIX_SS is only ever set on the return to user path 1354 * after we've switched to the entry stack. 1355 */ 1356 pushl %eax 1357 movl %ss, %eax 1358 cmpw $__ESPFIX_SS, %ax 1359 popl %eax 1360 je .Lnmi_espfix_stack 1361#endif 1362 1363 pushl %eax # pt_regs->orig_ax 1364 SAVE_ALL_NMI cr3_reg=%edi 1365 ENCODE_FRAME_POINTER 1366 xorl %edx, %edx # zero error code 1367 movl %esp, %eax # pt_regs pointer 1368 1369 /* Are we currently on the SYSENTER stack? */ 1370 movl PER_CPU_VAR(cpu_entry_area), %ecx 1371 addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx 1372 subl %eax, %ecx /* ecx = (end of entry_stack) - esp */ 1373 cmpl $SIZEOF_entry_stack, %ecx 1374 jb .Lnmi_from_sysenter_stack 1375 1376 /* Not on SYSENTER stack. */ 1377 call exc_nmi 1378 jmp .Lnmi_return 1379 1380.Lnmi_from_sysenter_stack: 1381 /* 1382 * We're on the SYSENTER stack. Switch off. No one (not even debug) 1383 * is using the thread stack right now, so it's safe for us to use it. 1384 */ 1385 movl %esp, %ebx 1386 movl PER_CPU_VAR(cpu_current_top_of_stack), %esp 1387 call exc_nmi 1388 movl %ebx, %esp 1389 1390.Lnmi_return: 1391#ifdef CONFIG_X86_ESPFIX32 1392 testl $CS_FROM_ESPFIX, PT_CS(%esp) 1393 jnz .Lnmi_from_espfix 1394#endif 1395 1396 CHECK_AND_APPLY_ESPFIX 1397 RESTORE_ALL_NMI cr3_reg=%edi pop=4 1398 jmp .Lirq_return 1399 1400#ifdef CONFIG_X86_ESPFIX32 1401.Lnmi_espfix_stack: 1402 /* 1403 * Create the pointer to LSS back 1404 */ 1405 pushl %ss 1406 pushl %esp 1407 addl $4, (%esp) 1408 1409 /* Copy the (short) IRET frame */ 1410 pushl 4*4(%esp) # flags 1411 pushl 4*4(%esp) # cs 1412 pushl 4*4(%esp) # ip 1413 1414 pushl %eax # orig_ax 1415 1416 SAVE_ALL_NMI cr3_reg=%edi unwind_espfix=1 1417 ENCODE_FRAME_POINTER 1418 1419 /* clear CS_FROM_KERNEL, set CS_FROM_ESPFIX */ 1420 xorl $(CS_FROM_ESPFIX | CS_FROM_KERNEL), PT_CS(%esp) 1421 1422 xorl %edx, %edx # zero error code 1423 movl %esp, %eax # pt_regs pointer 1424 jmp .Lnmi_from_sysenter_stack 1425 1426.Lnmi_from_espfix: 1427 RESTORE_ALL_NMI cr3_reg=%edi 1428 /* 1429 * Because we cleared CS_FROM_KERNEL, IRET_FRAME 'forgot' to 1430 * fix up the gap and long frame: 1431 * 1432 * 3 - original frame (exception) 1433 * 2 - ESPFIX block (above) 1434 * 6 - gap (FIXUP_FRAME) 1435 * 5 - long frame (FIXUP_FRAME) 1436 * 1 - orig_ax 1437 */ 1438 lss (1+5+6)*4(%esp), %esp # back to espfix stack 1439 jmp .Lirq_return 1440#endif 1441SYM_CODE_END(asm_exc_nmi) 1442 1443.pushsection .text, "ax" 1444SYM_CODE_START(rewind_stack_do_exit) 1445 /* Prevent any naive code from trying to unwind to our caller. */ 1446 xorl %ebp, %ebp 1447 1448 movl PER_CPU_VAR(cpu_current_top_of_stack), %esi 1449 leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp 1450 1451 call do_exit 14521: jmp 1b 1453SYM_CODE_END(rewind_stack_do_exit) 1454.popsection 1455