/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1991,1992  Linus Torvalds
 *
 * entry_32.S contains the system-call and low-level fault and trap
 * handling routines.
 *
 * Stack layout while running C code:
 *	ptrace needs to have all registers on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process(), signal.c:do_signal(),
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 */

#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/frame.h>
#include <asm/trapnr.h>
#include <asm/nospec-branch.h>

#include "calling.h"

	.section .entry.text, "ax"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization.  The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#ifdef CONFIG_PREEMPTION
# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)		# interrupts off?
	jz	1f
	TRACE_IRQS_ON
1:
#endif
.endm

#define PTI_SWITCH_MASK		(1 << PAGE_SHIFT)

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS; the kernel only uses it for the stack
 * canary, which gcc requires to live at %gs:20.  Read the comment at
 * the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
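 *
 * As a rough illustration: with CONFIG_STACKPROTECTOR, gcc emits canary
 * accesses of the form
 *
 *	movl	%gs:20, %eax		# prologue: load the canary
 *	...
 *	xorl	%gs:20, %eax		# epilogue: check the canary
 *
 * so whenever kernel C code can run, %gs must point at a segment whose
 * base puts this CPU's stack canary at offset 20.  SET_KERNEL_GS below
 * takes care of that in the !CONFIG_X86_32_LAZY_GS case.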
 */
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-op */
.macro PUSH_GS
	pushl	$0
.endm
.macro POP_GS pop=0
	addl	$(4 + \pop), %esp
.endm
.macro POP_GS_EX
.endm

 /* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl	%gs
.endm

.macro POP_GS pop=0
98:	popl	%gs
  .if \pop <> 0
	add	$\pop, %esp
  .endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, (%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro PTGS_TO_GS
98:	mov	PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, PT_GS(%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro GS_TO_REG reg
	movl	%gs, \reg
.endm
.macro REG_TO_PTGS reg
	movl	\reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
	movl	$(__KERNEL_STACK_CANARY), \reg
	movl	\reg, %gs
.endm

#endif	/* CONFIG_X86_32_LAZY_GS */

/* Unconditionally switch to user cr3 */
.macro SWITCH_TO_USER_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI

	movl	%cr3, \scratch_reg
	orl	$PTI_SWITCH_MASK, \scratch_reg
	movl	\scratch_reg, %cr3
.Lend_\@:
.endm

.macro BUG_IF_WRONG_CR3 no_user_check=0
#ifdef CONFIG_DEBUG_ENTRY
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	.if \no_user_check == 0
	/* coming from usermode? */
	testl	$USER_SEGMENT_RPL_MASK, PT_CS(%esp)
	jz	.Lend_\@
	.endif
	/* On user-cr3? */
	movl	%cr3, %eax
	testl	$PTI_SWITCH_MASK, %eax
	jnz	.Lend_\@
	/* From userspace with kernel cr3 - BUG */
	ud2
.Lend_\@:
#endif
.endm

/*
 * Switch to kernel cr3 if not already loaded and return current cr3 in
 * \scratch_reg
 */
.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	movl	%cr3, \scratch_reg
	/* Test if we are already on kernel CR3 */
	testl	$PTI_SWITCH_MASK, \scratch_reg
	jz	.Lend_\@
	andl	$(~PTI_SWITCH_MASK), \scratch_reg
	movl	\scratch_reg, %cr3
	/* Return original CR3 in \scratch_reg */
	orl	$PTI_SWITCH_MASK, \scratch_reg
.Lend_\@:
.endm

#define CS_FROM_ENTRY_STACK	(1 << 31)
#define CS_FROM_USER_CR3	(1 << 30)
#define CS_FROM_KERNEL		(1 << 29)
#define CS_FROM_ESPFIX		(1 << 28)

.macro FIXUP_FRAME
	/*
	 * The high bits of the CS dword (__csh) are used for CS_FROM_*.
	 * Clear them in case hardware didn't do this for us.
	 */
	andl	$0x0000ffff, 4*4(%esp)

#ifdef CONFIG_VM86
	testl	$X86_EFLAGS_VM, 5*4(%esp)
	jnz	.Lfrom_usermode_no_fixup_\@
#endif
	testl	$USER_SEGMENT_RPL_MASK, 4*4(%esp)
	jnz	.Lfrom_usermode_no_fixup_\@

	orl	$CS_FROM_KERNEL, 4*4(%esp)

	/*
	 * When we're here from kernel mode, the (exception) stack looks like:
	 *
	 *  6*4(%esp) - <previous context>
	 *  5*4(%esp) - flags
	 *  4*4(%esp) - cs
	 *  3*4(%esp) - ip
	 *  2*4(%esp) - orig_eax
	 *  1*4(%esp) - gs / function
	 *  0*4(%esp) - fs
	 *
	 * Let's build a 5 entry IRET frame after that, such that struct pt_regs
	 * is complete and in particular regs->sp is correct.
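	 * (For an exception that does not change privilege level the CPU
	 *  pushes only eflags, cs and ip - no ss/sp - so without this fixup
	 *  there would be nothing meaningful to put in regs->sp.)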
	 * This gives us the original 6 entries as gap:
	 *
	 * 14*4(%esp) - <previous context>
	 * 13*4(%esp) - gap / flags
	 * 12*4(%esp) - gap / cs
	 * 11*4(%esp) - gap / ip
	 * 10*4(%esp) - gap / orig_eax
	 *  9*4(%esp) - gap / gs / function
	 *  8*4(%esp) - gap / fs
	 *  7*4(%esp) - ss
	 *  6*4(%esp) - sp
	 *  5*4(%esp) - flags
	 *  4*4(%esp) - cs
	 *  3*4(%esp) - ip
	 *  2*4(%esp) - orig_eax
	 *  1*4(%esp) - gs / function
	 *  0*4(%esp) - fs
	 */

	pushl	%ss			# ss
	pushl	%esp			# sp (points at ss)
	addl	$7*4, (%esp)		# point sp back at the previous context
	pushl	7*4(%esp)		# flags
	pushl	7*4(%esp)		# cs
	pushl	7*4(%esp)		# ip
	pushl	7*4(%esp)		# orig_eax
	pushl	7*4(%esp)		# gs / function
	pushl	7*4(%esp)		# fs
.Lfrom_usermode_no_fixup_\@:
.endm

.macro IRET_FRAME
	/*
	 * We're called with %ds, %es, %fs, and %gs from the interrupted
	 * frame, so we shouldn't use them.  Also, we may be in ESPFIX
	 * mode and therefore have a nonzero SS base and an offset ESP,
	 * so any attempt to access the stack needs to use SS.  (except for
	 * accesses through %esp, which automatically use SS.)
	 */
	testl	$CS_FROM_KERNEL, 1*4(%esp)
	jz	.Lfinished_frame_\@

	/*
	 * Reconstruct the 3 entry IRET frame right after the (modified)
	 * regs->sp without lowering %esp in between, such that an NMI in the
	 * middle doesn't scribble our stack.
	 */
	pushl	%eax
	pushl	%ecx
	movl	5*4(%esp), %eax		# (modified) regs->sp

	movl	4*4(%esp), %ecx		# flags
	movl	%ecx, %ss:-1*4(%eax)

	movl	3*4(%esp), %ecx		# cs
	andl	$0x0000ffff, %ecx
	movl	%ecx, %ss:-2*4(%eax)

	movl	2*4(%esp), %ecx		# ip
	movl	%ecx, %ss:-3*4(%eax)

	movl	1*4(%esp), %ecx		# eax
	movl	%ecx, %ss:-4*4(%eax)

	popl	%ecx
	lea	-4*4(%eax), %esp
	popl	%eax
.Lfinished_frame_\@:
.endm

.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0 unwind_espfix=0
	cld
.if \skip_gs == 0
	PUSH_GS
.endif
	pushl	%fs

	pushl	%eax
	movl	$(__KERNEL_PERCPU), %eax
	movl	%eax, %fs
.if \unwind_espfix > 0
	UNWIND_ESPFIX_STACK
.endif
	popl	%eax

	FIXUP_FRAME
	pushl	%es
	pushl	%ds
	pushl	\pt_regs_ax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	movl	$(__USER_DS), %edx
	movl	%edx, %ds
	movl	%edx, %es
.if \skip_gs == 0
	SET_KERNEL_GS %edx
.endif
	/* Switch to kernel stack if necessary */
.if \switch_stacks > 0
	SWITCH_TO_KERNEL_STACK
.endif
.endm

.macro SAVE_ALL_NMI cr3_reg:req unwind_espfix=0
	SAVE_ALL unwind_espfix=\unwind_espfix

	BUG_IF_WRONG_CR3

	/*
	 * Now switch the CR3 when PTI is enabled.
	 *
	 * We can enter with either user or kernel cr3; the code will
	 * store the old cr3 in \cr3_reg and switch to the kernel cr3
	 * if necessary.
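	 *
	 * (How the switch works, roughly: with PTI the kernel and user
	 *  page-table roots live in two adjacent 4k pages, so the two PGDs
	 *  differ only in bit PAGE_SHIFT of their physical address.
	 *  PTI_SWITCH_MASK is exactly that bit, which is why setting or
	 *  clearing it in %cr3 is all it takes to flip between the two.)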
	 */
	SWITCH_TO_KERNEL_CR3 scratch_reg=\cr3_reg

.Lend_\@:
.endm

.macro RESTORE_INT_REGS
	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl	%ds
2:	popl	%es
3:	popl	%fs
	POP_GS \pop
	IRET_FRAME
.pushsection .fixup, "ax"
4:	movl	$0, (%esp)
	jmp	1b
5:	movl	$0, (%esp)
	jmp	2b
6:	movl	$0, (%esp)
	jmp	3b
.popsection
	_ASM_EXTABLE(1b, 4b)
	_ASM_EXTABLE(2b, 5b)
	_ASM_EXTABLE(3b, 6b)
	POP_GS_EX
.endm

.macro RESTORE_ALL_NMI cr3_reg:req pop=0
	/*
	 * Now switch the CR3 when PTI is enabled.
	 *
	 * We enter with kernel cr3 and switch the cr3 to the value
	 * stored in \cr3_reg, which is either a user or a kernel cr3.
	 */
	ALTERNATIVE "jmp .Lswitched_\@", "", X86_FEATURE_PTI

	testl	$PTI_SWITCH_MASK, \cr3_reg
	jz	.Lswitched_\@

	/* User cr3 in \cr3_reg - write it to hardware cr3 */
	movl	\cr3_reg, %cr3

.Lswitched_\@:

	BUG_IF_WRONG_CR3

	RESTORE_REGS pop=\pop
.endm

.macro CHECK_AND_APPLY_ESPFIX
#ifdef CONFIG_X86_ESPFIX32
#define GDT_ESPFIX_OFFSET (GDT_ENTRY_ESPFIX_SS * 8)
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + GDT_ESPFIX_OFFSET

	ALTERNATIVE "jmp .Lend_\@", "", X86_BUG_ESPFIX

	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	/*
	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	 * are returning to the kernel.
	 * See comments in process.c:copy_thread() for details.
	 */
	movb	PT_OLDSS(%esp), %ah
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
	jne	.Lend_\@	# returning to user-space with LDT SS

	/*
	 * Setup and switch to ESPFIX stack
	 *
	 * We're returning to userspace with a 16 bit stack. The CPU will not
	 * restore the high word of ESP for us on executing iret... This is an
	 * "official" bug of all the x86-compatible CPUs, which we can work
	 * around to make dosemu and wine happy. We do this by preloading the
	 * high word of ESP with the high word of the userspace ESP while
	 * compensating for the offset by changing to the ESPFIX segment with
	 * a base address that accounts for the difference.
	 */
	mov	%esp, %edx			/* load kernel esp */
	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
	mov	%dx, %ax			/* eax: new kernel esp */
	sub	%eax, %edx			/* offset (low word is 0) */
	shr	$16, %edx
	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
	pushl	$__ESPFIX_SS
	pushl	%eax				/* new kernel esp */
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the IRET:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	lss	(%esp), %esp			/* switch to espfix segment */
.Lend_\@:
#endif	/* CONFIG_X86_ESPFIX32 */
.endm

/*
 * Called with pt_regs fully populated and kernel segments loaded,
 * so we can access PER_CPU and use the integer registers.
 *
 * We need to be very careful here with the %esp switch, because an NMI
 * can happen anywhere.  If the NMI handler finds itself on the
 * entry-stack, it will overwrite the task-stack and everything we
 * copied there.  So allocate the stack-frame on the task-stack and
 * switch to it before we do any copying.
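 *
 * (The "are we on the entry stack" test below boils down to an unsigned
 *  comparison, roughly:
 *
 *	(end of entry_stack) - %esp < SIZEOF_entry_stack
 *
 *  which only holds if %esp currently points somewhere inside this
 *  CPU's entry stack.)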
 */

.macro SWITCH_TO_KERNEL_STACK

	ALTERNATIVE	"", "jmp .Lend_\@", X86_FEATURE_XENPV

	BUG_IF_WRONG_CR3

	SWITCH_TO_KERNEL_CR3 scratch_reg=%eax

	/*
	 * %eax now contains the entry cr3 and we carry it forward in
	 * that register for the time this macro runs
	 */

	/* Are we on the entry stack? Bail out if not! */
	movl	PER_CPU_VAR(cpu_entry_area), %ecx
	addl	$CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
	subl	%esp, %ecx	/* ecx = (end of entry_stack) - esp */
	cmpl	$SIZEOF_entry_stack, %ecx
	jae	.Lend_\@

	/* Load stack pointer into %esi and %edi */
	movl	%esp, %esi
	movl	%esi, %edi

	/* Move %edi to the top of the entry stack */
	andl	$(MASK_entry_stack), %edi
	addl	$(SIZEOF_entry_stack), %edi

	/* Load top of task-stack into %edi */
	movl	TSS_entry2task_stack(%edi), %edi

	/* Special case - entry from kernel mode via entry stack */
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %ecx		# mix EFLAGS and CS
	movb	PT_CS(%esp), %cl
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %ecx
#else
	movl	PT_CS(%esp), %ecx
	andl	$SEGMENT_RPL_MASK, %ecx
#endif
	cmpl	$USER_RPL, %ecx
	jb	.Lentry_from_kernel_\@

	/* Bytes to copy */
	movl	$PTREGS_SIZE, %ecx

#ifdef CONFIG_VM86
	testl	$X86_EFLAGS_VM, PT_EFLAGS(%esi)
	jz	.Lcopy_pt_regs_\@

	/*
	 * Stack-frame contains 4 additional segment registers when
	 * coming from VM86 mode
	 */
	addl	$(4 * 4), %ecx

#endif
.Lcopy_pt_regs_\@:

	/* Allocate frame on task-stack */
	subl	%ecx, %edi

	/* Switch to task-stack */
	movl	%edi, %esp

	/*
	 * We are now on the task-stack and can safely copy over the
	 * stack-frame
	 */
	shrl	$2, %ecx
	cld
	rep movsl

	jmp	.Lend_\@

.Lentry_from_kernel_\@:

	/*
	 * This handles the case when we enter the kernel from
	 * kernel-mode and %esp points to the entry-stack. When this
	 * happens we need to switch to the task-stack to run C code,
	 * but switch back to the entry-stack again when we approach
	 * iret and return to the interrupted code-path. This usually
	 * happens when we hit an exception while restoring user-space
	 * segment registers on the way back to user-space or when the
	 * sysenter handler runs with eflags.tf set.
	 *
	 * When we switch to the task-stack here, we can't trust the
	 * contents of the entry-stack anymore, as the exception handler
	 * might be scheduled out or moved to another CPU. Therefore we
	 * copy the complete entry-stack to the task-stack and set a
	 * marker in the iret-frame (bit 31 of the CS dword) to detect
	 * what we've done on the iret path.
	 *
	 * On the iret path we copy everything back and switch to the
	 * entry-stack, so that the interrupted kernel code-path
	 * continues on the same stack it was interrupted with.
	 *
	 * Be aware that an NMI can happen anytime in this code.
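	 *
	 * (The CS slot of the iret frame only holds a 16-bit selector, so
	 *  its upper bits are free for markers; CS_FROM_ENTRY_STACK and
	 *  friends live there, and FIXUP_FRAME clears them on every entry.)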
577 * 578 * %esi: Entry-Stack pointer (same as %esp) 579 * %edi: Top of the task stack 580 * %eax: CR3 on kernel entry 581 */ 582 583 /* Calculate number of bytes on the entry stack in %ecx */ 584 movl %esi, %ecx 585 586 /* %ecx to the top of entry-stack */ 587 andl $(MASK_entry_stack), %ecx 588 addl $(SIZEOF_entry_stack), %ecx 589 590 /* Number of bytes on the entry stack to %ecx */ 591 sub %esi, %ecx 592 593 /* Mark stackframe as coming from entry stack */ 594 orl $CS_FROM_ENTRY_STACK, PT_CS(%esp) 595 596 /* 597 * Test the cr3 used to enter the kernel and add a marker 598 * so that we can switch back to it before iret. 599 */ 600 testl $PTI_SWITCH_MASK, %eax 601 jz .Lcopy_pt_regs_\@ 602 orl $CS_FROM_USER_CR3, PT_CS(%esp) 603 604 /* 605 * %esi and %edi are unchanged, %ecx contains the number of 606 * bytes to copy. The code at .Lcopy_pt_regs_\@ will allocate 607 * the stack-frame on task-stack and copy everything over 608 */ 609 jmp .Lcopy_pt_regs_\@ 610 611.Lend_\@: 612.endm 613 614/* 615 * Switch back from the kernel stack to the entry stack. 616 * 617 * The %esp register must point to pt_regs on the task stack. It will 618 * first calculate the size of the stack-frame to copy, depending on 619 * whether we return to VM86 mode or not. With that it uses 'rep movsl' 620 * to copy the contents of the stack over to the entry stack. 621 * 622 * We must be very careful here, as we can't trust the contents of the 623 * task-stack once we switched to the entry-stack. When an NMI happens 624 * while on the entry-stack, the NMI handler will switch back to the top 625 * of the task stack, overwriting our stack-frame we are about to copy. 626 * Therefore we switch the stack only after everything is copied over. 627 */ 628.macro SWITCH_TO_ENTRY_STACK 629 630 ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV 631 632 /* Bytes to copy */ 633 movl $PTREGS_SIZE, %ecx 634 635#ifdef CONFIG_VM86 636 testl $(X86_EFLAGS_VM), PT_EFLAGS(%esp) 637 jz .Lcopy_pt_regs_\@ 638 639 /* Additional 4 registers to copy when returning to VM86 mode */ 640 addl $(4 * 4), %ecx 641 642.Lcopy_pt_regs_\@: 643#endif 644 645 /* Initialize source and destination for movsl */ 646 movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi 647 subl %ecx, %edi 648 movl %esp, %esi 649 650 /* Save future stack pointer in %ebx */ 651 movl %edi, %ebx 652 653 /* Copy over the stack-frame */ 654 shrl $2, %ecx 655 cld 656 rep movsl 657 658 /* 659 * Switch to entry-stack - needs to happen after everything is 660 * copied because the NMI handler will overwrite the task-stack 661 * when on entry-stack 662 */ 663 movl %ebx, %esp 664 665.Lend_\@: 666.endm 667 668/* 669 * This macro handles the case when we return to kernel-mode on the iret 670 * path and have to switch back to the entry stack and/or user-cr3 671 * 672 * See the comments below the .Lentry_from_kernel_\@ label in the 673 * SWITCH_TO_KERNEL_STACK macro for more details. 674 */ 675.macro PARANOID_EXIT_TO_KERNEL_MODE 676 677 /* 678 * Test if we entered the kernel with the entry-stack. Most 679 * likely we did not, because this code only runs on the 680 * return-to-kernel path. 
	 */
	testl	$CS_FROM_ENTRY_STACK, PT_CS(%esp)
	jz	.Lend_\@

	/* Unlikely slow-path */

	/* Clear marker from stack-frame */
	andl	$(~CS_FROM_ENTRY_STACK), PT_CS(%esp)

	/* Copy the remaining task-stack contents to entry-stack */
	movl	%esp, %esi
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi

	/* Bytes on the task-stack to ecx */
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp1), %ecx
	subl	%esi, %ecx

	/* Allocate stack-frame on entry-stack */
	subl	%ecx, %edi

	/*
	 * Save future stack-pointer, we must not switch until the
	 * copy is done, otherwise the NMI handler could destroy the
	 * contents of the task-stack we are about to copy.
	 */
	movl	%edi, %ebx

	/* Do the copy */
	shrl	$2, %ecx
	cld
	rep movsl

	/* Safe to switch to entry-stack now */
	movl	%ebx, %esp

	/*
	 * We came from entry-stack and need to check if we also need to
	 * switch back to user cr3.
	 */
	testl	$CS_FROM_USER_CR3, PT_CS(%esp)
	jz	.Lend_\@

	/* Clear marker from stack-frame */
	andl	$(~CS_FROM_USER_CR3), PT_CS(%esp)

	SWITCH_TO_USER_CR3 scratch_reg=%eax

.Lend_\@:
.endm

/**
 * idtentry - Macro to generate entry stubs for simple IDT entries
 * @vector:		Vector number
 * @asmsym:		ASM symbol for the entry point
 * @cfunc:		C function to be called
 * @has_error_code:	Hardware pushed error code on stack
 */
.macro idtentry vector asmsym cfunc has_error_code:req
SYM_CODE_START(\asmsym)
	ASM_CLAC
	cld

	.if \has_error_code == 0
		pushl	$0		/* Clear the error code */
	.endif

	/* Push the C-function address into the GS slot */
	pushl	$\cfunc
	/* Invoke the common exception entry */
	jmp	handle_exception
SYM_CODE_END(\asmsym)
.endm

.macro idtentry_irq vector cfunc
	.p2align CONFIG_X86_L1_CACHE_SHIFT
SYM_CODE_START_LOCAL(asm_\cfunc)
	ASM_CLAC
	SAVE_ALL switch_stacks=1
	ENCODE_FRAME_POINTER
	movl	%esp, %eax
	movl	PT_ORIG_EAX(%esp), %edx		/* get the vector from stack */
	movl	$-1, PT_ORIG_EAX(%esp)		/* no syscall to restart */
	call	\cfunc
	jmp	handle_exception_return
SYM_CODE_END(asm_\cfunc)
.endm

.macro idtentry_sysvec vector cfunc
	idtentry \vector asm_\cfunc \cfunc has_error_code=0
.endm

/*
 * Include the defines which emit the idt entries which are shared
 * between 32 and 64 bit.
 */
#include <asm/idtentry.h>

/*
 * %eax: prev task
 * %edx: next task
 */
.pushsection .text, "ax"
SYM_CODE_START(__switch_to_asm)
	/*
	 * Save callee-saved registers
	 * This must match the order in struct inactive_task_frame
	 */
	pushl	%ebp
	pushl	%ebx
	pushl	%edi
	pushl	%esi
	/*
	 * Flags are saved to prevent AC leakage. This could go
	 * away if objtool had 32-bit support to verify
	 * the STAC/CLAC correctness.
	 */
	pushfl

	/* switch stack */
	movl	%esp, TASK_threadsp(%eax)
	movl	TASK_threadsp(%edx), %esp

#ifdef CONFIG_STACKPROTECTOR
	movl	TASK_stack_canary(%edx), %ebx
	movl	%ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
#endif

#ifdef CONFIG_RETPOLINE
	/*
	 * When switching from a shallower to a deeper call stack
	 * the RSB may either underflow or use entries populated
	 * with userspace addresses. On CPUs where those concerns
	 * exist, overwrite the RSB with entries which capture
	 * speculative execution to prevent attack.
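	 *
	 * (Roughly speaking, FILL_RETURN_BUFFER stuffs the RSB with
	 *  RSB_CLEAR_LOOPS return addresses that all point at a benign
	 *  capture loop, so any later underflow-driven 'ret' speculation
	 *  can only land there.)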
	 */
	FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif

	/* Restore the flags of the incoming task to restore AC state. */
	popfl
	/* restore callee-saved registers */
	popl	%esi
	popl	%edi
	popl	%ebx
	popl	%ebp

	jmp	__switch_to
SYM_CODE_END(__switch_to_asm)
.popsection

/*
 * The unwinder expects the last frame on the stack to always be at the same
 * offset from the end of the page, which allows it to validate the stack.
 * Calling schedule_tail() directly would break that convention because it's an
 * asmlinkage function so its argument has to be pushed on the stack. This
 * wrapper creates a proper "end of stack" frame header before the call.
 */
.pushsection .text, "ax"
SYM_FUNC_START(schedule_tail_wrapper)
	FRAME_BEGIN

	pushl	%eax
	call	schedule_tail
	popl	%eax

	FRAME_END
	ret
SYM_FUNC_END(schedule_tail_wrapper)
.popsection

/*
 * A newly forked process directly context switches into this address.
 *
 * eax: prev task we switched from
 * ebx: kernel thread func (NULL for user thread)
 * edi: kernel thread arg
 */
.pushsection .text, "ax"
SYM_CODE_START(ret_from_fork)
	call	schedule_tail_wrapper

	testl	%ebx, %ebx
	jnz	1f		/* kernel threads are uncommon */

2:
	/* When we fork, we trace the syscall return in the child, too. */
	movl	%esp, %eax
	call	syscall_return_slowpath
	jmp	.Lsyscall_32_done

	/* kernel thread */
1:	movl	%edi, %eax
	CALL_NOSPEC ebx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve().  Exit to userspace to complete the execve()
	 * syscall.
	 */
	movl	$0, PT_EAX(%esp)
	jmp	2b
SYM_CODE_END(ret_from_fork)
.popsection

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
SYM_CODE_START_LOCAL(ret_from_exception)
	preempt_stop(CLBR_ANY)
ret_from_intr:
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
	/*
	 * We can be coming here from a child spawned by kernel_thread().
	 */
	movl	PT_CS(%esp), %eax
	andl	$SEGMENT_RPL_MASK, %eax
#endif
	cmpl	$USER_RPL, %eax
	jb	restore_all_kernel		# not returning to v8086 or userspace

	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	prepare_exit_to_usermode
	jmp	restore_all_switch_stack
SYM_CODE_END(ret_from_exception)

SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
/*
 * All code from here through __end_SYSENTER_singlestep_region is subject
 * to being single-stepped if a user program sets TF and executes SYSENTER.
 * There is absolutely nothing that we can do to prevent this from happening
 * (thanks Intel!).  To keep our handling of this situation as simple as
 * possible, we handle TF just like AC and NT, except that our #DB handler
 * will ignore all of the single-step traps generated in this range.
 */

#ifdef CONFIG_XEN_PV
/*
 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
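 *
 * (Presumably the five dwords dropped below are the iret-style frame -
 *  eip, cs, eflags, esp, ss - that Xen supplies on the kernel stack;
 *  discarding it leaves the stack the way .Lsysenter_past_esp expects.)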
 */
SYM_CODE_START(xen_sysenter_target)
	addl	$5*4, %esp			/* remove xen-provided frame */
	jmp	.Lsysenter_past_esp
SYM_CODE_END(xen_sysenter_target)
#endif

/*
 * 32-bit SYSENTER entry.
 *
 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
 * if X86_FEATURE_SEP is available.  This is the preferred system call
 * entry on 32-bit systems.
 *
 * The SYSENTER instruction, in principle, should *only* occur in the
 * vDSO.  In practice, a small number of Android devices were shipped
 * with a copy of Bionic that inlined a SYSENTER instruction.  This
 * never happened in any of Google's Bionic versions -- it only happened
 * in a narrow range of Intel-provided versions.
 *
 * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs.
 * IF and VM in RFLAGS are cleared (IOW: interrupts are off).
 * SYSENTER does not save anything on the stack,
 * and does not save old EIP (!!!), ESP, or EFLAGS.
 *
 * To avoid losing track of EFLAGS.VM (and thus potentially corrupting
 * user and/or vm86 state), we explicitly disable the SYSENTER
 * instruction in vm86 mode by reprogramming the MSRs.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  user stack
 * 0(%ebp) arg6
 */
SYM_FUNC_START(entry_SYSENTER_32)
	/*
	 * On entry-stack with all userspace-regs live - save and
	 * restore eflags and %eax to use it as scratch-reg for the cr3
	 * switch.
	 */
	pushfl
	pushl	%eax
	BUG_IF_WRONG_CR3 no_user_check=1
	SWITCH_TO_KERNEL_CR3 scratch_reg=%eax
	popl	%eax
	popfl

	/* Stack empty again, switch to task stack */
	movl	TSS_entry2task_stack(%esp), %esp

.Lsysenter_past_esp:
	pushl	$__USER_DS		/* pt_regs->ss */
	pushl	%ebp			/* pt_regs->sp (stashed in bp) */
	pushfl				/* pt_regs->flags (except IF = 0) */
	orl	$X86_EFLAGS_IF, (%esp)	/* Fix IF */
	pushl	$__USER_CS		/* pt_regs->cs */
	pushl	$0			/* pt_regs->ip = 0 (placeholder) */
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest, stack already switched */

	/*
	 * SYSENTER doesn't filter flags, so we need to clear NT, AC
	 * and TF ourselves.  To save a few cycles, we can check whether
	 * either was set instead of doing an unconditional popfl.
	 * This needs to happen before enabling interrupts so that
	 * we don't get preempted with NT set.
	 *
	 * If TF is set, we will single-step all the way to here -- do_debug
	 * will ignore all the traps.  (Yes, this is slow, but so is
	 * single-stepping in general.  This allows us to avoid having
	 * more complicated code to handle the case where a user program
	 * forces us to single-step through the SYSENTER entry code.)
	 *
	 * NB.: .Lsysenter_fix_flags is a label with the code under it moved
	 * out-of-line as an optimization: NT is unlikely to be set in the
	 * majority of the cases and instead of polluting the I$ unnecessarily,
	 * we're keeping that code behind a branch which will predict as
	 * not-taken and therefore its instructions won't be fetched.
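	 *
	 * (The out-of-line fixup simply pushes X86_EFLAGS_FIXED - an EFLAGS
	 *  image with only the always-one bit set - and pops it, which
	 *  clears NT, AC and TF in a single popfl.)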
	 */
	testl	$X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
	jnz	.Lsysenter_fix_flags
.Lsysenter_flags_fixed:

	movl	%esp, %eax
	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV

	STACKLEAK_ERASE

	/* Opportunistic SYSEXIT */

	/*
	 * Setup entry stack - we keep the pointer in %eax and do the
	 * switch after almost all user-state is restored.
	 */

	/* Load entry stack pointer and allocate frame for eflags/eax */
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %eax
	subl	$(2*4), %eax

	/* Copy eflags and eax to entry stack */
	movl	PT_EFLAGS(%esp), %edi
	movl	PT_EAX(%esp), %esi
	movl	%edi, (%eax)
	movl	%esi, 4(%eax)

	/* Restore user registers and segments */
	movl	PT_EIP(%esp), %edx	/* pt_regs->ip */
	movl	PT_OLDESP(%esp), %ecx	/* pt_regs->sp */
1:	mov	PT_FS(%esp), %fs
	PTGS_TO_GS

	popl	%ebx			/* pt_regs->bx */
	addl	$2*4, %esp		/* skip pt_regs->cx and pt_regs->dx */
	popl	%esi			/* pt_regs->si */
	popl	%edi			/* pt_regs->di */
	popl	%ebp			/* pt_regs->bp */

	/* Switch to entry stack */
	movl	%eax, %esp

	/* Now ready to switch the cr3 */
	SWITCH_TO_USER_CR3 scratch_reg=%eax

	/*
	 * Restore all flags except IF. (We restore IF separately because
	 * STI gives a one-instruction window in which we won't be interrupted,
	 * whereas POPF does not.)
	 */
	btrl	$X86_EFLAGS_IF_BIT, (%esp)
	BUG_IF_WRONG_CR3 no_user_check=1
	popfl
	popl	%eax

	/*
	 * Return to the vDSO, which will pop ecx and edx.
	 * Don't bother with DS and ES (they already contain __USER_DS).
	 */
	sti
	sysexit

.pushsection .fixup, "ax"
2:	movl	$0, PT_FS(%esp)
	jmp	1b
.popsection
	_ASM_EXTABLE(1b, 2b)
	PTGS_TO_GS_EX

.Lsysenter_fix_flags:
	pushl	$X86_EFLAGS_FIXED
	popfl
	jmp	.Lsysenter_flags_fixed
SYM_ENTRY(__end_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
SYM_FUNC_END(entry_SYSENTER_32)

/*
 * 32-bit legacy system call entry.
 *
 * 32-bit x86 Linux system calls traditionally used the INT $0x80
 * instruction.  INT $0x80 lands here.
 *
 * This entry point can be used by any 32-bit program to perform
 * system calls.  Instances of INT $0x80 can be found inline in
 * various programs and libraries.  It is also used by the vDSO's
 * __kernel_vsyscall fallback for hardware that doesn't support a
 * faster entry method.  Restarted 32-bit system calls also fall back
 * to INT $0x80 regardless of what instruction was originally used to
 * do the system call.  (64-bit programs can use INT $0x80 as well,
 * but they can only run on 64-bit kernels and therefore land in
 * entry_INT80_compat.)
 *
 * This is considered a slow path.  It is not used by most libc
 * implementations on modern hardware except during process startup.
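 *
 * A minimal userspace-side sketch of this ABI (not part of the kernel),
 * assuming the classic i386 syscall numbers:
 *
 *	movl	$4, %eax		# __NR_write
 *	movl	$1, %ebx		# fd = stdout
 *	movl	$buf, %ecx		# buffer
 *	movl	$len, %edx		# count
 *	int	$0x80			# return value comes back in %eax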
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  arg6
 */
SYM_FUNC_START(entry_INT80_32)
	ASM_CLAC
	pushl	%eax			/* pt_regs->orig_ax */

	SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1	/* save rest */

	movl	%esp, %eax
	call	do_int80_syscall_32
.Lsyscall_32_done:
	STACKLEAK_ERASE

restore_all_switch_stack:
	SWITCH_TO_ENTRY_STACK
	CHECK_AND_APPLY_ESPFIX

	/* Switch back to user CR3 */
	SWITCH_TO_USER_CR3 scratch_reg=%eax

	BUG_IF_WRONG_CR3

	/* Restore user state */
	RESTORE_REGS pop=4			# skip orig_eax/error_code
.Lirq_return:
	/*
	 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
	 * when returning from IPI handler and when returning from
	 * scheduler to user-space.
	 */
	INTERRUPT_RETURN

restore_all_kernel:
#ifdef CONFIG_PREEMPTION
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	.Lno_preempt
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz	.Lno_preempt
	call	preempt_schedule_irq
.Lno_preempt:
#endif
	TRACE_IRQS_IRET
	PARANOID_EXIT_TO_KERNEL_MODE
	BUG_IF_WRONG_CR3
	RESTORE_REGS 4
	jmp	.Lirq_return

.section .fixup, "ax"
SYM_CODE_START(asm_iret_error)
	pushl	$0				# no error code
	pushl	$iret_error

#ifdef CONFIG_DEBUG_ENTRY
	/*
	 * The stack-frame here is the one that iret faulted on, so it's a
	 * return-to-user frame.  We are on kernel-cr3 because we come here from
	 * the fixup code.  This confuses the CR3 checker, so switch to user-cr3
	 * as the checker expects it.
	 */
	pushl	%eax
	SWITCH_TO_USER_CR3 scratch_reg=%eax
	popl	%eax
#endif

	jmp	handle_exception
SYM_CODE_END(asm_iret_error)
.previous
	_ASM_EXTABLE(.Lirq_return, asm_iret_error)
SYM_FUNC_END(entry_INT80_32)

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack.
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
 *
 * We might be on user CR3 here, so percpu data is not mapped and we can't
 * access the GDT through the percpu segment.  Instead, use SGDT to find
 * the cpu_entry_area alias of the GDT.
 */
#ifdef CONFIG_X86_ESPFIX32
	/* fixup the stack */
	pushl	%ecx
	subl	$2*4, %esp
	sgdt	(%esp)
	movl	2(%esp), %ecx			/* GDT address */
	/*
	 * Careful: ECX is a linear pointer, so we need to force base
	 * zero.  %cs is the only known-linear segment we have right now.
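	 *
	 * (Offsets 4 and 7 below are the GDT descriptor bytes holding base
	 *  bits 16..23 and 24..31; since the ESPFIX segment base is built
	 *  with its low 16 bits clear, those two bytes plus the current
	 *  %esp are enough to rebuild the flat stack address.)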
	 */
	mov	%cs:GDT_ESPFIX_OFFSET + 4(%ecx), %al	/* bits 16..23 */
	mov	%cs:GDT_ESPFIX_OFFSET + 7(%ecx), %ah	/* bits 24..31 */
	shl	$16, %eax
	addl	$2*4, %esp
	popl	%ecx
	addl	%esp, %eax			/* the adjusted stack pointer */
	pushl	$__KERNEL_DS
	pushl	%eax
	lss	(%esp), %esp			/* switch to the normal stack segment */
#endif
.endm

.macro UNWIND_ESPFIX_STACK
	/* It's safe to clobber %eax, all other regs need to be preserved */
#ifdef CONFIG_X86_ESPFIX32
	movl	%ss, %eax
	/* see if on espfix stack */
	cmpw	$__ESPFIX_SS, %ax
	jne	.Lno_fixup_\@
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
.Lno_fixup_\@:
#endif
.endm

#define BUILD_INTERRUPT3(name, nr, fn)			\
SYM_FUNC_START(name)					\
	ASM_CLAC;					\
	pushl	$~(nr);					\
	SAVE_ALL switch_stacks=1;			\
	ENCODE_FRAME_POINTER;				\
	TRACE_IRQS_OFF					\
	movl	%esp, %eax;				\
	call	fn;					\
	jmp	ret_from_intr;				\
SYM_FUNC_END(name)

#define BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(name, nr, smp_##name);	\

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

#ifdef CONFIG_PARAVIRT
SYM_CODE_START(native_iret)
	iret
	_ASM_EXTABLE(native_iret, asm_iret_error)
SYM_CODE_END(native_iret)
#endif

#ifdef CONFIG_XEN_PV
/*
 * See comment in entry_64.S for further explanation
 *
 * Note: This is not an actual IDT entry point. It's a Xen-specific entry
 * point and therefore named to match the 64-bit trampoline counterpart.
 */
SYM_FUNC_START(xen_asm_exc_xen_hypervisor_callback)
	/*
	 * Check to see if we got the event in the critical
	 * region in xen_iret_direct, after we've reenabled
	 * events and checked for pending events.  This simulates
	 * the iret instruction's behaviour where it delivers a
	 * pending interrupt when enabling interrupts:
	 */
	cmpl	$xen_iret_start_crit, (%esp)
	jb	1f
	cmpl	$xen_iret_end_crit, (%esp)
	jae	1f
	call	xen_iret_crit_fixup
1:
	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	ENCODE_FRAME_POINTER

	mov	%esp, %eax
	call	xen_pv_evtchn_do_upcall
	jmp	handle_exception_return
SYM_FUNC_END(xen_asm_exc_xen_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we fix up by reattempting the load, and zeroing the segment
 * register if the load fails.
 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by maintaining a status value in EAX.
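 *
 * (Xen has already pushed the four saved segment selectors when this
 *  callback is invoked; after the "pushl %eax" below they sit at
 *  4(%esp)..16(%esp), which is where the reload attempts read them from,
 *  and the "lea 16(%esp), %esp" later discards them.)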
 */
SYM_FUNC_START(xen_failsafe_callback)
	pushl	%eax
	movl	$1, %eax
1:	mov	4(%esp), %ds
2:	mov	8(%esp), %es
3:	mov	12(%esp), %fs
4:	mov	16(%esp), %gs
	/* EAX == 0 => Category 1 (Bad segment)
	   EAX != 0 => Category 2 (Bad IRET) */
	testl	%eax, %eax
	popl	%eax
	lea	16(%esp), %esp
	jz	5f
	jmp	asm_iret_error
5:	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	ENCODE_FRAME_POINTER
	jmp	handle_exception_return

.section .fixup, "ax"
6:	xorl	%eax, %eax
	movl	%eax, 4(%esp)
	jmp	1b
7:	xorl	%eax, %eax
	movl	%eax, 8(%esp)
	jmp	2b
8:	xorl	%eax, %eax
	movl	%eax, 12(%esp)
	jmp	3b
9:	xorl	%eax, %eax
	movl	%eax, 16(%esp)
	jmp	4b
.previous
	_ASM_EXTABLE(1b, 6b)
	_ASM_EXTABLE(2b, 7b)
	_ASM_EXTABLE(3b, 8b)
	_ASM_EXTABLE(4b, 9b)
SYM_FUNC_END(xen_failsafe_callback)
#endif /* CONFIG_XEN_PV */

#ifdef CONFIG_XEN_PVHVM
BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 xen_evtchn_do_upcall)
#endif

SYM_CODE_START_LOCAL_NOALIGN(handle_exception)
	/* the function address is in %gs's slot on the stack */
	SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
	ENCODE_FRAME_POINTER

	/* fixup %gs */
	GS_TO_REG %ecx
	movl	PT_GS(%esp), %edi		# get the function address
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx

	/* fixup orig %eax */
	movl	PT_ORIG_EAX(%esp), %edx		# get the error code
	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart

	movl	%esp, %eax			# pt_regs pointer
	CALL_NOSPEC edi

handle_exception_return:
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
	/*
	 * We can be coming here from a child spawned by kernel_thread().
	 */
	movl	PT_CS(%esp), %eax
	andl	$SEGMENT_RPL_MASK, %eax
#endif
	cmpl	$USER_RPL, %eax			# returning to v8086 or userspace ?
	jnb	ret_to_user

	PARANOID_EXIT_TO_KERNEL_MODE
	BUG_IF_WRONG_CR3
	RESTORE_REGS 4
	jmp	.Lirq_return

ret_to_user:
	movl	%esp, %eax
	jmp	restore_all_switch_stack
SYM_CODE_END(handle_exception)

SYM_CODE_START(asm_exc_double_fault)
1:
	/*
	 * This is a task gate handler, not an interrupt gate handler.
	 * The error code is on the stack, but the stack is otherwise
	 * empty.  Interrupts are off.  Our state is sane with the following
	 * exceptions:
	 *
	 *  - CR0.TS is set.  "TS" literally means "task switched".
	 *  - EFLAGS.NT is set because we're a "nested task".
	 *  - The doublefault TSS has back_link set and has been marked busy.
	 *  - TR points to the doublefault TSS and the normal TSS is busy.
	 *  - CR3 is the normal kernel PGD.  This would be delightful, except
	 *    that the CPU didn't bother to save the old CR3 anywhere.  This
	 *    would make it very awkward to return to the context we came
	 *    from.
	 *
	 * The rest of EFLAGS is sanitized for us, so we don't need to
	 * worry about AC or DF.
	 *
	 * Don't even bother popping the error code.  It's always zero,
	 * and ignoring it makes us a bit more robust against buggy
	 * hypervisor task gate implementations.
	 *
	 * We will manually undo the task switch instead of doing a
	 * task-switching IRET.
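	 *
	 * (An IRET with EFLAGS.NT set would make the CPU task-switch back
	 *  through the doublefault TSS's back_link; the clts/popfl below
	 *  clear CR0.TS and EFLAGS.NT instead, so no such return is ever
	 *  attempted.)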
	 */

	clts				/* clear CR0.TS */
	pushl	$X86_EFLAGS_FIXED
	popfl				/* clear EFLAGS.NT */

	call	doublefault_shim

	/* We don't support returning, so we have no IRET here. */
1:
	hlt
	jmp	1b
SYM_CODE_END(asm_exc_double_fault)

/*
 * NMI is doubly nasty.  It can happen on the first instruction of
 * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning
 * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32
 * switched stacks.  We handle both conditions by simply checking whether we
 * interrupted kernel code running on the SYSENTER stack.
 */
SYM_CODE_START(asm_exc_nmi)
	ASM_CLAC

#ifdef CONFIG_X86_ESPFIX32
	/*
	 * ESPFIX_SS is only ever set on the return to user path
	 * after we've switched to the entry stack.
	 */
	pushl	%eax
	movl	%ss, %eax
	cmpw	$__ESPFIX_SS, %ax
	popl	%eax
	je	.Lnmi_espfix_stack
#endif

	pushl	%eax				# pt_regs->orig_ax
	SAVE_ALL_NMI cr3_reg=%edi
	ENCODE_FRAME_POINTER
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer

	/* Are we currently on the SYSENTER stack? */
	movl	PER_CPU_VAR(cpu_entry_area), %ecx
	addl	$CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
	subl	%eax, %ecx	/* ecx = (end of entry_stack) - esp */
	cmpl	$SIZEOF_entry_stack, %ecx
	jb	.Lnmi_from_sysenter_stack

	/* Not on SYSENTER stack. */
	call	exc_nmi
	jmp	.Lnmi_return

.Lnmi_from_sysenter_stack:
	/*
	 * We're on the SYSENTER stack.  Switch off.  No one (not even debug)
	 * is using the thread stack right now, so it's safe for us to use it.
	 */
	movl	%esp, %ebx
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	call	exc_nmi
	movl	%ebx, %esp

.Lnmi_return:
#ifdef CONFIG_X86_ESPFIX32
	testl	$CS_FROM_ESPFIX, PT_CS(%esp)
	jnz	.Lnmi_from_espfix
#endif

	CHECK_AND_APPLY_ESPFIX
	RESTORE_ALL_NMI cr3_reg=%edi pop=4
	jmp	.Lirq_return

#ifdef CONFIG_X86_ESPFIX32
.Lnmi_espfix_stack:
	/*
	 * Create the pointer to LSS back to the espfix stack
	 */
	pushl	%ss
	pushl	%esp
	addl	$4, (%esp)

	/* Copy the (short) IRET frame */
	pushl	4*4(%esp)	# flags
	pushl	4*4(%esp)	# cs
	pushl	4*4(%esp)	# ip

	pushl	%eax		# orig_ax

	SAVE_ALL_NMI cr3_reg=%edi unwind_espfix=1
	ENCODE_FRAME_POINTER

	/* clear CS_FROM_KERNEL, set CS_FROM_ESPFIX */
	xorl	$(CS_FROM_ESPFIX | CS_FROM_KERNEL), PT_CS(%esp)

	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	jmp	.Lnmi_from_sysenter_stack

.Lnmi_from_espfix:
	RESTORE_ALL_NMI cr3_reg=%edi
	/*
	 * Because we cleared CS_FROM_KERNEL, IRET_FRAME 'forgot' to
	 * fix up the gap and long frame:
	 *
	 *  3 - original frame	(exception)
	 *  2 - ESPFIX block	(above)
	 *  6 - gap		(FIXUP_FRAME)
	 *  5 - long frame	(FIXUP_FRAME)
	 *  1 - orig_ax
	 */
	lss	(1+5+6)*4(%esp), %esp		# back to espfix stack
	jmp	.Lirq_return
#endif
SYM_CODE_END(asm_exc_nmi)

.pushsection .text, "ax"
SYM_CODE_START(rewind_stack_do_exit)
	/* Prevent any naive code from trying to unwind to our caller.
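	 * (Zeroing %ebp below terminates the frame-pointer chain, and %esp
	 *  is then pointed at a pt_regs-sized area just below the top of
	 *  the task stack, so the unwinder sees a conventional end-of-stack
	 *  frame before do_exit() is called.)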
	 */
	xorl	%ebp, %ebp

	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esi
	leal	-TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp

	call	do_exit
1:	jmp 1b
SYM_CODE_END(rewind_stack_do_exit)
.popsection