/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 * Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.rst
 *
 * A note on terminology:
 * - iret frame:	Architecture defined interrupt frame from SS to RIP
 *			at the top of the kernel process stack.
 *
 * Some macro usage:
 * - SYM_FUNC_START/END:	Define functions in the symbol table.
 * - idtentry:			Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <asm/export.h>
#include <asm/frame.h>
#include <asm/trapnr.h>
#include <asm/nospec-branch.h>
#include <asm/fsgsbase.h>
#include <linux/err.h>

#include "calling.h"

.code64
.section .entry.text, "ax"

#ifdef CONFIG_PARAVIRT
SYM_CODE_START(native_usergs_sysret64)
	UNWIND_HINT_EMPTY
	swapgs
	sysretq
SYM_CODE_END(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */

/*
 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
 * This is the only entry point used for 64-bit system calls.  The
 * hardware interface is reasonably well designed and the register to
 * argument mapping Linux uses fits well with the registers that are
 * available when SYSCALL is used.
 *
 * SYSCALL instructions can be found inlined in libc implementations as
 * well as some other programs and libraries.  There are also a handful
 * of SYSCALL instructions in the vDSO used, for example, as a
 * clock_gettimeofday fallback.
 *
 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Registers on entry:
 * rax  system call number
 * rcx  return address
 * r11  saved rflags (note: r11 is callee-clobbered register in C ABI)
 * rdi  arg0
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 (needs to be moved to rcx to conform to C ABI)
 * r8   arg4
 * r9   arg5
 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
 *
 * Only called from user space.
 *
 * When the user can change pt_regs->foo, always force IRET. That is because
 * it deals with uncanonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */

SYM_CODE_START(entry_SYSCALL_64)
	UNWIND_HINT_EMPTY

	swapgs
	/* tss.sp2 is scratch space. */
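	/*
	 * Note on the next three instructions: the user RSP is stashed in
	 * tss.sp2 so that %rsp itself can serve as the scratch register
	 * for the CR3 switch, and interrupts stay disabled because IF is
	 * part of the SYSCALL flag mask, so no usable stack is needed
	 * until the kernel stack is loaded below.
	 */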
	movq	%rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/* Construct struct pt_regs on stack */
	pushq	$__USER_DS				/* pt_regs->ss */
	pushq	PER_CPU_VAR(cpu_tss_rw + TSS_sp2)	/* pt_regs->sp */
	pushq	%r11					/* pt_regs->flags */
	pushq	$__USER_CS				/* pt_regs->cs */
	pushq	%rcx					/* pt_regs->ip */
SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
	pushq	%rax					/* pt_regs->orig_ax */

	PUSH_AND_CLEAR_REGS rax=$-ENOSYS

	/* IRQs are off. */
	movq	%rax, %rdi
	movq	%rsp, %rsi
	call	do_syscall_64		/* returns with IRQs disabled */

	/*
	 * Try to use SYSRET instead of IRET if we're returning to
	 * a completely clean 64-bit userspace context.  If we're not,
	 * go to the slow exit path.
	 */
	movq	RCX(%rsp), %rcx
	movq	RIP(%rsp), %r11

	cmpq	%rcx, %r11	/* SYSRET requires RCX == RIP */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
	 * in kernel space.  This essentially lets the user take over
	 * the kernel, since userspace controls RSP.
	 *
	 * If width of "canonical tail" ever becomes variable, this will need
	 * to be updated to remain correct on both old and new CPUs.
	 *
	 * Change top bits to match most significant bit (47th or 56th bit
	 * depending on paging mode) in the address.
	 */
#ifdef CONFIG_X86_5LEVEL
	ALTERNATIVE "shl $(64 - 48), %rcx; sar $(64 - 48), %rcx", \
		"shl $(64 - 57), %rcx; sar $(64 - 57), %rcx", X86_FEATURE_LA57
#else
	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
#endif

	/* If this changed %rcx, it was not canonical */
	cmpq	%rcx, %r11
	jne	swapgs_restore_regs_and_return_to_usermode

	cmpq	$__USER_CS, CS(%rsp)		/* CS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	movq	R11(%rsp), %r11
	cmpq	%r11, EFLAGS(%rsp)		/* R11 == RFLAGS */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
	 * restore RF properly. If the slowpath sets it for whatever reason, we
	 * need to restore it correctly.
	 *
	 * SYSRET can restore TF, but unlike IRET, restoring TF results in a
	 * trap from userspace immediately after SYSRET.  This would cause an
	 * infinite loop whenever #DB happens with register state that satisfies
	 * the opportunistic SYSRET conditions.  For example, single-stepping
	 * this user code:
	 *
	 *           movq	$stuck_here, %rcx
	 *           pushfq
	 *           popq %r11
	 * stuck_here:
	 *
	 * would never get past 'stuck_here'.
	 */
	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
	jnz	swapgs_restore_regs_and_return_to_usermode

	/* nothing to check for RSP */

	cmpq	$__USER_DS, SS(%rsp)		/* SS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * We win! This label is here just for ease of understanding
	 * perf profiles. Nothing jumps here.
	 */
syscall_return_via_sysret:
	/* rcx and r11 are already restored (see code above) */
	POP_REGS pop_rdi=0 skip_r11rcx=1

	/*
	 * Now all regs are restored except RSP and RDI.
	 * Save old stack pointer and switch to trampoline stack.
	 */
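	/*
	 * Rough sketch of the trampoline stack built by the two pushes
	 * below (last push on top):
	 *
	 *	pt_regs->sp	(user RSP, read via RSP-RDI(%rdi))
	 *	user RDI	(the slot %rdi still points at)
	 *
	 * popq %rdi / popq %rsp below consume them in reverse order just
	 * before USERGS_SYSRET64.
	 */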
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
	UNWIND_HINT_EMPTY

	pushq	RSP-RDI(%rdi)	/* RSP */
	pushq	(%rdi)		/* RDI */

	/*
	 * We are on the trampoline stack.  All regs except RDI are live.
	 * We can do future final exit work right here.
	 */
	STACKLEAK_ERASE_NOCLOBBER

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi

	popq	%rdi
	popq	%rsp
	USERGS_SYSRET64
SYM_CODE_END(entry_SYSCALL_64)

/*
 * %rdi: prev task
 * %rsi: next task
 */
.pushsection .text, "ax"
SYM_FUNC_START(__switch_to_asm)
	/*
	 * Save callee-saved registers
	 * This must match the order in inactive_task_frame
	 */
	pushq	%rbp
	pushq	%rbx
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15

	/* switch stack */
	movq	%rsp, TASK_threadsp(%rdi)
	movq	TASK_threadsp(%rsi), %rsp

#ifdef CONFIG_STACKPROTECTOR
	movq	TASK_stack_canary(%rsi), %rbx
	movq	%rbx, PER_CPU_VAR(fixed_percpu_data) + stack_canary_offset
#endif

#ifdef CONFIG_RETPOLINE
	/*
	 * When switching from a shallower to a deeper call stack
	 * the RSB may either underflow or use entries populated
	 * with userspace addresses. On CPUs where those concerns
	 * exist, overwrite the RSB with entries which capture
	 * speculative execution to prevent attack.
	 */
	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif

	/* restore callee-saved registers */
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp

	jmp	__switch_to
SYM_FUNC_END(__switch_to_asm)
.popsection

/*
 * A newly forked process directly context switches into this address.
 *
 * rax: prev task we switched from
 * rbx: kernel thread func (NULL for user thread)
 * r12: kernel thread arg
 */
.pushsection .text, "ax"
SYM_CODE_START(ret_from_fork)
	UNWIND_HINT_EMPTY
	movq	%rax, %rdi
	call	schedule_tail			/* rdi: 'prev' task parameter */

	testq	%rbx, %rbx			/* from kernel_thread? */
	jnz	1f				/* kernel threads are uncommon */

2:
	UNWIND_HINT_REGS
	movq	%rsp, %rdi
	call	syscall_exit_to_user_mode	/* returns with IRQs disabled */
	jmp	swapgs_restore_regs_and_return_to_usermode

1:
	/* kernel thread */
	UNWIND_HINT_EMPTY
	movq	%r12, %rdi
	CALL_NOSPEC rbx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling kernel_execve().  Exit to userspace to complete the execve()
	 * syscall.
	 */
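	/*
	 * The thread is now returning to user mode from a successful
	 * execve(), which reports success by returning 0, so force
	 * pt_regs->ax to 0 before taking the normal user-return path
	 * at 2: above.
	 */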
	movq	$0, RAX(%rsp)
	jmp	2b
SYM_CODE_END(ret_from_fork)
.popsection

.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
#ifdef CONFIG_DEBUG_ENTRY
	pushq	%rax
	SAVE_FLAGS(CLBR_RAX)
	testl	$X86_EFLAGS_IF, %eax
	jz	.Lokay_\@
	ud2
.Lokay_\@:
	popq	%rax
#endif
.endm

/**
 * idtentry_body - Macro to emit code calling the C function
 * @cfunc:		C function to be called
 * @has_error_code:	Hardware pushed error code on stack
 */
.macro idtentry_body cfunc has_error_code:req

	call	error_entry
	UNWIND_HINT_REGS

	movq	%rsp, %rdi			/* pt_regs pointer into 1st argument */

	.if \has_error_code == 1
		movq	ORIG_RAX(%rsp), %rsi	/* get error code into 2nd argument */
		movq	$-1, ORIG_RAX(%rsp)	/* no syscall to restart */
	.endif

	call	\cfunc

	jmp	error_return
.endm

/**
 * idtentry - Macro to generate entry stubs for simple IDT entries
 * @vector:		Vector number
 * @asmsym:		ASM symbol for the entry point
 * @cfunc:		C function to be called
 * @has_error_code:	Hardware pushed error code on stack
 *
 * The macro emits code to set up the kernel context for straightforward
 * and simple IDT entries. No IST stack, no paranoid entry checks.
 */
.macro idtentry vector asmsym cfunc has_error_code:req
SYM_CODE_START(\asmsym)
	UNWIND_HINT_IRET_REGS offset=\has_error_code*8
	ASM_CLAC

	.if \has_error_code == 0
		pushq	$-1			/* ORIG_RAX: no syscall to restart */
	.endif

	.if \vector == X86_TRAP_BP
		/*
		 * If coming from kernel space, create a 6-word gap to allow the
		 * int3 handler to emulate a call instruction.
		 */
		testb	$3, CS-ORIG_RAX(%rsp)
		jnz	.Lfrom_usermode_no_gap_\@
		.rept	6
		pushq	5*8(%rsp)
		.endr
		UNWIND_HINT_IRET_REGS offset=8
.Lfrom_usermode_no_gap_\@:
	.endif

	idtentry_body \cfunc \has_error_code

_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
.endm

/*
 * Interrupt entry/exit.
 *
 * The interrupt stubs push (vector) onto the stack, which is the error_code
 * position of idtentry exceptions, and jump to one of the two idtentry points
 * (common/spurious).
 *
 * common_interrupt is a hotpath, align it to a cache line
 */
.macro idtentry_irq vector cfunc
	.p2align CONFIG_X86_L1_CACHE_SHIFT
	idtentry \vector asm_\cfunc \cfunc has_error_code=1
.endm

/*
 * System vectors which invoke their handlers directly and are not
 * going through the regular common device interrupt handling code.
 */
.macro idtentry_sysvec vector cfunc
	idtentry \vector asm_\cfunc \cfunc has_error_code=0
.endm

/**
 * idtentry_mce_db - Macro to generate entry stubs for #MC and #DB
 * @vector:		Vector number
 * @asmsym:		ASM symbol for the entry point
 * @cfunc:		C function to be called
 *
 * The macro emits code to set up the kernel context for #MC and #DB
 *
 * If the entry comes from user space it uses the normal entry path
 * including the return to user space work and preemption checks on
 * exit.
 *
 * If it hits in kernel mode then it needs to go through the paranoid
 * entry as the exception can hit any random state. No preemption
 * check on exit to keep the paranoid path simple.
 */
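/*
 * For illustration only (the real invocations live in asm/idtentry.h):
 * the #DB stub is generated by an expansion along the lines of
 *
 *	idtentry_mce_db	X86_TRAP_DB asm_exc_debug exc_debug
 *
 * which pairs the IST-based exc_debug() for kernel-mode hits with
 * noist_exc_debug() on the task stack for user-mode hits.
 */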
.macro idtentry_mce_db vector asmsym cfunc
SYM_CODE_START(\asmsym)
	UNWIND_HINT_IRET_REGS
	ASM_CLAC

	pushq	$-1			/* ORIG_RAX: no syscall to restart */

	/*
	 * If the entry is from userspace, switch stacks and treat it as
	 * a normal entry.
	 */
	testb	$3, CS-ORIG_RAX(%rsp)
	jnz	.Lfrom_usermode_switch_stack_\@

	/* paranoid_entry returns GS information for paranoid_exit in EBX. */
	call	paranoid_entry

	UNWIND_HINT_REGS

	movq	%rsp, %rdi		/* pt_regs pointer */

	call	\cfunc

	jmp	paranoid_exit

	/* Switch to the regular task stack and use the noist entry point */
.Lfrom_usermode_switch_stack_\@:
	idtentry_body noist_\cfunc, has_error_code=0

_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
.endm

/*
 * Double fault entry. Straight paranoid. No checks from which context
 * this comes because for the espfix induced #DF this would do the wrong
 * thing.
 */
.macro idtentry_df vector asmsym cfunc
SYM_CODE_START(\asmsym)
	UNWIND_HINT_IRET_REGS offset=8
	ASM_CLAC

	/* paranoid_entry returns GS information for paranoid_exit in EBX. */
	call	paranoid_entry
	UNWIND_HINT_REGS

	movq	%rsp, %rdi		/* pt_regs pointer into first argument */
	movq	ORIG_RAX(%rsp), %rsi	/* get error code into 2nd argument */
	movq	$-1, ORIG_RAX(%rsp)	/* no syscall to restart */
	call	\cfunc

	jmp	paranoid_exit

_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
.endm

/*
 * Include the defines which emit the idt entries which are shared
 * between 32 and 64 bit and emit the __irqentry_text_* markers
 * so the stacktrace boundary checks work.
 */
	.align 16
	.globl __irqentry_text_start
__irqentry_text_start:

#include <asm/idtentry.h>

	.align 16
	.globl __irqentry_text_end
__irqentry_text_end:

SYM_CODE_START_LOCAL(common_interrupt_return)
SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates user mode. */
	testb	$3, CS(%rsp)
	jnz	1f
	ud2
1:
#endif
	POP_REGS pop_rdi=0

	/*
	 * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
	 * Save old stack pointer and switch to trampoline stack.
	 */
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
	UNWIND_HINT_EMPTY

	/* Copy the IRET frame to the trampoline stack. */
	pushq	6*8(%rdi)	/* SS */
	pushq	5*8(%rdi)	/* RSP */
	pushq	4*8(%rdi)	/* EFLAGS */
	pushq	3*8(%rdi)	/* CS */
	pushq	2*8(%rdi)	/* RIP */

	/* Push user RDI on the trampoline stack. */
	pushq	(%rdi)

	/*
	 * We are on the trampoline stack.  All regs except RDI are live.
	 * We can do future final exit work right here.
	 */
	STACKLEAK_ERASE_NOCLOBBER

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi

	/* Restore RDI. */
	popq	%rdi
	SWAPGS
	INTERRUPT_RETURN


SYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates kernel mode. */
	testb	$3, CS(%rsp)
	jz	1f
	ud2
1:
#endif
	POP_REGS
	addq	$8, %rsp	/* skip regs->orig_ax */
	/*
	 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
	 * when returning from IPI handler.
	 */
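	/*
	 * INTERRUPT_RETURN is the (possibly paravirt-patched) IRET; on
	 * bare metal it ends up at native_iret below, where the espfix64
	 * check runs before the final iretq.
	 */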
	INTERRUPT_RETURN

SYM_INNER_LABEL_ALIGN(native_iret, SYM_L_GLOBAL)
	UNWIND_HINT_IRET_REGS
	/*
	 * Are we returning to a stack segment from the LDT?  Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
#ifdef CONFIG_X86_ESPFIX64
	testb	$4, (SS-RIP)(%rsp)
	jnz	native_irq_return_ldt
#endif

SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL)
	/*
	 * This may fault.  Non-paranoid faults on return to userspace are
	 * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
	 * Double-faults due to espfix64 are handled in exc_double_fault.
	 * Other faults here are fatal.
	 */
	iretq

#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
	/*
	 * We are running with user GSBASE.  All GPRs contain their user
	 * values.  We have a percpu ESPFIX stack that is eight slots
	 * long (see ESPFIX_STACK_SIZE).  espfix_waddr points to the bottom
	 * of the ESPFIX stack.
	 *
	 * We clobber RAX and RDI in this code.  We stash RDI on the
	 * normal stack and RAX on the ESPFIX stack.
	 *
	 * The ESPFIX stack layout we set up looks like this:
	 *
	 * --- top of ESPFIX stack ---
	 * SS
	 * RSP
	 * RFLAGS
	 * CS
	 * RIP  <-- RSP points here when we're done
	 * RAX  <-- espfix_waddr points here
	 * --- bottom of ESPFIX stack ---
	 */

	pushq	%rdi				/* Stash user RDI */
	SWAPGS					/* to kernel GS */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi	/* to kernel CR3 */

	movq	PER_CPU_VAR(espfix_waddr), %rdi
	movq	%rax, (0*8)(%rdi)		/* user RAX */
	movq	(1*8)(%rsp), %rax		/* user RIP */
	movq	%rax, (1*8)(%rdi)
	movq	(2*8)(%rsp), %rax		/* user CS */
	movq	%rax, (2*8)(%rdi)
	movq	(3*8)(%rsp), %rax		/* user RFLAGS */
	movq	%rax, (3*8)(%rdi)
	movq	(5*8)(%rsp), %rax		/* user SS */
	movq	%rax, (5*8)(%rdi)
	movq	(4*8)(%rsp), %rax		/* user RSP */
	movq	%rax, (4*8)(%rdi)
	/* Now RAX == RSP. */

	andl	$0xffff0000, %eax		/* RAX = (RSP & 0xffff0000) */

	/*
	 * espfix_stack[31:16] == 0.  The page tables are set up such that
	 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
	 * espfix_waddr for any X.  That is, there are 65536 RO aliases of
	 * the same page.  Set up RSP so that RSP[31:16] contains the
	 * respective 16 bits of the /userspace/ RSP and RSP nonetheless
	 * still points to an RO alias of the ESPFIX stack.
	 */
	orq	PER_CPU_VAR(espfix_stack), %rax

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
	SWAPGS					/* to user GS */
	popq	%rdi				/* Restore user RDI */

	movq	%rax, %rsp
	UNWIND_HINT_IRET_REGS offset=8

	/*
	 * At this point, we cannot write to the stack any more, but we can
	 * still read.
	 */
	popq	%rax				/* Restore user RAX */

	/*
	 * RSP now points to an ordinary IRET frame, except that the page
	 * is read-only and RSP[31:16] are preloaded with the userspace
	 * values.  We can now IRET back to userspace.
	 */
	jmp	native_irq_return_iret
#endif
SYM_CODE_END(common_interrupt_return)
_ASM_NOKPROBE(common_interrupt_return)

/*
 * Reload gs selector with exception handling
 * edi:  new selector
 *
 * Is in entry.text as it shouldn't be instrumented.
 */
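/*
 * For context: the C wrapper (native_load_gs_index()) disables interrupts
 * around this helper, roughly
 *
 *	local_irq_save(flags);
 *	asm_load_gs_index(selector);
 *	local_irq_restore(flags);
 *
 * so the window between the two swapgs instructions below, where the user
 * GSBASE is the active one, cannot be interrupted.
 */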
SYM_FUNC_START(asm_load_gs_index)
	FRAME_BEGIN
	swapgs
.Lgs_change:
	movl	%edi, %gs
2:	ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
	swapgs
	FRAME_END
	ret
SYM_FUNC_END(asm_load_gs_index)
EXPORT_SYMBOL(asm_load_gs_index)

	_ASM_EXTABLE(.Lgs_change, .Lbad_gs)
	.section .fixup, "ax"

	/* running with kernelgs */
SYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs)
	swapgs					/* switch back to user gs */
.macro ZAP_GS
	/* This can't be a string because the preprocessor needs to see it. */
	movl $__USER_DS, %eax
	movl %eax, %gs
.endm
	ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG
	xorl	%eax, %eax
	movl	%eax, %gs
	jmp	2b
SYM_CODE_END(.Lbad_gs)
	.previous

/*
 * rdi: New stack pointer points to the top word of the stack
 * rsi: Function pointer
 * rdx: Function argument (can be NULL if none)
 */
SYM_FUNC_START(asm_call_on_stack)
SYM_INNER_LABEL(asm_call_sysvec_on_stack, SYM_L_GLOBAL)
SYM_INNER_LABEL(asm_call_irq_on_stack, SYM_L_GLOBAL)
	/*
	 * Save the frame pointer unconditionally. This allows the ORC
	 * unwinder to handle the stack switch.
	 */
	pushq	%rbp
	mov	%rsp, %rbp

	/*
	 * The unwinder relies on the word at the top of the new stack
	 * page linking back to the previous RSP.
	 */
	mov	%rsp, (%rdi)
	mov	%rdi, %rsp
	/* Move the argument to the right place */
	mov	%rdx, %rdi

1:
	.pushsection .discard.instr_begin
	.long 1b - .
	.popsection

	CALL_NOSPEC rsi

2:
	.pushsection .discard.instr_end
	.long 2b - .
	.popsection

	/* Restore the previous stack pointer from RBP. */
	leaveq
	ret
SYM_FUNC_END(asm_call_on_stack)

#ifdef CONFIG_XEN_PV
/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 *
 * C calling convention: exc_xen_hypervisor_callback(struct *pt_regs)
 */
SYM_CODE_START_LOCAL(exc_xen_hypervisor_callback)

/*
 * Since we don't modify %rdi, evtchn_do_upcall(struct *pt_regs) will
 * see the correct pointer to the pt_regs
 */
	UNWIND_HINT_FUNC
	movq	%rdi, %rsp			/* we don't return, adjust the stack frame */
	UNWIND_HINT_REGS

	call	xen_pv_evtchn_do_upcall

	jmp	error_return
SYM_CODE_END(exc_xen_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
SYM_CODE_START(xen_failsafe_callback)
	UNWIND_HINT_EMPTY
	movl	%ds, %ecx
	cmpw	%cx, 0x10(%rsp)
	jne	1f
	movl	%es, %ecx
	cmpw	%cx, 0x18(%rsp)
	jne	1f
	movl	%fs, %ecx
	cmpw	%cx, 0x20(%rsp)
	jne	1f
	movl	%gs, %ecx
	cmpw	%cx, 0x28(%rsp)
	jne	1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	pushq	$0				/* RIP */
	UNWIND_HINT_IRET_REGS offset=8
	jmp	asm_exc_general_protection
1:	/* Segment mismatch => Category 1 (Bad segment).  Retry the IRET. */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	UNWIND_HINT_IRET_REGS
	pushq	$-1 /* orig_ax = -1 => not a system call */
	PUSH_AND_CLEAR_REGS
	ENCODE_FRAME_POINTER
	jmp	error_return
SYM_CODE_END(xen_failsafe_callback)
#endif /* CONFIG_XEN_PV */

/*
 * Save all registers in pt_regs.  Return GSBASE related information
 * in EBX depending on the availability of the FSGSBASE instructions:
 *
 * FSGSBASE	R/EBX
 *     N        0 -> SWAPGS on exit
 *              1 -> no SWAPGS on exit
 *
 *     Y        GSBASE value at entry, must be restored in paranoid_exit
 */
SYM_CODE_START_LOCAL(paranoid_entry)
	UNWIND_HINT_FUNC
	cld
	PUSH_AND_CLEAR_REGS save_ret=1
	ENCODE_FRAME_POINTER 8

	/*
	 * Always stash CR3 in %r14.  This value will be restored,
	 * verbatim, at exit.  Needed if paranoid_entry interrupted
	 * another entry that already switched to the user CR3 value
	 * but has not yet returned to userspace.
	 *
	 * This is also why CS (stashed in the "iret frame" by the
	 * hardware at entry) cannot be used: this may be a return
	 * to kernel code, but with a user CR3 value.
	 *
	 * Switching CR3 does not depend on kernel GSBASE so it can
	 * be done before switching to the kernel GSBASE. This is
	 * required for FSGSBASE because the kernel GSBASE has to
	 * be retrieved from a kernel internal table.
	 */
	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14

	/*
	 * Handling GSBASE depends on the availability of FSGSBASE.
	 *
	 * Without FSGSBASE the kernel enforces that negative GSBASE
	 * values indicate kernel GSBASE. With FSGSBASE no assumptions
	 * can be made about the GSBASE value when entering from user
	 * space.
	 */
	ALTERNATIVE "jmp .Lparanoid_entry_checkgs", "", X86_FEATURE_FSGSBASE

	/*
	 * Read the current GSBASE and store it in %rbx unconditionally,
	 * retrieve and set the current CPU's kernel GSBASE. The stored value
	 * has to be restored in paranoid_exit unconditionally.
	 *
	 * The MSR write ensures that no subsequent load is based on a
	 * mispredicted GSBASE. No extra FENCE required.
	 */
	SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
	ret

.Lparanoid_entry_checkgs:
	/* EBX = 1 -> kernel GSBASE active, no restore required */
	movl	$1, %ebx
	/*
	 * The kernel-enforced convention is a negative GSBASE indicates
	 * a kernel value. No SWAPGS needed on entry and exit.
	 */
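	/*
	 * rdmsr returns MSR_GS_BASE in EDX:EAX, so testing %edx checks
	 * bit 63 of the base.  A clear sign bit means this is not a kernel
	 * address, i.e. the user GSBASE is still active and SWAPGS is
	 * required.
	 */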
	movl	$MSR_GS_BASE, %ecx
	rdmsr
	testl	%edx, %edx
	jns	.Lparanoid_entry_swapgs
	ret

.Lparanoid_entry_swapgs:
	SWAPGS

	/*
	 * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an
	 * unconditional CR3 write, even in the PTI case.  So do an lfence
	 * to prevent GS speculation, regardless of whether PTI is enabled.
	 */
	FENCE_SWAPGS_KERNEL_ENTRY

	/* EBX = 0 -> SWAPGS required on exit */
	xorl	%ebx, %ebx
	ret
SYM_CODE_END(paranoid_entry)

/*
 * "Paranoid" exit path from exception stack.  This is invoked
 * only on return from non-NMI IST interrupts that came
 * from kernel space.
 *
 * We may be returning to very strange contexts (e.g. very early
 * in syscall entry), so checking for preemption here would
 * be complicated.  Fortunately, there's no good reason to try
 * to handle preemption here.
 *
 * R/EBX contains the GSBASE related information depending on the
 * availability of the FSGSBASE instructions:
 *
 * FSGSBASE	R/EBX
 *     N        0 -> SWAPGS on exit
 *              1 -> no SWAPGS on exit
 *
 *     Y        User space GSBASE, must be restored unconditionally
 */
SYM_CODE_START_LOCAL(paranoid_exit)
	UNWIND_HINT_REGS
	/*
	 * The order of operations is important. RESTORE_CR3 requires
	 * kernel GSBASE.
	 *
	 * NB to anyone tempted to optimize this code: this code does
	 * not execute at all for exceptions from user mode. Those
	 * exceptions go through error_exit instead.
	 */
	RESTORE_CR3	scratch_reg=%rax save_reg=%r14

	/* Handle the three GSBASE cases */
	ALTERNATIVE "jmp .Lparanoid_exit_checkgs", "", X86_FEATURE_FSGSBASE

	/* With FSGSBASE enabled, unconditionally restore GSBASE */
	wrgsbase	%rbx
	jmp	restore_regs_and_return_to_kernel

.Lparanoid_exit_checkgs:
	/* On non-FSGSBASE systems, conditionally do SWAPGS */
	testl	%ebx, %ebx
	jnz	restore_regs_and_return_to_kernel

	/* We are returning to a context with user GSBASE */
	SWAPGS_UNSAFE_STACK
	jmp	restore_regs_and_return_to_kernel
SYM_CODE_END(paranoid_exit)

/*
 * Save all registers in pt_regs, and switch GS if needed.
 */
SYM_CODE_START_LOCAL(error_entry)
	UNWIND_HINT_FUNC
	cld
	PUSH_AND_CLEAR_REGS save_ret=1
	ENCODE_FRAME_POINTER 8
	testb	$3, CS+8(%rsp)
	jz	.Lerror_kernelspace

	/*
	 * We entered from user mode or we're pretending to have entered
	 * from user mode due to an IRET fault.
	 */
	SWAPGS
	FENCE_SWAPGS_USER_ENTRY
	/* We have user CR3.  Change to kernel CR3. */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax

.Lerror_entry_from_usermode_after_swapgs:
	/* Put us onto the real thread stack. */
	popq	%r12				/* save return addr in %r12 */
	movq	%rsp, %rdi			/* arg0 = pt_regs pointer */
	call	sync_regs
	movq	%rax, %rsp			/* switch stack */
	ENCODE_FRAME_POINTER
	pushq	%r12
	ret

.Lerror_entry_done_lfence:
	FENCE_SWAPGS_KERNEL_ENTRY
.Lerror_entry_done:
	ret

	/*
	 * There are two places in the kernel that can potentially fault with
	 * usergs. Handle them here.  B stepping K8s sometimes report a
	 * truncated RIP for IRET exceptions returning to compat mode. Check
	 * for these here too.
	 */
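	/*
	 * The comparisons below check the saved RIP both against the full
	 * address of native_irq_return_iret and against its low 32 bits
	 * zero-extended, so the truncated-RIP erratum mentioned above is
	 * caught as well.
	 */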
.Lerror_kernelspace:
	leaq	native_irq_return_iret(%rip), %rcx
	cmpq	%rcx, RIP+8(%rsp)
	je	.Lerror_bad_iret
	movl	%ecx, %eax			/* zero extend */
	cmpq	%rax, RIP+8(%rsp)
	je	.Lbstep_iret
	cmpq	$.Lgs_change, RIP+8(%rsp)
	jne	.Lerror_entry_done_lfence

	/*
	 * hack: .Lgs_change can fail with user gsbase.  If this happens, fix up
	 * gsbase and proceed.  We'll fix up the exception and land in
	 * .Lgs_change's error handler with kernel gsbase.
	 */
	SWAPGS
	FENCE_SWAPGS_USER_ENTRY
	jmp	.Lerror_entry_done

.Lbstep_iret:
	/* Fix truncated RIP */
	movq	%rcx, RIP+8(%rsp)
	/* fall through */

.Lerror_bad_iret:
	/*
	 * We came from an IRET to user mode, so we have user
	 * gsbase and CR3.  Switch to kernel gsbase and CR3:
	 */
	SWAPGS
	FENCE_SWAPGS_USER_ENTRY
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax

	/*
	 * Pretend that the exception came from user mode: set up pt_regs
	 * as if we faulted immediately after IRET.
	 */
	mov	%rsp, %rdi
	call	fixup_bad_iret
	mov	%rax, %rsp
	jmp	.Lerror_entry_from_usermode_after_swapgs
SYM_CODE_END(error_entry)

SYM_CODE_START_LOCAL(error_return)
	UNWIND_HINT_REGS
	DEBUG_ENTRY_ASSERT_IRQS_OFF
	testb	$3, CS(%rsp)
	jz	restore_regs_and_return_to_kernel
	jmp	swapgs_restore_regs_and_return_to_usermode
SYM_CODE_END(error_return)

/*
 * Runs on exception stack.  Xen PV does not go through this path at all,
 * so we can use real assembly here.
 *
 * Registers:
 *	%r14: Used to save/restore the CR3 of the interrupted context
 *	      when PAGE_TABLE_ISOLATION is in use.  Do not clobber.
 */
SYM_CODE_START(asm_exc_nmi)
	UNWIND_HINT_IRET_REGS

	/*
	 * We allow breakpoints in NMIs. If a breakpoint occurs, then
	 * the iretq it performs will take us out of NMI context.
	 * This means that we can have nested NMIs where the next
	 * NMI is using the top of the stack of the previous NMI. We
	 * can't let it execute because the nested NMI will corrupt the
	 * stack of the previous NMI. NMI handlers are not re-entrant
	 * anyway.
	 *
	 * To handle this case we do the following:
	 *   Check a special location on the stack that contains
	 *   a variable that is set when NMIs are executing.
	 *   The interrupted task's stack is also checked to see if it
	 *   is an NMI stack.
	 *   If the variable is not set and the stack is not the NMI
	 *   stack then:
	 *     o Set the special variable on the stack
	 *     o Copy the interrupt frame into an "outermost" location on the
	 *       stack
	 *     o Copy the interrupt frame into an "iret" location on the stack
	 *     o Continue processing the NMI
	 *   If the variable is set or the previous stack is the NMI stack:
	 *     o Modify the "iret" location to jump to the repeat_nmi
	 *     o return back to the first NMI
	 *
	 * Now on exit of the first NMI, we first clear the stack variable.
	 * The NMI stack will tell any nested NMIs at that point that it is
	 * nested. Then we pop the stack normally with iret, and if there was
	 * a nested NMI that updated the copy interrupt stack frame, a
	 * jump will be made to the repeat_nmi code that will handle the second
	 * NMI.
	 *
	 * However, espfix prevents us from directly returning to userspace
	 * with a single IRET instruction.  Similarly, IRET to user mode
	 * can fault.  We therefore handle NMIs from user space like
	 * other IST entries.
	 */

	ASM_CLAC

	/* Use %rdx as our temp variable throughout */
	pushq	%rdx

	testb	$3, CS-RIP+8(%rsp)
	jz	.Lnmi_from_kernel

	/*
	 * NMI from user mode.  We need to run on the thread stack, but we
	 * can't go through the normal entry paths: NMIs are masked, and
	 * we don't want to enable interrupts, because then we'll end
	 * up in an awkward situation in which IRQs are on but NMIs
	 * are off.
	 *
	 * We also must not push anything to the stack before switching
	 * stacks lest we corrupt the "NMI executing" variable.
	 */

	swapgs
	cld
	FENCE_SWAPGS_USER_ENTRY
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
	movq	%rsp, %rdx
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
	UNWIND_HINT_IRET_REGS base=%rdx offset=8
	pushq	5*8(%rdx)	/* pt_regs->ss */
	pushq	4*8(%rdx)	/* pt_regs->rsp */
	pushq	3*8(%rdx)	/* pt_regs->flags */
	pushq	2*8(%rdx)	/* pt_regs->cs */
	pushq	1*8(%rdx)	/* pt_regs->rip */
	UNWIND_HINT_IRET_REGS
	pushq	$-1		/* pt_regs->orig_ax */
	PUSH_AND_CLEAR_REGS rdx=(%rdx)
	ENCODE_FRAME_POINTER

	/*
	 * At this point we no longer need to worry about stack damage
	 * due to nesting -- we're on the normal thread stack and we're
	 * done with the NMI stack.
	 */

	movq	%rsp, %rdi
	movq	$-1, %rsi
	call	exc_nmi

	/*
	 * Return back to user mode.  We must *not* do the normal exit
	 * work, because we don't want to enable interrupts.
	 */
	jmp	swapgs_restore_regs_and_return_to_usermode

.Lnmi_from_kernel:
	/*
	 * Here's what our stack frame will look like:
	 * +---------------------------------------------------------+
	 * | original SS                                              |
	 * | original Return RSP                                      |
	 * | original RFLAGS                                          |
	 * | original CS                                              |
	 * | original RIP                                             |
	 * +---------------------------------------------------------+
	 * | temp storage for rdx                                     |
	 * +---------------------------------------------------------+
	 * | "NMI executing" variable                                 |
	 * +---------------------------------------------------------+
	 * | iret SS          } Copied from "outermost" frame        |
	 * | iret Return RSP  } on each loop iteration; overwritten  |
	 * | iret RFLAGS      } by a nested NMI to force another     |
	 * | iret CS          } iteration if needed.                 |
	 * | iret RIP         }                                      |
	 * +---------------------------------------------------------+
	 * | outermost SS          } initialized in first_nmi;       |
	 * | outermost Return RSP  } will not be changed before      |
	 * | outermost RFLAGS      } NMI processing is done.         |
	 * | outermost CS          } Copied to "iret" frame on each  |
	 * | outermost RIP         } iteration.                      |
	 * +---------------------------------------------------------+
	 * | pt_regs                                                 |
	 * +---------------------------------------------------------+
	 *
	 * The "original" frame is used by hardware.  Before re-enabling
	 * NMIs, we need to be done with it, and we need to leave enough
	 * space for the asm code here.
	 *
	 * We return by executing IRET while RSP points to the "iret" frame.
	 * That will either return for real or it will loop back into NMI
	 * processing.
	 *
	 * The "outermost" frame is copied to the "iret" frame on each
	 * iteration of the loop, so each iteration starts with the "iret"
	 * frame pointing to the final return target.
	 */

	/*
	 * Determine whether we're a nested NMI.
	 *
	 * If we interrupted kernel code between repeat_nmi and
	 * end_repeat_nmi, then we are a nested NMI.  We must not
	 * modify the "iret" frame because it's being written by
	 * the outer NMI.  That's okay; the outer NMI handler is
	 * about to call exc_nmi() anyway, so we can just
	 * resume the outer NMI.
	 */

	movq	$repeat_nmi, %rdx
	cmpq	8(%rsp), %rdx
	ja	1f
	movq	$end_repeat_nmi, %rdx
	cmpq	8(%rsp), %rdx
	ja	nested_nmi_out
1:

	/*
	 * Now check "NMI executing".  If it's set, then we're nested.
	 * This will not detect if we interrupted an outer NMI just
	 * before IRET.
	 */
	cmpl	$1, -8(%rsp)
	je	nested_nmi

	/*
	 * Now test if the previous stack was an NMI stack.  This covers
	 * the case where we interrupt an outer NMI after it clears
	 * "NMI executing" but before IRET.  We need to be careful, though:
	 * there is one case in which RSP could point to the NMI stack
	 * despite there being no NMI active: naughty userspace controls
	 * RSP at the very beginning of the SYSCALL targets.  We can
	 * pull a fast one on naughty userspace, though: we program
	 * SYSCALL to mask DF, so userspace cannot cause DF to be set
	 * if it controls the kernel's RSP.  We set DF before we clear
	 * "NMI executing".
	 */
	lea	6*8(%rsp), %rdx
	/* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
	cmpq	%rdx, 4*8(%rsp)
	/* If the stack pointer is above the NMI stack, this is a normal NMI */
	ja	first_nmi

	subq	$EXCEPTION_STKSZ, %rdx
	cmpq	%rdx, 4*8(%rsp)
	/* If it is below the NMI stack, it is a normal NMI */
	jb	first_nmi

	/* Ah, it is within the NMI stack. */

	testb	$(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
	jz	first_nmi	/* RSP was user controlled. */

	/* This is a nested NMI. */

nested_nmi:
	/*
	 * Modify the "iret" frame to point to repeat_nmi, forcing another
	 * iteration of NMI handling.
	 */
	subq	$8, %rsp
	leaq	-10*8(%rsp), %rdx
	pushq	$__KERNEL_DS
	pushq	%rdx
	pushfq
	pushq	$__KERNEL_CS
	pushq	$repeat_nmi

	/* Put stack back */
	addq	$(6*8), %rsp

nested_nmi_out:
	popq	%rdx

	/* We are returning to kernel mode, so this cannot result in a fault. */
	iretq

first_nmi:
	/* Restore rdx. */
	movq	(%rsp), %rdx

	/* Make room for "NMI executing". */
	pushq	$0

	/* Leave room for the "iret" frame */
	subq	$(5*8), %rsp

	/* Copy the "original" frame to the "outermost" frame */
	.rept 5
	pushq	11*8(%rsp)
	.endr
	UNWIND_HINT_IRET_REGS

	/* Everything up to here is safe from nested NMIs */

#ifdef CONFIG_DEBUG_ENTRY
	/*
	 * For ease of testing, unmask NMIs right away.  Disabled by
	 * default because IRET is very expensive.
	 */
	pushq	$0		/* SS */
	pushq	%rsp		/* RSP (minus 8 because of the previous push) */
	addq	$8, (%rsp)	/* Fix up RSP */
	pushfq			/* RFLAGS */
	pushq	$__KERNEL_CS	/* CS */
	pushq	$1f		/* RIP */
	iretq			/* continues at repeat_nmi below */
	UNWIND_HINT_IRET_REGS
1:
#endif

repeat_nmi:
	/*
	 * If there was a nested NMI, the first NMI's iret will return
	 * here. But NMIs are still enabled and we can take another
	 * nested NMI.  The nested NMI checks the interrupted RIP to see
	 * if it is between repeat_nmi and end_repeat_nmi, and if so
	 * it will just return, as we are about to repeat an NMI anyway.
	 * This makes it safe to copy to the stack frame that a nested
	 * NMI will update.
	 *
	 * RSP is pointing to "outermost RIP".  gsbase is unknown, but, if
	 * we're repeating an NMI, gsbase has the same value that it had on
	 * the first iteration.  paranoid_entry will load the kernel
	 * gsbase if needed before we call exc_nmi().  "NMI executing"
	 * is zero.
	 */
	movq	$1, 10*8(%rsp)		/* Set "NMI executing". */

	/*
	 * Copy the "outermost" frame to the "iret" frame.  NMIs that nest
	 * here must not modify the "iret" frame while we're writing to
	 * it or it will end up containing garbage.
	 */
	addq	$(10*8), %rsp
	.rept 5
	pushq	-6*8(%rsp)
	.endr
	subq	$(5*8), %rsp
end_repeat_nmi:

	/*
	 * Everything below this point can be preempted by a nested NMI.
	 * If this happens, then the inner NMI will change the "iret"
	 * frame to point back to repeat_nmi.
	 */
	pushq	$-1				/* ORIG_RAX: no syscall to restart */

	/*
	 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
	 * as we should not be calling schedule in NMI context, even with
	 * normal interrupts enabled. An NMI should not be
	 * setting NEED_RESCHED or anything that normal interrupts and
	 * exceptions might do.
	 */
	call	paranoid_entry
	UNWIND_HINT_REGS

	movq	%rsp, %rdi
	movq	$-1, %rsi
	call	exc_nmi

	/* Always restore stashed CR3 value (see paranoid_entry) */
	RESTORE_CR3 scratch_reg=%r15 save_reg=%r14

	/*
	 * The above invocation of paranoid_entry stored the GSBASE
	 * related information in R/EBX depending on the availability
	 * of FSGSBASE.
	 *
	 * If FSGSBASE is enabled, restore the saved GSBASE value
	 * unconditionally, otherwise take the conditional SWAPGS path.
	 */
	ALTERNATIVE "jmp nmi_no_fsgsbase", "", X86_FEATURE_FSGSBASE

	wrgsbase	%rbx
	jmp	nmi_restore

nmi_no_fsgsbase:
	/* EBX == 0 -> invoke SWAPGS */
	testl	%ebx, %ebx
	jnz	nmi_restore

nmi_swapgs:
	SWAPGS_UNSAFE_STACK

nmi_restore:
	POP_REGS

	/*
	 * Skip orig_ax and the "outermost" frame to point RSP at the "iret"
	 * frame.
	 */
	addq	$6*8, %rsp

	/*
	 * Clear "NMI executing".  Set DF first so that we can easily
	 * distinguish the remaining code between here and IRET from
	 * the SYSCALL entry and exit paths.
	 *
	 * We arguably should just inspect RIP instead, but I (Andy) wrote
	 * this code when I had the misapprehension that Xen PV supported
	 * NMIs, and Xen PV would break that approach.
	 */
	std
	movq	$0, 5*8(%rsp)		/* clear "NMI executing" */

	/*
	 * iretq reads the "iret" frame and exits the NMI stack in a
	 * single instruction.  We are returning to kernel mode, so this
	 * cannot result in a fault.  Similarly, we don't need to worry
	 * about espfix64 on the way back to kernel mode.
	 */
	iretq
SYM_CODE_END(asm_exc_nmi)

#ifndef CONFIG_IA32_EMULATION
/*
 * This handles SYSCALL from 32-bit code.  There is no way to program
 * MSRs to fully disable 32-bit SYSCALL.
 */
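/*
 * syscall_init() points the CSTAR MSR at this stub when IA32 emulation is
 * disabled, so a 32-bit SYSCALL simply fails with -ENOSYS and sysretl
 * returns to user mode without ever touching a kernel stack.
 */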
SYM_CODE_START(ignore_sysret)
	UNWIND_HINT_EMPTY
	mov	$-ENOSYS, %eax
	sysretl
SYM_CODE_END(ignore_sysret)
#endif

.pushsection .text, "ax"
SYM_CODE_START(rewind_stack_do_exit)
	UNWIND_HINT_FUNC
	/* Prevent any naive code from trying to unwind to our caller. */
	xorl	%ebp, %ebp

	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rax
	leaq	-PTREGS_SIZE(%rax), %rsp
	UNWIND_HINT_REGS

	call	do_exit
SYM_CODE_END(rewind_stack_do_exit)
.popsection