/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1991,1992 Linus Torvalds
 *
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 *
 * Stack layout while running C code:
 *	ptrace needs to have all registers on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process(), signal.c:do_signal(),
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - unused -- was %gs on old stackprotector kernels
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 */

#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/frame.h>
#include <asm/trapnr.h>
#include <asm/nospec-branch.h>

#include "calling.h"

	.section .entry.text, "ax"

#define PTI_SWITCH_MASK		(1 << PAGE_SHIFT)

/* Unconditionally switch to user cr3 */
.macro SWITCH_TO_USER_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI

	movl	%cr3, \scratch_reg
	orl	$PTI_SWITCH_MASK, \scratch_reg
	movl	\scratch_reg, %cr3
.Lend_\@:
.endm

.macro BUG_IF_WRONG_CR3 no_user_check=0
#ifdef CONFIG_DEBUG_ENTRY
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	.if \no_user_check == 0
	/* coming from usermode? */
	testl	$USER_SEGMENT_RPL_MASK, PT_CS(%esp)
	jz	.Lend_\@
	.endif
	/* On user-cr3? */
	movl	%cr3, %eax
	testl	$PTI_SWITCH_MASK, %eax
	jnz	.Lend_\@
	/* From userspace with kernel cr3 - BUG */
	ud2
.Lend_\@:
#endif
.endm

/*
 * Switch to kernel cr3 if not already loaded and return current cr3 in
 * \scratch_reg
 */
.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	movl	%cr3, \scratch_reg
	/* Test if we are already on kernel CR3 */
	testl	$PTI_SWITCH_MASK, \scratch_reg
	jz	.Lend_\@
	andl	$(~PTI_SWITCH_MASK), \scratch_reg
	movl	\scratch_reg, %cr3
	/* Return original CR3 in \scratch_reg */
	orl	$PTI_SWITCH_MASK, \scratch_reg
.Lend_\@:
.endm
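/*
 * Illustration for the PTI macros above (the values are made up,
 * nothing relies on them): with PAGE_SHIFT == 12 the kernel and user
 * copies of the PGD are expected to sit in two adjacent pages, so
 * switching address spaces only has to toggle one CR3 bit:
 *
 *	kernel cr3: 0x01234000  <->  user cr3: 0x01235000
 *
 * which keeps the switch down to a single or/and plus the cr3 write.
 */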
#define CS_FROM_ENTRY_STACK	(1 << 31)
#define CS_FROM_USER_CR3	(1 << 30)
#define CS_FROM_KERNEL		(1 << 29)
#define CS_FROM_ESPFIX		(1 << 28)

.macro FIXUP_FRAME
	/*
	 * The high bits of the CS dword (__csh) are used for CS_FROM_*.
	 * Clear them in case hardware didn't do this for us.
	 */
	andl	$0x0000ffff, 4*4(%esp)

#ifdef CONFIG_VM86
	testl	$X86_EFLAGS_VM, 5*4(%esp)
	jnz	.Lfrom_usermode_no_fixup_\@
#endif
	testl	$USER_SEGMENT_RPL_MASK, 4*4(%esp)
	jnz	.Lfrom_usermode_no_fixup_\@

	orl	$CS_FROM_KERNEL, 4*4(%esp)

	/*
	 * When we're here from kernel mode, the (exception) stack looks like:
	 *
	 *	 6*4(%esp) - <previous context>
	 *	 5*4(%esp) - flags
	 *	 4*4(%esp) - cs
	 *	 3*4(%esp) - ip
	 *	 2*4(%esp) - orig_eax
	 *	 1*4(%esp) - gs / function
	 *	 0*4(%esp) - fs
	 *
	 * Let's build a 5-entry IRET frame after that, such that struct pt_regs
	 * is complete and in particular regs->sp is correct. This gives us
	 * the original 6 entries as gap:
	 *
	 *	14*4(%esp) - <previous context>
	 *	13*4(%esp) - gap / flags
	 *	12*4(%esp) - gap / cs
	 *	11*4(%esp) - gap / ip
	 *	10*4(%esp) - gap / orig_eax
	 *	 9*4(%esp) - gap / gs / function
	 *	 8*4(%esp) - gap / fs
	 *	 7*4(%esp) - ss
	 *	 6*4(%esp) - sp
	 *	 5*4(%esp) - flags
	 *	 4*4(%esp) - cs
	 *	 3*4(%esp) - ip
	 *	 2*4(%esp) - orig_eax
	 *	 1*4(%esp) - gs / function
	 *	 0*4(%esp) - fs
	 */

	pushl	%ss		# ss
	pushl	%esp		# sp (points at ss)
	addl	$7*4, (%esp)	# point sp back at the previous context
	pushl	7*4(%esp)	# flags
	pushl	7*4(%esp)	# cs
	pushl	7*4(%esp)	# ip
	pushl	7*4(%esp)	# orig_eax
	pushl	7*4(%esp)	# gs / function
	pushl	7*4(%esp)	# fs
.Lfrom_usermode_no_fixup_\@:
.endm

.macro IRET_FRAME
	/*
	 * We're called with %ds, %es, %fs, and %gs from the interrupted
	 * frame, so we shouldn't use them. Also, we may be in ESPFIX
	 * mode and therefore have a nonzero SS base and an offset ESP,
	 * so any attempt to access the stack needs to use SS (except for
	 * accesses through %esp, which automatically use SS).
	 */
	testl	$CS_FROM_KERNEL, 1*4(%esp)
	jz	.Lfinished_frame_\@

	/*
	 * Reconstruct the 3 entry IRET frame right after the (modified)
	 * regs->sp without lowering %esp in between, such that an NMI in the
	 * middle doesn't scribble our stack.
	 */
	pushl	%eax
	pushl	%ecx
	movl	5*4(%esp), %eax		# (modified) regs->sp

	movl	4*4(%esp), %ecx		# flags
	movl	%ecx, %ss:-1*4(%eax)

	movl	3*4(%esp), %ecx		# cs
	andl	$0x0000ffff, %ecx
	movl	%ecx, %ss:-2*4(%eax)

	movl	2*4(%esp), %ecx		# ip
	movl	%ecx, %ss:-3*4(%eax)

	movl	1*4(%esp), %ecx		# eax
	movl	%ecx, %ss:-4*4(%eax)

	popl	%ecx
	lea	-4*4(%eax), %esp
	popl	%eax
.Lfinished_frame_\@:
.endm
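/*
 * Why the CS_FROM_* bits are available at all: the CPU pushes CS as a
 * 16-bit selector in a 32-bit stack slot, so bits 16-31 of the saved
 * CS dword carry no architectural state (they may contain garbage).
 * FIXUP_FRAME clears them, after which the entry code is free to use
 * them as software flags, as long as IRET_FRAME strips them again
 * before the frame is handed to iret.
 */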
.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0 unwind_espfix=0
	cld
.if \skip_gs == 0
	pushl	$0
.endif
	pushl	%fs

	pushl	%eax
	movl	$(__KERNEL_PERCPU), %eax
	movl	%eax, %fs
.if \unwind_espfix > 0
	UNWIND_ESPFIX_STACK
.endif
	popl	%eax

	FIXUP_FRAME
	pushl	%es
	pushl	%ds
	pushl	\pt_regs_ax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	movl	$(__USER_DS), %edx
	movl	%edx, %ds
	movl	%edx, %es
	/* Switch to kernel stack if necessary */
.if \switch_stacks > 0
	SWITCH_TO_KERNEL_STACK
.endif
.endm

.macro SAVE_ALL_NMI cr3_reg:req unwind_espfix=0
	SAVE_ALL unwind_espfix=\unwind_espfix

	BUG_IF_WRONG_CR3

	/*
	 * Now switch the CR3 when PTI is enabled.
	 *
	 * We can enter with either user or kernel cr3; the code will
	 * store the old cr3 in \cr3_reg and switch to the kernel cr3
	 * if necessary.
	 */
	SWITCH_TO_KERNEL_CR3 scratch_reg=\cr3_reg

.Lend_\@:
.endm

.macro RESTORE_INT_REGS
	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl	%ds
2:	popl	%es
3:	popl	%fs
4:	addl	$(4 + \pop), %esp	/* pop the unused "gs" slot */
	IRET_FRAME

	/*
	 * There is no _ASM_EXTABLE_TYPE_REG() for ASM, however since this is
	 * ASM the registers are known and we can trivially hard-code them.
	 */
	_ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_POP_ZERO|EX_REG_DS)
	_ASM_EXTABLE_TYPE(2b, 3b, EX_TYPE_POP_ZERO|EX_REG_ES)
	_ASM_EXTABLE_TYPE(3b, 4b, EX_TYPE_POP_ZERO|EX_REG_FS)
.endm

.macro RESTORE_ALL_NMI cr3_reg:req pop=0
	/*
	 * Now switch the CR3 when PTI is enabled.
	 *
	 * We enter with kernel cr3 and switch the cr3 to the value
	 * stored on \cr3_reg, which is either a user or a kernel cr3.
	 */
	ALTERNATIVE "jmp .Lswitched_\@", "", X86_FEATURE_PTI

	testl	$PTI_SWITCH_MASK, \cr3_reg
	jz	.Lswitched_\@

	/* User cr3 in \cr3_reg - write it to hardware cr3 */
	movl	\cr3_reg, %cr3

.Lswitched_\@:

	BUG_IF_WRONG_CR3

	RESTORE_REGS pop=\pop
.endm
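/*
 * Note on the exception table entries in RESTORE_REGS above: popping a
 * user segment selector can fault (e.g. when the selector refers to a
 * descriptor that has gone away). The EX_TYPE_POP_ZERO fixups emulate
 * the faulting pop by loading a NULL selector instead, so a bad user
 * segment cannot oops the kernel on the exit path.
 */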
.macro CHECK_AND_APPLY_ESPFIX
#ifdef CONFIG_X86_ESPFIX32
#define GDT_ESPFIX_OFFSET (GDT_ENTRY_ESPFIX_SS * 8)
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + GDT_ESPFIX_OFFSET

	ALTERNATIVE "jmp .Lend_\@", "", X86_BUG_ESPFIX

	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	/*
	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	 * are returning to the kernel.
	 * See comments in process.c:copy_thread() for details.
	 */
	movb	PT_OLDSS(%esp), %ah
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
	jne	.Lend_\@	# returning to user-space with LDT SS

	/*
	 * Setup and switch to ESPFIX stack
	 *
	 * We're returning to userspace with a 16 bit stack. The CPU will not
	 * restore the high word of ESP for us on executing iret... This is an
	 * "official" bug of all the x86-compatible CPUs, which we can work
	 * around to make dosemu and wine happy. We do this by preloading the
	 * high word of ESP with the high word of the userspace ESP while
	 * compensating for the offset by changing to the ESPFIX segment with
	 * a base address that matches for the difference.
	 */
	mov	%esp, %edx			/* load kernel esp */
	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
	mov	%dx, %ax			/* eax: new kernel esp */
	sub	%eax, %edx			/* offset (low word is 0) */
	shr	$16, %edx
	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
	pushl	$__ESPFIX_SS
	pushl	%eax				/* new kernel esp */
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the IRET:
	 */
	cli
	lss	(%esp), %esp			/* switch to espfix segment */
.Lend_\@:
#endif /* CONFIG_X86_ESPFIX32 */
.endm
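/*
 * Worked example for CHECK_AND_APPLY_ESPFIX above (the numbers are
 * made up, only the arithmetic matters): with a kernel %esp of
 * 0xc1004a80 and a userspace ESP of 0x0041ff40, %eax becomes
 * 0x00414a80 (user high word, kernel low word) and the ESPFIX segment
 * base becomes 0xc1004a80 - 0x00414a80 = 0xc0bf0000. SS:ESP then still
 * resolves to linear address 0xc0bf0000 + 0x00414a80 = 0xc1004a80, and
 * when iret later restores only the low 16 bits of ESP, the leftover
 * high word (0x0041) is already the user's.
 */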
/*
 * Called with pt_regs fully populated and kernel segments loaded,
 * so we can access PER_CPU and use the integer registers.
 *
 * We need to be very careful here with the %esp switch, because an NMI
 * can happen everywhere. If the NMI handler finds itself on the
 * entry-stack, it will overwrite the task-stack and everything we
 * copied there. So allocate the stack-frame on the task-stack and
 * switch to it before we do any copying.
 */

.macro SWITCH_TO_KERNEL_STACK

	BUG_IF_WRONG_CR3

	SWITCH_TO_KERNEL_CR3 scratch_reg=%eax

	/*
	 * %eax now contains the entry cr3 and we carry it forward in
	 * that register for the time this macro runs
	 */

	/* Are we on the entry stack? Bail out if not! */
	movl	PER_CPU_VAR(cpu_entry_area), %ecx
	addl	$CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
	subl	%esp, %ecx	/* ecx = (end of entry_stack) - esp */
	cmpl	$SIZEOF_entry_stack, %ecx
	jae	.Lend_\@

	/* Load stack pointer into %esi and %edi */
	movl	%esp, %esi
	movl	%esi, %edi

	/* Move %edi to the top of the entry stack */
	andl	$(MASK_entry_stack), %edi
	addl	$(SIZEOF_entry_stack), %edi

	/* Load top of task-stack into %edi */
	movl	TSS_entry2task_stack(%edi), %edi

	/* Special case - entry from kernel mode via entry stack */
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %ecx		# mix EFLAGS and CS
	movb	PT_CS(%esp), %cl
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %ecx
#else
	movl	PT_CS(%esp), %ecx
	andl	$SEGMENT_RPL_MASK, %ecx
#endif
	cmpl	$USER_RPL, %ecx
	jb	.Lentry_from_kernel_\@

	/* Bytes to copy */
	movl	$PTREGS_SIZE, %ecx

#ifdef CONFIG_VM86
	testl	$X86_EFLAGS_VM, PT_EFLAGS(%esi)
	jz	.Lcopy_pt_regs_\@

	/*
	 * Stack-frame contains 4 additional segment registers when
	 * coming from VM86 mode
	 */
	addl	$(4 * 4), %ecx

#endif
.Lcopy_pt_regs_\@:

	/* Allocate frame on task-stack */
	subl	%ecx, %edi

	/* Switch to task-stack */
	movl	%edi, %esp

	/*
	 * We are now on the task-stack and can safely copy over the
	 * stack-frame
	 */
	shrl	$2, %ecx
	cld
	rep movsl

	jmp	.Lend_\@

.Lentry_from_kernel_\@:

	/*
	 * This handles the case when we enter the kernel from
	 * kernel-mode and %esp points to the entry-stack. When this
	 * happens we need to switch to the task-stack to run C code,
	 * but switch back to the entry-stack again when we approach
	 * iret and return to the interrupted code-path. This usually
	 * happens when we hit an exception while restoring user-space
	 * segment registers on the way back to user-space or when the
	 * sysenter handler runs with eflags.tf set.
	 *
	 * When we switch to the task-stack here, we can't trust the
	 * contents of the entry-stack anymore, as the exception handler
	 * might be scheduled out or moved to another CPU. Therefore we
	 * copy the complete entry-stack to the task-stack and set a
	 * marker in the iret-frame (bit 31 of the CS dword) to detect
	 * what we've done on the iret path.
	 *
	 * On the iret path we copy everything back and switch to the
	 * entry-stack, so that the interrupted kernel code-path
	 * continues on the same stack it was interrupted with.
	 *
	 * Be aware that an NMI can happen anytime in this code.
	 *
	 * %esi: Entry-Stack pointer (same as %esp)
	 * %edi: Top of the task stack
	 * %eax: CR3 on kernel entry
	 */

	/* Calculate number of bytes on the entry stack in %ecx */
	movl	%esi, %ecx

	/* %ecx to the top of entry-stack */
	andl	$(MASK_entry_stack), %ecx
	addl	$(SIZEOF_entry_stack), %ecx

	/* Number of bytes on the entry stack to %ecx */
	sub	%esi, %ecx

	/* Mark stackframe as coming from entry stack */
	orl	$CS_FROM_ENTRY_STACK, PT_CS(%esp)

	/*
	 * Test the cr3 used to enter the kernel and add a marker
	 * so that we can switch back to it before iret.
	 */
	testl	$PTI_SWITCH_MASK, %eax
	jz	.Lcopy_pt_regs_\@
	orl	$CS_FROM_USER_CR3, PT_CS(%esp)

	/*
	 * %esi and %edi are unchanged, %ecx contains the number of
	 * bytes to copy. The code at .Lcopy_pt_regs_\@ will allocate
	 * the stack-frame on task-stack and copy everything over
	 */
	jmp .Lcopy_pt_regs_\@

.Lend_\@:
.endm
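/*
 * Note on the "are we on the entry stack?" check in
 * SWITCH_TO_KERNEL_STACK above: computing (end of entry_stack) - %esp
 * and doing one unsigned compare against SIZEOF_entry_stack folds both
 * bounds into a single branch. The difference is below the size only
 * when %esp points into the entry stack, and underflows to a huge
 * unsigned value when %esp is above its end.
 */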
/*
 * Switch back from the kernel stack to the entry stack.
 *
 * The %esp register must point to pt_regs on the task stack. It will
 * first calculate the size of the stack-frame to copy, depending on
 * whether we return to VM86 mode or not. With that it uses 'rep movsl'
 * to copy the contents of the stack over to the entry stack.
 *
 * We must be very careful here, as we can't trust the contents of the
 * task-stack once we switched to the entry-stack. When an NMI happens
 * while on the entry-stack, the NMI handler will switch back to the top
 * of the task stack, overwriting our stack-frame we are about to copy.
 * Therefore we switch the stack only after everything is copied over.
 */
.macro SWITCH_TO_ENTRY_STACK

	/* Bytes to copy */
	movl	$PTREGS_SIZE, %ecx

#ifdef CONFIG_VM86
	testl	$(X86_EFLAGS_VM), PT_EFLAGS(%esp)
	jz	.Lcopy_pt_regs_\@

	/* Additional 4 registers to copy when returning to VM86 mode */
	addl	$(4 * 4), %ecx

.Lcopy_pt_regs_\@:
#endif

	/* Initialize source and destination for movsl */
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
	subl	%ecx, %edi
	movl	%esp, %esi

	/* Save future stack pointer in %ebx */
	movl	%edi, %ebx

	/* Copy over the stack-frame */
	shrl	$2, %ecx
	cld
	rep movsl

	/*
	 * Switch to entry-stack - needs to happen after everything is
	 * copied because the NMI handler will overwrite the task-stack
	 * when on entry-stack
	 */
	movl	%ebx, %esp

.Lend_\@:
.endm

/*
 * This macro handles the case when we return to kernel-mode on the iret
 * path and have to switch back to the entry stack and/or user-cr3
 *
 * See the comments below the .Lentry_from_kernel_\@ label in the
 * SWITCH_TO_KERNEL_STACK macro for more details.
 */
.macro PARANOID_EXIT_TO_KERNEL_MODE

	/*
	 * Test if we entered the kernel with the entry-stack. Most
	 * likely we did not, because this code only runs on the
	 * return-to-kernel path.
	 */
	testl	$CS_FROM_ENTRY_STACK, PT_CS(%esp)
	jz	.Lend_\@

	/* Unlikely slow-path */

	/* Clear marker from stack-frame */
	andl	$(~CS_FROM_ENTRY_STACK), PT_CS(%esp)

	/* Copy the remaining task-stack contents to entry-stack */
	movl	%esp, %esi
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi

	/* Bytes on the task-stack to ecx */
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp1), %ecx
	subl	%esi, %ecx

	/* Allocate stack-frame on entry-stack */
	subl	%ecx, %edi

	/*
	 * Save future stack-pointer, we must not switch until the
	 * copy is done, otherwise the NMI handler could destroy the
	 * contents of the task-stack we are about to copy.
	 */
	movl	%edi, %ebx

	/* Do the copy */
	shrl	$2, %ecx
	cld
	rep movsl

	/* Safe to switch to entry-stack now */
	movl	%ebx, %esp

	/*
	 * We came from entry-stack and need to check if we also need to
	 * switch back to user cr3.
	 */
	testl	$CS_FROM_USER_CR3, PT_CS(%esp)
	jz	.Lend_\@

	/* Clear marker from stack-frame */
	andl	$(~CS_FROM_USER_CR3), PT_CS(%esp)

	SWITCH_TO_USER_CR3 scratch_reg=%eax

.Lend_\@:
.endm

/**
 * idtentry - Macro to generate entry stubs for simple IDT entries
 * @vector:		Vector number
 * @asmsym:		ASM symbol for the entry point
 * @cfunc:		C function to be called
 * @has_error_code:	Hardware pushed error code on stack
 */
.macro idtentry vector asmsym cfunc has_error_code:req
SYM_CODE_START(\asmsym)
	ASM_CLAC
	cld

	.if \has_error_code == 0
	pushl	$0		/* Clear the error code */
	.endif

	/* Push the C-function address into the GS slot */
	pushl	$\cfunc
	/* Invoke the common exception entry */
	jmp	handle_exception
SYM_CODE_END(\asmsym)
.endm
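/*
 * For example (one of the invocations emitted via <asm/idtentry.h>),
 *
 *	idtentry X86_TRAP_DE asm_exc_divide_error exc_divide_error has_error_code=0
 *
 * expands to a stub that pushes a zero error code plus the address of
 * exc_divide_error() and jumps to handle_exception, which later fetches
 * that address from the GS slot of pt_regs and calls it.
 */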
.macro idtentry_irq vector cfunc
	.p2align CONFIG_X86_L1_CACHE_SHIFT
SYM_CODE_START_LOCAL(asm_\cfunc)
	ASM_CLAC
	SAVE_ALL switch_stacks=1
	ENCODE_FRAME_POINTER
	movl	%esp, %eax
	movl	PT_ORIG_EAX(%esp), %edx		/* get the vector from stack */
	movl	$-1, PT_ORIG_EAX(%esp)		/* no syscall to restart */
	call	\cfunc
	jmp	handle_exception_return
SYM_CODE_END(asm_\cfunc)
.endm

.macro idtentry_sysvec vector cfunc
	idtentry \vector asm_\cfunc \cfunc has_error_code=0
.endm

/*
 * Include the defines which emit the idt entries which are shared
 * between 32 and 64 bit and emit the __irqentry_text_* markers
 * so the stacktrace boundary checks work.
 */
	.align 16
	.globl __irqentry_text_start
__irqentry_text_start:

#include <asm/idtentry.h>

	.align 16
	.globl __irqentry_text_end
__irqentry_text_end:
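/*
 * For reference, the layout saved by the pushes in __switch_to_asm
 * below corresponds to struct inactive_task_frame from
 * <asm/switch_to.h> on 32-bit: flags, si, di, bx, bp and then the
 * return address consumed by the final jmp to __switch_to.
 */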
/*
 * %eax: prev task
 * %edx: next task
 */
.pushsection .text, "ax"
SYM_CODE_START(__switch_to_asm)
	/*
	 * Save callee-saved registers
	 * This must match the order in struct inactive_task_frame
	 */
	pushl	%ebp
	pushl	%ebx
	pushl	%edi
	pushl	%esi
	/*
	 * Flags are saved to prevent AC leakage. This could go
	 * away if objtool would have 32bit support to verify
	 * the STAC/CLAC correctness.
	 */
	pushfl

	/* switch stack */
	movl	%esp, TASK_threadsp(%eax)
	movl	TASK_threadsp(%edx), %esp

#ifdef CONFIG_STACKPROTECTOR
	movl	TASK_stack_canary(%edx), %ebx
	movl	%ebx, PER_CPU_VAR(__stack_chk_guard)
#endif

#ifdef CONFIG_RETPOLINE
	/*
	 * When switching from a shallower to a deeper call stack
	 * the RSB may either underflow or use entries populated
	 * with userspace addresses. On CPUs where those concerns
	 * exist, overwrite the RSB with entries which capture
	 * speculative execution to prevent attack.
	 */
	FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif

	/* Restore flags of the incoming task to restore AC state. */
	popfl
	/* restore callee-saved registers */
	popl	%esi
	popl	%edi
	popl	%ebx
	popl	%ebp

	jmp	__switch_to
SYM_CODE_END(__switch_to_asm)
.popsection

/*
 * The unwinder expects the last frame on the stack to always be at the same
 * offset from the end of the page, which allows it to validate the stack.
 * Calling schedule_tail() directly would break that convention because it's an
 * asmlinkage function so its argument has to be pushed on the stack. This
 * wrapper creates a proper "end of stack" frame header before the call.
 */
.pushsection .text, "ax"
SYM_FUNC_START(schedule_tail_wrapper)
	FRAME_BEGIN

	pushl	%eax
	call	schedule_tail
	popl	%eax

	FRAME_END
	RET
SYM_FUNC_END(schedule_tail_wrapper)
.popsection

/*
 * A newly forked process directly context switches into this address.
 *
 * eax: prev task we switched from
 * ebx: kernel thread func (NULL for user thread)
 * edi: kernel thread arg
 */
.pushsection .text, "ax"
SYM_CODE_START(ret_from_fork)
	call	schedule_tail_wrapper

	testl	%ebx, %ebx
	jnz	1f		/* kernel threads are uncommon */

2:
	/* When we fork, we trace the syscall return in the child, too. */
	movl	%esp, %eax
	call	syscall_exit_to_user_mode
	jmp	.Lsyscall_32_done

	/* kernel thread */
1:	movl	%edi, %eax
	CALL_NOSPEC ebx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling kernel_execve(). Exit to userspace to complete the execve()
	 * syscall.
	 */
	movl	$0, PT_EAX(%esp)
	jmp	2b
SYM_CODE_END(ret_from_fork)
.popsection

SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
/*
 * All code from here through __end_SYSENTER_singlestep_region is subject
 * to being single-stepped if a user program sets TF and executes SYSENTER.
 * There is absolutely nothing that we can do to prevent this from happening
 * (thanks Intel!). To keep our handling of this situation as simple as
 * possible, we handle TF just like AC and NT, except that our #DB handler
 * will ignore all of the single-step traps generated in this range.
 */

/*
 * 32-bit SYSENTER entry.
 *
 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
 * if X86_FEATURE_SEP is available. This is the preferred system call
 * entry on 32-bit systems.
 *
 * The SYSENTER instruction, in principle, should *only* occur in the
 * vDSO. In practice, a small number of Android devices were shipped
 * with a copy of Bionic that inlined a SYSENTER instruction. This
 * never happened in any of Google's Bionic versions -- it only happened
 * in a narrow range of Intel-provided versions.
 *
 * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs.
 * IF and VM in EFLAGS are cleared (IOW: interrupts are off).
 * SYSENTER does not save anything on the stack,
 * and does not save old EIP (!!!), ESP, or EFLAGS.
 *
 * To avoid losing track of EFLAGS.VM (and thus potentially corrupting
 * user and/or vm86 state), we explicitly disable the SYSENTER
 * instruction in vm86 mode by reprogramming the MSRs.
 *
 * Arguments:
 * eax	system call number
 * ebx	arg1
 * ecx	arg2
 * edx	arg3
 * esi	arg4
 * edi	arg5
 * ebp	user stack
 * 0(%ebp) arg6
 */
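/*
 * For orientation (a simplified sketch; see the vDSO sources for the
 * real thing): the vDSO's __kernel_vsyscall does roughly
 *
 *	pushl	%ecx
 *	pushl	%edx
 *	pushl	%ebp
 *	movl	%esp, %ebp
 *	sysenter
 *
 * which is why %ebp carries the user stack pointer (with arg6 at
 * 0(%ebp)) and why the SYSEXIT return path below can rely on the vDSO
 * to pop %ebp, %edx and %ecx again.
 */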
SYM_FUNC_START(entry_SYSENTER_32)
	/*
	 * On entry-stack with all userspace-regs live - save and
	 * restore eflags and %eax to use it as scratch-reg for the cr3
	 * switch.
	 */
	pushfl
	pushl	%eax
	BUG_IF_WRONG_CR3 no_user_check=1
	SWITCH_TO_KERNEL_CR3 scratch_reg=%eax
	popl	%eax
	popfl

	/* Stack empty again, switch to task stack */
	movl	TSS_entry2task_stack(%esp), %esp

.Lsysenter_past_esp:
	pushl	$__USER_DS		/* pt_regs->ss */
	pushl	$0			/* pt_regs->sp (placeholder) */
	pushfl				/* pt_regs->flags (except IF = 0) */
	pushl	$__USER_CS		/* pt_regs->cs */
	pushl	$0			/* pt_regs->ip = 0 (placeholder) */
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest, stack already switched */

	/*
	 * SYSENTER doesn't filter flags, so we need to clear NT, AC
	 * and TF ourselves. To save a few cycles, we can check whether
	 * either was set instead of doing an unconditional popfl.
	 * This needs to happen before enabling interrupts so that
	 * we don't get preempted with NT set.
	 *
	 * If TF is set, we will single-step all the way to here -- do_debug
	 * will ignore all the traps. (Yes, this is slow, but so is
	 * single-stepping in general. This allows us to avoid having
	 * more complicated code to handle the case where a user program
	 * forces us to single-step through the SYSENTER entry code.)
	 *
	 * NB.: .Lsysenter_fix_flags is a label with the code under it moved
	 * out-of-line as an optimization: NT is unlikely to be set in the
	 * majority of the cases and instead of polluting the I$ unnecessarily,
	 * we're keeping that code behind a branch which will predict as
	 * not-taken and therefore its instructions won't be fetched.
	 */
	testl	$X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
	jnz	.Lsysenter_fix_flags
.Lsysenter_flags_fixed:

	movl	%esp, %eax
	call	do_SYSENTER_32
	testl	%eax, %eax
	jz	.Lsyscall_32_done

	STACKLEAK_ERASE

	/* Opportunistic SYSEXIT */

	/*
	 * Setup entry stack - we keep the pointer in %eax and do the
	 * switch after almost all user-state is restored.
	 */

	/* Load entry stack pointer and allocate frame for eflags/eax */
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %eax
	subl	$(2*4), %eax

	/* Copy eflags and eax to entry stack */
	movl	PT_EFLAGS(%esp), %edi
	movl	PT_EAX(%esp), %esi
	movl	%edi, (%eax)
	movl	%esi, 4(%eax)

	/* Restore user registers and segments */
	movl	PT_EIP(%esp), %edx	/* pt_regs->ip */
	movl	PT_OLDESP(%esp), %ecx	/* pt_regs->sp */
1:	mov	PT_FS(%esp), %fs

	popl	%ebx			/* pt_regs->bx */
	addl	$2*4, %esp		/* skip pt_regs->cx and pt_regs->dx */
	popl	%esi			/* pt_regs->si */
	popl	%edi			/* pt_regs->di */
	popl	%ebp			/* pt_regs->bp */

	/* Switch to entry stack */
	movl	%eax, %esp

	/* Now ready to switch the cr3 */
	SWITCH_TO_USER_CR3 scratch_reg=%eax

	/*
	 * Restore all flags except IF. (We restore IF separately because
	 * STI gives a one-instruction window in which we won't be interrupted,
	 * whereas POPF does not.)
	 */
	btrl	$X86_EFLAGS_IF_BIT, (%esp)
	BUG_IF_WRONG_CR3 no_user_check=1
	popfl
	popl	%eax

	/*
	 * Return to the vDSO, which will pop ecx and edx.
	 * Don't bother with DS and ES (they already contain __USER_DS).
	 */
	sti
	sysexit

2:	movl	$0, PT_FS(%esp)
	jmp	1b
	_ASM_EXTABLE(1b, 2b)

.Lsysenter_fix_flags:
	pushl	$X86_EFLAGS_FIXED
	popfl
	jmp	.Lsysenter_flags_fixed
SYM_ENTRY(__end_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
SYM_FUNC_END(entry_SYSENTER_32)
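/*
 * Note on the SYSEXIT convention used above: the CPU derives the user
 * CS and SS from MSR_IA32_SYSENTER_CS (+16 and +24) and takes the new
 * EIP from %edx and the new ESP from %ecx, which is why the exit path
 * loads pt_regs->ip into %edx and pt_regs->sp into %ecx before the
 * final sysexit.
 */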
/*
 * 32-bit legacy system call entry.
 *
 * 32-bit x86 Linux system calls traditionally used the INT $0x80
 * instruction. INT $0x80 lands here.
 *
 * This entry point can be used by any 32-bit program to perform
 * system calls. Instances of INT $0x80 can be found inline in
 * various programs and libraries. It is also used by the vDSO's
 * __kernel_vsyscall fallback for hardware that doesn't support a
 * faster entry method. Restarted 32-bit system calls also fall back
 * to INT $0x80 regardless of what instruction was originally used to
 * do the system call. (64-bit programs can use INT $0x80 as well, but
 * they can only run on 64-bit kernels and therefore land in
 * entry_INT80_compat.)
 *
 * This is considered a slow path. It is not used by most libc
 * implementations on modern hardware except during process startup.
 *
 * Arguments:
 * eax	system call number
 * ebx	arg1
 * ecx	arg2
 * edx	arg3
 * esi	arg4
 * edi	arg5
 * ebp	arg6
 */
SYM_FUNC_START(entry_INT80_32)
	ASM_CLAC
	pushl	%eax			/* pt_regs->orig_ax */

	SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1	/* save rest */

	movl	%esp, %eax
	call	do_int80_syscall_32
.Lsyscall_32_done:
	STACKLEAK_ERASE

restore_all_switch_stack:
	SWITCH_TO_ENTRY_STACK
	CHECK_AND_APPLY_ESPFIX

	/* Switch back to user CR3 */
	SWITCH_TO_USER_CR3 scratch_reg=%eax

	BUG_IF_WRONG_CR3

	/* Restore user state */
	RESTORE_REGS pop=4		# skip orig_eax/error_code
.Lirq_return:
	/*
	 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
	 * when returning from IPI handler and when returning from
	 * scheduler to user-space.
	 */
	iret

.Lasm_iret_error:
	pushl	$0			# no error code
	pushl	$iret_error

#ifdef CONFIG_DEBUG_ENTRY
	/*
	 * The stack-frame here is the one that iret faulted on, so it's a
	 * return-to-user frame. We are on kernel-cr3 because we come here from
	 * the fixup code. This confuses the CR3 checker, so switch to user-cr3
	 * as the checker expects it.
	 */
	pushl	%eax
	SWITCH_TO_USER_CR3 scratch_reg=%eax
	popl	%eax
#endif

	jmp	handle_exception

	_ASM_EXTABLE(.Lirq_return, .Lasm_iret_error)
SYM_FUNC_END(entry_INT80_32)
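/*
 * Illustration of the INT $0x80 ABI documented above (userspace side,
 * not part of this file): write(1, buf, len) comes down to
 *
 *	movl	$4, %eax		# __NR_write on i386
 *	movl	$1, %ebx		# fd
 *	movl	$buf, %ecx		# buffer
 *	movl	$len, %edx		# count
 *	int	$0x80
 *
 * with the result (or -errno) returned in %eax.
 */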
.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
 *
 * We might be on user CR3 here, so percpu data is not mapped and we can't
 * access the GDT through the percpu segment. Instead, use SGDT to find
 * the cpu_entry_area alias of the GDT.
 */
#ifdef CONFIG_X86_ESPFIX32
	/* fixup the stack */
	pushl	%ecx
	subl	$2*4, %esp
	sgdt	(%esp)
	movl	2(%esp), %ecx		/* GDT address */
	/*
	 * Careful: ECX is a linear pointer, so we need to force base
	 * zero. %cs is the only known-linear segment we have right now.
	 */
	mov	%cs:GDT_ESPFIX_OFFSET + 4(%ecx), %al	/* bits 16..23 */
	mov	%cs:GDT_ESPFIX_OFFSET + 7(%ecx), %ah	/* bits 24..31 */
	shl	$16, %eax
	addl	$2*4, %esp
	popl	%ecx
	addl	%esp, %eax		/* the adjusted stack pointer */
	pushl	$__KERNEL_DS
	pushl	%eax
	lss	(%esp), %esp		/* switch to the normal stack segment */
#endif
.endm
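/*
 * Note on the SGDT use above: SGDT stores a 6-byte pseudo-descriptor,
 * a 16-bit limit followed by the 32-bit linear base address, which is
 * why two dwords are reserved on the stack and the base is fetched
 * from offset 2.
 */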
.macro UNWIND_ESPFIX_STACK
	/* It's safe to clobber %eax, all other regs need to be preserved */
#ifdef CONFIG_X86_ESPFIX32
	movl	%ss, %eax
	/* see if on espfix stack */
	cmpw	$__ESPFIX_SS, %ax
	jne	.Lno_fixup_\@
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
.Lno_fixup_\@:
#endif
.endm

SYM_CODE_START_LOCAL_NOALIGN(handle_exception)
	/* the function address is in %gs's slot on the stack */
	SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
	ENCODE_FRAME_POINTER

	movl	PT_GS(%esp), %edi	# get the function address

	/* fixup orig %eax */
	movl	PT_ORIG_EAX(%esp), %edx	# get the error code
	movl	$-1, PT_ORIG_EAX(%esp)	# no syscall to restart

	movl	%esp, %eax		# pt_regs pointer
	CALL_NOSPEC edi

handle_exception_return:
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
	/*
	 * We can be coming here from a child spawned by kernel_thread().
	 */
	movl	PT_CS(%esp), %eax
	andl	$SEGMENT_RPL_MASK, %eax
#endif
	cmpl	$USER_RPL, %eax		# returning to v8086 or userspace ?
	jnb	ret_to_user

	PARANOID_EXIT_TO_KERNEL_MODE
	BUG_IF_WRONG_CR3
	RESTORE_REGS 4
	jmp	.Lirq_return

ret_to_user:
	movl	%esp, %eax
	jmp	restore_all_switch_stack
SYM_CODE_END(handle_exception)

SYM_CODE_START(asm_exc_double_fault)
1:
	/*
	 * This is a task gate handler, not an interrupt gate handler.
	 * The error code is on the stack, but the stack is otherwise
	 * empty. Interrupts are off. Our state is sane with the following
	 * exceptions:
	 *
	 *  - CR0.TS is set. "TS" literally means "task switched".
	 *  - EFLAGS.NT is set because we're a "nested task".
	 *  - The doublefault TSS has back_link set and has been marked busy.
	 *  - TR points to the doublefault TSS and the normal TSS is busy.
	 *  - CR3 is the normal kernel PGD. This would be delightful, except
	 *    that the CPU didn't bother to save the old CR3 anywhere. This
	 *    would make it very awkward to return to the context we came
	 *    from.
	 *
	 * The rest of EFLAGS is sanitized for us, so we don't need to
	 * worry about AC or DF.
	 *
	 * Don't even bother popping the error code. It's always zero,
	 * and ignoring it makes us a bit more robust against buggy
	 * hypervisor task gate implementations.
	 *
	 * We will manually undo the task switch instead of doing a
	 * task-switching IRET.
	 */

	clts				/* clear CR0.TS */
	pushl	$X86_EFLAGS_FIXED
	popfl				/* clear EFLAGS.NT */

	call	doublefault_shim

	/* We don't support returning, so we have no IRET here. */
1:
	hlt
	jmp	1b
SYM_CODE_END(asm_exc_double_fault)

/*
 * NMI is doubly nasty. It can happen on the first instruction of
 * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning
 * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32
 * switched stacks. We handle both conditions by simply checking whether we
 * interrupted kernel code running on the SYSENTER stack.
 */
SYM_CODE_START(asm_exc_nmi)
	ASM_CLAC

#ifdef CONFIG_X86_ESPFIX32
	/*
	 * ESPFIX_SS is only ever set on the return to user path
	 * after we've switched to the entry stack.
	 */
	pushl	%eax
	movl	%ss, %eax
	cmpw	$__ESPFIX_SS, %ax
	popl	%eax
	je	.Lnmi_espfix_stack
#endif

	pushl	%eax			# pt_regs->orig_ax
	SAVE_ALL_NMI cr3_reg=%edi
	ENCODE_FRAME_POINTER
	xorl	%edx, %edx		# zero error code
	movl	%esp, %eax		# pt_regs pointer

	/* Are we currently on the SYSENTER stack? */
	movl	PER_CPU_VAR(cpu_entry_area), %ecx
	addl	$CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
	subl	%eax, %ecx	/* ecx = (end of entry_stack) - esp */
	cmpl	$SIZEOF_entry_stack, %ecx
	jb	.Lnmi_from_sysenter_stack

	/* Not on SYSENTER stack. */
	call	exc_nmi
	jmp	.Lnmi_return

.Lnmi_from_sysenter_stack:
	/*
	 * We're on the SYSENTER stack. Switch off. No one (not even debug)
	 * is using the thread stack right now, so it's safe for us to use it.
	 */
	movl	%esp, %ebx
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	call	exc_nmi
	movl	%ebx, %esp

.Lnmi_return:
#ifdef CONFIG_X86_ESPFIX32
	testl	$CS_FROM_ESPFIX, PT_CS(%esp)
	jnz	.Lnmi_from_espfix
#endif

	CHECK_AND_APPLY_ESPFIX
	RESTORE_ALL_NMI cr3_reg=%edi pop=4
	jmp	.Lirq_return

#ifdef CONFIG_X86_ESPFIX32
.Lnmi_espfix_stack:
	/*
	 * Build the ss:esp operand that the final LSS below will use to
	 * switch back to the ESPFIX stack.
	 */
	pushl	%ss
	pushl	%esp
	addl	$4, (%esp)

	/* Copy the (short) IRET frame */
	pushl	4*4(%esp)		# flags
	pushl	4*4(%esp)		# cs
	pushl	4*4(%esp)		# ip

	pushl	%eax			# orig_ax

	SAVE_ALL_NMI cr3_reg=%edi unwind_espfix=1
	ENCODE_FRAME_POINTER

	/* clear CS_FROM_KERNEL, set CS_FROM_ESPFIX */
	xorl	$(CS_FROM_ESPFIX | CS_FROM_KERNEL), PT_CS(%esp)

	xorl	%edx, %edx		# zero error code
	movl	%esp, %eax		# pt_regs pointer
	jmp	.Lnmi_from_sysenter_stack

.Lnmi_from_espfix:
	RESTORE_ALL_NMI cr3_reg=%edi
	/*
	 * Because we cleared CS_FROM_KERNEL, IRET_FRAME 'forgot' to
	 * fix up the gap and long frame:
	 *
	 * 3 - original frame	(exception)
	 * 2 - ESPFIX block	(above)
	 * 6 - gap		(FIXUP_FRAME)
	 * 5 - long frame	(FIXUP_FRAME)
	 * 1 - orig_ax
	 */
	lss	(1+5+6)*4(%esp), %esp	# back to espfix stack
	jmp	.Lirq_return
#endif
SYM_CODE_END(asm_exc_nmi)

.pushsection .text, "ax"
SYM_CODE_START(rewind_stack_and_make_dead)
	/* Prevent any naive code from trying to unwind to our caller. */
	xorl	%ebp, %ebp

	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esi
	leal	-TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp

	call	make_task_dead
1:	jmp 1b
SYM_CODE_END(rewind_stack_and_make_dead)
.popsection