/*
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/sched.h>            /* test_thread_flag(), ...      */
#include <linux/kdebug.h>           /* oops_begin/end, ...          */
#include <linux/module.h>           /* search_exception_table       */
#include <linux/bootmem.h>          /* max_low_pfn                  */
#include <linux/kprobes.h>          /* NOKPROBE_SYMBOL, ...         */
#include <linux/mmiotrace.h>        /* kmmio_handler, ...           */
#include <linux/perf_event.h>       /* perf_sw_event                */
#include <linux/hugetlb.h>          /* hstate_index_to_shift        */
#include <linux/prefetch.h>         /* prefetchw                    */
#include <linux/context_tracking.h> /* exception_enter(), ...       */
#include <linux/uaccess.h>          /* faulthandler_disabled()      */

#include <asm/traps.h>              /* dotraplinkage, ...           */
#include <asm/pgalloc.h>            /* pgd_*(), ...                 */
#include <asm/kmemcheck.h>          /* kmemcheck_*(), ...           */
#include <asm/fixmap.h>             /* VSYSCALL_ADDR                */
#include <asm/vsyscall.h>           /* emulate_vsyscall             */

#define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h>

/*
 * Page fault error code bits:
 *
 *   bit 0 == 0: no page found       1: protection fault
 *   bit 1 == 0: read access         1: write access
 *   bit 2 == 0: kernel-mode access  1: user-mode access
 *   bit 3 ==                        1: use of reserved bit detected
 *   bit 4 ==                        1: fault was an instruction fetch
 */
enum x86_pf_error_code {

        PF_PROT         = 1 << 0,
        PF_WRITE        = 1 << 1,
        PF_USER         = 1 << 2,
        PF_RSVD         = 1 << 3,
        PF_INSTR        = 1 << 4,
};

/*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
 */
static nokprobe_inline int
kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
        if (unlikely(is_kmmio_active()))
                if (kmmio_handler(regs, addr) == 1)
                        return -1;
        return 0;
}

static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
{
        int ret = 0;

        /* kprobe_running() needs smp_processor_id() */
        if (kprobes_built_in() && !user_mode(regs)) {
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, 14))
                        ret = 1;
                preempt_enable();
        }

        return ret;
}

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
                      unsigned char opcode, int *prefetch)
{
        unsigned char instr_hi = opcode & 0xf0;
        unsigned char instr_lo = opcode & 0x0f;

        switch (instr_hi) {
        case 0x20:
        case 0x30:
                /*
                 * Values 0x26, 0x2E, 0x36, 0x3E are valid x86 prefixes.
                 * In X86_64 long mode, the CPU will signal invalid
                 * opcode if some of these prefixes are present, so
                 * X86_64 will never get here anyway.
                 */
                return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
        case 0x40:
                /*
                 * In AMD64 long mode 0x40..0x4F are valid REX prefixes.
                 * Need to figure out under what instruction mode the
                 * instruction was issued. Could check the LDT for lm,
                 * but for now it's good enough to assume that long
                 * mode only uses well known segments or kernel.
                 */
                return (!user_mode(regs) || user_64bit_mode(regs));
#endif
        case 0x60:
                /*
                 * 0x64 thru 0x67 are valid prefixes in all modes.
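                 * (0x64/0x65 are the FS/GS segment overrides, 0x66/0x67
                 * the operand-size and address-size overrides.)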
                 */
                return (instr_lo & 0xC) == 0x4;
        case 0xF0:
                /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
                return !instr_lo || (instr_lo>>1) == 1;
        case 0x00:
                /* Prefetch instruction is 0x0F0D or 0x0F18 */
                if (probe_kernel_address(instr, opcode))
                        return 0;

                *prefetch = (instr_lo == 0xF) &&
                        (opcode == 0x0D || opcode == 0x18);
                return 0;
        default:
                return 0;
        }
}

static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
        unsigned char *max_instr;
        unsigned char *instr;
        int prefetch = 0;

        /*
         * If it was an exec (instruction fetch) fault on an NX page, then
         * do not ignore the fault:
         */
        if (error_code & PF_INSTR)
                return 0;

        instr = (void *)convert_ip_to_linear(current, regs);
        max_instr = instr + 15;

        if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX)
                return 0;

        while (instr < max_instr) {
                unsigned char opcode;

                if (probe_kernel_address(instr, opcode))
                        break;

                instr++;

                if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
                        break;
        }
        return prefetch;
}

static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
                     struct task_struct *tsk, int fault)
{
        unsigned lsb = 0;
        siginfo_t info;

        info.si_signo   = si_signo;
        info.si_errno   = 0;
        info.si_code    = si_code;
        info.si_addr    = (void __user *)address;
        if (fault & VM_FAULT_HWPOISON_LARGE)
                lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
        if (fault & VM_FAULT_HWPOISON)
                lsb = PAGE_SHIFT;
        info.si_addr_lsb = lsb;

        force_sig_info(si_signo, &info, tsk);
}

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
        unsigned index = pgd_index(address);
        pgd_t *pgd_k;
        pud_t *pud, *pud_k;
        pmd_t *pmd, *pmd_k;

        pgd += index;
        pgd_k = init_mm.pgd + index;

        if (!pgd_present(*pgd_k))
                return NULL;

        /*
         * set_pgd(pgd, *pgd_k); here would be useless on PAE
         * and redundant with the set_pmd() on non-PAE. As would
         * set_pud.
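         * Copying the PMD entry below is what actually propagates the
         * mapping: on non-PAE the pud and pmd levels are folded into
         * the pgd, so the set_pmd() ends up filling the pgd slot itself.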
         */
        pud = pud_offset(pgd, address);
        pud_k = pud_offset(pgd_k, address);
        if (!pud_present(*pud_k))
                return NULL;

        pmd = pmd_offset(pud, address);
        pmd_k = pmd_offset(pud_k, address);
        if (!pmd_present(*pmd_k))
                return NULL;

        if (!pmd_present(*pmd))
                set_pmd(pmd, *pmd_k);
        else
                BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

        return pmd_k;
}

void vmalloc_sync_all(void)
{
        unsigned long address;

        if (SHARED_KERNEL_PMD)
                return;

        for (address = VMALLOC_START & PMD_MASK;
             address >= TASK_SIZE && address < FIXADDR_TOP;
             address += PMD_SIZE) {
                struct page *page;

                spin_lock(&pgd_lock);
                list_for_each_entry(page, &pgd_list, lru) {
                        spinlock_t *pgt_lock;
                        pmd_t *ret;

                        /* the pgt_lock only for Xen */
                        pgt_lock = &pgd_page_get_mm(page)->page_table_lock;

                        spin_lock(pgt_lock);
                        ret = vmalloc_sync_one(page_address(page), address);
                        spin_unlock(pgt_lock);

                        if (!ret)
                                break;
                }
                spin_unlock(&pgd_lock);
        }
}

/*
 * 32-bit:
 *
 *   Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
        unsigned long pgd_paddr;
        pmd_t *pmd_k;
        pte_t *pte_k;

        /* Make sure we are in vmalloc area: */
        if (!(address >= VMALLOC_START && address < VMALLOC_END))
                return -1;

        WARN_ON_ONCE(in_nmi());

        /*
         * Synchronize this task's top level page-table
         * with the 'reference' page table.
         *
         * Do _not_ use "current" here. We might be inside
         * an interrupt in the middle of a task switch..
         */
        pgd_paddr = read_cr3();
        pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
        if (!pmd_k)
                return -1;

        pte_k = pte_offset_kernel(pmd_k, address);
        if (!pte_present(*pte_k))
                return -1;

        return 0;
}
NOKPROBE_SYMBOL(vmalloc_fault);

/*
 * Did it hit the DOS screen memory VA from vm86 mode?
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
                 struct task_struct *tsk)
{
        unsigned long bit;

        if (!v8086_mode(regs))
                return;

        bit = (address - 0xA0000) >> PAGE_SHIFT;
        if (bit < 32)
                tsk->thread.screen_bitmap |= 1 << bit;
}

static bool low_pfn(unsigned long pfn)
{
        return pfn < max_low_pfn;
}

static void dump_pagetable(unsigned long address)
{
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(address)];
        pmd_t *pmd;
        pte_t *pte;

#ifdef CONFIG_X86_PAE
        printk("*pdpt = %016Lx ", pgd_val(*pgd));
        if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
                goto out;
#endif
        pmd = pmd_offset(pud_offset(pgd, address), address);
        printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));

        /*
         * We must not directly access the pte in the highpte
         * case if the page table is located in highmem.
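         * (With CONFIG_HIGHPTE the pte pages can sit in highmem and
         * are only ever mapped temporarily, via kmap_atomic().)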
         * And let's rather not kmap-atomic the pte, just in case
         * it's allocated already:
         */
        if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
                goto out;

        pte = pte_offset_kernel(pmd, address);
        printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
        printk("\n");
}

#else /* CONFIG_X86_64: */

void vmalloc_sync_all(void)
{
        sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END, 0);
}

/*
 * 64-bit:
 *
 *   Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
static noinline int vmalloc_fault(unsigned long address)
{
        pgd_t *pgd, *pgd_ref;
        pud_t *pud, *pud_ref;
        pmd_t *pmd, *pmd_ref;
        pte_t *pte, *pte_ref;

        /* Make sure we are in vmalloc area: */
        if (!(address >= VMALLOC_START && address < VMALLOC_END))
                return -1;

        WARN_ON_ONCE(in_nmi());

        /*
         * Copy kernel mappings over when needed. This can also
         * happen within a race in page table update. In the latter
         * case just flush:
         */
        pgd = pgd_offset(current->active_mm, address);
        pgd_ref = pgd_offset_k(address);
        if (pgd_none(*pgd_ref))
                return -1;

        if (pgd_none(*pgd)) {
                set_pgd(pgd, *pgd_ref);
                arch_flush_lazy_mmu_mode();
        } else {
                BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
        }

        /*
         * Below here mismatches are bugs because these lower tables
         * are shared:
         */

        pud = pud_offset(pgd, address);
        pud_ref = pud_offset(pgd_ref, address);
        if (pud_none(*pud_ref))
                return -1;

        if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
                BUG();

        pmd = pmd_offset(pud, address);
        pmd_ref = pmd_offset(pud_ref, address);
        if (pmd_none(*pmd_ref))
                return -1;

        if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
                BUG();

        pte_ref = pte_offset_kernel(pmd_ref, address);
        if (!pte_present(*pte_ref))
                return -1;

        pte = pte_offset_kernel(pmd, address);

        /*
         * Don't use pte_page here, because the mappings can point
         * outside mem_map, and the NUMA hash lookup cannot handle
         * that:
         */
        if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
                BUG();

        return 0;
}
NOKPROBE_SYMBOL(vmalloc_fault);

#ifdef CONFIG_CPU_SUP_AMD
static const char errata93_warning[] =
KERN_ERR
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";
#endif

/*
 * No vm86 mode in 64-bit mode:
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
                 struct task_struct *tsk)
{
}

static int bad_address(void *p)
{
        unsigned long dummy;

        return probe_kernel_address((unsigned long *)p, dummy);
}

static void dump_pagetable(unsigned long address)
{
        pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
        pgd_t *pgd = base + pgd_index(address);
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (bad_address(pgd))
                goto bad;

        printk("PGD %lx ", pgd_val(*pgd));

        if (!pgd_present(*pgd))
                goto out;

        pud = pud_offset(pgd, address);
        if (bad_address(pud))
                goto bad;

        printk("PUD %lx ", pud_val(*pud));
        if (!pud_present(*pud) ||
            pud_large(*pud))
                goto out;

        pmd = pmd_offset(pud, address);
        if (bad_address(pmd))
                goto bad;

        printk("PMD %lx ", pmd_val(*pmd));
        if (!pmd_present(*pmd) || pmd_large(*pmd))
                goto out;

        pte = pte_offset_kernel(pmd, address);
        if (bad_address(pte))
                goto bad;

        printk("PTE %lx", pte_val(*pte));
out:
        printk("\n");
        return;
bad:
        printk("BAD\n");
}

#endif /* CONFIG_X86_64 */

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64bit RIP register on C stepping K8.
 *
 * A lot of BIOSes that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32bits of RIP cleared.
 * Try to work around it here.
 *
 * Note we only handle faults in kernel here.
 * Does nothing on 32-bit.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
            || boot_cpu_data.x86 != 0xf)
                return 0;

        if (address != regs->ip)
                return 0;

        if ((address >> 32) != 0)
                return 0;

        address |= 0xffffffffUL << 32;
        if ((address >= (u64)_stext && address <= (u64)_etext) ||
            (address >= MODULES_VADDR && address <= MODULES_END)) {
                printk_once(errata93_warning);
                regs->ip = address;
                return 1;
        }
#endif
        return 0;
}

/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return. Any code
 * segment in LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
        if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
                return 1;
#endif
        return 0;
}

static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
        unsigned long nr;

        /*
         * Pentium F0 0F C7 C8 bug workaround:
         */
        if (boot_cpu_has_bug(X86_BUG_F00F)) {
                nr = (address - idt_descr.address) >> 3;

                if (nr == 6) {
                        do_invalid_op(regs, 0);
                        return 1;
                }
        }
#endif
        return 0;
}

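/*
 * Messages printed by show_fault_oops() below when a kernel-mode
 * instruction fetch faults on a page that is mapped but not executable
 * (NX), or on a user-accessible page while SMEP is enabled:
 */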
static const char nx_warning[] = KERN_CRIT
"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
static const char smep_warning[] = KERN_CRIT
"unable to execute userspace code (SMEP?) (uid: %d)\n";

static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
                unsigned long address)
{
        if (!oops_may_print())
                return;

        if (error_code & PF_INSTR) {
                unsigned int level;
                pgd_t *pgd;
                pte_t *pte;

                pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
                pgd += pgd_index(address);

                pte = lookup_address_in_pgd(pgd, address, &level);

                if (pte && pte_present(*pte) && !pte_exec(*pte))
                        printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
                if (pte && pte_present(*pte) && pte_exec(*pte) &&
                    (pgd_flags(*pgd) & _PAGE_USER) &&
                    (__read_cr4() & X86_CR4_SMEP))
                        printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
        }

        printk(KERN_ALERT "BUG: unable to handle kernel ");
        if (address < PAGE_SIZE)
                printk(KERN_CONT "NULL pointer dereference");
        else
                printk(KERN_CONT "paging request");

        printk(KERN_CONT " at %p\n", (void *) address);
        printk(KERN_ALERT "IP:");
        printk_address(regs->ip);

        dump_pagetable(address);
}

static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
            unsigned long address)
{
        struct task_struct *tsk;
        unsigned long flags;
        int sig;

        flags = oops_begin();
        tsk = current;
        sig = SIGKILL;

        printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
               tsk->comm, address);
        dump_pagetable(address);

        tsk->thread.cr2         = address;
        tsk->thread.trap_nr     = X86_TRAP_PF;
        tsk->thread.error_code  = error_code;

        if (__die("Bad pagetable", regs, error_code))
                sig = 0;

        oops_end(flags, regs, sig);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
           unsigned long address, int signal, int si_code)
{
        struct task_struct *tsk = current;
        unsigned long flags;
        int sig;

        /* Are we prepared to handle this kernel fault? */
        if (fixup_exception(regs)) {
                /*
                 * Any interrupt that takes a fault gets the fixup. This makes
                 * the below recursive fault logic only apply to faults from
                 * task context.
                 */
                if (in_interrupt())
                        return;

                /*
                 * Per the above we're !in_interrupt(), aka. task context.
                 *
                 * In this case we need to make sure we're not recursively
                 * faulting through the emulate_vsyscall() logic.
                 */
                if (current_thread_info()->sig_on_uaccess_error && signal) {
                        tsk->thread.trap_nr = X86_TRAP_PF;
                        tsk->thread.error_code = error_code | PF_USER;
                        tsk->thread.cr2 = address;

                        /* XXX: hwpoison faults will set the wrong code. */
                        force_sig_info_fault(signal, si_code, address, tsk, 0);
                }

                /*
                 * Barring that, we can do the fixup and be happy.
                 */
                return;
        }

        /*
         * 32-bit:
         *
         *   Valid to do another page fault here, because if this fault
         *   had been triggered by is_prefetch fixup_exception would have
         *   handled it.
         *
         * 64-bit:
         *
         *   Hall of shame of CPU/BIOS bugs.
         */
        if (is_prefetch(regs, error_code, address))
                return;

        if (is_errata93(regs, address))
                return;

        /*
         * Oops. The kernel tried to access some bad page.
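         * (No exception-table fixup applied, not the prefetch quirk and
         * not errata #93 - this is a real kernel bug.)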
         * We'll have to terminate things with extreme prejudice:
         */
        flags = oops_begin();

        show_fault_oops(regs, error_code, address);

        if (task_stack_end_corrupted(tsk))
                printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");

        tsk->thread.cr2         = address;
        tsk->thread.trap_nr     = X86_TRAP_PF;
        tsk->thread.error_code  = error_code;

        sig = SIGKILL;
        if (__die("Oops", regs, error_code))
                sig = 0;

        /* Executive summary in case the body of the oops scrolled away */
        printk(KERN_DEFAULT "CR2: %016lx\n", address);

        oops_end(flags, regs, sig);
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
                unsigned long address, struct task_struct *tsk)
{
        if (!unhandled_signal(tsk, SIGSEGV))
                return;

        if (!printk_ratelimit())
                return;

        printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
                task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
                tsk->comm, task_pid_nr(tsk), address,
                (void *)regs->ip, (void *)regs->sp, error_code);

        print_vma_addr(KERN_CONT " in ", regs->ip);

        printk(KERN_CONT "\n");
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
                       unsigned long address, int si_code)
{
        struct task_struct *tsk = current;

        /* User mode accesses just cause a SIGSEGV */
        if (error_code & PF_USER) {
                /*
                 * It's possible to have interrupts off here:
                 */
                local_irq_enable();

                /*
                 * Valid to do another page fault here because this one came
                 * from user space:
                 */
                if (is_prefetch(regs, error_code, address))
                        return;

                if (is_errata100(regs, address))
                        return;

#ifdef CONFIG_X86_64
                /*
                 * Instruction fetch faults in the vsyscall page might need
                 * emulation.
                 */
                if (unlikely((error_code & PF_INSTR) &&
                             ((address & ~0xfff) == VSYSCALL_ADDR))) {
                        if (emulate_vsyscall(regs, address))
                                return;
                }
#endif
                /* Kernel addresses are always protection faults: */
                if (address >= TASK_SIZE)
                        error_code |= PF_PROT;

                if (likely(show_unhandled_signals))
                        show_signal_msg(regs, error_code, address, tsk);

                tsk->thread.cr2         = address;
                tsk->thread.error_code  = error_code;
                tsk->thread.trap_nr     = X86_TRAP_PF;

                force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);

                return;
        }

        if (is_f00f_bug(regs, address))
                return;

        no_context(regs, error_code, address, SIGSEGV, si_code);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
                     unsigned long address)
{
        __bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
           unsigned long address, int si_code)
{
        struct mm_struct *mm = current->mm;

        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
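         *
         * mmap_sem is dropped first so that the signal delivery and
         * oops paths below are never run with the semaphore held.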
         */
        up_read(&mm->mmap_sem);

        __bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
        __bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
                      unsigned long address)
{
        __bad_area(regs, error_code, address, SEGV_ACCERR);
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
          unsigned int fault)
{
        struct task_struct *tsk = current;
        int code = BUS_ADRERR;

        /* Kernel mode? Handle exceptions or die: */
        if (!(error_code & PF_USER)) {
                no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
                return;
        }

        /* User-space => ok to do another page fault: */
        if (is_prefetch(regs, error_code, address))
                return;

        tsk->thread.cr2         = address;
        tsk->thread.error_code  = error_code;
        tsk->thread.trap_nr     = X86_TRAP_PF;

#ifdef CONFIG_MEMORY_FAILURE
        if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
                printk(KERN_ERR
        "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
                        tsk->comm, tsk->pid, address);
                code = BUS_MCEERR_AR;
        }
#endif
        force_sig_info_fault(SIGBUS, code, address, tsk, fault);
}

static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
               unsigned long address, unsigned int fault)
{
        if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
                no_context(regs, error_code, address, 0, 0);
                return;
        }

        if (fault & VM_FAULT_OOM) {
                /* Kernel mode? Handle exceptions or die: */
                if (!(error_code & PF_USER)) {
                        no_context(regs, error_code, address,
                                   SIGSEGV, SEGV_MAPERR);
                        return;
                }

                /*
                 * We ran out of memory, call the OOM killer, and return to
                 * userspace (which will retry the fault, or kill us if we
                 * got oom-killed):
                 */
                pagefault_out_of_memory();
        } else {
                if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
                             VM_FAULT_HWPOISON_LARGE))
                        do_sigbus(regs, error_code, address, fault);
                else if (fault & VM_FAULT_SIGSEGV)
                        bad_area_nosemaphore(regs, error_code, address);
                else
                        BUG();
        }
}

static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
        if ((error_code & PF_WRITE) && !pte_write(*pte))
                return 0;

        if ((error_code & PF_INSTR) && !pte_exec(*pte))
                return 0;

        return 1;
}

/*
 * Handle a spurious fault caused by a stale TLB entry.
 *
 * This allows us to lazily refresh the TLB when increasing the
 * permissions of a kernel page (RO -> RW or NX -> X). Doing it
 * eagerly is very expensive since that implies doing a full
 * cross-processor TLB flush, even if no stale TLB entries exist
 * on other processors.
 *
 * Spurious faults may only occur if the TLB contains an entry with
 * fewer permissions than the page table entry. Non-present (P = 0)
 * and reserved bit (R = 1) faults are never spurious.
 *
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 *
 * Returns non-zero if a spurious fault was handled, zero otherwise.
 *
 * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3
 * (Optional Invalidation).
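 *
 * Example: one CPU upgrades a kernel PTE from RO to RW and skips the
 * cross-CPU TLB flush; another CPU that still caches the old RO
 * translation takes a write fault (PF_WRITE|PF_PROT), finds that the
 * page tables already permit the write, and simply returns - no IPI
 * is ever needed.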
 */
static noinline int
spurious_fault(unsigned long error_code, unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int ret;

        /*
         * Only writes to RO or instruction fetches from NX may cause
         * spurious faults.
         *
         * These could be from user or supervisor accesses but the TLB
         * is only lazily flushed after a kernel mapping protection
         * change, so user accesses are not expected to cause spurious
         * faults.
         */
        if (error_code != (PF_WRITE | PF_PROT)
            && error_code != (PF_INSTR | PF_PROT))
                return 0;

        pgd = init_mm.pgd + pgd_index(address);
        if (!pgd_present(*pgd))
                return 0;

        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return 0;

        if (pud_large(*pud))
                return spurious_fault_check(error_code, (pte_t *) pud);

        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return 0;

        if (pmd_large(*pmd))
                return spurious_fault_check(error_code, (pte_t *) pmd);

        pte = pte_offset_kernel(pmd, address);
        if (!pte_present(*pte))
                return 0;

        ret = spurious_fault_check(error_code, pte);
        if (!ret)
                return 0;

        /*
         * Make sure we have permissions in PMD.
         * If not, then there's a bug in the page tables:
         */
        ret = spurious_fault_check(error_code, (pte_t *) pmd);
        WARN_ONCE(!ret, "PMD has incorrect permission bits\n");

        return ret;
}
NOKPROBE_SYMBOL(spurious_fault);

int show_unhandled_signals = 1;

static inline int
access_error(unsigned long error_code, struct vm_area_struct *vma)
{
        if (error_code & PF_WRITE) {
                /* write, present and write, not present: */
                if (unlikely(!(vma->vm_flags & VM_WRITE)))
                        return 1;
                return 0;
        }

        /* read, present: */
        if (unlikely(error_code & PF_PROT))
                return 1;

        /* read, not present: */
        if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
                return 1;

        return 0;
}

static int fault_in_kernel_space(unsigned long address)
{
        return address >= TASK_SIZE_MAX;
}

static inline bool smap_violation(int error_code, struct pt_regs *regs)
{
        if (!IS_ENABLED(CONFIG_X86_SMAP))
                return false;

        if (!static_cpu_has(X86_FEATURE_SMAP))
                return false;

        if (error_code & PF_USER)
                return false;

        if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
                return false;

        return true;
}

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * This function must be marked noinline because both callers
 * {,trace_}do_page_fault() have notrace on. Making this an actual function
 * guarantees there's a function trace entry.
 */
static noinline void
__do_page_fault(struct pt_regs *regs, unsigned long error_code,
                unsigned long address)
{
        struct vm_area_struct *vma;
        struct task_struct *tsk;
        struct mm_struct *mm;
        int fault, major = 0;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

        tsk = current;
        mm = tsk->mm;

        /*
         * Detect and handle instructions that would cause a page fault for
         * both a tracked kernel page and a userspace page.
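         *
         * (kmemcheck works by keeping its tracked pages non-present so
         * that every access faults; if we got here while it was in the
         * middle of single-stepping such an access, hide the page again
         * before doing anything else.)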
         */
        if (kmemcheck_active(regs))
                kmemcheck_hide(regs);
        prefetchw(&mm->mmap_sem);

        if (unlikely(kmmio_fault(regs, address)))
                return;

        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         *
         * This verifies that the fault happens in kernel space
         * (error_code & 4) == 0, and that the fault was not a
         * protection error (error_code & 9) == 0.
         */
        if (unlikely(fault_in_kernel_space(address))) {
                if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
                        if (vmalloc_fault(address) >= 0)
                                return;

                        if (kmemcheck_fault(regs, address, error_code))
                                return;
                }

                /* Can handle a stale RO->RW TLB: */
                if (spurious_fault(error_code, address))
                        return;

                /* kprobes don't want to hook the spurious faults: */
                if (kprobes_fault(regs))
                        return;
                /*
                 * Don't take the mm semaphore here. If we fixup a prefetch
                 * fault we could otherwise deadlock:
                 */
                bad_area_nosemaphore(regs, error_code, address);

                return;
        }

        /* kprobes don't want to hook the spurious faults: */
        if (unlikely(kprobes_fault(regs)))
                return;

        if (unlikely(error_code & PF_RSVD))
                pgtable_bad(regs, error_code, address);

        if (unlikely(smap_violation(error_code, regs))) {
                bad_area_nosemaphore(regs, error_code, address);
                return;
        }

        /*
         * If we're in an interrupt, have no user context or are running
         * in a region with pagefaults disabled then we must not take the fault
         */
        if (unlikely(faulthandler_disabled() || !mm)) {
                bad_area_nosemaphore(regs, error_code, address);
                return;
        }

        /*
         * It's safe to allow irq's after cr2 has been saved and the
         * vmalloc fault has been handled.
         *
         * User-mode registers count as a user access even for any
         * potential system fault or CPU buglet:
         */
        if (user_mode(regs)) {
                local_irq_enable();
                error_code |= PF_USER;
                flags |= FAULT_FLAG_USER;
        } else {
                if (regs->flags & X86_EFLAGS_IF)
                        local_irq_enable();
        }

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

        if (error_code & PF_WRITE)
                flags |= FAULT_FLAG_WRITE;

        /*
         * When running in the kernel we expect faults to occur only to
         * addresses in user space. All other faults represent errors in
         * the kernel and should generate an OOPS. Unfortunately, in the
         * case of an erroneous fault occurring in a code path which already
         * holds mmap_sem we will deadlock attempting to validate the fault
         * against the address space. Luckily the kernel only validly
         * references user space from well defined areas of code, which are
         * listed in the exceptions table.
         *
         * As the vast majority of faults will be valid we will only perform
         * the source reference check when there is a possibility of a
         * deadlock. Attempt to lock the address space, if we cannot we then
         * validate the source.
         * If this is invalid we can skip the address space check, thus
         * avoiding the deadlock:
         */
        if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
                if ((error_code & PF_USER) == 0 &&
                    !search_exception_tables(regs->ip)) {
                        bad_area_nosemaphore(regs, error_code, address);
                        return;
                }
retry:
                down_read(&mm->mmap_sem);
        } else {
                /*
                 * The above down_read_trylock() might have succeeded in
                 * which case we'll have missed the might_sleep() from
                 * down_read():
                 */
                might_sleep();
        }

        vma = find_vma(mm, address);
        if (unlikely(!vma)) {
                bad_area(regs, error_code, address);
                return;
        }
        if (likely(vma->vm_start <= address))
                goto good_area;
        if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
                bad_area(regs, error_code, address);
                return;
        }
        if (error_code & PF_USER) {
                /*
                 * Accessing the stack below %sp is always a bug.
                 * The large cushion allows instructions like enter
                 * and pusha to work. ("enter $65535, $31" pushes
                 * 32 pointers and then decrements %sp by 65535.)
                 */
                if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
                        bad_area(regs, error_code, address);
                        return;
                }
        }
        if (unlikely(expand_stack(vma, address))) {
                bad_area(regs, error_code, address);
                return;
        }

        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
good_area:
        if (unlikely(access_error(error_code, vma))) {
                bad_area_access_error(regs, error_code, address);
                return;
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if
         * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
         */
        fault = handle_mm_fault(mm, vma, address, flags);
        major |= fault & VM_FAULT_MAJOR;

        /*
         * If we need to retry, the mmap_sem has already been released,
         * and if there is a fatal signal pending there is no guarantee
         * that we made any progress. Handle this case first.
         */
        if (unlikely(fault & VM_FAULT_RETRY)) {
                /* Retry at most once */
                if (flags & FAULT_FLAG_ALLOW_RETRY) {
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        flags |= FAULT_FLAG_TRIED;
                        if (!fatal_signal_pending(tsk))
                                goto retry;
                }

                /* User mode? Just return to handle the fatal exception */
                if (flags & FAULT_FLAG_USER)
                        return;

                /* Not returning to user mode? Handle exceptions or die: */
                no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
                return;
        }

        up_read(&mm->mmap_sem);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                mm_fault_error(regs, error_code, address, fault);
                return;
        }

        /*
         * Major/minor page fault accounting. If any of the events
         * returned VM_FAULT_MAJOR, we account it as a major fault.
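         *
         * A retried fault is only counted once: 'major' accumulates
         * VM_FAULT_MAJOR across the initial attempt and the retry.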
         */
        if (major) {
                tsk->maj_flt++;
                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
        } else {
                tsk->min_flt++;
                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
        }

        check_v8086_mode(regs, address, tsk);
}
NOKPROBE_SYMBOL(__do_page_fault);

dotraplinkage void notrace
do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        unsigned long address = read_cr2(); /* Get the faulting address */
        enum ctx_state prev_state;

        /*
         * We must have this function tagged with __kprobes, notrace and
         * call read_cr2() before calling anything else, to avoid calling
         * any kind of tracing machinery before we've observed the CR2
         * value.
         *
         * exception_{enter,exit}() contain all sorts of tracepoints.
         */

        prev_state = exception_enter();
        __do_page_fault(regs, error_code, address);
        exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_page_fault);

#ifdef CONFIG_TRACING
static nokprobe_inline void
trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
                         unsigned long error_code)
{
        if (user_mode(regs))
                trace_page_fault_user(address, regs, error_code);
        else
                trace_page_fault_kernel(address, regs, error_code);
}

dotraplinkage void notrace
trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        /*
         * The exception_enter and tracepoint processing could
         * trigger another page fault (user space callchain
         * reading) and destroy the original cr2 value, so read
         * the faulting address now.
         */
        unsigned long address = read_cr2();
        enum ctx_state prev_state;

        prev_state = exception_enter();
        trace_page_fault_entries(address, regs, error_code);
        __do_page_fault(regs, error_code, address);
        exception_exit(prev_state);
}
NOKPROBE_SYMBOL(trace_do_page_fault);
#endif /* CONFIG_TRACING */