/*
 * Copyright (C) 1995  Linus Torvalds
 * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/magic.h>		/* STACK_END_MAGIC		*/
#include <linux/sched.h>		/* test_thread_flag(), ...	*/
#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
#include <linux/module.h>		/* search_exception_table	*/
#include <linux/bootmem.h>		/* max_low_pfn			*/
#include <linux/kprobes.h>		/* __kprobes, ...		*/
#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
#include <linux/perf_event.h>		/* perf_sw_event		*/
#include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
#include <linux/prefetch.h>		/* prefetchw			*/
#include <linux/context_tracking.h>	/* exception_enter(), ...	*/

#include <asm/traps.h>			/* dotraplinkage, ...		*/
#include <asm/pgalloc.h>		/* pgd_*(), ...			*/
#include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/
#include <asm/fixmap.h>			/* VSYSCALL_START		*/

#define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h>

/*
 * Page fault error code bits:
 *
 *   bit 0 == 0: no page found	1: protection fault
 *   bit 1 == 0: read access	1: write access
 *   bit 2 == 0: kernel-mode access	1: user-mode access
 *   bit 3 ==			1: use of reserved bit detected
 *   bit 4 ==			1: fault was an instruction fetch
 */
enum x86_pf_error_code {
	PF_PROT		=	1 << 0,
	PF_WRITE	=	1 << 1,
	PF_USER		=	1 << 2,
	PF_RSVD		=	1 << 3,
	PF_INSTR	=	1 << 4,
};
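/*
 * Illustrative sketch, not part of the original file: a worked decoding
 * of the error code bits above. A user-mode write to a present but
 * read-only page arrives as PF_PROT|PF_WRITE|PF_USER (0x7); a user-mode
 * read of an unmapped page arrives as plain PF_USER (0x4). The helper
 * name below is hypothetical and exists only as an example.
 */
static inline int pf_is_user_write(unsigned long error_code)
{
	/* Both the user-mode bit and the write bit must be set: */
	return (error_code & (PF_USER | PF_WRITE)) == (PF_USER | PF_WRITE);
}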
/*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
 */
static inline int __kprobes
kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
	if (unlikely(is_kmmio_active()))
		if (kmmio_handler(regs, addr) == 1)
			return -1;
	return 0;
}

static inline int __kprobes kprobes_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode_vm(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
		      unsigned char opcode, int *prefetch)
{
	unsigned char instr_hi = opcode & 0xf0;
	unsigned char instr_lo = opcode & 0x0f;

	switch (instr_hi) {
	case 0x20:
	case 0x30:
		/*
		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
		 * In X86_64 long mode, the CPU will signal invalid
		 * opcode if some of these prefixes are present so
		 * X86_64 will never get here anyway
		 */
		return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
	case 0x40:
		/*
		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes.
		 * Need to figure out under what instruction mode the
		 * instruction was issued. Could check the LDT for lm,
		 * but for now it's good enough to assume that long
		 * mode only uses well known segments or kernel.
		 */
		return (!user_mode(regs) || user_64bit_mode(regs));
#endif
	case 0x60:
		/* 0x64 thru 0x67 are valid prefixes in all modes. */
		return (instr_lo & 0xC) == 0x4;
	case 0xF0:
		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
		return !instr_lo || (instr_lo >> 1) == 1;
	case 0x00:
		/* Prefetch instruction is 0x0F0D or 0x0F18 */
		if (probe_kernel_address(instr, opcode))
			return 0;

		*prefetch = (instr_lo == 0xF) &&
			(opcode == 0x0D || opcode == 0x18);
		return 0;
	default:
		return 0;
	}
}

static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
	unsigned char *max_instr;
	unsigned char *instr;
	int prefetch = 0;

	/*
	 * If it was an exec (instruction fetch) fault on an NX page, then
	 * do not ignore the fault:
	 */
	if (error_code & PF_INSTR)
		return 0;

	instr = (void *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
		return 0;

	while (instr < max_instr) {
		unsigned char opcode;

		if (probe_kernel_address(instr, opcode))
			break;

		instr++;

		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
			break;
	}
	return prefetch;
}
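/*
 * Worked example (illustrative, not in the original file): for the
 * byte sequence 0x3E 0x0F 0x0D (a DS-prefixed AMD PREFETCH), the scan
 * above first reads 0x3E, which check_prefetch_opcode() accepts as a
 * prefix (instr_hi == 0x30, instr_lo & 7 == 6). The next byte, 0x0F,
 * takes the 0x00 case: the handler peeks at the following byte, 0x0D,
 * sets *prefetch = 1 and ends the scan, so the spurious fault is
 * silently ignored.
 */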
static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk, int fault)
{
	unsigned lsb = 0;
	siginfo_t info;

	info.si_signo	= si_signo;
	info.si_errno	= 0;
	info.si_code	= si_code;
	info.si_addr	= (void __user *)address;
	if (fault & VM_FAULT_HWPOISON_LARGE)
		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
	if (fault & VM_FAULT_HWPOISON)
		lsb = PAGE_SHIFT;
	info.si_addr_lsb = lsb;

	force_sig_info(si_signo, &info, tsk);
}

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */
	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

	return pmd_k;
}

void vmalloc_sync_all(void)
{
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	for (address = VMALLOC_START & PMD_MASK;
	     address >= TASK_SIZE && address < FIXADDR_TOP;
	     address += PMD_SIZE) {
		struct page *page;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			spinlock_t *pgt_lock;
			pmd_t *ret;

			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;

			spin_lock(pgt_lock);
			ret = vmalloc_sync_one(page_address(page), address);
			spin_unlock(pgt_lock);

			if (!ret)
				break;
		}
		spin_unlock(&pgd_lock);
	}
}

/*
 * 32-bit:
 *
 *   Handle a fault on the vmalloc or module mapping area
 */
static noinline __kprobes int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	WARN_ON_ONCE(in_nmi());

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}
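/*
 * Illustrative walkthrough (not in the original file): after vmalloc()
 * installs a new mapping in init_mm.pgd, a task whose page directory
 * was created earlier has no corresponding PMD entry. Its first access
 * faults, vmalloc_fault() above runs vmalloc_sync_one() against the
 * PGD read from CR3, the missing entry is copied with set_pmd(), and
 * the faulting instruction is restarted transparently.
 */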
/*
 * Did it hit the DOS screen memory VA from vm86 mode?
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
	unsigned long bit;

	if (!v8086_mode(regs))
		return;

	bit = (address - 0xA0000) >> PAGE_SHIFT;
	if (bit < 32)
		tsk->thread.screen_bitmap |= 1 << bit;
}

static bool low_pfn(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(address)];
	pmd_t *pmd;
	pte_t *pte;

#ifdef CONFIG_X86_PAE
	printk("*pdpt = %016Lx ", pgd_val(*pgd));
	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
		goto out;
#endif
	pmd = pmd_offset(pud_offset(pgd, address), address);
	printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already:
	 */
	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
	printk("\n");
}

#else /* CONFIG_X86_64: */

void vmalloc_sync_all(void)
{
	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
}

/*
 * 64-bit:
 *
 *   Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
static noinline __kprobes int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	WARN_ON_ONCE(in_nmi());

	/*
	 * Copy kernel mappings over when needed. This can also
	 * happen within a race in page table update. In the latter
	 * case just flush:
	 */
	pgd = pgd_offset(current->active_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;

	if (pgd_none(*pgd)) {
		set_pgd(pgd, *pgd_ref);
		arch_flush_lazy_mmu_mode();
	} else {
		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
	}

	/*
	 * Below here mismatches are bugs because these lower tables
	 * are shared:
	 */

	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;

	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
		BUG();

	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;

	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();

	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;

	pte = pte_offset_kernel(pmd, address);

	/*
	 * Don't use pte_page here, because the mappings can point
	 * outside mem_map, and the NUMA hash lookup cannot handle
	 * that:
	 */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();

	return 0;
}
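/*
 * Note (illustrative, not in the original file): on 64-bit only the
 * top-level PGD entry may legitimately differ between a task's page
 * table and the init_mm reference, so vmalloc_fault() above copies at
 * most one pgd_t. Every level below the PGD is shared, which is why a
 * PUD/PMD/PTE mismatch against the reference table is treated as a
 * hard BUG() instead of being synced.
 */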
#ifdef CONFIG_CPU_SUP_AMD
static const char errata93_warning[] =
KERN_ERR
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";
#endif

/*
 * No vm86 mode in 64-bit mode:
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
	pgd_t *pgd = base + pgd_index(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (bad_address(pgd))
		goto bad;

	printk("PGD %lx ", pgd_val(*pgd));

	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (bad_address(pud))
		goto bad;

	printk("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd))
		goto bad;

	printk("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte))
		goto bad;

	printk("PTE %lx", pte_val(*pte));
out:
	printk("\n");
	return;
bad:
	printk("BAD\n");
}

#endif /* CONFIG_X86_64 */

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64bit RIP register on C stepping K8.
 *
 * A lot of BIOSes that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32bits of RIP cleared.
 * Try to work around it here.
 *
 * Note we only handle faults in kernel here.
 * Does nothing on 32-bit.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
	    || boot_cpu_data.x86 != 0xf)
		return 0;

	if (address != regs->ip)
		return 0;

	if ((address >> 32) != 0)
		return 0;

	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		printk_once(errata93_warning);
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}

/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return. Any code
 * segment in LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
		return 1;
#endif
	return 0;
}
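/*
 * Worked example for is_errata93() (illustrative, not in the original
 * file; addresses made up): a kernel RIP such as 0xffffffff81002f00
 * shows up with its upper 32 bits cleared, so both the fault address
 * and regs->ip read 0x81002f00. OR-ing 0xffffffff00000000 back in
 * yields an address inside [_stext, _etext], so the handler warns
 * once, repairs regs->ip and resumes execution.
 */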
static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	unsigned long nr;

	/*
	 * Pentium F0 0F C7 C8 bug workaround:
	 */
	if (boot_cpu_has_bug(X86_BUG_F00F)) {
		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return 1;
		}
	}
#endif
	return 0;
}

static const char nx_warning[] = KERN_CRIT
"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";

static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
	if (!oops_may_print())
		return;

	if (error_code & PF_INSTR) {
		unsigned int level;
		pgd_t *pgd;
		pte_t *pte;

		pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
		pgd += pgd_index(address);

		pte = lookup_address_in_pgd(pgd, address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
	}

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %p\n", (void *) address);
	printk(KERN_ALERT "IP:");
	printk_address(regs->ip);

	dump_pagetable(address);
}
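/*
 * Illustrative example (not in the original file; address and symbol
 * made up) of the oops header printed by show_fault_oops() above for
 * a kernel NULL dereference:
 *
 *	BUG: unable to handle kernel NULL pointer dereference at 0000000000000010
 *	IP: [<ffffffff811b2d4a>] some_function+0x2a/0x90
 */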
static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
	    unsigned long address)
{
	struct task_struct *tsk;
	unsigned long flags;
	int sig;

	flags = oops_begin();
	tsk = current;
	sig = SIGKILL;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       tsk->comm, address);
	dump_pagetable(address);

	tsk->thread.cr2		= address;
	tsk->thread.trap_nr	= X86_TRAP_PF;
	tsk->thread.error_code	= error_code;

	if (__die("Bad pagetable", regs, error_code))
		sig = 0;

	oops_end(flags, regs, sig);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int signal, int si_code)
{
	struct task_struct *tsk = current;
	unsigned long *stackend;
	unsigned long flags;
	int sig;

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs)) {
		/*
		 * Any interrupt that takes a fault gets the fixup. This makes
		 * the below recursive fault logic only apply to faults from
		 * task context.
		 */
		if (in_interrupt())
			return;

		/*
		 * Per the above we're !in_interrupt(), aka. task context.
		 *
		 * In this case we need to make sure we're not recursively
		 * faulting through the emulate_vsyscall() logic.
		 */
		if (current_thread_info()->sig_on_uaccess_error && signal) {
			tsk->thread.trap_nr = X86_TRAP_PF;
			tsk->thread.error_code = error_code | PF_USER;
			tsk->thread.cr2 = address;

			/* XXX: hwpoison faults will set the wrong code. */
			force_sig_info_fault(signal, si_code, address, tsk, 0);
		}

		/*
		 * Barring that, we can do the fixup and be happy.
		 */
		return;
	}

	/*
	 * 32-bit:
	 *
	 *   Valid to do another page fault here, because if this fault
	 *   had been triggered by is_prefetch fixup_exception would have
	 *   handled it.
	 *
	 * 64-bit:
	 *
	 *   Hall of shame of CPU/BIOS bugs.
	 */
	if (is_prefetch(regs, error_code, address))
		return;

	if (is_errata93(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice:
	 */
	flags = oops_begin();

	show_fault_oops(regs, error_code, address);

	stackend = end_of_stack(tsk);
	if (tsk != &init_task && *stackend != STACK_END_MAGIC)
		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");

	tsk->thread.cr2		= address;
	tsk->thread.trap_nr	= X86_TRAP_PF;
	tsk->thread.error_code	= error_code;

	sig = SIGKILL;
	if (__die("Oops", regs, error_code))
		sig = 0;

	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_DEFAULT "CR2: %016lx\n", address);

	oops_end(flags, regs, sig);
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
		tsk->comm, task_pid_nr(tsk), address,
		(void *)regs->ip, (void *)regs->sp, error_code);

	print_vma_addr(KERN_CONT " in ", regs->ip);

	printk(KERN_CONT "\n");
}
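/*
 * Illustrative example (not in the original file; all values made up)
 * of the rate-limited message printed by show_signal_msg() above:
 *
 *	myapp[1234]: segfault at 10 ip 00000000004004e6 sp 00007ffc8a5e1230 error 6 in myapp[400000+1000]
 *
 * "error 6" decodes via the PF_* bits as PF_WRITE|PF_USER: a user-mode
 * write to a not-present page.
 */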
static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space:
		 */
		if (is_prefetch(regs, error_code, address))
			return;

		if (is_errata100(regs, address))
			return;

#ifdef CONFIG_X86_64
		/*
		 * Instruction fetch faults in the vsyscall page might need
		 * emulation.
		 */
		if (unlikely((error_code & PF_INSTR) &&
			     ((address & ~0xfff) == VSYSCALL_START))) {
			if (emulate_vsyscall(regs, address))
				return;
		}
#endif
		/* Kernel addresses are always protection faults: */
		if (address >= TASK_SIZE)
			error_code |= PF_PROT;

		if (likely(show_unhandled_signals))
			show_signal_msg(regs, error_code, address, tsk);

		tsk->thread.cr2		= address;
		tsk->thread.error_code	= error_code;
		tsk->thread.trap_nr	= X86_TRAP_PF;

		force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);

		return;
	}

	if (is_f00f_bug(regs, address))
		return;

	no_context(regs, error_code, address, SIGSEGV, si_code);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
	  unsigned int fault)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code = BUS_ADRERR;

	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die: */
	if (!(error_code & PF_USER)) {
		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
		return;
	}

	/* User-space => ok to do another page fault: */
	if (is_prefetch(regs, error_code, address))
		return;

	tsk->thread.cr2		= address;
	tsk->thread.error_code	= error_code;
	tsk->thread.trap_nr	= X86_TRAP_PF;

#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		printk(KERN_ERR
	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			tsk->comm, tsk->pid, address);
		code = BUS_MCEERR_AR;
	}
#endif
	force_sig_info_fault(SIGBUS, code, address, tsk, fault);
}

static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, unsigned int fault)
{
	if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
		up_read(&current->mm->mmap_sem);
		no_context(regs, error_code, address, 0, 0);
		return;
	}

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!(error_code & PF_USER)) {
			up_read(&current->mm->mmap_sem);
			no_context(regs, error_code, address,
				   SIGSEGV, SEGV_MAPERR);
			return;
		}

		up_read(&current->mm->mmap_sem);

		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed):
		 */
		pagefault_out_of_memory();
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			do_sigbus(regs, error_code, address, fault);
		else
			BUG();
	}
}
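/*
 * Illustrative summary (not in the original file) of how the error
 * codes from handle_mm_fault() are turned into signals by the helpers
 * above:
 *
 *	VM_FAULT_OOM              -> pagefault_out_of_memory() for user
 *	                             mode, no_context() for kernel mode
 *	VM_FAULT_SIGBUS           -> SIGBUS with BUS_ADRERR
 *	VM_FAULT_HWPOISON[_LARGE] -> SIGBUS with BUS_MCEERR_AR, with
 *	                             si_addr_lsb set to the page-order
 *	                             shift of the poisoned mapping
 */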
static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
	if ((error_code & PF_WRITE) && !pte_write(*pte))
		return 0;

	if ((error_code & PF_INSTR) && !pte_exec(*pte))
		return 0;

	return 1;
}

/*
 * Handle a spurious fault caused by a stale TLB entry.
 *
 * This allows us to lazily refresh the TLB when increasing the
 * permissions of a kernel page (RO -> RW or NX -> X). Doing it
 * eagerly is very expensive since that implies doing a full
 * cross-processor TLB flush, even if no stale TLB entries exist
 * on other processors.
 *
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 */
static noinline __kprobes int
spurious_fault(unsigned long error_code, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	/* Reserved-bit violation or user access to kernel space? */
	if (error_code & (PF_USER | PF_RSVD))
		return 0;

	pgd = init_mm.pgd + pgd_index(address);
	if (!pgd_present(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return 0;

	if (pud_large(*pud))
		return spurious_fault_check(error_code, (pte_t *) pud);

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return spurious_fault_check(error_code, (pte_t *) pmd);

	pte = pte_offset_kernel(pmd, address);
	if (!pte_present(*pte))
		return 0;

	ret = spurious_fault_check(error_code, pte);
	if (!ret)
		return 0;

	/*
	 * Make sure we have permissions in the PMD.
	 * If not, then there's a bug in the page tables:
	 */
	ret = spurious_fault_check(error_code, (pte_t *) pmd);
	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");

	return ret;
}
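/*
 * Worked example for spurious_fault() (illustrative, not in the
 * original file): set_memory_rw() flips a kernel page from RO to RW
 * without a cross-processor TLB flush. A CPU still holding the stale
 * read-only TLB entry then faults on a write: error_code is PF_WRITE
 * on a present kernel mapping. The walk above finds pte_write() set,
 * so the fault is spurious; returning 1 restarts the instruction,
 * which re-walks the page tables and picks up the new permissions.
 */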
int show_unhandled_signals = 1;

static inline int
access_error(unsigned long error_code, struct vm_area_struct *vma)
{
	if (error_code & PF_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* read, present: */
	if (unlikely(error_code & PF_PROT))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}

static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE_MAX;
}

static inline bool smap_violation(int error_code, struct pt_regs *regs)
{
	if (!IS_ENABLED(CONFIG_X86_SMAP))
		return false;

	if (!static_cpu_has(X86_FEATURE_SMAP))
		return false;

	if (error_code & PF_USER)
		return false;

	if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
		return false;

	return true;
}
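/*
 * Illustrative decision table for access_error() above (not in the
 * original file):
 *
 *	error_code        vma->vm_flags              result
 *	PF_WRITE          VM_WRITE set               OK (fault handled)
 *	PF_WRITE          VM_WRITE clear             SEGV_ACCERR
 *	read, PF_PROT     any                        SEGV_ACCERR
 *	read, !PF_PROT    VM_READ|VM_EXEC|VM_WRITE
 *	                  all clear                  SEGV_ACCERR
 *	read, !PF_PROT    otherwise                  OK (fault handled)
 */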
/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * This function must have noinline because both callers
 * {,trace_}do_page_fault() have notrace on. Having this an actual function
 * guarantees there's a function trace entry.
 */
static void __kprobes noinline
__do_page_fault(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct mm_struct *mm;
	int fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	tsk = current;
	mm = tsk->mm;

	/*
	 * Detect and handle instructions that would cause a page fault for
	 * both a tracked kernel page and a userspace page.
	 */
	if (kmemcheck_active(regs))
		kmemcheck_hide(regs);
	prefetchw(&mm->mmap_sem);

	if (unlikely(kmmio_fault(regs, address)))
		return;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 9) == 0.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
			if (vmalloc_fault(address) >= 0)
				return;

			if (kmemcheck_fault(regs, address, error_code))
				return;
		}

		/* Can handle a stale RO->RW TLB: */
		if (spurious_fault(error_code, address))
			return;

		/* kprobes don't want to hook the spurious faults: */
		if (kprobes_fault(regs))
			return;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock:
		 */
		bad_area_nosemaphore(regs, error_code, address);

		return;
	}

	/* kprobes don't want to hook the spurious faults: */
	if (unlikely(kprobes_fault(regs)))
		return;

	if (unlikely(error_code & PF_RSVD))
		pgtable_bad(regs, error_code, address);

	if (unlikely(smap_violation(error_code, regs))) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in an atomic region then we must not take the fault:
	 */
	if (unlikely(in_atomic() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	/*
	 * It's safe to allow irq's after cr2 has been saved and the
	 * vmalloc fault has been handled.
	 *
	 * User-mode registers count as a user access even for any
	 * potential system fault or CPU buglet:
	 */
	if (user_mode_vm(regs)) {
		local_irq_enable();
		error_code |= PF_USER;
		flags |= FAULT_FLAG_USER;
	} else {
		if (regs->flags & X86_EFLAGS_IF)
			local_irq_enable();
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	if (error_code & PF_WRITE)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space. All other faults represent errors in
	 * the kernel and should generate an OOPS. Unfortunately, in the
	 * case of an erroneous fault occurring in a code path which already
	 * holds mmap_sem we will deadlock attempting to validate the fault
	 * against the address space. Luckily the kernel only validly
	 * references user space from well defined areas of code, which are
	 * listed in the exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a
	 * deadlock. Attempt to lock the address space, if we cannot we then
	 * validate the source. If this is invalid we can skip the address
	 * space check, thus avoiding the deadlock:
	 */
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		if ((error_code & PF_USER) == 0 &&
		    !search_exception_tables(regs->ip)) {
			bad_area_nosemaphore(regs, error_code, address);
			return;
		}
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (error_code & PF_USER) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work. ("enter $65535, $31" pushes
		 * 32 pointers and then decrements %sp by 65535.)
		 */
		if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
			bad_area(regs, error_code, address);
			return;
		}
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	if (unlikely(access_error(error_code, vma))) {
		bad_area_access_error(regs, error_code, address);
		return;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault:
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_sem because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (unlikely((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		mm_fault_error(regs, error_code, address, fault);
		return;
	}

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation:
			 */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	check_v8086_mode(regs, address, tsk);

	up_read(&mm->mmap_sem);
}
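/*
 * Illustrative timeline for the retry path above (not in the original
 * file): a fault that must wait for I/O typically returns
 * VM_FAULT_RETRY with mmap_sem already dropped. The handler then
 * clears FAULT_FLAG_ALLOW_RETRY, sets FAULT_FLAG_TRIED and jumps back
 * to "retry:", so the second handle_mm_fault() attempt may block on
 * the page but can no longer bounce, bounding the work to two passes.
 */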
dotraplinkage void __kprobes notrace
do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	unsigned long address = read_cr2(); /* Get the faulting address */
	enum ctx_state prev_state;

	/*
	 * We must have this function tagged with __kprobes and notrace, and
	 * call read_cr2() before calling anything else, to avoid invoking
	 * any kind of tracing machinery before we've observed the CR2 value.
	 *
	 * exception_{enter,exit}() contain all sorts of tracepoints.
	 */

	prev_state = exception_enter();
	__do_page_fault(regs, error_code, address);
	exception_exit(prev_state);
}

#ifdef CONFIG_TRACING
static void trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
				     unsigned long error_code)
{
	if (user_mode(regs))
		trace_page_fault_user(address, regs, error_code);
	else
		trace_page_fault_kernel(address, regs, error_code);
}

dotraplinkage void __kprobes notrace
trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	/*
	 * The exception_enter and tracepoint processing could
	 * trigger another page fault (user-space callchain reading)
	 * and clobber the original cr2 value, so read the faulting
	 * address now.
	 */
	unsigned long address = read_cr2();
	enum ctx_state prev_state;

	prev_state = exception_enter();
	trace_page_fault_entries(address, regs, error_code);
	__do_page_fault(regs, error_code, address);
	exception_exit(prev_state);
}
#endif /* CONFIG_TRACING */