/*
 * Copyright (C) 1995  Linus Torvalds
 * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/magic.h>		/* STACK_END_MAGIC		*/
#include <linux/sched.h>		/* test_thread_flag(), ...	*/
#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
#include <linux/module.h>		/* search_exception_table	*/
#include <linux/bootmem.h>		/* max_low_pfn			*/
#include <linux/kprobes.h>		/* __kprobes, ...		*/
#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
#include <linux/perf_event.h>		/* perf_sw_event		*/
#include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
#include <linux/prefetch.h>		/* prefetchw			*/

#include <asm/traps.h>			/* dotraplinkage, ...		*/
#include <asm/pgalloc.h>		/* pgd_*(), ...			*/
#include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/
#include <asm/vsyscall.h>

/*
 * Page fault error code bits:
 *
 *   bit 0 ==	 0: no page found	1: protection fault
 *   bit 1 ==	 0: read access		1: write access
 *   bit 2 ==	 0: kernel-mode access	1: user-mode access
 *   bit 3 ==				1: use of reserved bit detected
 *   bit 4 ==				1: fault was an instruction fetch
 */
enum x86_pf_error_code {

	PF_PROT		=	1 << 0,
	PF_WRITE	=	1 << 1,
	PF_USER		=	1 << 2,
	PF_RSVD		=	1 << 3,
	PF_INSTR	=	1 << 4,
};

/*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
 */
static inline int __kprobes
kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
	if (unlikely(is_kmmio_active()))
		if (kmmio_handler(regs, addr) == 1)
			return -1;
	return 0;
}

static inline int __kprobes notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode_vm(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
		      unsigned char opcode, int *prefetch)
{
	unsigned char instr_hi = opcode & 0xf0;
	unsigned char instr_lo = opcode & 0x0f;

	switch (instr_hi) {
	case 0x20:
	case 0x30:
		/*
		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
		 * In X86_64 long mode, the CPU will signal invalid
		 * opcode if some of these prefixes are present so
		 * X86_64 will never get here anyway.
		 */
		return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
	case 0x40:
		/*
		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes.
		 * Need to figure out under what instruction mode the
		 * instruction was issued. Could check the LDT for lm,
		 * but for now it's good enough to assume that long
		 * mode only uses well known segments or kernel.
		 */
		return (!user_mode(regs) || user_64bit_mode(regs));
#endif
	case 0x60:
		/* 0x64 thru 0x67 are valid prefixes in all modes. */
		return (instr_lo & 0xC) == 0x4;
	case 0xF0:
		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
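		/*
		 * instr_lo 0x0 is the LOCK prefix (0xF0); instr_lo
		 * 0x2 and 0x3 are the REPNE (0xF2) and REP (0xF3)
		 * prefixes:
		 */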
		return !instr_lo || (instr_lo>>1) == 1;
	case 0x00:
		/* Prefetch instruction is 0x0F0D or 0x0F18 */
		if (probe_kernel_address(instr, opcode))
			return 0;

		*prefetch = (instr_lo == 0xF) &&
			(opcode == 0x0D || opcode == 0x18);
		return 0;
	default:
		return 0;
	}
}

static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
	unsigned char *max_instr;
	unsigned char *instr;
	int prefetch = 0;

	/*
	 * If it was an exec (instruction fetch) fault on an NX page, then
	 * do not ignore the fault:
	 */
	if (error_code & PF_INSTR)
		return 0;

	instr = (void *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
		return 0;

	while (instr < max_instr) {
		unsigned char opcode;

		if (probe_kernel_address(instr, opcode))
			break;

		instr++;

		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
			break;
	}
	return prefetch;
}

static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk, int fault)
{
	unsigned lsb = 0;
	siginfo_t info;

	info.si_signo	= si_signo;
	info.si_errno	= 0;
	info.si_code	= si_code;
	info.si_addr	= (void __user *)address;
	if (fault & VM_FAULT_HWPOISON_LARGE)
		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
	if (fault & VM_FAULT_HWPOISON)
		lsb = PAGE_SHIFT;
	info.si_addr_lsb = lsb;

	force_sig_info(si_signo, &info, tsk);
}

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */
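	/*
	 * Walk this pgd and the init_mm reference pgd ("_k") in
	 * lockstep down to the pmd level:
	 */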
	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

	return pmd_k;
}

void vmalloc_sync_all(void)
{
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	for (address = VMALLOC_START & PMD_MASK;
	     address >= TASK_SIZE && address < FIXADDR_TOP;
	     address += PMD_SIZE) {
		struct page *page;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			spinlock_t *pgt_lock;
			pmd_t *ret;

			/* the pgt_lock is only needed by Xen: */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;

			spin_lock(pgt_lock);
			ret = vmalloc_sync_one(page_address(page), address);
			spin_unlock(pgt_lock);

			if (!ret)
				break;
		}
		spin_unlock(&pgd_lock);
	}
}

/*
 * 32-bit:
 *
 *   Handle a fault on the vmalloc or module mapping area
 */
static noinline __kprobes int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	WARN_ON_ONCE(in_nmi());

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch.
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

/*
 * Did it hit the DOS screen memory VA from vm86 mode?
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
	unsigned long bit;

	if (!v8086_mode(regs))
		return;

	bit = (address - 0xA0000) >> PAGE_SHIFT;
	if (bit < 32)
		tsk->thread.screen_bitmap |= 1 << bit;
}

static bool low_pfn(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(address)];
	pmd_t *pmd;
	pte_t *pte;

#ifdef CONFIG_X86_PAE
	printk("*pdpt = %016Lx ", pgd_val(*pgd));
	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
		goto out;
#endif
	pmd = pmd_offset(pud_offset(pgd, address), address);
	printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already:
	 */
	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
	printk("\n");
}

#else /* CONFIG_X86_64: */

void vmalloc_sync_all(void)
{
	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
}

/*
 * 64-bit:
 *
 *   Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
static noinline __kprobes int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	WARN_ON_ONCE(in_nmi());

	/*
	 * Copy kernel mappings over when needed. This can also
	 * happen within a race in page table update. In the latter
	 * case just flush:
	 */
	pgd = pgd_offset(current->active_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;

	if (pgd_none(*pgd))
		set_pgd(pgd, *pgd_ref);
	else
		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

	/*
	 * Below here mismatches are bugs because these lower tables
	 * are shared:
	 */

	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;

	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
		BUG();

	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;

	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();

	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;

	pte = pte_offset_kernel(pmd, address);

	/*
	 * Don't use pte_page here, because the mappings can point
	 * outside mem_map, and the NUMA hash lookup cannot handle
	 * that:
	 */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();

	return 0;
}

static const char errata93_warning[] =
KERN_ERR
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";

/*
 * No vm86 mode in 64-bit mode:
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
	pgd_t *pgd = base + pgd_index(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (bad_address(pgd))
		goto bad;

	printk("PGD %lx ", pgd_val(*pgd));

	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (bad_address(pud))
		goto bad;

	printk("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd))
		goto bad;

printk("PMD %lx ", pmd_val(*pmd)); 475 if (!pmd_present(*pmd) || pmd_large(*pmd)) 476 goto out; 477 478 pte = pte_offset_kernel(pmd, address); 479 if (bad_address(pte)) 480 goto bad; 481 482 printk("PTE %lx", pte_val(*pte)); 483 out: 484 printk("\n"); 485 return; 486 bad: 487 printk("BAD\n"); 488 } 489 490 #endif /* CONFIG_X86_64 */ 491 492 /* 493 * Workaround for K8 erratum #93 & buggy BIOS. 494 * 495 * BIOS SMM functions are required to use a specific workaround 496 * to avoid corruption of the 64bit RIP register on C stepping K8. 497 * 498 * A lot of BIOS that didn't get tested properly miss this. 499 * 500 * The OS sees this as a page fault with the upper 32bits of RIP cleared. 501 * Try to work around it here. 502 * 503 * Note we only handle faults in kernel here. 504 * Does nothing on 32-bit. 505 */ 506 static int is_errata93(struct pt_regs *regs, unsigned long address) 507 { 508 #ifdef CONFIG_X86_64 509 if (address != regs->ip) 510 return 0; 511 512 if ((address >> 32) != 0) 513 return 0; 514 515 address |= 0xffffffffUL << 32; 516 if ((address >= (u64)_stext && address <= (u64)_etext) || 517 (address >= MODULES_VADDR && address <= MODULES_END)) { 518 printk_once(errata93_warning); 519 regs->ip = address; 520 return 1; 521 } 522 #endif 523 return 0; 524 } 525 526 /* 527 * Work around K8 erratum #100 K8 in compat mode occasionally jumps 528 * to illegal addresses >4GB. 529 * 530 * We catch this in the page fault handler because these addresses 531 * are not reachable. Just detect this case and return. Any code 532 * segment in LDT is compatibility mode. 533 */ 534 static int is_errata100(struct pt_regs *regs, unsigned long address) 535 { 536 #ifdef CONFIG_X86_64 537 if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32)) 538 return 1; 539 #endif 540 return 0; 541 } 542 543 static int is_f00f_bug(struct pt_regs *regs, unsigned long address) 544 { 545 #ifdef CONFIG_X86_F00F_BUG 546 unsigned long nr; 547 548 /* 549 * Pentium F0 0F C7 C8 bug workaround: 550 */ 551 if (boot_cpu_data.f00f_bug) { 552 nr = (address - idt_descr.address) >> 3; 553 554 if (nr == 6) { 555 do_invalid_op(regs, 0); 556 return 1; 557 } 558 } 559 #endif 560 return 0; 561 } 562 563 static const char nx_warning[] = KERN_CRIT 564 "kernel tried to execute NX-protected page - exploit attempt? 
static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
	if (!oops_may_print())
		return;

	if (error_code & PF_INSTR) {
		unsigned int level;

		pte_t *pte = lookup_address(address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			printk(nx_warning, current_uid());
	}

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %p\n", (void *) address);
	printk(KERN_ALERT "IP:");
	printk_address(regs->ip, 1);

	dump_pagetable(address);
}

static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
	    unsigned long address)
{
	struct task_struct *tsk;
	unsigned long flags;
	int sig;

	flags = oops_begin();
	tsk = current;
	sig = SIGKILL;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       tsk->comm, address);
	dump_pagetable(address);

	tsk->thread.cr2		= address;
	tsk->thread.trap_no	= 14;
	tsk->thread.error_code	= error_code;

	if (__die("Bad pagetable", regs, error_code))
		sig = 0;

	oops_end(flags, regs, sig);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address)
{
	struct task_struct *tsk = current;
	unsigned long *stackend;
	unsigned long flags;
	int sig;

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * 32-bit:
	 *
	 *   Valid to do another page fault here, because if this fault
	 *   had been triggered by is_prefetch, fixup_exception would
	 *   have handled it.
	 *
	 * 64-bit:
	 *
	 *   Hall of shame of CPU/BIOS bugs.
	 */
	if (is_prefetch(regs, error_code, address))
		return;

	if (is_errata93(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice:
	 */
	flags = oops_begin();

	show_fault_oops(regs, error_code, address);

	stackend = end_of_stack(tsk);
	if (tsk != &init_task && *stackend != STACK_END_MAGIC)
		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

	tsk->thread.cr2		= address;
	tsk->thread.trap_no	= 14;
	tsk->thread.error_code	= error_code;

	sig = SIGKILL;
	if (__die("Oops", regs, error_code))
		sig = 0;

	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_EMERG "CR2: %016lx\n", address);

	oops_end(flags, regs, sig);
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

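	/*
	 * Segfaults of init (pid 1) are reported at KERN_EMERG;
	 * everything else is reported at KERN_INFO:
	 */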
	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
		tsk->comm, task_pid_nr(tsk), address,
		(void *)regs->ip, (void *)regs->sp, error_code);

	print_vma_addr(KERN_CONT " in ", regs->ip);

	printk(KERN_CONT "\n");
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space:
		 */
		if (is_prefetch(regs, error_code, address))
			return;

		if (is_errata100(regs, address))
			return;

#ifdef CONFIG_X86_64
		/*
		 * Instruction fetch faults in the vsyscall page might need
		 * emulation.
		 */
		if (unlikely((error_code & PF_INSTR) &&
			     ((address & ~0xfff) == VSYSCALL_START))) {
			if (emulate_vsyscall(regs, address))
				return;
		}
#endif

		if (unlikely(show_unhandled_signals))
			show_signal_msg(regs, error_code, address, tsk);

		/* Kernel addresses are always protection faults: */
		tsk->thread.cr2		= address;
		tsk->thread.error_code	= error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no	= 14;

		force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);

		return;
	}

	if (is_f00f_bug(regs, address))
		return;

	no_context(regs, error_code, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

/* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */
static void
out_of_memory(struct pt_regs *regs, unsigned long error_code,
	      unsigned long address)
{
	/*
	 * We ran out of memory, call the OOM killer, and return to userspace
	 * (which will retry the fault, or kill us if we got oom-killed):
	 */
	up_read(&current->mm->mmap_sem);

	pagefault_out_of_memory();
}
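
/*
 * Deliver SIGBUS for a failed user-space access. If the fault hit
 * hwpoisoned (hardware-corrupted) memory, report an action-required
 * machine check (BUS_MCEERR_AR) instead of a plain bus error:
 */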
static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
	  unsigned int fault)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code = BUS_ADRERR;

	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die: */
	if (!(error_code & PF_USER)) {
		no_context(regs, error_code, address);
		return;
	}

	/* User-space => ok to do another page fault: */
	if (is_prefetch(regs, error_code, address))
		return;

	tsk->thread.cr2		= address;
	tsk->thread.error_code	= error_code;
	tsk->thread.trap_no	= 14;

#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		printk(KERN_ERR
	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			tsk->comm, tsk->pid, address);
		code = BUS_MCEERR_AR;
	}
#endif
	force_sig_info_fault(SIGBUS, code, address, tsk, fault);
}

static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, unsigned int fault)
{
	/*
	 * The page fault was interrupted by SIGKILL. We have no reason
	 * to continue handling it.
	 */
	if (fatal_signal_pending(current)) {
		if (!(fault & VM_FAULT_RETRY))
			up_read(&current->mm->mmap_sem);
		if (!(error_code & PF_USER))
			no_context(regs, error_code, address);
		return 1;
	}
	if (!(fault & VM_FAULT_ERROR))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!(error_code & PF_USER)) {
			up_read(&current->mm->mmap_sem);
			no_context(regs, error_code, address);
			return 1;
		}

		out_of_memory(regs, error_code, address);
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			do_sigbus(regs, error_code, address, fault);
		else
			BUG();
	}
	return 1;
}

static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
	if ((error_code & PF_WRITE) && !pte_write(*pte))
		return 0;

	if ((error_code & PF_INSTR) && !pte_exec(*pte))
		return 0;

	return 1;
}

/*
 * Handle a spurious fault caused by a stale TLB entry.
 *
 * This allows us to lazily refresh the TLB when increasing the
 * permissions of a kernel page (RO -> RW or NX -> X). Doing it
 * eagerly is very expensive since that implies doing a full
 * cross-processor TLB flush, even if no stale TLB entries exist
 * on other processors.
 *
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 */
static noinline __kprobes int
spurious_fault(unsigned long error_code, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	/* Reserved-bit violation or user access to kernel space? */
	if (error_code & (PF_USER | PF_RSVD))
		return 0;

	pgd = init_mm.pgd + pgd_index(address);
	if (!pgd_present(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return 0;

	if (pud_large(*pud))
		return spurious_fault_check(error_code, (pte_t *) pud);

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return spurious_fault_check(error_code, (pte_t *) pmd);

	/*
	 * Note: don't use pte_present() here, since it returns true
	 * if the _PAGE_PROTNONE bit is set. However, this aliases the
	 * _PAGE_GLOBAL bit, which for kernel pages gives false positives
	 * when CONFIG_DEBUG_PAGEALLOC is used.
	 */
	pte = pte_offset_kernel(pmd, address);
	if (!(pte_flags(*pte) & _PAGE_PRESENT))
		return 0;

	ret = spurious_fault_check(error_code, pte);
	if (!ret)
		return 0;

	/*
	 * Make sure we have permissions in PMD.
	 * If not, then there's a bug in the page tables:
	 */
	ret = spurious_fault_check(error_code, (pte_t *) pmd);
	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");

	return ret;
}

int show_unhandled_signals = 1;

static inline int
access_error(unsigned long error_code, struct vm_area_struct *vma)
{
	if (error_code & PF_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* read, present: */
	if (unlikely(error_code & PF_PROT))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}

static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE_MAX;
}

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
dotraplinkage void __kprobes
do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	unsigned long address;
	struct mm_struct *mm;
	int fault;
	int write = error_code & PF_WRITE;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
					(write ? FAULT_FLAG_WRITE : 0);

	tsk = current;
	mm = tsk->mm;

	/* Get the faulting address: */
	address = read_cr2();

	/*
	 * Detect and handle instructions that would cause a page fault for
	 * both a tracked kernel page and a userspace page.
	 */
	if (kmemcheck_active(regs))
		kmemcheck_hide(regs);
	prefetchw(&mm->mmap_sem);

	if (unlikely(kmmio_fault(regs, address)))
		return;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 9) == 0.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
			if (vmalloc_fault(address) >= 0)
				return;

			if (kmemcheck_fault(regs, address, error_code))
				return;
		}

		/* Can handle a stale RO->RW TLB: */
		if (spurious_fault(error_code, address))
			return;

		/* kprobes don't want to hook the spurious faults: */
		if (notify_page_fault(regs))
			return;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock:
		 */
		bad_area_nosemaphore(regs, error_code, address);

		return;
	}

	/* kprobes don't want to hook the spurious faults: */
	if (unlikely(notify_page_fault(regs)))
		return;
	/*
	 * It's safe to allow irq's after cr2 has been saved and the
	 * vmalloc fault has been handled.
	 *
	 * User-mode registers count as a user access even for any
	 * potential system fault or CPU buglet:
	 */
	if (user_mode_vm(regs)) {
		local_irq_enable();
		error_code |= PF_USER;
	} else {
		if (regs->flags & X86_EFLAGS_IF)
			local_irq_enable();
	}

	if (unlikely(error_code & PF_RSVD))
		pgtable_bad(regs, error_code, address);

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in an atomic region then we must not take the fault:
	 */
	if (unlikely(in_atomic() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space. All other faults represent errors in
	 * the kernel and should generate an OOPS. Unfortunately, in the
	 * case of an erroneous fault occurring in a code path which already
	 * holds mmap_sem we will deadlock attempting to validate the fault
	 * against the address space. Luckily the kernel only validly
	 * references user space from well defined areas of code, which are
	 * listed in the exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a
	 * deadlock. Attempt to lock the address space, if we cannot we then
	 * validate the source. If this is invalid we can skip the address
	 * space check, thus avoiding the deadlock:
	 */
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		if ((error_code & PF_USER) == 0 &&
		    !search_exception_tables(regs->ip)) {
			bad_area_nosemaphore(regs, error_code, address);
			return;
		}
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (error_code & PF_USER) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work. ("enter $65535, $31" pushes
		 * 32 pointers and then decrements %sp by 65535.)
		 */
		if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
			bad_area(regs, error_code, address);
			return;
		}
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	if (unlikely(access_error(error_code, vma))) {
		bad_area_access_error(regs, error_code, address);
		return;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault:
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
		if (mm_fault_error(regs, error_code, address, fault))
			return;
	}

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation:
			 */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			goto retry;
		}
	}

	check_v8086_mode(regs, address, tsk);

	up_read(&mm->mmap_sem);
}