/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Derived from "arch/i386/mm/fault.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Modified by Cort Dougan and Paul Mackerras.
 *
 * Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/extable.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <linux/hugetlb.h>
#include <linux/uaccess.h>

#include <asm/firmware.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/siginfo.h>
#include <asm/debug.h>

static inline bool notify_page_fault(struct pt_regs *regs)
{
	bool ret = false;

#ifdef CONFIG_KPROBES
	/* kprobe_running() needs smp_processor_id() */
	if (!user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 11))
			ret = true;
		preempt_enable();
	}
#endif /* CONFIG_KPROBES */

	if (unlikely(debugger_fault_handler(regs)))
		ret = true;

	return ret;
}

/*
 * Check whether the instruction at regs->nip is a store using
 * an update addressing form which will update r1.
 */
static bool store_updates_sp(struct pt_regs *regs)
{
	unsigned int inst;

	if (get_user(inst, (unsigned int __user *)regs->nip))
		return false;
	/* check for 1 in the rA field */
	if (((inst >> 16) & 0x1f) != 1)
		return false;
	/* check major opcode */
	switch (inst >> 26) {
	case 37:	/* stwu */
	case 39:	/* stbu */
	case 45:	/* sthu */
	case 53:	/* stfsu */
	case 55:	/* stfdu */
		return true;
	case 62:	/* std or stdu */
		return (inst & 3) == 1;
	case 31:
		/* check minor opcode */
		switch ((inst >> 1) & 0x3ff) {
		case 181:	/* stdux */
		case 183:	/* stwux */
		case 247:	/* stbux */
		case 439:	/* sthux */
		case 695:	/* stfsux */
		case 759:	/* stfdux */
			return true;
		}
	}
	return false;
}
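
/*
 * Worked example for the check above (illustrative; not part of the
 * original source): the common prologue instruction "stwu r1,-16(r1)"
 * encodes as 0x9421fff0, so (inst >> 26) == 37 and
 * ((inst >> 16) & 0x1f) == 1, and store_updates_sp() reports it as a
 * stack-pointer update.
 */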

/*
 * do_page_fault error handling helpers
 */

static int
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long address, int si_code,
		       int pkey)
{
	/*
	 * If we are in kernel mode, bail out with a SEGV, this will
	 * be caught by the assembly which will restore the non-volatile
	 * registers before calling bad_page_fault()
	 */
	if (!user_mode(regs))
		return SIGSEGV;

	_exception_pkey(SIGSEGV, regs, si_code, address, pkey);

	return 0;
}

static noinline int bad_area_nosemaphore(struct pt_regs *regs, unsigned long address)
{
	return __bad_area_nosemaphore(regs, address, SEGV_MAPERR, 0);
}

static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code,
		      int pkey)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	up_read(&mm->mmap_sem);

	return __bad_area_nosemaphore(regs, address, si_code, pkey);
}

static noinline int bad_area(struct pt_regs *regs, unsigned long address)
{
	return __bad_area(regs, address, SEGV_MAPERR, 0);
}

static int bad_key_fault_exception(struct pt_regs *regs, unsigned long address,
				   int pkey)
{
	return __bad_area_nosemaphore(regs, address, SEGV_PKUERR, pkey);
}

static noinline int bad_access(struct pt_regs *regs, unsigned long address)
{
	return __bad_area(regs, address, SEGV_ACCERR, 0);
}

static int do_sigbus(struct pt_regs *regs, unsigned long address,
		     unsigned int fault)
{
	siginfo_t info;
	unsigned int lsb = 0;

	if (!user_mode(regs))
		return SIGBUS;

	current->thread.trap_nr = BUS_ADRERR;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *)address;
#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			current->comm, current->pid, address);
		info.si_code = BUS_MCEERR_AR;
	}

	if (fault & VM_FAULT_HWPOISON_LARGE)
		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
	if (fault & VM_FAULT_HWPOISON)
		lsb = PAGE_SHIFT;
#endif
	info.si_addr_lsb = lsb;
	force_sig_info(SIGBUS, &info, current);
	return 0;
}
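
/*
 * Note on si_addr_lsb above: for a memory-failure SIGBUS it tells
 * userspace how large the poisoned region is, as a power-of-two shift:
 * PAGE_SHIFT for a single bad page, or the corresponding huge-page
 * shift when VM_FAULT_HWPOISON_LARGE is set.
 */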

static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
{
	/*
	 * Kernel page fault interrupted by SIGKILL. We have no reason to
	 * continue processing.
	 */
	if (fatal_signal_pending(current) && !user_mode(regs))
		return SIGKILL;

	/* Out of memory */
	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, or some other thing happened to us that
		 * made us unable to handle the page fault gracefully.
		 */
		if (!user_mode(regs))
			return SIGSEGV;
		pagefault_out_of_memory();
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			return do_sigbus(regs, addr, fault);
		else if (fault & VM_FAULT_SIGSEGV)
			return bad_area_nosemaphore(regs, addr);
		else
			BUG();
	}
	return 0;
}

/* Is this a bad kernel fault? */
static bool bad_kernel_fault(bool is_exec, unsigned long error_code,
			     unsigned long address)
{
	if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT))) {
		printk_ratelimited(KERN_CRIT "kernel tried to execute"
				   " exec-protected page (%lx) -"
				   " exploit attempt? (uid: %d)\n",
				   address, from_kuid(&init_user_ns,
						      current_uid()));
	}
	return is_exec || (address >= TASK_SIZE);
}

static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
				struct vm_area_struct *vma,
				bool store_update_sp)
{
	/*
	 * N.B. The POWER/Open ABI allows programs to access up to
	 * 288 bytes below the stack pointer.
	 * The kernel signal delivery code writes up to about 1.5kB
	 * below the stack pointer (r1) before decrementing it.
	 * The exec code can write slightly over 640kB to the stack
	 * before setting the user r1. Thus we allow the stack to
	 * expand to 1MB without further checks.
	 */
	if (address + 0x100000 < vma->vm_end) {
		/* get user regs even if this fault is in kernel mode */
		struct pt_regs *uregs = current->thread.regs;
		if (uregs == NULL)
			return true;

		/*
		 * A user-mode access to an address a long way below
		 * the stack pointer is only valid if the instruction
		 * is one which would update the stack pointer to the
		 * address accessed if the instruction completed,
		 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
		 * (or the byte, halfword, float or double forms).
		 *
		 * If we don't check this then any write to the area
		 * between the last mapped region and the stack will
		 * expand the stack rather than segfaulting.
		 */
		if (address + 2048 < uregs->gpr[1] && !store_update_sp)
			return true;
	}
	return false;
}

static bool access_error(bool is_write, bool is_exec,
			 struct vm_area_struct *vma)
{
	/*
	 * Allow execution from readable areas if the MMU does not
	 * provide separate controls over reading and executing.
	 *
	 * Note: That code used to not be enabled for 4xx/BookE.
	 * It is now as I/D cache coherency for these is done at
	 * set_pte_at() time and I see no reason why the test
	 * below wouldn't be valid on those processors. This -may-
	 * break programs compiled with a really old ABI though.
	 */
	if (is_exec) {
		return !(vma->vm_flags & VM_EXEC) &&
		       (cpu_has_feature(CPU_FTR_NOEXECUTE) ||
			!(vma->vm_flags & (VM_READ | VM_WRITE)));
	}

	if (is_write) {
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return true;
		return false;
	}

	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return true;
	/*
	 * We should ideally do the vma pkey access check here. But in the
	 * fault path, handle_mm_fault() also does the same check. To avoid
	 * these multiple checks, we skip it here and handle access error due
	 * to pkeys later.
	 */
	return false;
}
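
/*
 * Summary of the execute branch above: an instruction fetch is an
 * access error unless the VMA has VM_EXEC, with one concession: when
 * the CPU cannot enforce no-execute separately (!CPU_FTR_NOEXECUTE),
 * a fetch from a mapping that is at least readable or writable is
 * tolerated.
 */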

#ifdef CONFIG_PPC_SMLPAR
static inline void cmo_account_page_fault(void)
{
	if (firmware_has_feature(FW_FEATURE_CMO)) {
		u32 page_ins;

		preempt_disable();
		page_ins = be32_to_cpu(get_lppaca()->page_ins);
		page_ins += 1 << PAGE_FACTOR;
		get_lppaca()->page_ins = cpu_to_be32(page_ins);
		preempt_enable();
	}
}
#else
static inline void cmo_account_page_fault(void) { }
#endif /* CONFIG_PPC_SMLPAR */

#ifdef CONFIG_PPC_STD_MMU
static void sanity_check_fault(bool is_write, unsigned long error_code)
{
	/*
	 * For hash translation mode, we should never get a
	 * PROTFAULT. Any update to the pte to reduce access will result in us
	 * removing the hash page table entry, thus resulting in a DSISR_NOHPTE
	 * fault instead of DSISR_PROTFAULT.
	 *
	 * A pte update to relax the access will not result in a hash page table
	 * entry invalidate and hence can result in DSISR_PROTFAULT.
	 * ptep_set_access_flags() doesn't do a hpte flush. This is why we have
	 * the special !is_write in the below conditional.
	 *
	 * For platforms that don't support a coherent icache but do support
	 * a per-page noexec bit, we set things up so that the D/I cache
	 * sync happens via a fault. But that is handled by the low level
	 * hash fault code (hash_page_do_lazy_icache()) and we should not
	 * reach here in that case.
	 *
	 * For wrong accesses that can result in a PROTFAULT, the
	 * vma->vm_flags check above should handle them, and hence we fall
	 * through to the bad_area handling correctly.
	 *
	 * For embedded processors with per-page exec support that don't have
	 * a coherent icache, we do get a PROTFAULT and handle the D/I cache
	 * sync in set_pte_at() while taking the noexec/prot fault. Hence this
	 * WARN_ON is conditional on the server MMU.
	 *
	 * For radix, we can get a prot fault for the autonuma case, because
	 * the radix page table will have the pages marked no-access for user.
	 */
	if (!radix_enabled() && !is_write)
		WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
}
#else
static void sanity_check_fault(bool is_write, unsigned long error_code) { }
#endif /* CONFIG_PPC_STD_MMU */

/*
 * Define the correct "is_write" bit in error_code based
 * on the processor family
 */
#if (defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
#define page_fault_is_write(__err)	((__err) & ESR_DST)
#define page_fault_is_bad(__err)	(0)
#else
#define page_fault_is_write(__err)	((__err) & DSISR_ISSTORE)
#if defined(CONFIG_PPC_8xx)
#define page_fault_is_bad(__err)	((__err) & DSISR_NOEXEC_OR_G)
#elif defined(CONFIG_PPC64)
#define page_fault_is_bad(__err)	((__err) & DSISR_BAD_FAULT_64S)
#else
#define page_fault_is_bad(__err)	((__err) & DSISR_BAD_FAULT_32S)
#endif
#endif
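
/*
 * For example, on a 64-bit server processor a faulting store sets
 * DSISR_ISSTORE, so page_fault_is_write() is true, while unexpected
 * DSISR bits (the DSISR_BAD_FAULT_64S mask) make page_fault_is_bad()
 * flag the fault as hopeless before any VMA lookup is attempted.
 */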

/*
 * For 600- and 800-family processors, the error_code parameter is DSISR
 * for a data fault, SRR1 for an instruction fault. For 400-family processors
 * the error_code parameter is ESR for a data fault, 0 for an instruction
 * fault.
 * For 64-bit processors, the error_code parameter is
 *  - DSISR for a non-SLB data access fault,
 *  - SRR1 & 0x08000000 for a non-SLB instruction access fault,
 *  - 0 for any SLB fault.
 *
 * The return value is 0 if the fault was handled, or the signal
 * number if this is a kernel fault that can't be handled here.
 */
static int __do_page_fault(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	int is_exec = TRAP(regs) == 0x400;
	int is_user = user_mode(regs);
	int is_write = page_fault_is_write(error_code);
	int fault, major = 0;
	bool store_update_sp = false;

	if (notify_page_fault(regs))
		return 0;

	if (unlikely(page_fault_is_bad(error_code))) {
		if (is_user) {
			_exception(SIGBUS, regs, BUS_OBJERR, address);
			return 0;
		}
		return SIGBUS;
	}

	/* Additional sanity check(s) */
	sanity_check_fault(is_write, error_code);

	/*
	 * The kernel should never take an execute fault nor should it
	 * take a page fault to a kernel address.
	 */
	if (unlikely(!is_user && bad_kernel_fault(is_exec, error_code, address)))
		return SIGSEGV;

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in a region with pagefaults disabled then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		if (is_user)
			printk_ratelimited(KERN_ERR "Page fault in user mode"
					   " with faulthandler_disabled()=%d"
					   " mm=%p\n",
					   faulthandler_disabled(), mm);
		return bad_area_nosemaphore(regs, address);
	}

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	if (error_code & DSISR_KEYFAULT)
		return bad_key_fault_exception(regs, address,
					       get_mm_addr_key(mm, address));

	/*
	 * We want to do this outside mmap_sem, because reading code around nip
	 * can result in a fault, which will cause a deadlock when called with
	 * mmap_sem held.
	 */
	if (is_write && is_user)
		store_update_sp = store_updates_sp(regs);

	if (is_user)
		flags |= FAULT_FLAG_USER;
	if (is_write)
		flags |= FAULT_FLAG_WRITE;
	if (is_exec)
		flags |= FAULT_FLAG_INSTRUCTION;

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space. All other faults represent errors in the
	 * kernel and should generate an OOPS. Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space. Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source. If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		if (!is_user && !search_exception_tables(regs->nip))
			return bad_area_nosemaphore(regs, address);

retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}
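
	/*
	 * Note: the retry label above re-takes mmap_sem unconditionally;
	 * when handle_mm_fault() returns VM_FAULT_RETRY it has already
	 * dropped the semaphore (see the VM_FAULT_RETRY handling below).
	 */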

	vma = find_vma(mm, address);
	if (unlikely(!vma))
		return bad_area(regs, address);
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		return bad_area(regs, address);

	/* The stack is being expanded, check if it's valid */
	if (unlikely(bad_stack_expansion(regs, address, vma, store_update_sp)))
		return bad_area(regs, address);

	/* Try to expand it */
	if (unlikely(expand_stack(vma, address)))
		return bad_area(regs, address);

good_area:
	if (unlikely(access_error(is_write, is_exec, vma)))
		return bad_access(regs, address);

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

#ifdef CONFIG_PPC_MEM_KEYS
	/*
	 * We skipped checking for access error due to key earlier.
	 * Check that using the handle_mm_fault() error return.
	 */
	if (unlikely(fault & VM_FAULT_SIGSEGV) &&
	    !arch_vma_access_permitted(vma, is_write, is_exec, 0)) {

		int pkey = vma_pkey(vma);

		up_read(&mm->mmap_sem);
		return bad_key_fault_exception(regs, address, pkey);
	}
#endif /* CONFIG_PPC_MEM_KEYS */

	major |= fault & VM_FAULT_MAJOR;

	/*
	 * Handle the retry right now, the mmap_sem has been released in that
	 * case.
	 */
	if (unlikely(fault & VM_FAULT_RETRY)) {
		/* We retry only once */
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation.
			 */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			if (!fatal_signal_pending(current))
				goto retry;
		}

		/*
		 * User mode? Just return to handle the fatal exception;
		 * otherwise return to bad_page_fault.
		 */
		return is_user ? 0 : SIGBUS;
	}

	up_read(&current->mm->mmap_sem);

	if (unlikely(fault & VM_FAULT_ERROR))
		return mm_fault_error(regs, address, fault);

	/*
	 * Major/minor page fault accounting.
	 */
	if (major) {
		current->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
		cmo_account_page_fault();
	} else {
		current->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
	}
	return 0;
}
NOKPROBE_SYMBOL(__do_page_fault);
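
/*
 * do_page_fault() below is the C entry point reached from the assembly
 * exception handlers. It wraps __do_page_fault() in exception_enter()/
 * exception_exit() so that context tracking (e.g. for NO_HZ_FULL) sees
 * the transition out of and back into user context.
 */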

int do_page_fault(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code)
{
	enum ctx_state prev_state = exception_enter();
	int rc = __do_page_fault(regs, address, error_code);
	exception_exit(prev_state);
	return rc;
}
NOKPROBE_SYMBOL(do_page_fault);

/*
 * bad_page_fault is called when we have a bad access from the kernel.
 * It is called from the DSI and ISI handlers in head.S and from some
 * of the procedures in traps.c.
 */
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this fault? */
	if ((entry = search_exception_tables(regs->nip)) != NULL) {
		regs->nip = extable_fixup(entry);
		return;
	}

	/* kernel has accessed a bad area */

	switch (TRAP(regs)) {
	case 0x300:	/* data storage (DSI) */
	case 0x380:	/* data segment (SLB miss) */
		printk(KERN_ALERT "Unable to handle kernel paging request for "
			"data at address 0x%08lx\n", regs->dar);
		break;
	case 0x400:	/* instruction storage (ISI) */
	case 0x480:	/* instruction segment (SLB miss) */
		printk(KERN_ALERT "Unable to handle kernel paging request for "
			"instruction fetch\n");
		break;
	case 0x600:	/* alignment interrupt */
		printk(KERN_ALERT "Unable to handle kernel paging request for "
			"unaligned access at address 0x%08lx\n", regs->dar);
		break;
	default:
		printk(KERN_ALERT "Unable to handle kernel paging request for "
			"unknown fault\n");
		break;
	}
	printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
		regs->nip);

	if (task_stack_end_corrupted(current))
		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

	die("Kernel access of bad area", regs, sig);
}