// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mm/fault.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Modifications for ARM processor (c) 1995-2004 Russell King
 */
#include <linux/extable.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>

#include <asm/system_misc.h>
#include <asm/system_info.h>
#include <asm/tlbflush.h>

#include "fault.h"

#ifdef CONFIG_MMU

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	printk("%spgd = %p\n", lvl, mm->pgd);
	pgd = pgd_offset(mm, addr);
	printk("%s[%08lx] *pgd=%08llx", lvl, addr, (long long)pgd_val(*pgd));

	do {
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		p4d = p4d_offset(pgd, addr);
		if (p4d_none(*p4d))
			break;

		if (p4d_bad(*p4d)) {
			pr_cont("(bad)");
			break;
		}

		pud = pud_offset(p4d, addr);
		if (PTRS_PER_PUD != 1)
			pr_cont(", *pud=%08llx", (long long)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			pr_cont("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			pr_cont(", *pmd=%08llx", (long long)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			pr_cont("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_map(pmd, addr);
		pr_cont(", *pte=%08llx", (long long)pte_val(*pte));
#ifndef CONFIG_ARM_LPAE
		pr_cont(", *ppte=%08llx",
			(long long)pte_val(pte[PTE_HWTABLE_PTRS]));
#endif
		pte_unmap(pte);
	} while (0);

	pr_cont("\n");
}
#else /* CONFIG_MMU */
void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
{ }
#endif /* CONFIG_MMU */

/*
 * Oops. The kernel tried to access some page that wasn't present.
 */
static void
__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		  struct pt_regs *regs)
{
	/*
	 * Are we prepared to handle this kernel fault?
	 */
	if (fixup_exception(regs))
		return;

	/*
	 * No handler, we'll have to terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("8<--- cut here ---\n");
	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
		 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
		 "paging request", addr);

	show_pte(KERN_ALERT, mm, addr);
	die("Oops", regs, fsr);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}
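
/*
 * Editorial note on the fixup path above, for readers: fixup_exception()
 * consults the kernel's exception table, which is built from the
 * __ex_table entries that the uaccess primitives (get_user(),
 * copy_from_user(), ...) emit alongside their faulting instructions.
 * When the faulting PC has such an entry, the saved regs->ARM_pc is
 * redirected to the registered fixup stub and execution resumes there
 * instead of oopsing. The actual mechanics live outside this file, in
 * arch/arm/mm/extable.c and <linux/extable.h>.
 */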

/*
 * Something tried to access memory that isn't in our memory map..
 * User mode accesses just cause a SIGSEGV
 */
static void
__do_user_fault(unsigned long addr, unsigned int fsr, unsigned int sig,
		int code, struct pt_regs *regs)
{
	struct task_struct *tsk = current;

	if (addr > TASK_SIZE)
		harden_branch_predictor();

#ifdef CONFIG_DEBUG_USER
	if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
	    ((user_debug & UDBG_BUS) && (sig == SIGBUS))) {
		pr_err("8<--- cut here ---\n");
		pr_err("%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
		       tsk->comm, sig, addr, fsr);
		show_pte(KERN_ERR, tsk->mm, addr);
		show_regs(regs);
	}
#endif
#ifndef CONFIG_KUSER_HELPERS
	if ((sig == SIGSEGV) && ((addr & PAGE_MASK) == 0xffff0000))
		printk_ratelimited(KERN_DEBUG
				   "%s: CONFIG_KUSER_HELPERS disabled at 0x%08lx\n",
				   tsk->comm, addr);
#endif

	tsk->thread.address = addr;
	tsk->thread.error_code = fsr;
	tsk->thread.trap_no = 14;
	force_sig_fault(sig, code, (void __user *)addr);
}

void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->active_mm;

	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 */
	if (user_mode(regs))
		__do_user_fault(addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
	else
		__do_kernel_fault(mm, addr, fsr, regs);
}

#ifdef CONFIG_MMU
#define VM_FAULT_BADMAP		0x010000
#define VM_FAULT_BADACCESS	0x020000

/*
 * Check that the permissions on the VMA allow for the fault which occurred.
 * If we encountered a write fault, we must have write permission, otherwise
 * we allow any permission.
 */
static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
{
	unsigned int mask = VM_ACCESS_FLAGS;

	if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
		mask = VM_WRITE;
	if (fsr & FSR_LNX_PF)
		mask = VM_EXEC;

	return vma->vm_flags & mask ? false : true;
}

static vm_fault_t __kprobes
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		unsigned int flags, struct task_struct *tsk,
		struct pt_regs *regs)
{
	struct vm_area_struct *vma = find_vma(mm, addr);
	if (unlikely(!vma))
		return VM_FAULT_BADMAP;

	if (unlikely(vma->vm_start > addr)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			return VM_FAULT_BADMAP;
		if (addr < FIRST_USER_ADDRESS)
			return VM_FAULT_BADMAP;
		if (expand_stack(vma, addr))
			return VM_FAULT_BADMAP;
	}

	/*
	 * Ok, we have a good vm_area for this
	 * memory access, so we can handle it.
	 */
	if (access_error(fsr, vma))
		return VM_FAULT_BADACCESS;

	return handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
}
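
/*
 * Worked example (editorial note, derived from the two functions above):
 * a userspace store to a PROT_READ-only mapping raises a data abort with
 * FSR_WRITE set and FSR_CM clear. access_error() narrows the mask to
 * VM_WRITE, the VMA's vm_flags lack VM_WRITE, so __do_page_fault()
 * returns VM_FAULT_BADACCESS and the caller delivers SIGSEGV with
 * SEGV_ACCERR. A store to an address with no VMA at all instead fails
 * find_vma() and yields VM_FAULT_BADMAP / SEGV_MAPERR.
 */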

static int __kprobes
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	int sig, code;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	if (kprobe_page_fault(regs, fsr))
		return 0;

	tsk = current;
	mm = tsk->mm;

	/* Enable interrupts if they were enabled in the parent context. */
	if (interrupts_enabled(regs))
		local_irq_enable();

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
		flags |= FAULT_FLAG_WRITE;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

	/*
	 * As per x86, we may deadlock here. However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
	if (!mmap_read_trylock(mm)) {
		if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
			goto no_context;
retry:
		mmap_read_lock(mm);
	} else {
		/*
		 * The above mmap_read_trylock() might have succeeded in
		 * which case, we'll have missed the might_sleep() from
		 * mmap_read_lock().
		 */
		might_sleep();
#ifdef CONFIG_DEBUG_VM
		if (!user_mode(regs) &&
		    !search_exception_tables(regs->ARM_pc))
			goto no_context;
#endif
	}

	fault = __do_page_fault(mm, addr, fsr, flags, tsk, regs);

	/* If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because
	 * it would already be released in __lock_page_or_retry in
	 * mm/filemap.c. */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return 0;
	}

	if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	mmap_read_unlock(mm);

	/*
	 * Handle the "normal" case first - VM_FAULT_MAJOR
	 */
	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
		return 0;

	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed)
		 */
		pagefault_out_of_memory();
		return 0;
	}

	if (fault & VM_FAULT_SIGBUS) {
		/*
		 * We had some memory, but were unable to
		 * successfully fix up this page fault.
		 */
		sig = SIGBUS;
		code = BUS_ADRERR;
	} else {
		/*
		 * Something tried to access memory that
		 * isn't in our memory map..
		 */
		sig = SIGSEGV;
		code = fault == VM_FAULT_BADACCESS ?
			SEGV_ACCERR : SEGV_MAPERR;
	}

	__do_user_fault(addr, fsr, sig, code, regs);
	return 0;

no_context:
	__do_kernel_fault(mm, addr, fsr, regs);
	return 0;
}
#else /* CONFIG_MMU */
static int
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	return 0;
}
#endif /* CONFIG_MMU */
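
/*
 * Retry flow, summarised (editorial note): FAULT_FLAG_DEFAULT includes
 * FAULT_FLAG_ALLOW_RETRY, so the first handle_mm_fault() call above may
 * drop mmap_lock to wait for I/O and return VM_FAULT_RETRY. In that case
 * do_page_fault() jumps back to the retry label, retakes the lock and
 * tries once more with FAULT_FLAG_TRIED set; a fatal signal pending at
 * that point is handled first via fault_signal_pending().
 */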

/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain
 * a valid entry for the address.
 *
 * If the address is in kernel space (>= TASK_SIZE), then we are
 * probably faulting in the vmalloc() area.
 *
 * If init_task's first level page table contains the relevant entry, we
 * copy it to this task. If not, we send the process a signal, fix up the
 * exception, or oops the kernel.
 *
 * NOTE! We MUST NOT take any locks for this case. We may be in an
 * interrupt or a critical region, and should only copy the information
 * from the master page table, nothing more.
 */
#ifdef CONFIG_MMU
static int __kprobes
do_translation_fault(unsigned long addr, unsigned int fsr,
		     struct pt_regs *regs)
{
	unsigned int index;
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	if (addr < TASK_SIZE)
		return do_page_fault(addr, fsr, regs);

	if (user_mode(regs))
		goto bad_area;

	index = pgd_index(addr);

	pgd = cpu_get_pgd() + index;
	pgd_k = init_mm.pgd + index;

	p4d = p4d_offset(pgd, addr);
	p4d_k = p4d_offset(pgd_k, addr);

	if (p4d_none(*p4d_k))
		goto bad_area;
	if (!p4d_present(*p4d))
		set_p4d(p4d, *p4d_k);

	pud = pud_offset(p4d, addr);
	pud_k = pud_offset(p4d_k, addr);

	if (pud_none(*pud_k))
		goto bad_area;
	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);

#ifdef CONFIG_ARM_LPAE
	/*
	 * Only one hardware entry per PMD with LPAE.
	 */
	index = 0;
#else
	/*
	 * On ARM one Linux PGD entry contains two hardware entries (see page
	 * tables layout in pgtable.h). We normally guarantee that we always
	 * fill both L1 entries. But create_mapping() doesn't follow the rule.
	 * It can create individual L1 entries, so here we have to do the
	 * pmd_none() check on the entry that really corresponds to the
	 * address (with 1MiB sections, address bit SECTION_SHIFT == 20
	 * selects which of the pair), not on the first of the pair.
	 */
	index = (addr >> SECTION_SHIFT) & 1;
#endif
	if (pmd_none(pmd_k[index]))
		goto bad_area;

	copy_pmd(pmd, pmd_k);
	return 0;

bad_area:
	do_bad_area(addr, fsr, regs);
	return 0;
}
#else /* CONFIG_MMU */
static int
do_translation_fault(unsigned long addr, unsigned int fsr,
		     struct pt_regs *regs)
{
	return 0;
}
#endif /* CONFIG_MMU */

/*
 * Some section permission faults need to be handled gracefully.
 * They can happen due to a __{get,put}_user during an oops.
 */
#ifndef CONFIG_ARM_LPAE
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	do_bad_area(addr, fsr, regs);
	return 0;
}
#endif /* CONFIG_ARM_LPAE */

/*
 * This abort handler always returns "fault".
 */
static int
do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	return 1;
}

struct fsr_info {
	int (*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
	int sig;
	int code;
	const char *name;
};

/* FSR definition */
#ifdef CONFIG_ARM_LPAE
#include "fsr-3level.c"
#else
#include "fsr-2level.c"
#endif

void __init
hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
		int sig, int code, const char *name)
{
	if (nr < 0 || nr >= ARRAY_SIZE(fsr_info))
		BUG();

	fsr_info[nr].fn = fn;
	fsr_info[nr].sig = sig;
	fsr_info[nr].code = code;
	fsr_info[nr].name = name;
}
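
/*
 * Usage sketch (editorial note): callers replace the default handler for
 * a given fault status value at boot time, as exceptions_init() does at
 * the bottom of this file:
 *
 *	hook_fault_code(4, do_translation_fault, SIGSEGV, SEGV_MAPERR,
 *			"I-cache maintenance fault");
 */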

/*
 * Dispatch a data abort to the relevant handler.
 */
asmlinkage void
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	const struct fsr_info *inf = fsr_info + fsr_fs(fsr);

	if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
		return;

	pr_alert("8<--- cut here ---\n");
	pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
		 inf->name, fsr, addr);
	show_pte(KERN_ALERT, current->mm, addr);

	arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr,
		       fsr, 0);
}

void __init
hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
		 int sig, int code, const char *name)
{
	if (nr < 0 || nr >= ARRAY_SIZE(ifsr_info))
		BUG();

	ifsr_info[nr].fn = fn;
	ifsr_info[nr].sig = sig;
	ifsr_info[nr].code = code;
	ifsr_info[nr].name = name;
}

asmlinkage void
do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
{
	const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);

	if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
		return;

	pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
		 inf->name, ifsr, addr);

	arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr,
		       ifsr, 0);
}

/*
 * Abort handler to be used only during first unmasking of asynchronous aborts
 * on the boot CPU. This makes sure that the machine will not die if the
 * firmware/bootloader left an imprecise abort pending for us to trip over.
 */
static int __init early_abort_handler(unsigned long addr, unsigned int fsr,
				      struct pt_regs *regs)
{
	pr_warn("Hit pending asynchronous external abort (FSR=0x%08x) during "
		"first unmask, this is most likely caused by a "
		"firmware/bootloader bug.\n", fsr);

	return 0;
}

void __init early_abt_enable(void)
{
	fsr_info[FSR_FS_AEA].fn = early_abort_handler;
	local_abt_enable();
	fsr_info[FSR_FS_AEA].fn = do_bad;
}

#ifndef CONFIG_ARM_LPAE
static int __init exceptions_init(void)
{
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		hook_fault_code(4, do_translation_fault, SIGSEGV, SEGV_MAPERR,
				"I-cache maintenance fault");
	}

	if (cpu_architecture() >= CPU_ARCH_ARMv7) {
		/*
		 * TODO: Access flag faults were introduced in ARMv6K;
		 * a runtime check for the 'K' extension is needed.
		 */
		hook_fault_code(3, do_bad, SIGSEGV, SEGV_MAPERR,
				"section access flag fault");
		hook_fault_code(6, do_bad, SIGSEGV, SEGV_MAPERR,
				"section access flag fault");
	}

	return 0;
}

arch_initcall(exceptions_init);
#endif