// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/fault.c
 *
 * Copyright (C) 1995  Linus Torvalds
 * Copyright (C) 1995-2004 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/extable.h>
#include <linux/kfence.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>
#include <linux/preempt.h>
#include <linux/hugetlb.h>

#include <asm/acpi.h>
#include <asm/bug.h>
#include <asm/cmpxchg.h>
#include <asm/cpufeature.h>
#include <asm/efi.h>
#include <asm/exception.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kprobes.h>
#include <asm/mte.h>
#include <asm/processor.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

struct fault_info {
	int	(*fn)(unsigned long far, unsigned long esr,
		      struct pt_regs *regs);
	int	sig;
	int	code;
	const char *name;
};

static const struct fault_info fault_info[];
static struct fault_info debug_fault_info[];

static inline const struct fault_info *esr_to_fault_info(unsigned long esr)
{
	return fault_info + (esr & ESR_ELx_FSC);
}

static inline const struct fault_info *esr_to_debug_fault_info(unsigned long esr)
{
	return debug_fault_info + DBG_ESR_EVT(esr);
}

static void data_abort_decode(unsigned long esr)
{
	pr_alert("Data abort info:\n");

	if (esr & ESR_ELx_ISV) {
		pr_alert("  Access size = %u byte(s)\n",
			 1U << ((esr & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT));
		pr_alert("  SSE = %lu, SRT = %lu\n",
			 (esr & ESR_ELx_SSE) >> ESR_ELx_SSE_SHIFT,
			 (esr & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT);
		pr_alert("  SF = %lu, AR = %lu\n",
			 (esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT,
			 (esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT);
	} else {
		pr_alert("  ISV = 0, ISS = 0x%08lx\n", esr & ESR_ELx_ISS_MASK);
	}

	pr_alert("  CM = %lu, WnR = %lu\n",
		 (esr & ESR_ELx_CM) >> ESR_ELx_CM_SHIFT,
		 (esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT);
}

static void mem_abort_decode(unsigned long esr)
{
	pr_alert("Mem abort info:\n");

	pr_alert("  ESR = 0x%016lx\n", esr);
	pr_alert("  EC = 0x%02lx: %s, IL = %u bits\n",
		 ESR_ELx_EC(esr), esr_get_class_string(esr),
		 (esr & ESR_ELx_IL) ? 32 : 16);
	pr_alert("  SET = %lu, FnV = %lu\n",
		 (esr & ESR_ELx_SET_MASK) >> ESR_ELx_SET_SHIFT,
		 (esr & ESR_ELx_FnV) >> ESR_ELx_FnV_SHIFT);
	pr_alert("  EA = %lu, S1PTW = %lu\n",
		 (esr & ESR_ELx_EA) >> ESR_ELx_EA_SHIFT,
		 (esr & ESR_ELx_S1PTW) >> ESR_ELx_S1PTW_SHIFT);
	pr_alert("  FSC = 0x%02lx: %s\n", (esr & ESR_ELx_FSC),
		 esr_to_fault_info(esr)->name);

	if (esr_is_data_abort(esr))
		data_abort_decode(esr);
}

static inline unsigned long mm_to_pgd_phys(struct mm_struct *mm)
{
	/* Either init_pg_dir or swapper_pg_dir */
	if (mm == &init_mm)
		return __pa_symbol(mm->pgd);

	return (unsigned long)virt_to_phys(mm->pgd);
}
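/*
 * Illustrative example (hypothetical values, not emitted verbatim by this
 * file): a faulting kernel store to an unmapped address is commonly
 * reported with ESR = 0x96000045, which the helpers above decode as
 * EC = 0x25 (data abort, current EL), IL = 32 bits, WnR = 1 (write) and
 * FSC = 0x05 ("level 1 translation fault"). The exact FSC value depends
 * on the level at which the translation table walk failed.
 */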
/*
 * Dump out the page tables associated with 'addr' in the currently active mm.
 */
static void show_pte(unsigned long addr)
{
	struct mm_struct *mm;
	pgd_t *pgdp;
	pgd_t pgd;

	if (is_ttbr0_addr(addr)) {
		/* TTBR0 */
		mm = current->active_mm;
		if (mm == &init_mm) {
			pr_alert("[%016lx] user address but active_mm is swapper\n",
				 addr);
			return;
		}
	} else if (is_ttbr1_addr(addr)) {
		/* TTBR1 */
		mm = &init_mm;
	} else {
		pr_alert("[%016lx] address between user and kernel address ranges\n",
			 addr);
		return;
	}

	pr_alert("%s pgtable: %luk pages, %llu-bit VAs, pgdp=%016lx\n",
		 mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
		 vabits_actual, mm_to_pgd_phys(mm));
	pgdp = pgd_offset(mm, addr);
	pgd = READ_ONCE(*pgdp);
	pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));

	do {
		p4d_t *p4dp, p4d;
		pud_t *pudp, pud;
		pmd_t *pmdp, pmd;
		pte_t *ptep, pte;

		if (pgd_none(pgd) || pgd_bad(pgd))
			break;

		p4dp = p4d_offset(pgdp, addr);
		p4d = READ_ONCE(*p4dp);
		pr_cont(", p4d=%016llx", p4d_val(p4d));
		if (p4d_none(p4d) || p4d_bad(p4d))
			break;

		pudp = pud_offset(p4dp, addr);
		pud = READ_ONCE(*pudp);
		pr_cont(", pud=%016llx", pud_val(pud));
		if (pud_none(pud) || pud_bad(pud))
			break;

		pmdp = pmd_offset(pudp, addr);
		pmd = READ_ONCE(*pmdp);
		pr_cont(", pmd=%016llx", pmd_val(pmd));
		if (pmd_none(pmd) || pmd_bad(pmd))
			break;

		ptep = pte_offset_map(pmdp, addr);
		pte = READ_ONCE(*ptep);
		pr_cont(", pte=%016llx", pte_val(pte));
		pte_unmap(ptep);
	} while(0);

	pr_cont("\n");
}

/*
 * This function sets the access flags (dirty, accessed), as well as write
 * permission, and only to a more permissive setting.
 *
 * It needs to cope with hardware update of the accessed/dirty state by other
 * agents in the system and can safely skip the __sync_icache_dcache() call as,
 * like set_pte_at(), the PTE is never changed from no-exec to exec here.
 *
 * Returns whether or not the PTE actually changed.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	pteval_t old_pteval, pteval;
	pte_t pte = READ_ONCE(*ptep);

	if (pte_same(pte, entry))
		return 0;

	/* only preserve the access flags and write permission */
	pte_val(entry) &= PTE_RDONLY | PTE_AF | PTE_WRITE | PTE_DIRTY;

	/*
	 * Setting the flags must be done atomically to avoid racing with the
	 * hardware update of the access/dirty state. The PTE_RDONLY bit must
	 * be set to the most permissive (lowest value) of *ptep and entry
	 * (calculated as: a & b == ~(~a | ~b)).
	 */
	pte_val(entry) ^= PTE_RDONLY;
	pteval = pte_val(pte);
	do {
		old_pteval = pteval;
		pteval ^= PTE_RDONLY;
		pteval |= pte_val(entry);
		pteval ^= PTE_RDONLY;
		pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
	} while (pteval != old_pteval);

	/* Invalidate a stale read-only entry */
	if (dirty)
		flush_tlb_page(vma, address);
	return 1;
}
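/*
 * Worked example of the PTE_RDONLY merge above (illustrative): with a as
 * the RDONLY bit of *ptep and b as the RDONLY bit of entry, the XOR /
 * OR / XOR sequence computes ~(~a | ~b) == a & b on that one bit while
 * plain OR is applied to every other bit:
 *
 *	a=1 (RO), b=0 (RW): (1^1)=0, then 0|(0^1)=1, then (1^1)=0 -> clear
 *	a=1 (RO), b=1 (RO): (1^1)=0, then 0|(1^1)=0, then (0^1)=1 -> set
 *
 * i.e. the result is read-only only if both inputs were read-only,
 * which is the "most permissive wins" rule stated in the comment.
 */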
static bool is_el1_instruction_abort(unsigned long esr)
{
	return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
}

static bool is_el1_data_abort(unsigned long esr)
{
	return ESR_ELx_EC(esr) == ESR_ELx_EC_DABT_CUR;
}

static inline bool is_el1_permission_fault(unsigned long addr, unsigned long esr,
					   struct pt_regs *regs)
{
	unsigned long fsc_type = esr & ESR_ELx_FSC_TYPE;

	if (!is_el1_data_abort(esr) && !is_el1_instruction_abort(esr))
		return false;

	if (fsc_type == ESR_ELx_FSC_PERM)
		return true;

	if (is_ttbr0_addr(addr) && system_uses_ttbr0_pan())
		return fsc_type == ESR_ELx_FSC_FAULT &&
			(regs->pstate & PSR_PAN_BIT);

	return false;
}

static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
							unsigned long esr,
							struct pt_regs *regs)
{
	unsigned long flags;
	u64 par, dfsc;

	if (!is_el1_data_abort(esr) ||
	    (esr & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT)
		return false;

	local_irq_save(flags);
	asm volatile("at s1e1r, %0" :: "r" (addr));
	isb();
	par = read_sysreg_par();
	local_irq_restore(flags);

	/*
	 * If we now have a valid translation, treat the translation fault as
	 * spurious.
	 */
	if (!(par & SYS_PAR_EL1_F))
		return true;

	/*
	 * If we got a different type of fault from the AT instruction,
	 * treat the translation fault as spurious.
	 */
	dfsc = FIELD_GET(SYS_PAR_EL1_FST, par);
	return (dfsc & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT;
}

static void die_kernel_fault(const char *msg, unsigned long addr,
			     unsigned long esr, struct pt_regs *regs)
{
	bust_spinlocks(1);

	pr_alert("Unable to handle kernel %s at virtual address %016lx\n", msg,
		 addr);

	kasan_non_canonical_hook(addr);

	mem_abort_decode(esr);

	show_pte(addr);
	die("Oops", regs, esr);
	bust_spinlocks(0);
	make_task_dead(SIGKILL);
}

#ifdef CONFIG_KASAN_HW_TAGS
static void report_tag_fault(unsigned long addr, unsigned long esr,
			     struct pt_regs *regs)
{
	/*
	 * SAS bits aren't set for all faults reported in EL1, so we can't
	 * find out access size.
	 */
	bool is_write = !!(esr & ESR_ELx_WNR);
	kasan_report(addr, 0, is_write, regs->pc);
}
#else
/* Tag faults aren't enabled without CONFIG_KASAN_HW_TAGS. */
static inline void report_tag_fault(unsigned long addr, unsigned long esr,
				    struct pt_regs *regs) { }
#endif
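/*
 * Background note (architectural, not specific to this file): the
 * SCTLR_EL1.TCF field selects how EL1 tag check faults are reported:
 * NONE (tag checks have no effect), SYNC (synchronous abort), ASYNC
 * (asynchronously accumulated) and, with FEAT_MTE3, ASYMM. Writing
 * TCF = NONE below is what turns tag checking off for this CPU.
 */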
static void do_tag_recovery(unsigned long addr, unsigned long esr,
			   struct pt_regs *regs)
{

	report_tag_fault(addr, esr, regs);

	/*
	 * Disable MTE Tag Checking on the local CPU for the current EL.
	 * It will be done lazily on the other CPUs when they hit a tag
	 * fault.
	 */
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF_MASK,
			 SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF, NONE));
	isb();
}

static bool is_el1_mte_sync_tag_check_fault(unsigned long esr)
{
	unsigned long fsc = esr & ESR_ELx_FSC;

	if (!is_el1_data_abort(esr))
		return false;

	if (fsc == ESR_ELx_FSC_MTE)
		return true;

	return false;
}

static bool is_translation_fault(unsigned long esr)
{
	return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT;
}

static void __do_kernel_fault(unsigned long addr, unsigned long esr,
			      struct pt_regs *regs)
{
	const char *msg;

	/*
	 * Are we prepared to handle this kernel fault?
	 * We are almost certainly not prepared to handle instruction faults.
	 */
	if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
		return;

	if (WARN_RATELIMIT(is_spurious_el1_translation_fault(addr, esr, regs),
	    "Ignoring spurious kernel translation fault at virtual address %016lx\n", addr))
		return;

	if (is_el1_mte_sync_tag_check_fault(esr)) {
		do_tag_recovery(addr, esr, regs);

		return;
	}

	if (is_el1_permission_fault(addr, esr, regs)) {
		if (esr & ESR_ELx_WNR)
			msg = "write to read-only memory";
		else if (is_el1_instruction_abort(esr))
			msg = "execute from non-executable memory";
		else
			msg = "read from unreadable memory";
	} else if (addr < PAGE_SIZE) {
		msg = "NULL pointer dereference";
	} else {
		if (is_translation_fault(esr) &&
		    kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs))
			return;

		msg = "paging request";
	}

	if (efi_runtime_fixup_exception(regs, msg))
		return;

	die_kernel_fault(msg, addr, esr, regs);
}

static void set_thread_esr(unsigned long address, unsigned long esr)
{
	current->thread.fault_address = address;

	/*
	 * If the faulting address is in the kernel, we must sanitize the ESR.
	 * From userspace's point of view, kernel-only mappings don't exist
	 * at all, so we report them as level 0 translation faults.
	 * (This is not quite the way that "no mapping there at all" behaves:
	 * an alignment fault not caused by the memory type would take
	 * precedence over translation fault for a real access to empty
	 * space. Unfortunately we can't easily distinguish "alignment fault
	 * not caused by memory type" from "alignment fault caused by memory
	 * type", so we ignore this wrinkle and just return the translation
	 * fault.)
	 */
	if (!is_ttbr0_addr(current->thread.fault_address)) {
		switch (ESR_ELx_EC(esr)) {
		case ESR_ELx_EC_DABT_LOW:
			/*
			 * These bits provide only information about the
			 * faulting instruction, which userspace knows already.
			 * We explicitly clear bits which are architecturally
			 * RES0 in case they are given meanings in future.
			 * We always report the ESR as if the fault was taken
			 * to EL1 and so ISV and the bits in ISS[23:14] are
			 * clear. (In fact it always will be a fault to EL1.)
			 */
			esr &= ESR_ELx_EC_MASK | ESR_ELx_IL |
				ESR_ELx_CM | ESR_ELx_WNR;
			esr |= ESR_ELx_FSC_FAULT;
			break;
		case ESR_ELx_EC_IABT_LOW:
			/*
			 * Claim a level 0 translation fault.
			 * All other bits are architecturally RES0 for faults
			 * reported with that DFSC value, so we clear them.
			 */
			esr &= ESR_ELx_EC_MASK | ESR_ELx_IL;
			esr |= ESR_ELx_FSC_FAULT;
			break;
		default:
			/*
			 * This should never happen (entry.S only brings us
			 * into this code for insn and data aborts from a lower
			 * exception level). Fail safe by not providing an ESR
			 * context record at all.
			 */
			WARN(1, "ESR 0x%lx is not DABT or IABT from EL0\n", esr);
			esr = 0;
			break;
		}
	}

	current->thread.fault_code = esr;
}
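/*
 * Illustrative example of the sanitization above (hypothetical values):
 * an EL0 write faulting on a TTBR1 (kernel) address with
 * ESR = 0x92000047 (EC = DABT_LOW, IL, WnR, DFSC = 0x07, a level 3
 * translation fault) is recorded as 0x92000044: only EC/IL/CM/WnR are
 * kept and the DFSC is forced to 0x04, a level 0 translation fault.
 */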
static void do_bad_area(unsigned long far, unsigned long esr,
			struct pt_regs *regs)
{
	unsigned long addr = untagged_addr(far);

	/*
	 * If we are in kernel mode at this point, we have no context to
	 * handle this fault with.
	 */
	if (user_mode(regs)) {
		const struct fault_info *inf = esr_to_fault_info(esr);

		set_thread_esr(addr, esr);
		arm64_force_sig_fault(inf->sig, inf->code, far, inf->name);
	} else {
		__do_kernel_fault(addr, esr, regs);
	}
}

#define VM_FAULT_BADMAP		((__force vm_fault_t)0x010000)
#define VM_FAULT_BADACCESS	((__force vm_fault_t)0x020000)

static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
				  unsigned int mm_flags, unsigned long vm_flags,
				  struct pt_regs *regs)
{
	struct vm_area_struct *vma = find_vma(mm, addr);

	if (unlikely(!vma))
		return VM_FAULT_BADMAP;

	/*
	 * Ok, we have a good vm_area for this memory access, so we can handle
	 * it.
	 */
	if (unlikely(vma->vm_start > addr)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			return VM_FAULT_BADMAP;
		if (expand_stack(vma, addr))
			return VM_FAULT_BADMAP;
	}

	/*
	 * Check that the permissions on the VMA allow for the fault which
	 * occurred.
	 */
	if (!(vma->vm_flags & vm_flags))
		return VM_FAULT_BADACCESS;
	return handle_mm_fault(vma, addr, mm_flags, regs);
}

static bool is_el0_instruction_abort(unsigned long esr)
{
	return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
}

/*
 * Note: not valid for EL1 DC IVAC, but we never use that instruction
 * such that it should fault. EL0 cannot issue DC IVAC (undef).
 */
static bool is_write_abort(unsigned long esr)
{
	return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM);
}
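/*
 * Summary of the handler below (descriptive only): after filtering out
 * kprobe faults and contexts that may not fault, it derives the
 * required vm_flags from the abort type, tries the lockless per-VMA
 * lock fast path (CONFIG_PER_VMA_LOCK) and then falls back to the mmap
 * read lock, retrying on VM_FAULT_RETRY and converting any remaining
 * error into a signal or a kernel die().
 */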
static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
				   struct pt_regs *regs)
{
	const struct fault_info *inf;
	struct mm_struct *mm = current->mm;
	vm_fault_t fault;
	unsigned long vm_flags;
	unsigned int mm_flags = FAULT_FLAG_DEFAULT;
	unsigned long addr = untagged_addr(far);
#ifdef CONFIG_PER_VMA_LOCK
	struct vm_area_struct *vma;
#endif

	if (kprobe_page_fault(regs, esr))
		return 0;

	/*
	 * If we're in an interrupt or have no user context, we must not take
	 * the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	if (user_mode(regs))
		mm_flags |= FAULT_FLAG_USER;

	/*
	 * vm_flags tells us what bits we must have in vma->vm_flags for the
	 * fault to be benign; __do_page_fault() checks vma->vm_flags &
	 * vm_flags and returns an error if the intersection is empty.
	 */
	if (is_el0_instruction_abort(esr)) {
		/* It was exec fault */
		vm_flags = VM_EXEC;
		mm_flags |= FAULT_FLAG_INSTRUCTION;
	} else if (is_write_abort(esr)) {
		/* It was write fault */
		vm_flags = VM_WRITE;
		mm_flags |= FAULT_FLAG_WRITE;
	} else {
		/* It was read fault */
		vm_flags = VM_READ;
		/* Write implies read */
		vm_flags |= VM_WRITE;
		/* If EPAN is absent then exec implies read */
		if (!cpus_have_const_cap(ARM64_HAS_EPAN))
			vm_flags |= VM_EXEC;
	}

	if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) {
		if (is_el1_instruction_abort(esr))
			die_kernel_fault("execution of user memory",
					 addr, esr, regs);

		if (!search_exception_tables(regs->pc))
			die_kernel_fault("access to user memory outside uaccess routines",
					 addr, esr, regs);
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

#ifdef CONFIG_PER_VMA_LOCK
	if (!(mm_flags & FAULT_FLAG_USER))
		goto lock_mmap;

	vma = lock_vma_under_rcu(mm, addr);
	if (!vma)
		goto lock_mmap;

	if (!(vma->vm_flags & vm_flags)) {
		vma_end_read(vma);
		goto lock_mmap;
	}
	fault = handle_mm_fault(vma, addr, mm_flags | FAULT_FLAG_VMA_LOCK, regs);
	vma_end_read(vma);

	if (!(fault & VM_FAULT_RETRY)) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		goto done;
	}
	count_vm_vma_lock_event(VMA_LOCK_RETRY);

	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return 0;
	}
lock_mmap:
#endif /* CONFIG_PER_VMA_LOCK */
	/*
	 * As per x86, we may deadlock here. However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
	if (!mmap_read_trylock(mm)) {
		if (!user_mode(regs) && !search_exception_tables(regs->pc))
			goto no_context;
retry:
		mmap_read_lock(mm);
	} else {
		/*
		 * The above mmap_read_trylock() might have succeeded in which
		 * case, we'll have missed the might_sleep() from down_read().
		 */
		might_sleep();
#ifdef CONFIG_DEBUG_VM
		if (!user_mode(regs) && !search_exception_tables(regs->pc)) {
			mmap_read_unlock(mm);
			goto no_context;
		}
#endif
	}

	fault = __do_page_fault(mm, addr, mm_flags, vm_flags, regs);

	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return 0;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return 0;

	if (fault & VM_FAULT_RETRY) {
		mm_flags |= FAULT_FLAG_TRIED;
		goto retry;
	}
	mmap_read_unlock(mm);

#ifdef CONFIG_PER_VMA_LOCK
done:
#endif
	/*
	 * Handle the "normal" (no error) case first.
	 */
	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
			      VM_FAULT_BADACCESS))))
		return 0;

	/*
	 * If we are in kernel mode at this point, we have no context to
	 * handle this fault with.
	 */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we got
		 * oom-killed).
		 */
		pagefault_out_of_memory();
		return 0;
	}

	inf = esr_to_fault_info(esr);
	set_thread_esr(addr, esr);
	if (fault & VM_FAULT_SIGBUS) {
		/*
		 * We had some memory, but were unable to successfully fix up
		 * this page fault.
		 */
		arm64_force_sig_fault(SIGBUS, BUS_ADRERR, far, inf->name);
	} else if (fault & (VM_FAULT_HWPOISON_LARGE | VM_FAULT_HWPOISON)) {
		unsigned int lsb;

		lsb = PAGE_SHIFT;
		if (fault & VM_FAULT_HWPOISON_LARGE)
			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));

		arm64_force_sig_mceerr(BUS_MCEERR_AR, far, lsb, inf->name);
	} else {
		/*
		 * Something tried to access memory that isn't in our memory
		 * map.
		 */
		arm64_force_sig_fault(SIGSEGV,
				      fault == VM_FAULT_BADACCESS ? SEGV_ACCERR : SEGV_MAPERR,
				      far, inf->name);
	}

	return 0;

no_context:
	__do_kernel_fault(addr, esr, regs);
	return 0;
}
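/*
 * For reference (derived from the error handling above): VM_FAULT_OOM
 * invokes the OOM killer, VM_FAULT_SIGBUS becomes SIGBUS/BUS_ADRERR,
 * hwpoison results become SIGBUS/BUS_MCEERR_AR with the poisoned
 * granule size, VM_FAULT_BADACCESS becomes SIGSEGV/SEGV_ACCERR, and
 * everything else (including VM_FAULT_BADMAP) becomes SEGV_MAPERR.
 */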
static int __kprobes do_translation_fault(unsigned long far,
					  unsigned long esr,
					  struct pt_regs *regs)
{
	unsigned long addr = untagged_addr(far);

	if (is_ttbr0_addr(addr))
		return do_page_fault(far, esr, regs);

	do_bad_area(far, esr, regs);
	return 0;
}

static int do_alignment_fault(unsigned long far, unsigned long esr,
			      struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_COMPAT_ALIGNMENT_FIXUPS) &&
	    compat_user_mode(regs))
		return do_compat_alignment_fixup(far, regs);
	do_bad_area(far, esr, regs);
	return 0;
}

static int do_bad(unsigned long far, unsigned long esr, struct pt_regs *regs)
{
	return 1; /* "fault" */
}

static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs)
{
	const struct fault_info *inf;
	unsigned long siaddr;

	inf = esr_to_fault_info(esr);

	if (user_mode(regs) && apei_claim_sea(regs) == 0) {
		/*
		 * APEI claimed this as a firmware-first notification.
		 * Some processing deferred to task_work before ret_to_user().
		 */
		return 0;
	}

	if (esr & ESR_ELx_FnV) {
		siaddr = 0;
	} else {
		/*
		 * The architecture specifies that the tag bits of FAR_EL1 are
		 * UNKNOWN for synchronous external aborts. Mask them out now
		 * so that userspace doesn't see them.
		 */
		siaddr = untagged_addr(far);
	}
	arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr);

	return 0;
}

static int do_tag_check_fault(unsigned long far, unsigned long esr,
			      struct pt_regs *regs)
{
	/*
	 * The architecture specifies that bits 63:60 of FAR_EL1 are UNKNOWN
	 * for tag check faults. Set them to corresponding bits in the
	 * untagged address.
	 */
	far = (__untagged_addr(far) & ~MTE_TAG_MASK) | (far & MTE_TAG_MASK);
	do_bad_area(far, esr, regs);
	return 0;
}
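/*
 * How the rebuild above works (descriptive): __untagged_addr()
 * sign-extends bit 55 over bits 63:56, giving well-defined values for
 * the UNKNOWN bits 63:60, while the (far & MTE_TAG_MASK) term puts the
 * original MTE logical tag (bits 59:56) back, so the address delivered
 * with the signal still carries the tag that failed the check.
 */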
static const struct fault_info fault_info[] = {
	{ do_bad,		SIGKILL, SI_KERNEL,	"ttbr address size fault"	},
	{ do_bad,		SIGKILL, SI_KERNEL,	"level 1 address size fault"	},
	{ do_bad,		SIGKILL, SI_KERNEL,	"level 2 address size fault"	},
	{ do_bad,		SIGKILL, SI_KERNEL,	"level 3 address size fault"	},
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 0 translation fault"	},
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 1 translation fault"	},
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 2 translation fault"	},
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 3 translation fault"	},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 8"			},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 1 access flag fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 2 access flag fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 access flag fault"	},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 12"			},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 1 permission fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 2 permission fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 permission fault"	},
	{ do_sea,		SIGBUS,  BUS_OBJERR,	"synchronous external abort"	},
	{ do_tag_check_fault,	SIGSEGV, SEGV_MTESERR,	"synchronous tag check fault"	},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 18"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 19"			},
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 0 (translation table walk)"	},
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 1 (translation table walk)"	},
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 2 (translation table walk)"	},
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 3 (translation table walk)"	},
	{ do_sea,		SIGBUS,  BUS_OBJERR,	"synchronous parity or ECC error" },	// Reserved when RAS is implemented
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 25"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 26"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 27"			},
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 0 synchronous parity error (translation table walk)"	},	// Reserved when RAS is implemented
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 1 synchronous parity error (translation table walk)"	},	// Reserved when RAS is implemented
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 2 synchronous parity error (translation table walk)"	},	// Reserved when RAS is implemented
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 3 synchronous parity error (translation table walk)"	},	// Reserved when RAS is implemented
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 32"			},
	{ do_alignment_fault,	SIGBUS,  BUS_ADRALN,	"alignment fault"		},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 34"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 35"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 36"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 37"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 38"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 39"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 40"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 41"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 42"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 43"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 44"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 45"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 46"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 47"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"TLB conflict abort"		},
	{ do_bad,		SIGKILL, SI_KERNEL,	"Unsupported atomic hardware update fault"	},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 50"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 51"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"implementation fault (lockdown abort)" },
	{ do_bad,		SIGBUS,  BUS_OBJERR,	"implementation fault (unsupported exclusive)" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 54"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 55"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 56"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 57"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 58"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 59"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 60"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"section domain fault"		},
	{ do_bad,		SIGKILL, SI_KERNEL,	"page domain fault"		},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 63"			},
};
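/*
 * Note on indexing (descriptive): esr_to_fault_info() indexes the table
 * above directly with the 6-bit DFSC/IFSC field (esr & ESR_ELx_FSC),
 * which is why it carries exactly 64 entries and why every reserved
 * encoding needs an explicit do_bad placeholder.
 */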
"Unsupported atomic hardware update fault" }, 845 { do_bad, SIGKILL, SI_KERNEL, "unknown 50" }, 846 { do_bad, SIGKILL, SI_KERNEL, "unknown 51" }, 847 { do_bad, SIGKILL, SI_KERNEL, "implementation fault (lockdown abort)" }, 848 { do_bad, SIGBUS, BUS_OBJERR, "implementation fault (unsupported exclusive)" }, 849 { do_bad, SIGKILL, SI_KERNEL, "unknown 54" }, 850 { do_bad, SIGKILL, SI_KERNEL, "unknown 55" }, 851 { do_bad, SIGKILL, SI_KERNEL, "unknown 56" }, 852 { do_bad, SIGKILL, SI_KERNEL, "unknown 57" }, 853 { do_bad, SIGKILL, SI_KERNEL, "unknown 58" }, 854 { do_bad, SIGKILL, SI_KERNEL, "unknown 59" }, 855 { do_bad, SIGKILL, SI_KERNEL, "unknown 60" }, 856 { do_bad, SIGKILL, SI_KERNEL, "section domain fault" }, 857 { do_bad, SIGKILL, SI_KERNEL, "page domain fault" }, 858 { do_bad, SIGKILL, SI_KERNEL, "unknown 63" }, 859 }; 860 861 void do_mem_abort(unsigned long far, unsigned long esr, struct pt_regs *regs) 862 { 863 const struct fault_info *inf = esr_to_fault_info(esr); 864 unsigned long addr = untagged_addr(far); 865 866 if (!inf->fn(far, esr, regs)) 867 return; 868 869 if (!user_mode(regs)) 870 die_kernel_fault(inf->name, addr, esr, regs); 871 872 /* 873 * At this point we have an unrecognized fault type whose tag bits may 874 * have been defined as UNKNOWN. Therefore we only expose the untagged 875 * address to the signal handler. 876 */ 877 arm64_notify_die(inf->name, regs, inf->sig, inf->code, addr, esr); 878 } 879 NOKPROBE_SYMBOL(do_mem_abort); 880 881 void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs) 882 { 883 arm64_notify_die("SP/PC alignment exception", regs, SIGBUS, BUS_ADRALN, 884 addr, esr); 885 } 886 NOKPROBE_SYMBOL(do_sp_pc_abort); 887 888 int __init early_brk64(unsigned long addr, unsigned long esr, 889 struct pt_regs *regs); 890 891 /* 892 * __refdata because early_brk64 is __init, but the reference to it is 893 * clobbered at arch_initcall time. 894 * See traps.c and debug-monitors.c:debug_traps_init(). 895 */ 896 static struct fault_info __refdata debug_fault_info[] = { 897 { do_bad, SIGTRAP, TRAP_HWBKPT, "hardware breakpoint" }, 898 { do_bad, SIGTRAP, TRAP_HWBKPT, "hardware single-step" }, 899 { do_bad, SIGTRAP, TRAP_HWBKPT, "hardware watchpoint" }, 900 { do_bad, SIGKILL, SI_KERNEL, "unknown 3" }, 901 { do_bad, SIGTRAP, TRAP_BRKPT, "aarch32 BKPT" }, 902 { do_bad, SIGKILL, SI_KERNEL, "aarch32 vector catch" }, 903 { early_brk64, SIGTRAP, TRAP_BRKPT, "aarch64 BRK" }, 904 { do_bad, SIGKILL, SI_KERNEL, "unknown 7" }, 905 }; 906 907 void __init hook_debug_fault_code(int nr, 908 int (*fn)(unsigned long, unsigned long, struct pt_regs *), 909 int sig, int code, const char *name) 910 { 911 BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info)); 912 913 debug_fault_info[nr].fn = fn; 914 debug_fault_info[nr].sig = sig; 915 debug_fault_info[nr].code = code; 916 debug_fault_info[nr].name = name; 917 } 918 919 /* 920 * In debug exception context, we explicitly disable preemption despite 921 * having interrupts disabled. 922 * This serves two purposes: it makes it much less likely that we would 923 * accidentally schedule in exception context and it will force a warning 924 * if we somehow manage to schedule by accident. 925 */ 926 static void debug_exception_enter(struct pt_regs *regs) 927 { 928 preempt_disable(); 929 930 /* This code is a bit fragile. Test it. 
/*
 * In debug exception context, we explicitly disable preemption despite
 * having interrupts disabled.
 * This serves two purposes: it makes it much less likely that we would
 * accidentally schedule in exception context and it will force a warning
 * if we somehow manage to schedule by accident.
 */
static void debug_exception_enter(struct pt_regs *regs)
{
	preempt_disable();

	/* This code is a bit fragile.  Test it. */
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
}
NOKPROBE_SYMBOL(debug_exception_enter);

static void debug_exception_exit(struct pt_regs *regs)
{
	preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(debug_exception_exit);

void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr,
			struct pt_regs *regs)
{
	const struct fault_info *inf = esr_to_debug_fault_info(esr);
	unsigned long pc = instruction_pointer(regs);

	debug_exception_enter(regs);

	if (user_mode(regs) && !is_ttbr0_addr(pc))
		arm64_apply_bp_hardening();

	if (inf->fn(addr_if_watchpoint, esr, regs)) {
		arm64_notify_die(inf->name, regs, inf->sig, inf->code, pc, esr);
	}

	debug_exception_exit(regs);
}
NOKPROBE_SYMBOL(do_debug_exception);

/*
 * Used during anonymous page fault handling.
 */
struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
					     unsigned long vaddr)
{
	gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO;

	/*
	 * If the page is mapped with PROT_MTE, initialise the tags at the
	 * point of allocation and page zeroing as this is usually faster than
	 * separate DC ZVA and STGM.
	 */
	if (vma->vm_flags & VM_MTE)
		flags |= __GFP_ZEROTAGS;

	return vma_alloc_folio(flags, 0, vma, vaddr, false);
}

void tag_clear_highpage(struct page *page)
{
	/* Newly allocated page, shouldn't have been tagged yet */
	WARN_ON_ONCE(!try_page_mte_tagging(page));
	mte_zero_clear_page_tags(page_address(page));
	set_page_mte_tagged(page);
}