// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/traps.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bug.h>
#include <linux/context_tracking.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/sizes.h>
#include <linux/syscalls.h>
#include <linux/mm_types.h>
#include <linux/kasan.h>

#include <asm/atomic.h>
#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/insn.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
#include <asm/exception.h>
#include <asm/system_misc.h>
#include <asm/sysreg.h>

static const char *handler[] = {
	"Synchronous Abort",
	"IRQ",
	"FIQ",
	"Error"
};

int show_unhandled_signals = 0;

static void dump_backtrace_entry(unsigned long where)
{
	printk(" %pS\n", (void *)where);
}

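/*
 * Dump the instruction stream around a kernel fault: the four instructions
 * preceding the faulting PC plus the faulting instruction itself, printed on
 * the "Code:" line of the oops.
 */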
static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
	int i;

	if (user_mode(regs))
		return;

	for (i = -4; i < 1; i++) {
		unsigned int val, bad;

		bad = aarch64_insn_read(&((u32 *)addr)[i], &val);

		if (!bad)
			p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
		else {
			p += sprintf(p, "bad PC value");
			break;
		}
	}

	printk("%sCode: %s\n", lvl, str);
}

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
	struct stackframe frame;
	int skip = 0;

	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (regs) {
		if (user_mode(regs))
			return;
		skip = 1;
	}

	if (!tsk)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	if (tsk == current) {
		start_backtrace(&frame,
				(unsigned long)__builtin_frame_address(0),
				(unsigned long)dump_backtrace);
	} else {
		/*
		 * task blocked in __switch_to
		 */
		start_backtrace(&frame,
				thread_saved_fp(tsk),
				thread_saved_pc(tsk));
	}

	printk("Call trace:\n");
	do {
		/* skip until specified stack frame */
		if (!skip) {
			dump_backtrace_entry(frame.pc);
		} else if (frame.fp == regs->regs[29]) {
			skip = 0;
			/*
			 * Mostly, this is the case where this function is
			 * called in panic/abort. As exception handler's
			 * stack frame does not contain the corresponding pc
			 * at which an exception has taken place, use regs->pc
			 * instead.
			 */
			dump_backtrace_entry(regs->pc);
		}
	} while (!unwind_frame(tsk, &frame));

	put_task_stack(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	dump_backtrace(NULL, tsk);
	barrier();
}

#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#else
#define S_PREEMPT ""
#endif
#define S_SMP " SMP"

static int __die(const char *str, int err, struct pt_regs *regs)
{
	static int die_counter;
	int ret;

	pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
		 str, err, ++die_counter);

	/* trap and error numbers are mostly meaningless on ARM */
	ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
	if (ret == NOTIFY_STOP)
		return ret;

	print_modules();
	show_regs(regs);

	dump_kernel_instr(KERN_EMERG, regs);

	return ret;
}

static DEFINE_RAW_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	int ret;
	unsigned long flags;

	raw_spin_lock_irqsave(&die_lock, flags);

	oops_enter();

	console_verbose();
	bust_spinlocks(1);
	ret = __die(str, err, regs);

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");

	raw_spin_unlock_irqrestore(&die_lock, flags);

	if (ret != NOTIFY_STOP)
		do_exit(SIGSEGV);
}

static void arm64_show_signal(int signo, const char *str)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct task_struct *tsk = current;
	unsigned int esr = tsk->thread.fault_code;
	struct pt_regs *regs = task_pt_regs(tsk);

	/* Leave if the signal won't be shown */
	if (!show_unhandled_signals ||
	    !unhandled_signal(tsk, signo) ||
	    !__ratelimit(&rs))
		return;

	pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
	if (esr)
		pr_cont("%s, ESR 0x%08x, ", esr_get_class_string(esr), esr);

	pr_cont("%s", str);
	print_vma_addr(KERN_CONT " in ", regs->pc);
	pr_cont("\n");
	__show_regs(regs);
}

void arm64_force_sig_fault(int signo, int code, void __user *addr,
			   const char *str)
{
	arm64_show_signal(signo, str);
	if (signo == SIGKILL)
		force_sig(SIGKILL);
	else
		force_sig_fault(signo, code, addr);
}

void arm64_force_sig_mceerr(int code, void __user *addr, short lsb,
			    const char *str)
{
	arm64_show_signal(SIGBUS, str);
	force_sig_mceerr(code, addr, lsb);
}

void arm64_force_sig_ptrace_errno_trap(int errno, void __user *addr,
				       const char *str)
{
	arm64_show_signal(SIGTRAP, str);
	force_sig_ptrace_errno_trap(errno, addr);
}

void arm64_notify_die(const char *str, struct pt_regs *regs,
		      int signo, int sicode, void __user *addr,
		      int err)
{
	if (user_mode(regs)) {
		WARN_ON(regs != current_pt_regs());
		current->thread.fault_address = 0;
		current->thread.fault_code = err;

		arm64_force_sig_fault(signo, sicode, addr, str);
	} else {
		die(str, regs, err);
	}
}

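/*
 * Advance the PC past an instruction that was emulated or otherwise handled
 * by the kernel; for user tasks, also arrange for any pending single-step
 * exception to be delivered on the next instruction.
 */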
void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
{
	regs->pc += size;

	/*
	 * If we were single stepping, we want to get the step exception after
	 * we return from the trap.
	 */
	if (user_mode(regs))
		user_fastforward_single_step(current);
}

static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);

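/*
 * Instruction emulation code registers an undef hook describing the
 * instruction encoding (mask/value) and required PSTATE (mask/value) it
 * handles; the hook's fn() is expected to return 0 once it has dealt with
 * the instruction, and non-zero to fall through to the usual SIGILL path.
 */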
void register_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_add(&hook->node, &undef_hook);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}

void unregister_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_del(&hook->node);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}

static int call_undef_hook(struct pt_regs *regs)
{
	struct undef_hook *hook;
	unsigned long flags;
	u32 instr;
	int (*fn)(struct pt_regs *regs, u32 instr) = NULL;
	void __user *pc = (void __user *)instruction_pointer(regs);

	if (!user_mode(regs)) {
		__le32 instr_le;
		if (probe_kernel_address((__force __le32 *)pc, instr_le))
			goto exit;
		instr = le32_to_cpu(instr_le);
	} else if (compat_thumb_mode(regs)) {
		/* 16-bit Thumb instruction */
		__le16 instr_le;
		if (get_user(instr_le, (__le16 __user *)pc))
			goto exit;
		instr = le16_to_cpu(instr_le);
		if (aarch32_insn_is_wide(instr)) {
			u32 instr2;

			if (get_user(instr_le, (__le16 __user *)(pc + 2)))
				goto exit;
			instr2 = le16_to_cpu(instr_le);
			instr = (instr << 16) | instr2;
		}
	} else {
		/* 32-bit ARM instruction */
		__le32 instr_le;
		if (get_user(instr_le, (__le32 __user *)pc))
			goto exit;
		instr = le32_to_cpu(instr_le);
	}

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_for_each_entry(hook, &undef_hook, node)
		if ((instr & hook->instr_mask) == hook->instr_val &&
		    (regs->pstate & hook->pstate_mask) == hook->pstate_val)
			fn = hook->fn;

	raw_spin_unlock_irqrestore(&undef_lock, flags);
exit:
	return fn ? fn(regs, instr) : 1;
}

void force_signal_inject(int signal, int code, unsigned long address)
{
	const char *desc;
	struct pt_regs *regs = current_pt_regs();

	if (WARN_ON(!user_mode(regs)))
		return;

	switch (signal) {
	case SIGILL:
		desc = "undefined instruction";
		break;
	case SIGSEGV:
		desc = "illegal memory access";
		break;
	default:
		desc = "unknown or unrecoverable error";
		break;
	}

	/* Force signals we don't understand to SIGKILL */
	if (WARN_ON(signal != SIGKILL &&
		    siginfo_layout(signal, code) != SIL_FAULT)) {
		signal = SIGKILL;
	}

	arm64_notify_die(desc, regs, signal, code, (void __user *)address, 0);
}

/*
 * Set up process info to signal segmentation fault - called on access error.
 */
void arm64_notify_segfault(unsigned long addr)
{
	int code;

	down_read(&current->mm->mmap_sem);
	if (find_vma(current->mm, addr) == NULL)
		code = SEGV_MAPERR;
	else
		code = SEGV_ACCERR;
	up_read(&current->mm->mmap_sem);

	force_signal_inject(SIGSEGV, code, addr);
}

asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
{
	/* check for AArch32 breakpoint instructions */
	if (!aarch32_break_handler(regs))
		return;

	if (call_undef_hook(regs) == 0)
		return;

	BUG_ON(!user_mode(regs));
	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
}

#define __user_cache_maint(insn, address, res)			\
	if (address >= user_addr_max()) {			\
		res = -EFAULT;					\
	} else {						\
		uaccess_ttbr0_enable();				\
		asm volatile (					\
			"1:	" insn ", %1\n"			\
			"	mov	%w0, #0\n"		\
			"2:\n"					\
			"	.pushsection .fixup,\"ax\"\n"	\
			"	.align	2\n"			\
			"3:	mov	%w0, %w2\n"		\
			"	b	2b\n"			\
			"	.popsection\n"			\
			_ASM_EXTABLE(1b, 3b)			\
			: "=r" (res)				\
			: "r" (address), "i" (-EFAULT));	\
		uaccess_ttbr0_disable();			\
	}

static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
{
	unsigned long address;
	int rt = ESR_ELx_SYS64_ISS_RT(esr);
	int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
	int ret = 0;

	address = untagged_addr(pt_regs_read_reg(regs, rt));

	switch (crm) {
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAU:	/* DC CVAU, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAC:	/* DC CVAC, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVADP:	/* DC CVADP */
		__user_cache_maint("sys 3, c7, c13, 1", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAP:	/* DC CVAP */
		__user_cache_maint("sys 3, c7, c12, 1", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC:	/* DC CIVAC */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_IC_IVAU:	/* IC IVAU */
		__user_cache_maint("ic ivau", address, ret);
		break;
	default:
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
		return;
	}

	if (ret)
		arm64_notify_segfault(address);
	else
		arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);
	unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);

	pt_regs_write_reg(regs, rt, val);

	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);

	pt_regs_write_reg(regs, rt, arch_timer_read_counter());
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);

	pt_regs_write_reg(regs, rt, arch_timer_get_rate());
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void mrs_handler(unsigned int esr, struct pt_regs *regs)
{
	u32 sysreg, rt;

	rt = ESR_ELx_SYS64_ISS_RT(esr);
	sysreg = esr_sys64_to_sysreg(esr);

	if (do_emulate_mrs(regs, sysreg, rt) != 0)
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
}

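/*
 * WFI trapped from EL0 is treated as a NOP: simply step over the instruction
 * rather than letting userspace wait for an interrupt.
 */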
static void wfi_handler(unsigned int esr, struct pt_regs *regs)
{
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

struct sys64_hook {
	unsigned int esr_mask;
	unsigned int esr_val;
	void (*handler)(unsigned int esr, struct pt_regs *regs);
};

static const struct sys64_hook sys64_hooks[] = {
	{
		.esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
		.handler = user_cache_maint_handler,
	},
	{
		/* Trap read access to CTR_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ,
		.handler = ctr_read_handler,
	},
	{
		/* Trap read access to CNTVCT_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT,
		.handler = cntvct_read_handler,
	},
	{
		/* Trap read access to CNTFRQ_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
		.handler = cntfrq_read_handler,
	},
	{
		/* Trap read access to CPUID registers */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL,
		.handler = mrs_handler,
	},
	{
		/* Trap WFI instructions executed in userspace */
		.esr_mask = ESR_ELx_WFx_MASK,
		.esr_val = ESR_ELx_WFx_WFI_VAL,
		.handler = wfi_handler,
	},
	{},
};


#ifdef CONFIG_COMPAT
#define PSTATE_IT_1_0_SHIFT	25
#define PSTATE_IT_1_0_MASK	(0x3 << PSTATE_IT_1_0_SHIFT)
#define PSTATE_IT_7_2_SHIFT	10
#define PSTATE_IT_7_2_MASK	(0x3f << PSTATE_IT_7_2_SHIFT)

static u32 compat_get_it_state(struct pt_regs *regs)
{
	u32 it, pstate = regs->pstate;

	it = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT;
	it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2;

	return it;
}

static void compat_set_it_state(struct pt_regs *regs, u32 it)
{
	u32 pstate_it;

	pstate_it = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK;
	pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK;

	regs->pstate &= ~PSR_AA32_IT_MASK;
	regs->pstate |= pstate_it;
}

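/*
 * Evaluate whether a trapped AArch32 conditional instruction would have
 * passed its condition code check, using ESR_ELx.COND when it is valid and
 * falling back to the saved IT state for T32 encodings.
 */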
static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs)
{
	int cond;

	/* Only a T32 instruction can trap without CV being set */
	if (!(esr & ESR_ELx_CV)) {
		u32 it;

		it = compat_get_it_state(regs);
		if (!it)
			return true;

		cond = it >> 4;
	} else {
		cond = (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
	}

	return aarch32_opcode_cond_checks[cond](regs->pstate);
}

static void advance_itstate(struct pt_regs *regs)
{
	u32 it;

	/* ARM mode */
	if (!(regs->pstate & PSR_AA32_T_BIT) ||
	    !(regs->pstate & PSR_AA32_IT_MASK))
		return;

	it = compat_get_it_state(regs);

	/*
	 * If this is the last instruction of the block, wipe the IT
	 * state. Otherwise advance it.
	 */
	if (!(it & 7))
		it = 0;
	else
		it = (it & 0xe0) | ((it << 1) & 0x1f);

	compat_set_it_state(regs, it);
}

static void arm64_compat_skip_faulting_instruction(struct pt_regs *regs,
						   unsigned int sz)
{
	advance_itstate(regs);
	arm64_skip_faulting_instruction(regs, sz);
}

static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT;

	pt_regs_write_reg(regs, reg, arch_timer_get_rate());
	arm64_compat_skip_faulting_instruction(regs, 4);
}

static const struct sys64_hook cp15_32_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
		.handler = compat_cntfrq_read_handler,
	},
	{},
};

static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
	int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
	u64 val = arch_timer_read_counter();

	pt_regs_write_reg(regs, rt, lower_32_bits(val));
	pt_regs_write_reg(regs, rt2, upper_32_bits(val));
	arm64_compat_skip_faulting_instruction(regs, 4);
}

static const struct sys64_hook cp15_64_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
		.handler = compat_cntvct_read_handler,
	},
	{},
};

asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs)
{
	const struct sys64_hook *hook, *hook_base;

	if (!cp15_cond_valid(esr, regs)) {
		/*
		 * There is no T16 variant of a CP access, so we
		 * always advance PC by 4 bytes.
		 */
		arm64_compat_skip_faulting_instruction(regs, 4);
		return;
	}

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_CP15_32:
		hook_base = cp15_32_hooks;
		break;
	case ESR_ELx_EC_CP15_64:
		hook_base = cp15_64_hooks;
		break;
	default:
		do_undefinstr(regs);
		return;
	}

	for (hook = hook_base; hook->handler; hook++)
		if ((hook->esr_mask & esr) == hook->esr_val) {
			hook->handler(esr, regs);
			return;
		}

	/*
	 * New cp15 instructions may previously have been undefined at
	 * EL0. Fall back to our usual undefined instruction handler
	 * so that we handle these consistently.
	 */
	do_undefinstr(regs);
}
#endif

asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
{
	const struct sys64_hook *hook;

	for (hook = sys64_hooks; hook->handler; hook++)
		if ((hook->esr_mask & esr) == hook->esr_val) {
			hook->handler(esr, regs);
			return;
		}

	/*
	 * New SYS instructions may previously have been undefined at EL0. Fall
	 * back to our usual undefined instruction handler so that we handle
	 * these consistently.
	 */
	do_undefinstr(regs);
}

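/* Human-readable names for the ESR_ELx exception classes, indexed by EC. */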
static const char *esr_class_str[] = {
	[0 ... ESR_ELx_EC_MAX]		= "UNRECOGNIZED EC",
	[ESR_ELx_EC_UNKNOWN]		= "Unknown/Uncategorized",
	[ESR_ELx_EC_WFx]		= "WFI/WFE",
	[ESR_ELx_EC_CP15_32]		= "CP15 MCR/MRC",
	[ESR_ELx_EC_CP15_64]		= "CP15 MCRR/MRRC",
	[ESR_ELx_EC_CP14_MR]		= "CP14 MCR/MRC",
	[ESR_ELx_EC_CP14_LS]		= "CP14 LDC/STC",
	[ESR_ELx_EC_FP_ASIMD]		= "ASIMD",
	[ESR_ELx_EC_CP10_ID]		= "CP10 MRC/VMRS",
	[ESR_ELx_EC_PAC]		= "PAC",
	[ESR_ELx_EC_CP14_64]		= "CP14 MCRR/MRRC",
	[ESR_ELx_EC_ILL]		= "PSTATE.IL",
	[ESR_ELx_EC_SVC32]		= "SVC (AArch32)",
	[ESR_ELx_EC_HVC32]		= "HVC (AArch32)",
	[ESR_ELx_EC_SMC32]		= "SMC (AArch32)",
	[ESR_ELx_EC_SVC64]		= "SVC (AArch64)",
	[ESR_ELx_EC_HVC64]		= "HVC (AArch64)",
	[ESR_ELx_EC_SMC64]		= "SMC (AArch64)",
	[ESR_ELx_EC_SYS64]		= "MSR/MRS (AArch64)",
	[ESR_ELx_EC_SVE]		= "SVE",
	[ESR_ELx_EC_ERET]		= "ERET/ERETAA/ERETAB",
	[ESR_ELx_EC_IMP_DEF]		= "EL3 IMP DEF",
	[ESR_ELx_EC_IABT_LOW]		= "IABT (lower EL)",
	[ESR_ELx_EC_IABT_CUR]		= "IABT (current EL)",
	[ESR_ELx_EC_PC_ALIGN]		= "PC Alignment",
	[ESR_ELx_EC_DABT_LOW]		= "DABT (lower EL)",
	[ESR_ELx_EC_DABT_CUR]		= "DABT (current EL)",
	[ESR_ELx_EC_SP_ALIGN]		= "SP Alignment",
	[ESR_ELx_EC_FP_EXC32]		= "FP (AArch32)",
	[ESR_ELx_EC_FP_EXC64]		= "FP (AArch64)",
	[ESR_ELx_EC_SERROR]		= "SError",
	[ESR_ELx_EC_BREAKPT_LOW]	= "Breakpoint (lower EL)",
	[ESR_ELx_EC_BREAKPT_CUR]	= "Breakpoint (current EL)",
	[ESR_ELx_EC_SOFTSTP_LOW]	= "Software Step (lower EL)",
	[ESR_ELx_EC_SOFTSTP_CUR]	= "Software Step (current EL)",
	[ESR_ELx_EC_WATCHPT_LOW]	= "Watchpoint (lower EL)",
	[ESR_ELx_EC_WATCHPT_CUR]	= "Watchpoint (current EL)",
	[ESR_ELx_EC_BKPT32]		= "BKPT (AArch32)",
	[ESR_ELx_EC_VECTOR32]		= "Vector catch (AArch32)",
	[ESR_ELx_EC_BRK64]		= "BRK (AArch64)",
};

const char *esr_get_class_string(u32 esr)
{
	return esr_class_str[ESR_ELx_EC(esr)];
}

/*
 * bad_mode handles the impossible case in the exception vector. This is always
 * fatal.
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
	console_verbose();

	pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
		handler[reason], smp_processor_id(), esr,
		esr_get_class_string(esr));

	local_daif_mask();
	panic("bad mode");
}

/*
 * bad_el0_sync handles unexpected, but potentially recoverable synchronous
 * exceptions taken from EL0. Unlike bad_mode, this returns.
 */
asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{
	void __user *pc = (void __user *)instruction_pointer(regs);

	current->thread.fault_address = 0;
	current->thread.fault_code = esr;

	arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc,
			      "Bad EL0 synchronous exception");
}

#ifdef CONFIG_VMAP_STACK

DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
	__aligned(16);

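/*
 * Called from the entry code, on the per-CPU overflow stack, when an
 * exception is taken with a stack pointer that is not on any known stack:
 * report the kernel stack overflow and panic.
 */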
asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
	unsigned long tsk_stk = (unsigned long)current->stack;
	unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
	unsigned int esr = read_sysreg(esr_el1);
	unsigned long far = read_sysreg(far_el1);

	console_verbose();
	pr_emerg("Insufficient stack space to handle exception!");

	pr_emerg("ESR: 0x%08x -- %s\n", esr, esr_get_class_string(esr));
	pr_emerg("FAR: 0x%016lx\n", far);

	pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
		 tsk_stk, tsk_stk + THREAD_SIZE);
	pr_emerg("IRQ stack:      [0x%016lx..0x%016lx]\n",
		 irq_stk, irq_stk + THREAD_SIZE);
	pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
		 ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

	__show_regs(regs);

	/*
	 * We use nmi_panic to limit the potential for recursive overflows, and
	 * to get a better stack trace.
	 */
	nmi_panic(NULL, "kernel stack overflow");
	cpu_park_loop();
}
#endif

void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr)
{
	console_verbose();

	pr_crit("SError Interrupt on CPU%d, code 0x%08x -- %s\n",
		smp_processor_id(), esr, esr_get_class_string(esr));
	if (regs)
		__show_regs(regs);

	nmi_panic(regs, "Asynchronous SError Interrupt");

	cpu_park_loop();
	unreachable();
}

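/*
 * Classify a RAS SError by its severity (AET field): corrected and
 * restartable errors can be ignored here, uncorrected ones are fatal, and
 * uncontainable/uncategorized errors panic immediately.
 */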
bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
{
	u32 aet = arm64_ras_serror_get_severity(esr);

	switch (aet) {
	case ESR_ELx_AET_CE:	/* corrected error */
	case ESR_ELx_AET_UEO:	/* restartable, not yet consumed */
		/*
		 * The CPU can make progress. We may take UEO again as
		 * a more severe error.
		 */
		return false;

	case ESR_ELx_AET_UEU:	/* Uncorrected Unrecoverable */
	case ESR_ELx_AET_UER:	/* Uncorrected Recoverable */
		/*
		 * The CPU can't make progress. The exception may have
		 * been imprecise.
		 *
		 * Neoverse-N1 #1349291 means a non-KVM SError reported as
		 * Unrecoverable should be treated as Uncontainable. We
		 * call arm64_serror_panic() in both cases.
		 */
		return true;

	case ESR_ELx_AET_UC:	/* Uncontainable or Uncategorized error */
	default:
		/* Error has been silently propagated */
		arm64_serror_panic(regs, esr);
	}
}

asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
{
	const bool was_in_nmi = in_nmi();

	if (!was_in_nmi)
		nmi_enter();

	/* non-RAS errors are not containable */
	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
		arm64_serror_panic(regs, esr);

	if (!was_in_nmi)
		nmi_exit();
}

asmlinkage void enter_from_user_mode(void)
{
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
}
NOKPROBE_SYMBOL(enter_from_user_mode);

void __pte_error(const char *file, int line, unsigned long val)
{
	pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
}

void __pmd_error(const char *file, int line, unsigned long val)
{
	pr_err("%s:%d: bad pmd %016lx.\n", file, line, val);
}

void __pud_error(const char *file, int line, unsigned long val)
{
	pr_err("%s:%d: bad pud %016lx.\n", file, line, val);
}

void __pgd_error(const char *file, int line, unsigned long val)
{
	pr_err("%s:%d: bad pgd %016lx.\n", file, line, val);
}

/* GENERIC_BUG traps */

int is_valid_bugaddr(unsigned long addr)
{
	/*
	 * bug_handler() only called for BRK #BUG_BRK_IMM.
	 * So the answer is trivial -- any spurious instances with no
	 * bug table entry will be rejected by report_bug() and passed
	 * back to the debug-monitors code and handled as a fatal
	 * unexpected debug exception.
	 */
	return 1;
}

static int bug_handler(struct pt_regs *regs, unsigned int esr)
{
	switch (report_bug(regs->pc, regs)) {
	case BUG_TRAP_TYPE_BUG:
		die("Oops - BUG", regs, 0);
		break;

	case BUG_TRAP_TYPE_WARN:
		break;

	default:
		/* unknown/unrecognised bug trap type */
		return DBG_HOOK_ERROR;
	}

	/* If thread survives, skip over the BUG instruction and continue: */
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

static struct break_hook bug_break_hook = {
	.fn = bug_handler,
	.imm = BUG_BRK_IMM,
};

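/*
 * KASAN software tag-based mode reports inline checks via BRK: the breakpoint
 * comment encodes the access size and direction and whether the compiler was
 * asked to recover after reporting; the faulting address is passed in x0.
 */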
#ifdef CONFIG_KASAN_SW_TAGS

#define KASAN_ESR_RECOVER	0x20
#define KASAN_ESR_WRITE	0x10
#define KASAN_ESR_SIZE_MASK	0x0f
#define KASAN_ESR_SIZE(esr)	(1 << ((esr) & KASAN_ESR_SIZE_MASK))

static int kasan_handler(struct pt_regs *regs, unsigned int esr)
{
	bool recover = esr & KASAN_ESR_RECOVER;
	bool write = esr & KASAN_ESR_WRITE;
	size_t size = KASAN_ESR_SIZE(esr);
	u64 addr = regs->regs[0];
	u64 pc = regs->pc;

	kasan_report(addr, size, write, pc);

	/*
	 * The instrumentation allows us to control whether we can proceed
	 * after a crash was detected. This is done by passing the -recover
	 * flag to the compiler. Disabling recovery allows us to generate more
	 * compact code.
	 *
	 * Unfortunately disabling recovery doesn't work for the kernel right
	 * now. KASAN reporting is disabled in some contexts (for example when
	 * the allocator accesses slab object metadata; this is controlled by
	 * current->kasan_depth). All these accesses are detected by the tool,
	 * even though the reports for them are not printed.
	 *
	 * This is something that might be fixed at some point in the future.
	 */
	if (!recover)
		die("Oops - KASAN", regs, 0);

	/* If thread survives, skip over the brk instruction and continue: */
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

static struct break_hook kasan_break_hook = {
	.fn = kasan_handler,
	.imm = KASAN_BRK_IMM,
	.mask = KASAN_BRK_MASK,
};
#endif

/*
 * Initial handler for AArch64 BRK exceptions.
 * This handler is only used until debug_traps_init().
 */
int __init early_brk64(unsigned long addr, unsigned int esr,
		       struct pt_regs *regs)
{
#ifdef CONFIG_KASAN_SW_TAGS
	unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;

	if ((comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
		return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
	return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
}

/* This registration must happen early, before debug_traps_init(). */
void __init trap_init(void)
{
	register_kernel_break_hook(&bug_break_hook);
#ifdef CONFIG_KASAN_SW_TAGS
	register_kernel_break_hook(&kasan_break_hook);
#endif
}