// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/traps.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bug.h>
#include <linux/context_tracking.h>
#include <linux/signal.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/sizes.h>
#include <linux/syscalls.h>
#include <linux/mm_types.h>
#include <linux/kasan.h>

#include <asm/atomic.h>
#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/extable.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
#include <asm/patching.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
#include <asm/system_misc.h>
#include <asm/sysreg.h>

static bool __kprobes __check_eq(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) != 0;
}

static bool __kprobes __check_ne(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) == 0;
}

static bool __kprobes __check_cs(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_cc(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_mi(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_pl(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_vs(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) != 0;
}

static bool __kprobes __check_vc(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) == 0;
}

static bool __kprobes __check_hi(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_ls(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_ge(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_lt(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_gt(unsigned long pstate)
{
	/* PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/* PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) == 0;
}

static bool __kprobes __check_le(unsigned long pstate)
{
	/* PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/* PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) != 0;
}

static bool __kprobes __check_al(unsigned long pstate)
{
	return true;
}

/*
 * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
 * it behaves identically to 0b1110 ("al").
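 *
 * The table below is indexed by the 4-bit AArch32 condition field (see
 * cp15_cond_valid()) to decide whether a trapped conditional instruction
 * would have passed its condition check.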
 */
pstate_check_t * const aarch32_opcode_cond_checks[16] = {
	__check_eq, __check_ne, __check_cs, __check_cc,
	__check_mi, __check_pl, __check_vs, __check_vc,
	__check_hi, __check_ls, __check_ge, __check_lt,
	__check_gt, __check_le, __check_al, __check_al
};

int show_unhandled_signals = 0;

static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
	int i;

	if (user_mode(regs))
		return;

	for (i = -4; i < 1; i++) {
		unsigned int val, bad;

		bad = aarch64_insn_read(&((u32 *)addr)[i], &val);

		if (!bad)
			p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
		else {
			p += sprintf(p, "bad PC value");
			break;
		}
	}

	printk("%sCode: %s\n", lvl, str);
}

#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#elif defined(CONFIG_PREEMPT_RT)
#define S_PREEMPT " PREEMPT_RT"
#else
#define S_PREEMPT ""
#endif

#define S_SMP " SMP"

static int __die(const char *str, int err, struct pt_regs *regs)
{
	static int die_counter;
	int ret;

	pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
		 str, err, ++die_counter);

	/* trap and error numbers are mostly meaningless on ARM */
	ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
	if (ret == NOTIFY_STOP)
		return ret;

	print_modules();
	show_regs(regs);

	dump_kernel_instr(KERN_EMERG, regs);

	return ret;
}

static DEFINE_RAW_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	int ret;
	unsigned long flags;

	raw_spin_lock_irqsave(&die_lock, flags);

	oops_enter();

	console_verbose();
	bust_spinlocks(1);
	ret = __die(str, err, regs);

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	oops_exit();

	if (in_interrupt())
		panic("%s: Fatal exception in interrupt", str);
	if (panic_on_oops)
		panic("%s: Fatal exception", str);

	raw_spin_unlock_irqrestore(&die_lock, flags);

	if (ret != NOTIFY_STOP)
		make_task_dead(SIGSEGV);
}

static void arm64_show_signal(int signo, const char *str)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct task_struct *tsk = current;
	unsigned int esr = tsk->thread.fault_code;
	struct pt_regs *regs = task_pt_regs(tsk);

	/* Leave if the signal won't be shown */
	if (!show_unhandled_signals ||
	    !unhandled_signal(tsk, signo) ||
	    !__ratelimit(&rs))
		return;

	pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
	if (esr)
		pr_cont("%s, ESR 0x%08x, ", esr_get_class_string(esr), esr);

	pr_cont("%s", str);
	print_vma_addr(KERN_CONT " in ", regs->pc);
	pr_cont("\n");
	__show_regs(regs);
}

void arm64_force_sig_fault(int signo, int code, unsigned long far,
			   const char *str)
{
	arm64_show_signal(signo, str);
	if (signo == SIGKILL)
		force_sig(SIGKILL);
	else
		force_sig_fault(signo, code, (void __user *)far);
}

void arm64_force_sig_mceerr(int code, unsigned long far, short lsb,
			    const char *str)
{
	arm64_show_signal(SIGBUS, str);
	force_sig_mceerr(code, (void __user *)far, lsb);
}

void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far,
				       const char *str)
{
	arm64_show_signal(SIGTRAP, str);
	force_sig_ptrace_errno_trap(errno, (void __user *)far);
}

void arm64_notify_die(const char *str, struct pt_regs *regs,
		      int signo, int sicode, unsigned long far,
		      int err)
{
	if (user_mode(regs)) {
		WARN_ON(regs != current_pt_regs());
		current->thread.fault_address = 0;
		current->thread.fault_code = err;

		arm64_force_sig_fault(signo, sicode, far, str);
	} else {
		die(str, regs, err);
	}
}

#ifdef CONFIG_COMPAT
#define PSTATE_IT_1_0_SHIFT	25
#define PSTATE_IT_1_0_MASK	(0x3 << PSTATE_IT_1_0_SHIFT)
#define PSTATE_IT_7_2_SHIFT	10
#define PSTATE_IT_7_2_MASK	(0x3f << PSTATE_IT_7_2_SHIFT)

static u32 compat_get_it_state(struct pt_regs *regs)
{
	u32 it, pstate = regs->pstate;

	it  = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT;
	it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2;

	return it;
}

static void compat_set_it_state(struct pt_regs *regs, u32 it)
{
	u32 pstate_it;

	pstate_it  = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK;
	pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK;

	regs->pstate &= ~PSR_AA32_IT_MASK;
	regs->pstate |= pstate_it;
}

static void advance_itstate(struct pt_regs *regs)
{
	u32 it;

	/* ARM mode */
	if (!(regs->pstate & PSR_AA32_T_BIT) ||
	    !(regs->pstate & PSR_AA32_IT_MASK))
		return;

	it = compat_get_it_state(regs);

	/*
	 * If this is the last instruction of the block, wipe the IT
	 * state. Otherwise advance it.
	 */
	if (!(it & 7))
		it = 0;
	else
		it = (it & 0xe0) | ((it << 1) & 0x1f);

	compat_set_it_state(regs, it);
}
#else
static void advance_itstate(struct pt_regs *regs)
{
}
#endif

void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
{
	regs->pc += size;

	/*
	 * If we were single stepping, we want to get the step exception after
	 * we return from the trap.
	 */
	if (user_mode(regs))
		user_fastforward_single_step(current);

	if (compat_user_mode(regs))
		advance_itstate(regs);
	else
		regs->pstate &= ~PSR_BTYPE_MASK;
}

static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);

void register_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_add(&hook->node, &undef_hook);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}

void unregister_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_del(&hook->node);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}

static int call_undef_hook(struct pt_regs *regs)
{
	struct undef_hook *hook;
	unsigned long flags;
	u32 instr;
	int (*fn)(struct pt_regs *regs, u32 instr) = NULL;
	unsigned long pc = instruction_pointer(regs);

	if (!user_mode(regs)) {
		__le32 instr_le;
		if (get_kernel_nofault(instr_le, (__le32 *)pc))
			goto exit;
		instr = le32_to_cpu(instr_le);
	} else if (compat_thumb_mode(regs)) {
		/* 16-bit Thumb instruction */
		__le16 instr_le;
		if (get_user(instr_le, (__le16 __user *)pc))
			goto exit;
		instr = le16_to_cpu(instr_le);
		if (aarch32_insn_is_wide(instr)) {
			u32 instr2;

			if (get_user(instr_le, (__le16 __user *)(pc + 2)))
				goto exit;
			instr2 = le16_to_cpu(instr_le);
			instr = (instr << 16) | instr2;
		}
	} else {
		/* 32-bit ARM instruction */
		__le32 instr_le;
		if (get_user(instr_le, (__le32 __user *)pc))
			goto exit;
		instr = le32_to_cpu(instr_le);
	}

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_for_each_entry(hook, &undef_hook, node)
		if ((instr & hook->instr_mask) == hook->instr_val &&
		    (regs->pstate & hook->pstate_mask) == hook->pstate_val)
			fn = hook->fn;

	raw_spin_unlock_irqrestore(&undef_lock, flags);
exit:
	return fn ? fn(regs, instr) : 1;
}

void force_signal_inject(int signal, int code, unsigned long address, unsigned int err)
{
	const char *desc;
	struct pt_regs *regs = current_pt_regs();

	if (WARN_ON(!user_mode(regs)))
		return;

	switch (signal) {
	case SIGILL:
		desc = "undefined instruction";
		break;
	case SIGSEGV:
		desc = "illegal memory access";
		break;
	default:
		desc = "unknown or unrecoverable error";
		break;
	}

	/* Force signals we don't understand to SIGKILL */
	if (WARN_ON(signal != SIGKILL &&
		    siginfo_layout(signal, code) != SIL_FAULT)) {
		signal = SIGKILL;
	}

	arm64_notify_die(desc, regs, signal, code, address, err);
}

/*
 * Set up process info to signal segmentation fault - called on access error.
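 * The faulting address is untagged before the VMA lookup so that tagged user
 * pointers are classified as SEGV_MAPERR vs SEGV_ACCERR correctly.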
 */
void arm64_notify_segfault(unsigned long addr)
{
	int code;

	mmap_read_lock(current->mm);
	if (find_vma(current->mm, untagged_addr(addr)) == NULL)
		code = SEGV_MAPERR;
	else
		code = SEGV_ACCERR;
	mmap_read_unlock(current->mm);

	force_signal_inject(SIGSEGV, code, addr, 0);
}

void do_undefinstr(struct pt_regs *regs)
{
	/* check for AArch32 breakpoint instructions */
	if (!aarch32_break_handler(regs))
		return;

	if (call_undef_hook(regs) == 0)
		return;

	BUG_ON(!user_mode(regs));
	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}
NOKPROBE_SYMBOL(do_undefinstr);

void do_bti(struct pt_regs *regs)
{
	BUG_ON(!user_mode(regs));
	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}
NOKPROBE_SYMBOL(do_bti);

void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr)
{
	/*
	 * Unexpected FPAC exception or pointer authentication failure in
	 * the kernel: kill the task before it does any more harm.
	 */
	BUG_ON(!user_mode(regs));
	force_signal_inject(SIGILL, ILL_ILLOPN, regs->pc, esr);
}
NOKPROBE_SYMBOL(do_ptrauth_fault);

#define __user_cache_maint(insn, address, res)			\
	if (address >= TASK_SIZE_MAX) {				\
		res = -EFAULT;					\
	} else {						\
		uaccess_ttbr0_enable();				\
		asm volatile (					\
			"1:	" insn ", %1\n"			\
			"	mov	%w0, #0\n"		\
			"2:\n"					\
			_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0)	\
			: "=r" (res)				\
			: "r" (address));			\
		uaccess_ttbr0_disable();			\
	}

static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
{
	unsigned long tagged_address, address;
	int rt = ESR_ELx_SYS64_ISS_RT(esr);
	int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
	int ret = 0;

	tagged_address = pt_regs_read_reg(regs, rt);
	address = untagged_addr(tagged_address);

	switch (crm) {
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAU:	/* DC CVAU, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAC:	/* DC CVAC, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVADP:	/* DC CVADP */
		__user_cache_maint("sys 3, c7, c13, 1", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAP:	/* DC CVAP */
		__user_cache_maint("sys 3, c7, c12, 1", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC:	/* DC CIVAC */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_IC_IVAU:	/* IC IVAU */
		__user_cache_maint("ic ivau", address, ret);
		break;
	default:
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
		return;
	}

	if (ret)
		arm64_notify_segfault(tagged_address);
	else
		arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);
	unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);

	if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
		/* Hide DIC so that we can trap the unnecessary maintenance... */
		val &= ~BIT(CTR_DIC_SHIFT);

		/* ... and fake IminLine to reduce the number of traps. */
		val &= ~CTR_IMINLINE_MASK;
		val |= (PAGE_SHIFT - 2) & CTR_IMINLINE_MASK;
	}

	pt_regs_write_reg(regs, rt, val);

	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);

	pt_regs_write_reg(regs, rt, arch_timer_read_counter());
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);

	pt_regs_write_reg(regs, rt, arch_timer_get_rate());
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void mrs_handler(unsigned int esr, struct pt_regs *regs)
{
	u32 sysreg, rt;

	rt = ESR_ELx_SYS64_ISS_RT(esr);
	sysreg = esr_sys64_to_sysreg(esr);

	if (do_emulate_mrs(regs, sysreg, rt) != 0)
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}

static void wfi_handler(unsigned int esr, struct pt_regs *regs)
{
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

struct sys64_hook {
	unsigned int esr_mask;
	unsigned int esr_val;
	void (*handler)(unsigned int esr, struct pt_regs *regs);
};

static const struct sys64_hook sys64_hooks[] = {
	{
		.esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
		.handler = user_cache_maint_handler,
	},
	{
		/* Trap read access to CTR_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ,
		.handler = ctr_read_handler,
	},
	{
		/* Trap read access to CNTVCT_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT,
		.handler = cntvct_read_handler,
	},
	{
		/* Trap read access to CNTVCTSS_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCTSS,
		.handler = cntvct_read_handler,
	},
	{
		/* Trap read access to CNTFRQ_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
		.handler = cntfrq_read_handler,
	},
	{
		/* Trap read access to CPUID registers */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL,
		.handler = mrs_handler,
	},
	{
		/* Trap WFI instructions executed in userspace */
		.esr_mask = ESR_ELx_WFx_MASK,
		.esr_val = ESR_ELx_WFx_WFI_VAL,
		.handler = wfi_handler,
	},
	{},
};

#ifdef CONFIG_COMPAT
static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs)
{
	int cond;

	/* Only a T32 instruction can trap without CV being set */
	if (!(esr & ESR_ELx_CV)) {
		u32 it;

		it = compat_get_it_state(regs);
		if (!it)
			return true;

		cond = it >> 4;
	} else {
		cond = (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
	}

	return aarch32_opcode_cond_checks[cond](regs->pstate);
}

static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT;

	pt_regs_write_reg(regs, reg, arch_timer_get_rate());
	arm64_skip_faulting_instruction(regs, 4);
}

static const struct sys64_hook cp15_32_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
		.handler = compat_cntfrq_read_handler,
	},
	{},
};

static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
	int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
	u64 val = arch_timer_read_counter();

	pt_regs_write_reg(regs, rt, lower_32_bits(val));
	pt_regs_write_reg(regs, rt2, upper_32_bits(val));
	arm64_skip_faulting_instruction(regs, 4);
}

static const struct sys64_hook cp15_64_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
		.handler = compat_cntvct_read_handler,
	},
	{
		.esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCTSS,
		.handler = compat_cntvct_read_handler,
	},
	{},
};

void do_cp15instr(unsigned int esr, struct pt_regs *regs)
{
	const struct sys64_hook *hook, *hook_base;

	if (!cp15_cond_valid(esr, regs)) {
		/*
		 * There is no T16 variant of a CP access, so we
		 * always advance PC by 4 bytes.
		 */
		arm64_skip_faulting_instruction(regs, 4);
		return;
	}

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_CP15_32:
		hook_base = cp15_32_hooks;
		break;
	case ESR_ELx_EC_CP15_64:
		hook_base = cp15_64_hooks;
		break;
	default:
		do_undefinstr(regs);
		return;
	}

	for (hook = hook_base; hook->handler; hook++)
		if ((hook->esr_mask & esr) == hook->esr_val) {
			hook->handler(esr, regs);
			return;
		}

	/*
	 * New cp15 instructions may previously have been undefined at
	 * EL0. Fall back to our usual undefined instruction handler
	 * so that we handle these consistently.
	 */
	do_undefinstr(regs);
}
NOKPROBE_SYMBOL(do_cp15instr);
#endif

void do_sysinstr(unsigned int esr, struct pt_regs *regs)
{
	const struct sys64_hook *hook;

	for (hook = sys64_hooks; hook->handler; hook++)
		if ((hook->esr_mask & esr) == hook->esr_val) {
			hook->handler(esr, regs);
			return;
		}

	/*
	 * New SYS instructions may previously have been undefined at EL0. Fall
	 * back to our usual undefined instruction handler so that we handle
	 * these consistently.
	 */
	do_undefinstr(regs);
}
NOKPROBE_SYMBOL(do_sysinstr);

static const char *esr_class_str[] = {
	[0 ... ESR_ELx_EC_MAX]	= "UNRECOGNIZED EC",
	[ESR_ELx_EC_UNKNOWN]	= "Unknown/Uncategorized",
	[ESR_ELx_EC_WFx]	= "WFI/WFE",
	[ESR_ELx_EC_CP15_32]	= "CP15 MCR/MRC",
	[ESR_ELx_EC_CP15_64]	= "CP15 MCRR/MRRC",
	[ESR_ELx_EC_CP14_MR]	= "CP14 MCR/MRC",
	[ESR_ELx_EC_CP14_LS]	= "CP14 LDC/STC",
	[ESR_ELx_EC_FP_ASIMD]	= "ASIMD",
	[ESR_ELx_EC_CP10_ID]	= "CP10 MRC/VMRS",
	[ESR_ELx_EC_PAC]	= "PAC",
	[ESR_ELx_EC_CP14_64]	= "CP14 MCRR/MRRC",
	[ESR_ELx_EC_BTI]	= "BTI",
	[ESR_ELx_EC_ILL]	= "PSTATE.IL",
	[ESR_ELx_EC_SVC32]	= "SVC (AArch32)",
	[ESR_ELx_EC_HVC32]	= "HVC (AArch32)",
	[ESR_ELx_EC_SMC32]	= "SMC (AArch32)",
	[ESR_ELx_EC_SVC64]	= "SVC (AArch64)",
	[ESR_ELx_EC_HVC64]	= "HVC (AArch64)",
	[ESR_ELx_EC_SMC64]	= "SMC (AArch64)",
	[ESR_ELx_EC_SYS64]	= "MSR/MRS (AArch64)",
	[ESR_ELx_EC_SVE]	= "SVE",
	[ESR_ELx_EC_ERET]	= "ERET/ERETAA/ERETAB",
	[ESR_ELx_EC_FPAC]	= "FPAC",
	[ESR_ELx_EC_IMP_DEF]	= "EL3 IMP DEF",
	[ESR_ELx_EC_IABT_LOW]	= "IABT (lower EL)",
	[ESR_ELx_EC_IABT_CUR]	= "IABT (current EL)",
	[ESR_ELx_EC_PC_ALIGN]	= "PC Alignment",
	[ESR_ELx_EC_DABT_LOW]	= "DABT (lower EL)",
	[ESR_ELx_EC_DABT_CUR]	= "DABT (current EL)",
	[ESR_ELx_EC_SP_ALIGN]	= "SP Alignment",
	[ESR_ELx_EC_FP_EXC32]	= "FP (AArch32)",
	[ESR_ELx_EC_FP_EXC64]	= "FP (AArch64)",
	[ESR_ELx_EC_SERROR]	= "SError",
	[ESR_ELx_EC_BREAKPT_LOW]	= "Breakpoint (lower EL)",
	[ESR_ELx_EC_BREAKPT_CUR]	= "Breakpoint (current EL)",
	[ESR_ELx_EC_SOFTSTP_LOW]	= "Software Step (lower EL)",
	[ESR_ELx_EC_SOFTSTP_CUR]	= "Software Step (current EL)",
	[ESR_ELx_EC_WATCHPT_LOW]	= "Watchpoint (lower EL)",
	[ESR_ELx_EC_WATCHPT_CUR]	= "Watchpoint (current EL)",
	[ESR_ELx_EC_BKPT32]	= "BKPT (AArch32)",
	[ESR_ELx_EC_VECTOR32]	= "Vector catch (AArch32)",
	[ESR_ELx_EC_BRK64]	= "BRK (AArch64)",
};

const char *esr_get_class_string(u32 esr)
{
	return esr_class_str[ESR_ELx_EC(esr)];
}

/*
 * bad_el0_sync handles unexpected, but potentially recoverable synchronous
 * exceptions taken from EL0.
 */
void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{
	unsigned long pc = instruction_pointer(regs);

	current->thread.fault_address = 0;
	current->thread.fault_code = esr;

	arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc,
			      "Bad EL0 synchronous exception");
}

#ifdef CONFIG_VMAP_STACK

DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
	__aligned(16);

void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far)
{
	unsigned long tsk_stk = (unsigned long)current->stack;
	unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);

	console_verbose();
	pr_emerg("Insufficient stack space to handle exception!");

	pr_emerg("ESR: 0x%08x -- %s\n", esr, esr_get_class_string(esr));
	pr_emerg("FAR: 0x%016lx\n", far);

	pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
		 tsk_stk, tsk_stk + THREAD_SIZE);
	pr_emerg("IRQ stack:      [0x%016lx..0x%016lx]\n",
		 irq_stk, irq_stk + IRQ_STACK_SIZE);
	pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
		 ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

	__show_regs(regs);

	/*
	 * We use nmi_panic to limit the potential for recursive overflows, and
	 * to get a better stack trace.
	 */
	nmi_panic(NULL, "kernel stack overflow");
	cpu_park_loop();
}
#endif

void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr)
{
	console_verbose();

	pr_crit("SError Interrupt on CPU%d, code 0x%08x -- %s\n",
		smp_processor_id(), esr, esr_get_class_string(esr));
	if (regs)
		__show_regs(regs);

	nmi_panic(regs, "Asynchronous SError Interrupt");

	cpu_park_loop();
	unreachable();
}

bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
{
	u32 aet = arm64_ras_serror_get_severity(esr);

	switch (aet) {
	case ESR_ELx_AET_CE:	/* corrected error */
	case ESR_ELx_AET_UEO:	/* restartable, not yet consumed */
		/*
		 * The CPU can make progress. We may take UEO again as
		 * a more severe error.
		 */
		return false;

	case ESR_ELx_AET_UEU:	/* Uncorrected Unrecoverable */
	case ESR_ELx_AET_UER:	/* Uncorrected Recoverable */
		/*
		 * The CPU can't make progress. The exception may have
		 * been imprecise.
		 *
		 * Neoverse-N1 #1349291 means a non-KVM SError reported as
		 * Unrecoverable should be treated as Uncontainable. We
		 * call arm64_serror_panic() in both cases.
		 */
		return true;

	case ESR_ELx_AET_UC:	/* Uncontainable or Uncategorized error */
	default:
		/* Error has been silently propagated */
		arm64_serror_panic(regs, esr);
	}
}

void do_serror(struct pt_regs *regs, unsigned int esr)
{
	/* non-RAS errors are not containable */
	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
		arm64_serror_panic(regs, esr);
}

/* GENERIC_BUG traps */

int is_valid_bugaddr(unsigned long addr)
{
	/*
	 * bug_handler() is only called for BRK #BUG_BRK_IMM.
	 * So the answer is trivial -- any spurious instances with no
	 * bug table entry will be rejected by report_bug() and passed
	 * back to the debug-monitors code and handled as a fatal
	 * unexpected debug exception.
	 */
	return 1;
}

static int bug_handler(struct pt_regs *regs, unsigned int esr)
{
	switch (report_bug(regs->pc, regs)) {
	case BUG_TRAP_TYPE_BUG:
		die("Oops - BUG", regs, 0);
		break;

	case BUG_TRAP_TYPE_WARN:
		break;

	default:
		/* unknown/unrecognised bug trap type */
		return DBG_HOOK_ERROR;
	}

	/* If thread survives, skip over the BUG instruction and continue: */
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

static struct break_hook bug_break_hook = {
	.fn = bug_handler,
	.imm = BUG_BRK_IMM,
};

static int reserved_fault_handler(struct pt_regs *regs, unsigned int esr)
{
	pr_err("%s generated an invalid instruction at %pS!\n",
	       "Kernel text patching",
	       (void *)instruction_pointer(regs));

	/* We cannot handle this */
	return DBG_HOOK_ERROR;
}

static struct break_hook fault_break_hook = {
	.fn = reserved_fault_handler,
	.imm = FAULT_BRK_IMM,
};

#ifdef CONFIG_KASAN_SW_TAGS

#define KASAN_ESR_RECOVER	0x20
#define KASAN_ESR_WRITE	0x10
#define KASAN_ESR_SIZE_MASK	0x0f
#define KASAN_ESR_SIZE(esr)	(1 << ((esr) & KASAN_ESR_SIZE_MASK))

static int kasan_handler(struct pt_regs *regs, unsigned int esr)
{
	bool recover = esr & KASAN_ESR_RECOVER;
	bool write = esr & KASAN_ESR_WRITE;
	size_t size = KASAN_ESR_SIZE(esr);
	u64 addr = regs->regs[0];
	u64 pc = regs->pc;

	kasan_report(addr, size, write, pc);

	/*
	 * The instrumentation allows us to control whether we can proceed
	 * after a crash was detected. This is done by passing the -recover
	 * flag to the compiler. Disabling recovery allows the compiler to
	 * generate more compact code.
	 *
	 * Unfortunately disabling recovery doesn't work for the kernel right
	 * now. KASAN reporting is disabled in some contexts (for example when
	 * the allocator accesses slab object metadata; this is controlled by
	 * current->kasan_depth). All these accesses are detected by the tool,
	 * even though the reports for them are not printed.
	 *
	 * This is something that might be fixed at some point in the future.
	 */
	if (!recover)
		die("Oops - KASAN", regs, 0);

	/* If thread survives, skip over the brk instruction and continue: */
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

static struct break_hook kasan_break_hook = {
	.fn = kasan_handler,
	.imm = KASAN_BRK_IMM,
	.mask = KASAN_BRK_MASK,
};
#endif

/*
 * Initial handler for AArch64 BRK exceptions
 * This handler is only used until debug_traps_init().
 */
int __init early_brk64(unsigned long addr, unsigned int esr,
		       struct pt_regs *regs)
{
#ifdef CONFIG_KASAN_SW_TAGS
	unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;

	if ((comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
		return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
	return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
}

void __init trap_init(void)
{
	register_kernel_break_hook(&bug_break_hook);
	register_kernel_break_hook(&fault_break_hook);
#ifdef CONFIG_KASAN_SW_TAGS
	register_kernel_break_hook(&kasan_break_hook);
#endif
	debug_traps_init();
}