// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/traps.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bug.h>
#include <linux/context_tracking.h>
#include <linux/signal.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/sizes.h>
#include <linux/syscalls.h>
#include <linux/mm_types.h>
#include <linux/kasan.h>
#include <linux/ubsan.h>
#include <linux/cfi.h>

#include <asm/atomic.h>
#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/extable.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
#include <asm/patching.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
#include <asm/system_misc.h>
#include <asm/sysreg.h>

static bool __kprobes __check_eq(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) != 0;
}

static bool __kprobes __check_ne(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) == 0;
}

static bool __kprobes __check_cs(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_cc(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_mi(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_pl(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_vs(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) != 0;
}

static bool __kprobes __check_vc(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) == 0;
}

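/*
 * The remaining checks combine two flags. They rely on the NZCV layout in
 * PSTATE (N at bit 31, Z at bit 30, C at bit 29, V at bit 28): shifting the
 * value right by 1 lines Z up with C, and shifting it left by 3 (or by 1)
 * lines V (or Z) up with N, so each condition reduces to a single bit test.
 */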
static bool __kprobes __check_hi(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_ls(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_ge(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_lt(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_gt(unsigned long pstate)
{
	/*PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) == 0;
}

static bool __kprobes __check_le(unsigned long pstate)
{
	/*PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) != 0;
}

static bool __kprobes __check_al(unsigned long pstate)
{
	return true;
}

/*
 * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
 * it behaves identically to 0b1110 ("al").
 */
pstate_check_t * const aarch32_opcode_cond_checks[16] = {
	__check_eq, __check_ne, __check_cs, __check_cc,
	__check_mi, __check_pl, __check_vs, __check_vc,
	__check_hi, __check_ls, __check_ge, __check_lt,
	__check_gt, __check_le, __check_al, __check_al
};

int show_unhandled_signals = 0;

static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
	int i;

	if (user_mode(regs))
		return;

	for (i = -4; i < 1; i++) {
		unsigned int val, bad;

		bad = aarch64_insn_read(&((u32 *)addr)[i], &val);

		if (!bad)
			p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
		else
			p += sprintf(p, i == 0 ? "(????????) " : "???????? ");
	}

	printk("%sCode: %s\n", lvl, str);
}

#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#elif defined(CONFIG_PREEMPT_RT)
#define S_PREEMPT " PREEMPT_RT"
#else
#define S_PREEMPT ""
#endif

#define S_SMP " SMP"

static int __die(const char *str, long err, struct pt_regs *regs)
{
	static int die_counter;
	int ret;

	pr_emerg("Internal error: %s: %016lx [#%d]" S_PREEMPT S_SMP "\n",
		 str, err, ++die_counter);

	/* trap and error numbers are mostly meaningless on ARM */
	ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
	if (ret == NOTIFY_STOP)
		return ret;

	print_modules();
	show_regs(regs);

	dump_kernel_instr(KERN_EMERG, regs);

	return ret;
}

static DEFINE_RAW_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, long err)
{
	int ret;
	unsigned long flags;

	raw_spin_lock_irqsave(&die_lock, flags);

	oops_enter();

	console_verbose();
	bust_spinlocks(1);
	ret = __die(str, err, regs);

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	oops_exit();

	if (in_interrupt())
		panic("%s: Fatal exception in interrupt", str);
	if (panic_on_oops)
		panic("%s: Fatal exception", str);

	raw_spin_unlock_irqrestore(&die_lock, flags);

	if (ret != NOTIFY_STOP)
		make_task_dead(SIGSEGV);
}

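/*
 * Print a rate-limited diagnostic for an exception that is about to be
 * delivered to userspace as a signal. Nothing is printed unless
 * show_unhandled_signals is set and the task does not itself handle the
 * signal.
 */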
static void arm64_show_signal(int signo, const char *str)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct task_struct *tsk = current;
	unsigned long esr = tsk->thread.fault_code;
	struct pt_regs *regs = task_pt_regs(tsk);

	/* Leave if the signal won't be shown */
	if (!show_unhandled_signals ||
	    !unhandled_signal(tsk, signo) ||
	    !__ratelimit(&rs))
		return;

	pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
	if (esr)
		pr_cont("%s, ESR 0x%016lx, ", esr_get_class_string(esr), esr);

	pr_cont("%s", str);
	print_vma_addr(KERN_CONT " in ", regs->pc);
	pr_cont("\n");
	__show_regs(regs);
}

void arm64_force_sig_fault(int signo, int code, unsigned long far,
			   const char *str)
{
	arm64_show_signal(signo, str);
	if (signo == SIGKILL)
		force_sig(SIGKILL);
	else
		force_sig_fault(signo, code, (void __user *)far);
}

void arm64_force_sig_mceerr(int code, unsigned long far, short lsb,
			    const char *str)
{
	arm64_show_signal(SIGBUS, str);
	force_sig_mceerr(code, (void __user *)far, lsb);
}

void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far,
				       const char *str)
{
	arm64_show_signal(SIGTRAP, str);
	force_sig_ptrace_errno_trap(errno, (void __user *)far);
}

void arm64_notify_die(const char *str, struct pt_regs *regs,
		      int signo, int sicode, unsigned long far,
		      unsigned long err)
{
	if (user_mode(regs)) {
		WARN_ON(regs != current_pt_regs());
		current->thread.fault_address = 0;
		current->thread.fault_code = err;

		arm64_force_sig_fault(signo, sicode, far, str);
	} else {
		die(str, regs, err);
	}
}

#ifdef CONFIG_COMPAT
#define PSTATE_IT_1_0_SHIFT	25
#define PSTATE_IT_1_0_MASK	(0x3 << PSTATE_IT_1_0_SHIFT)
#define PSTATE_IT_7_2_SHIFT	10
#define PSTATE_IT_7_2_MASK	(0x3f << PSTATE_IT_7_2_SHIFT)

static u32 compat_get_it_state(struct pt_regs *regs)
{
	u32 it, pstate = regs->pstate;

	it = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT;
	it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2;

	return it;
}

static void compat_set_it_state(struct pt_regs *regs, u32 it)
{
	u32 pstate_it;

	pstate_it = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK;
	pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK;

	regs->pstate &= ~PSR_AA32_IT_MASK;
	regs->pstate |= pstate_it;
}

static void advance_itstate(struct pt_regs *regs)
{
	u32 it;

	/* ARM mode */
	if (!(regs->pstate & PSR_AA32_T_BIT) ||
	    !(regs->pstate & PSR_AA32_IT_MASK))
		return;

	it = compat_get_it_state(regs);

	/*
	 * If this is the last instruction of the block, wipe the IT
	 * state. Otherwise advance it.
	 */
	if (!(it & 7))
		it = 0;
	else
		it = (it & 0xe0) | ((it << 1) & 0x1f);

	compat_set_it_state(regs, it);
}
#else
static void advance_itstate(struct pt_regs *regs)
{
}
#endif

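/*
 * Advance the PC past an instruction that was emulated or deliberately
 * skipped. For user tasks this also fast-forwards any pending single-step
 * state so the step exception is taken on return. For compat tasks the
 * Thumb IT state is advanced; for native tasks PSTATE.BTYPE is cleared,
 * as retiring the instruction would have done.
 */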
void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
{
	regs->pc += size;

	/*
	 * If we were single stepping, we want to get the step exception after
	 * we return from the trap.
	 */
	if (user_mode(regs))
		user_fastforward_single_step(current);

	if (compat_user_mode(regs))
		advance_itstate(regs);
	else
		regs->pstate &= ~PSR_BTYPE_MASK;
}

static int user_insn_read(struct pt_regs *regs, u32 *insnp)
{
	u32 instr;
	unsigned long pc = instruction_pointer(regs);

	if (compat_thumb_mode(regs)) {
		/* 16-bit Thumb instruction */
		__le16 instr_le;
		if (get_user(instr_le, (__le16 __user *)pc))
			return -EFAULT;
		instr = le16_to_cpu(instr_le);
		if (aarch32_insn_is_wide(instr)) {
			u32 instr2;

			if (get_user(instr_le, (__le16 __user *)(pc + 2)))
				return -EFAULT;
			instr2 = le16_to_cpu(instr_le);
			instr = (instr << 16) | instr2;
		}
	} else {
		/* 32-bit ARM instruction */
		__le32 instr_le;
		if (get_user(instr_le, (__le32 __user *)pc))
			return -EFAULT;
		instr = le32_to_cpu(instr_le);
	}

	*insnp = instr;
	return 0;
}

void force_signal_inject(int signal, int code, unsigned long address, unsigned long err)
{
	const char *desc;
	struct pt_regs *regs = current_pt_regs();

	if (WARN_ON(!user_mode(regs)))
		return;

	switch (signal) {
	case SIGILL:
		desc = "undefined instruction";
		break;
	case SIGSEGV:
		desc = "illegal memory access";
		break;
	default:
		desc = "unknown or unrecoverable error";
		break;
	}

	/* Force signals we don't understand to SIGKILL */
	if (WARN_ON(signal != SIGKILL &&
		    siginfo_layout(signal, code) != SIL_FAULT)) {
		signal = SIGKILL;
	}

	arm64_notify_die(desc, regs, signal, code, address, err);
}

/*
 * Set up process info to signal segmentation fault - called on access error.
 */
void arm64_notify_segfault(unsigned long addr)
{
	int code;

	mmap_read_lock(current->mm);
	if (find_vma(current->mm, untagged_addr(addr)) == NULL)
		code = SEGV_MAPERR;
	else
		code = SEGV_ACCERR;
	mmap_read_unlock(current->mm);

	force_signal_inject(SIGSEGV, code, addr, 0);
}

void do_el0_undef(struct pt_regs *regs, unsigned long esr)
{
	u32 insn;

	/* check for AArch32 breakpoint instructions */
	if (!aarch32_break_handler(regs))
		return;

	if (user_insn_read(regs, &insn))
		goto out_err;

	if (try_emulate_mrs(regs, insn))
		return;

	if (try_emulate_armv8_deprecated(regs, insn))
		return;

out_err:
	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}

void do_el1_undef(struct pt_regs *regs, unsigned long esr)
{
	u32 insn;

	if (aarch64_insn_read((void *)regs->pc, &insn))
		goto out_err;

	if (try_emulate_el1_ssbs(regs, insn))
		return;

out_err:
	die("Oops - Undefined instruction", regs, esr);
}

void do_el0_bti(struct pt_regs *regs)
{
	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}

void do_el1_bti(struct pt_regs *regs, unsigned long esr)
{
	die("Oops - BTI", regs, esr);
}

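/*
 * FPAC exceptions are raised when FEAT_FPAC is implemented and a pointer
 * authentication check fails, instead of silently producing a corrupted
 * pointer that would fault later. A failure in userspace is reported as
 * SIGILL; a failure in the kernel is fatal.
 */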
void do_el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	force_signal_inject(SIGILL, ILL_ILLOPN, regs->pc, esr);
}

void do_el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	/*
	 * Unexpected FPAC exception in the kernel: kill the task before it
	 * does any more harm.
	 */
	die("Oops - FPAC", regs, esr);
}

#define __user_cache_maint(insn, address, res)			\
	if (address >= TASK_SIZE_MAX) {				\
		res = -EFAULT;					\
	} else {						\
		uaccess_ttbr0_enable();				\
		asm volatile (					\
			"1:	" insn ", %1\n"			\
			"	mov	%w0, #0\n"		\
			"2:\n"					\
			_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0)	\
			: "=r" (res)				\
			: "r" (address));			\
		uaccess_ttbr0_disable();			\
	}

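/*
 * Handle EL0 cache maintenance (DC/IC by VA) instructions that trap to EL1
 * (e.g. when SCTLR_EL1.UCI is clear). The operation is performed on the
 * task's behalf, with DC CVAU/CVAC promoted to DC CIVAC as noted below, and
 * a bad user address is reported as a segfault instead.
 */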
static void user_cache_maint_handler(unsigned long esr, struct pt_regs *regs)
{
	unsigned long tagged_address, address;
	int rt = ESR_ELx_SYS64_ISS_RT(esr);
	int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
	int ret = 0;

	tagged_address = pt_regs_read_reg(regs, rt);
	address = untagged_addr(tagged_address);

	switch (crm) {
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAU:	/* DC CVAU, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAC:	/* DC CVAC, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVADP:	/* DC CVADP */
		__user_cache_maint("sys 3, c7, c13, 1", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAP:	/* DC CVAP */
		__user_cache_maint("sys 3, c7, c12, 1", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC:	/* DC CIVAC */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_IC_IVAU:	/* IC IVAU */
		__user_cache_maint("ic ivau", address, ret);
		break;
	default:
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
		return;
	}

	if (ret)
		arm64_notify_segfault(tagged_address);
	else
		arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

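/*
 * Reads of CTR_EL0 trap here when direct EL0 access is disabled
 * (SCTLR_EL1.UCT clear); return the sanitised, system-wide value rather
 * than the raw register of the current CPU.
 */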
static void ctr_read_handler(unsigned long esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);
	unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);

	if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
		/* Hide DIC so that we can trap the unnecessary maintenance...*/
		val &= ~BIT(CTR_EL0_DIC_SHIFT);

		/* ... and fake IminLine to reduce the number of traps. */
		val &= ~CTR_EL0_IminLine_MASK;
		val |= (PAGE_SHIFT - 2) & CTR_EL0_IminLine_MASK;
	}

	pt_regs_write_reg(regs, rt, val);

	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void cntvct_read_handler(unsigned long esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);

	pt_regs_write_reg(regs, rt, arch_timer_read_counter());
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void cntfrq_read_handler(unsigned long esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);

	pt_regs_write_reg(regs, rt, arch_timer_get_rate());
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void mrs_handler(unsigned long esr, struct pt_regs *regs)
{
	u32 sysreg, rt;

	rt = ESR_ELx_SYS64_ISS_RT(esr);
	sysreg = esr_sys64_to_sysreg(esr);

	if (do_emulate_mrs(regs, sysreg, rt) != 0)
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}

static void wfi_handler(unsigned long esr, struct pt_regs *regs)
{
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

struct sys64_hook {
	unsigned long esr_mask;
	unsigned long esr_val;
	void (*handler)(unsigned long esr, struct pt_regs *regs);
};

static const struct sys64_hook sys64_hooks[] = {
	{
		.esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
		.handler = user_cache_maint_handler,
	},
	{
		/* Trap read access to CTR_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ,
		.handler = ctr_read_handler,
	},
	{
		/* Trap read access to CNTVCT_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT,
		.handler = cntvct_read_handler,
	},
	{
		/* Trap read access to CNTVCTSS_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCTSS,
		.handler = cntvct_read_handler,
	},
	{
		/* Trap read access to CNTFRQ_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
		.handler = cntfrq_read_handler,
	},
	{
		/* Trap read access to CPUID registers */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL,
		.handler = mrs_handler,
	},
	{
		/* Trap WFI instructions executed in userspace */
		.esr_mask = ESR_ELx_WFx_MASK,
		.esr_val = ESR_ELx_WFx_WFI_VAL,
		.handler = wfi_handler,
	},
	{},
};

#ifdef CONFIG_COMPAT
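/*
 * A trapped AArch32 CP15 access may be conditional. The condition comes
 * from ESR_ELx.COND when ESR_ELx.CV is set, and from the IT state
 * otherwise; if it fails, the instruction is simply skipped.
 */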
static bool cp15_cond_valid(unsigned long esr, struct pt_regs *regs)
{
	int cond;

	/* Only a T32 instruction can trap without CV being set */
	if (!(esr & ESR_ELx_CV)) {
		u32 it;

		it = compat_get_it_state(regs);
		if (!it)
			return true;

		cond = it >> 4;
	} else {
		cond = (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
	}

	return aarch32_opcode_cond_checks[cond](regs->pstate);
}

static void compat_cntfrq_read_handler(unsigned long esr, struct pt_regs *regs)
{
	int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT;

	pt_regs_write_reg(regs, reg, arch_timer_get_rate());
	arm64_skip_faulting_instruction(regs, 4);
}

static const struct sys64_hook cp15_32_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
		.handler = compat_cntfrq_read_handler,
	},
	{},
};

static void compat_cntvct_read_handler(unsigned long esr, struct pt_regs *regs)
{
	int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
	int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
	u64 val = arch_timer_read_counter();

	pt_regs_write_reg(regs, rt, lower_32_bits(val));
	pt_regs_write_reg(regs, rt2, upper_32_bits(val));
	arm64_skip_faulting_instruction(regs, 4);
}

static const struct sys64_hook cp15_64_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
		.handler = compat_cntvct_read_handler,
	},
	{
		.esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCTSS,
		.handler = compat_cntvct_read_handler,
	},
	{},
};

void do_el0_cp15(unsigned long esr, struct pt_regs *regs)
{
	const struct sys64_hook *hook, *hook_base;

	if (!cp15_cond_valid(esr, regs)) {
		/*
		 * There is no T16 variant of a CP access, so we
		 * always advance PC by 4 bytes.
		 */
		arm64_skip_faulting_instruction(regs, 4);
		return;
	}

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_CP15_32:
		hook_base = cp15_32_hooks;
		break;
	case ESR_ELx_EC_CP15_64:
		hook_base = cp15_64_hooks;
		break;
	default:
		do_el0_undef(regs, esr);
		return;
	}

	for (hook = hook_base; hook->handler; hook++)
		if ((hook->esr_mask & esr) == hook->esr_val) {
			hook->handler(esr, regs);
			return;
		}

	/*
	 * New cp15 instructions may previously have been undefined at
	 * EL0. Fall back to our usual undefined instruction handler
	 * so that we handle these consistently.
	 */
	do_el0_undef(regs, esr);
}
#endif

void do_el0_sys(unsigned long esr, struct pt_regs *regs)
{
	const struct sys64_hook *hook;

	for (hook = sys64_hooks; hook->handler; hook++)
		if ((hook->esr_mask & esr) == hook->esr_val) {
			hook->handler(esr, regs);
			return;
		}

	/*
	 * New SYS instructions may previously have been undefined at EL0. Fall
	 * back to our usual undefined instruction handler so that we handle
	 * these consistently.
	 */
	do_el0_undef(regs, esr);
}

static const char *esr_class_str[] = {
	[0 ... ESR_ELx_EC_MAX]		= "UNRECOGNIZED EC",
	[ESR_ELx_EC_UNKNOWN]		= "Unknown/Uncategorized",
	[ESR_ELx_EC_WFx]		= "WFI/WFE",
	[ESR_ELx_EC_CP15_32]		= "CP15 MCR/MRC",
	[ESR_ELx_EC_CP15_64]		= "CP15 MCRR/MRRC",
	[ESR_ELx_EC_CP14_MR]		= "CP14 MCR/MRC",
	[ESR_ELx_EC_CP14_LS]		= "CP14 LDC/STC",
	[ESR_ELx_EC_FP_ASIMD]		= "ASIMD",
	[ESR_ELx_EC_CP10_ID]		= "CP10 MRC/VMRS",
	[ESR_ELx_EC_PAC]		= "PAC",
	[ESR_ELx_EC_CP14_64]		= "CP14 MCRR/MRRC",
	[ESR_ELx_EC_BTI]		= "BTI",
	[ESR_ELx_EC_ILL]		= "PSTATE.IL",
	[ESR_ELx_EC_SVC32]		= "SVC (AArch32)",
	[ESR_ELx_EC_HVC32]		= "HVC (AArch32)",
	[ESR_ELx_EC_SMC32]		= "SMC (AArch32)",
	[ESR_ELx_EC_SVC64]		= "SVC (AArch64)",
	[ESR_ELx_EC_HVC64]		= "HVC (AArch64)",
	[ESR_ELx_EC_SMC64]		= "SMC (AArch64)",
	[ESR_ELx_EC_SYS64]		= "MSR/MRS (AArch64)",
	[ESR_ELx_EC_SVE]		= "SVE",
	[ESR_ELx_EC_ERET]		= "ERET/ERETAA/ERETAB",
	[ESR_ELx_EC_FPAC]		= "FPAC",
	[ESR_ELx_EC_SME]		= "SME",
	[ESR_ELx_EC_IMP_DEF]		= "EL3 IMP DEF",
	[ESR_ELx_EC_IABT_LOW]		= "IABT (lower EL)",
	[ESR_ELx_EC_IABT_CUR]		= "IABT (current EL)",
	[ESR_ELx_EC_PC_ALIGN]		= "PC Alignment",
	[ESR_ELx_EC_DABT_LOW]		= "DABT (lower EL)",
	[ESR_ELx_EC_DABT_CUR]		= "DABT (current EL)",
	[ESR_ELx_EC_SP_ALIGN]		= "SP Alignment",
	[ESR_ELx_EC_FP_EXC32]		= "FP (AArch32)",
	[ESR_ELx_EC_FP_EXC64]		= "FP (AArch64)",
	[ESR_ELx_EC_SERROR]		= "SError",
	[ESR_ELx_EC_BREAKPT_LOW]	= "Breakpoint (lower EL)",
	[ESR_ELx_EC_BREAKPT_CUR]	= "Breakpoint (current EL)",
	[ESR_ELx_EC_SOFTSTP_LOW]	= "Software Step (lower EL)",
	[ESR_ELx_EC_SOFTSTP_CUR]	= "Software Step (current EL)",
	[ESR_ELx_EC_WATCHPT_LOW]	= "Watchpoint (lower EL)",
	[ESR_ELx_EC_WATCHPT_CUR]	= "Watchpoint (current EL)",
	[ESR_ELx_EC_BKPT32]		= "BKPT (AArch32)",
	[ESR_ELx_EC_VECTOR32]		= "Vector catch (AArch32)",
	[ESR_ELx_EC_BRK64]		= "BRK (AArch64)",
};

const char *esr_get_class_string(unsigned long esr)
{
	return esr_class_str[ESR_ELx_EC(esr)];
}

/*
 * bad_el0_sync handles unexpected, but potentially recoverable synchronous
 * exceptions taken from EL0.
 */
void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr)
{
	unsigned long pc = instruction_pointer(regs);

	current->thread.fault_address = 0;
	current->thread.fault_code = esr;

	arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc,
			      "Bad EL0 synchronous exception");
}

#ifdef CONFIG_VMAP_STACK

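/*
 * With vmap'd stacks, the entry code detects a kernel stack overflow and
 * switches to this per-CPU overflow stack so that panic_bad_stack() has
 * something to run on while it reports the overflow.
 */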
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
	__aligned(16);

void panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far)
{
	unsigned long tsk_stk = (unsigned long)current->stack;
	unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);

	console_verbose();
	pr_emerg("Insufficient stack space to handle exception!");

	pr_emerg("ESR: 0x%016lx -- %s\n", esr, esr_get_class_string(esr));
	pr_emerg("FAR: 0x%016lx\n", far);

	pr_emerg("Task stack: [0x%016lx..0x%016lx]\n",
		 tsk_stk, tsk_stk + THREAD_SIZE);
	pr_emerg("IRQ stack: [0x%016lx..0x%016lx]\n",
		 irq_stk, irq_stk + IRQ_STACK_SIZE);
	pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
		 ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

	__show_regs(regs);

	/*
	 * We use nmi_panic to limit the potential for recursive overflows, and
	 * to get a better stack trace.
	 */
	nmi_panic(NULL, "kernel stack overflow");
	cpu_park_loop();
}
#endif

void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr)
{
	console_verbose();

	pr_crit("SError Interrupt on CPU%d, code 0x%016lx -- %s\n",
		smp_processor_id(), esr, esr_get_class_string(esr));
	if (regs)
		__show_regs(regs);

	nmi_panic(regs, "Asynchronous SError Interrupt");

	cpu_park_loop();
	unreachable();
}

bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned long esr)
{
	unsigned long aet = arm64_ras_serror_get_severity(esr);

	switch (aet) {
	case ESR_ELx_AET_CE:	/* corrected error */
	case ESR_ELx_AET_UEO:	/* restartable, not yet consumed */
		/*
		 * The CPU can make progress. We may take UEO again as
		 * a more severe error.
		 */
		return false;

	case ESR_ELx_AET_UEU:	/* Uncorrected Unrecoverable */
	case ESR_ELx_AET_UER:	/* Uncorrected Recoverable */
		/*
		 * The CPU can't make progress. The exception may have
		 * been imprecise.
		 *
		 * Neoverse-N1 #1349291 means a non-KVM SError reported as
		 * Unrecoverable should be treated as Uncontainable. We
		 * call arm64_serror_panic() in both cases.
		 */
		return true;

	case ESR_ELx_AET_UC:	/* Uncontainable or Uncategorized error */
	default:
		/* Error has been silently propagated */
		arm64_serror_panic(regs, esr);
	}
}

void do_serror(struct pt_regs *regs, unsigned long esr)
{
	/* non-RAS errors are not containable */
	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
		arm64_serror_panic(regs, esr);
}

/* GENERIC_BUG traps */

int is_valid_bugaddr(unsigned long addr)
{
	/*
	 * bug_handler() is only called for BRK #BUG_BRK_IMM.
	 * So the answer is trivial -- any spurious instances with no
	 * bug table entry will be rejected by report_bug() and passed
	 * back to the debug-monitors code and handled as a fatal
	 * unexpected debug exception.
	 */
	return 1;
}

static int bug_handler(struct pt_regs *regs, unsigned long esr)
{
	switch (report_bug(regs->pc, regs)) {
	case BUG_TRAP_TYPE_BUG:
		die("Oops - BUG", regs, esr);
		break;

	case BUG_TRAP_TYPE_WARN:
		break;

	default:
		/* unknown/unrecognised bug trap type */
		return DBG_HOOK_ERROR;
	}

	/* If thread survives, skip over the BUG instruction and continue: */
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

static struct break_hook bug_break_hook = {
	.fn = bug_handler,
	.imm = BUG_BRK_IMM,
};

#ifdef CONFIG_CFI_CLANG
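/*
 * The BRK immediate emitted for a CFI failure encodes which registers hold
 * the branch target and the expected type id (the CFI_BRK_IMM_TARGET and
 * CFI_BRK_IMM_TYPE fields); recover them from the saved registers before
 * reporting the failure.
 */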
static int cfi_handler(struct pt_regs *regs, unsigned long esr)
{
	unsigned long target;
	u32 type;

	target = pt_regs_read_reg(regs, FIELD_GET(CFI_BRK_IMM_TARGET, esr));
	type = (u32)pt_regs_read_reg(regs, FIELD_GET(CFI_BRK_IMM_TYPE, esr));

	switch (report_cfi_failure(regs, regs->pc, &target, type)) {
	case BUG_TRAP_TYPE_BUG:
		die("Oops - CFI", regs, 0);
		break;

	case BUG_TRAP_TYPE_WARN:
		break;

	default:
		return DBG_HOOK_ERROR;
	}

	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

static struct break_hook cfi_break_hook = {
	.fn = cfi_handler,
	.imm = CFI_BRK_IMM_BASE,
	.mask = CFI_BRK_IMM_MASK,
};
#endif /* CONFIG_CFI_CLANG */

static int reserved_fault_handler(struct pt_regs *regs, unsigned long esr)
{
	pr_err("%s generated an invalid instruction at %pS!\n",
	       "Kernel text patching",
	       (void *)instruction_pointer(regs));

	/* We cannot handle this */
	return DBG_HOOK_ERROR;
}

static struct break_hook fault_break_hook = {
	.fn = reserved_fault_handler,
	.imm = FAULT_BRK_IMM,
};

#ifdef CONFIG_KASAN_SW_TAGS

#define KASAN_ESR_RECOVER	0x20
#define KASAN_ESR_WRITE		0x10
#define KASAN_ESR_SIZE_MASK	0x0f
#define KASAN_ESR_SIZE(esr)	(1 << ((esr) & KASAN_ESR_SIZE_MASK))

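/*
 * For software tag-based KASAN, the instrumentation reports a bad access
 * via BRK: the immediate encodes whether the access was a write, its size
 * (as a power of two) and whether execution may continue afterwards, while
 * the faulting address is passed in x0.
 */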
static int kasan_handler(struct pt_regs *regs, unsigned long esr)
{
	bool recover = esr & KASAN_ESR_RECOVER;
	bool write = esr & KASAN_ESR_WRITE;
	size_t size = KASAN_ESR_SIZE(esr);
	u64 addr = regs->regs[0];
	u64 pc = regs->pc;

	kasan_report(addr, size, write, pc);

	/*
	 * The instrumentation allows us to control whether we can proceed
	 * after a crash was detected. This is done by passing the -recover
	 * flag to the compiler. Disabling recovery allows us to generate more
	 * compact code.
	 *
	 * Unfortunately disabling recovery doesn't work for the kernel right
	 * now. KASAN reporting is disabled in some contexts (for example when
	 * the allocator accesses slab object metadata; this is controlled by
	 * current->kasan_depth). All these accesses are detected by the tool,
	 * even though the reports for them are not printed.
	 *
	 * This is something that might be fixed at some point in the future.
	 */
	if (!recover)
		die("Oops - KASAN", regs, esr);

	/* If thread survives, skip over the brk instruction and continue: */
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

static struct break_hook kasan_break_hook = {
	.fn = kasan_handler,
	.imm = KASAN_BRK_IMM,
	.mask = KASAN_BRK_MASK,
};
#endif

#ifdef CONFIG_UBSAN_TRAP
static int ubsan_handler(struct pt_regs *regs, unsigned long esr)
{
	die(report_ubsan_failure(regs, esr & UBSAN_BRK_MASK), regs, esr);
	return DBG_HOOK_HANDLED;
}

static struct break_hook ubsan_break_hook = {
	.fn = ubsan_handler,
	.imm = UBSAN_BRK_IMM,
	.mask = UBSAN_BRK_MASK,
};
#endif

#define esr_comment(esr) ((esr) & ESR_ELx_BRK64_ISS_COMMENT_MASK)

/*
 * Initial handler for AArch64 BRK exceptions
 * This handler is only used until debug_traps_init().
 */
int __init early_brk64(unsigned long addr, unsigned long esr,
		       struct pt_regs *regs)
{
#ifdef CONFIG_CFI_CLANG
	if ((esr_comment(esr) & ~CFI_BRK_IMM_MASK) == CFI_BRK_IMM_BASE)
		return cfi_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
#ifdef CONFIG_KASAN_SW_TAGS
	if ((esr_comment(esr) & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
		return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
#ifdef CONFIG_UBSAN_TRAP
	if ((esr_comment(esr) & ~UBSAN_BRK_MASK) == UBSAN_BRK_IMM)
		return ubsan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
	return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
}

void __init trap_init(void)
{
	register_kernel_break_hook(&bug_break_hook);
#ifdef CONFIG_CFI_CLANG
	register_kernel_break_hook(&cfi_break_hook);
#endif
	register_kernel_break_hook(&fault_break_hook);
#ifdef CONFIG_KASAN_SW_TAGS
	register_kernel_break_hook(&kasan_break_hook);
#endif
#ifdef CONFIG_UBSAN_TRAP
	register_kernel_break_hook(&ubsan_break_hook);
#endif
	debug_traps_init();
}