// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/extable.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/memblock.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cpu.h>
#include <asm/fpu.h>
#include <asm/loongarch.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
#include <asm/stacktrace.h>
#include <asm/tlb.h>
#include <asm/types.h>

#include "access-helper.h"

/* Low-level exception entry stubs, implemented in assembly elsewhere. */
extern asmlinkage void handle_ade(void);
extern asmlinkage void handle_ale(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_fpu(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_lbt(void);
extern asmlinkage void handle_lsx(void);
extern asmlinkage void handle_lasx(void);
extern asmlinkage void handle_reserved(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_vint(void);

/*
 * Scan the stack (starting at $sp, i.e. regs->regs[3], rounded down to a
 * 4-byte boundary) and print every word that looks like a kernel text
 * address.  Words are fetched via __get_addr() so a bad stack address is
 * reported rather than faulting.
 */
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
			   const char *loglvl, bool user)
{
	unsigned long addr;
	unsigned long *sp = (unsigned long *)(regs->regs[3] & ~3);

	printk("%sCall Trace:", loglvl);
#ifdef CONFIG_KALLSYMS
	printk("%s\n", loglvl);
#endif
	while (!kstack_end(sp)) {
		if (__get_addr(&addr, sp++, user)) {
			printk("%s (Bad stack address)", loglvl);
			break;
		}
		if (__kernel_text_address(addr))
			print_ip_sym(loglvl, addr);
	}
	printk("%s\n", loglvl);
}

/*
 * Hex-dump the raw stack contents from regs->regs[3] up to the end of the
 * current page (at most 40 words), then append a backtrace.
 */
static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs, const char *loglvl, bool user)
{
	int i;
	const int field = 2 * sizeof(unsigned long);	/* hex digits per word */
	unsigned long stackdata;
	unsigned long *sp = (unsigned long *)regs->regs[3];

	printk("%sStack :", loglvl);
	i = 0;
	/* Stop at the page boundary rather than walking off the stack. */
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0)) {
			pr_cont("\n");
			printk("%s        ", loglvl);
		}
		if (i > 39) {
			pr_cont(" ...");
			break;
		}

		if (__get_addr(&stackdata, sp++, user)) {
			pr_cont(" (Bad stack address)");
			break;
		}

		pr_cont(" %0*lx", field, stackdata);
		i++;
	}
	pr_cont("\n");
	show_backtrace(task, regs, loglvl, user);
}

/*
 * Dump a task's stack.  Builds a minimal pt_regs: either from the caller's
 * explicit @sp, from the current frame (prepare_frametrace()), or from the
 * saved thread registers of a sleeping @task.
 */
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	struct pt_regs regs;

	regs.csr_crmd = 0;
	if (sp) {
		regs.csr_era = 0;
		regs.regs[1] = 0;
		regs.regs[3] = (unsigned long)sp;
	} else {
		if (!task || task == current)
			prepare_frametrace(&regs);
		else {
			/* Sleeping task: use the context saved at switch time. */
			regs.csr_era = task->thread.reg01;
			regs.regs[1] = 0;
			regs.regs[3] = task->thread.reg03;
			regs.regs[22] = task->thread.reg22;
		}
	}

	show_stacktrace(task, &regs, loglvl, false);
}

/*
 * Print the instructions around the faulting PC: three before and six
 * after, with the faulting one bracketed as <xxxxxxxx>.
 */
static void show_code(unsigned int *pc, bool user)
{
	long i;
	unsigned int insn;

	printk("Code:");

	for(i = -3 ; i < 6 ; i++) {
		if (__get_inst(&insn, pc + i, user)) {
			pr_cont(" (Bad address in era)\n");
			break;
		}
		pr_cont("%c%08x%c", (i?' ':'<'), insn, (i?' ':'>'));
	}
	pr_cont("\n");
}

/*
 * Dump general-purpose registers and the exception-related CSRs from @regs.
 */
static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int excsubcode;
	unsigned int exccode;
	int i;

	show_regs_print_info(KERN_DEFAULT);

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d   :", i);
		pr_cont(" %0*lx", field, regs->regs[i]);

		i++;
		if ((i % 4) == 0)
			pr_cont("\n");
	}

	/*
	 * Saved csr registers
	 */
	printk("era   : %0*lx %pS\n", field, regs->csr_era,
	       (void *) regs->csr_era);
	printk("ra    : %0*lx %pS\n", field, regs->regs[1],
	       (void *) regs->regs[1]);

	printk("CSR crmd: %08lx	", regs->csr_crmd);
	printk("CSR prmd: %08lx	", regs->csr_prmd);
	printk("CSR euen: %08lx	", regs->csr_euen);
	printk("CSR ecfg: %08lx	", regs->csr_ecfg);
	printk("CSR estat: %08lx	", regs->csr_estat);

	pr_cont("\n");

	exccode = ((regs->csr_estat) & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
	excsubcode = ((regs->csr_estat) & CSR_ESTAT_ESUBCODE) >> CSR_ESTAT_ESUBCODE_SHIFT;
	printk("ExcCode : %x (SubCode %x)\n", exccode, excsubcode);

	/* BADV is only meaningful for memory-access exceptions (TLBL..ALE). */
	if (exccode >= EXCCODE_TLBL && exccode <= EXCCODE_ALE)
		printk("BadVA : %0*lx\n", field, regs->csr_badvaddr);

	printk("PrId  : %08x (%s)\n", read_cpucfg(LOONGARCH_CPUCFG0),
	       cpu_family_string());
}

/* Public register dump: registers plus a backtrace of the current context. */
void show_regs(struct pt_regs *regs)
{
	__show_regs((struct pt_regs *)regs);
	dump_stack();
}

/* Full oops-style dump: registers, modules, process info, stack and code. */
void show_registers(struct pt_regs *regs)
{
	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
	       current->comm, current->pid, current_thread_info(), current);

	show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs));
	show_code((void *)regs->csr_era, user_mode(regs));
	printk("\n");
}

/* Serializes concurrent oopses so their output doesn't interleave. */
static DEFINE_RAW_SPINLOCK(die_lock);

/*
 * Kernel oops path: notify debuggers, dump state, taint the kernel, and
 * kill the current task (or panic if in interrupt context or when
 * panic_on_oops is set).  Never returns.
 */
void __noreturn die(const char *str, struct pt_regs *regs)
{
	static int die_counter;
	int sig = SIGSEGV;

	oops_enter();

	/* A NOTIFY_STOP from the die chain suppresses the signal. */
	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
		       SIGSEGV) == NOTIFY_STOP)
		sig = 0;

	console_verbose();
	raw_spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	make_task_dead(sig);
}

/*
 * Program CSR.ECFG.VS with the vector spacing derived from @size bytes
 * per vector entry (VS = log2(size/4); only 1..7 is accepted here).
 */
static inline void setup_vint_size(unsigned int size)
{
	unsigned int vs;

	vs = ilog2(size/4);

	if (vs == 0 || vs > 7)
		panic("vint_size %d Not support yet", vs);

	csr_xchg32(vs<<CSR_ECFG_VS_SHIFT, CSR_ECFG_VS, LOONGARCH_CSR_ECFG);
}

/*
 * Send SIGFPE according to FCSR Cause bits, which must have already
 * been masked against Enable bits.  This is important as Inexact can
 * happen together with Overflow or Underflow, and `ptrace' can set
 * any bits.
 */
void force_fcsr_sig(unsigned long fcsr, void __user *fault_addr,
		     struct task_struct *tsk)
{
	int si_code = FPE_FLTUNK;

	/* Priority order: invalid > divide > overflow > underflow > inexact. */
	if (fcsr & FPU_CSR_INV_X)
		si_code = FPE_FLTINV;
	else if (fcsr & FPU_CSR_DIV_X)
		si_code = FPE_FLTDIV;
	else if (fcsr & FPU_CSR_OVF_X)
		si_code = FPE_FLTOVF;
	else if (fcsr & FPU_CSR_UDF_X)
		si_code = FPE_FLTUND;
	else if (fcsr & FPU_CSR_INE_X)
		si_code = FPE_FLTRES;

	force_sig_fault(SIGFPE, si_code, fault_addr);
}

/*
 * Deliver the signal computed by the FP emulator to the current task.
 * Returns 0 if @sig was 0 (nothing to do), 1 if a signal was sent.
 */
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcsr)
{
	int si_code;

	switch (sig) {
	case 0:
		return 0;

	case SIGFPE:
		force_fcsr_sig(fcsr, fault_addr, current);
		return 1;

	case SIGBUS:
		force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
		return 1;

	case SIGSEGV:
		/* Distinguish access error (mapped) from map error (unmapped). */
		mmap_read_lock(current->mm);
		if (vma_lookup(current->mm, (unsigned long)fault_addr))
			si_code = SEGV_ACCERR;
		else
			si_code = SEGV_MAPERR;
		mmap_read_unlock(current->mm);
		force_sig_fault(SIGSEGV, si_code, fault_addr);
		return 1;

	default:
		force_sig(sig);
		return 1;
	}
}

/*
 * Delayed fp exceptions when doing a lazy ctx switch
 */
asmlinkage void noinstr do_fpe(struct pt_regs *regs, unsigned long fcsr)
{
	int sig;
	void __user *fault_addr;
	irqentry_state_t state = irqentry_enter(regs);

	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
		       SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear FCSR.Cause before enabling interrupts */
	write_fcsr(LOONGARCH_FCSR0, fcsr & ~mask_fcsr_x(fcsr));
	local_irq_enable();

	die_if_kernel("FP exception in kernel code", regs);

	sig = SIGFPE;
	fault_addr = (void __user *) regs->csr_era;

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcsr);

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

/* Address error exception: fatal in kernel mode, SIGBUS for user mode. */
asmlinkage void noinstr do_ade(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	die_if_kernel("Kernel ade access", regs);
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)regs->csr_badvaddr);

	irqentry_exit(regs, state);
}

/* Address alignment exception: fatal in kernel mode, SIGBUS for user mode. */
asmlinkage void noinstr do_ale(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	die_if_kernel("Kernel ale access", regs);
	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);

	irqentry_exit(regs, state);
}

/*
 * Breakpoint exception.  Decodes the break code from the instruction at
 * ERA, gives kprobes/uprobes a first look via the die chain, then maps
 * well-known codes (BRK_BUG/BRK_DIVZERO/BRK_OVERFLOW) to signals.
 */
asmlinkage void noinstr do_bp(struct pt_regs *regs)
{
	bool user = user_mode(regs);
	unsigned int opcode, bcode;
	unsigned long era = exception_era(regs);
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	current->thread.trap_nr = read_csr_excode();
	if (__get_inst(&opcode, (u32 *)era, user))
		goto out_sigsegv;

	/* Low 15 bits of the break instruction carry the break code. */
	bcode = (opcode & 0x7fff);

	/*
	 * notify the kprobe handlers, if instruction is likely to
	 * pertain to them.
	 */
	switch (bcode) {
	case BRK_KPROBE_BP:
		if (notify_die(DIE_BREAK, "Kprobe", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (notify_die(DIE_SSTEPBP, "Kprobe_SingleStep", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_UPROBE_BP:
		if (notify_die(DIE_UPROBE, "Uprobe", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_UPROBE_XOLBP:
		if (notify_die(DIE_UPROBE_XOL, "Uprobe_XOL", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	default:
		if (notify_die(DIE_TRAP, "Break", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	}

	switch (bcode) {
	case BRK_BUG:
		die_if_kernel("Kernel bug detected", regs);
		force_sig(SIGTRAP);
		break;
	case BRK_DIVZERO:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->csr_era);
		break;
	case BRK_OVERFLOW:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->csr_era);
		break;
	default:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->csr_era);
		break;
	}

out:
	local_irq_disable();
	irqentry_exit(regs, state);
	return;

out_sigsegv:
	force_sig(SIGSEGV);
	goto out;
}

/* Hardware watchpoint exception: stub only at this point. */
asmlinkage void noinstr do_watch(struct pt_regs *regs)
{
	pr_warn("Hardware watch point handler not implemented!\n");
}

/*
 * Reserved/illegal instruction exception.  Fatal in kernel mode; for user
 * mode the faulting instruction is skipped (compute_return_era()) unless a
 * signal is raised, in which case ERA/RA are restored first so the signal
 * points at the offending instruction.
 */
asmlinkage void noinstr do_ri(struct pt_regs *regs)
{
	int status = -1;
	unsigned int opcode = 0;
	unsigned int __user *era = (unsigned int __user *)exception_era(regs);
	unsigned long old_era = regs->csr_era;
	unsigned long old_ra = regs->regs[1];
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	current->thread.trap_nr = read_csr_excode();

	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
		       SIGILL) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(compute_return_era(regs) < 0))
		goto out;

	if (unlikely(get_user(opcode, era) < 0)) {
		status = SIGSEGV;
		current->thread.error_code = 1;
	}

	if (status < 0)
		status = SIGILL;

	if (unlikely(status > 0)) {
		regs->csr_era = old_era;		/* Undo skip-over.  */
		regs->regs[1] = old_ra;
		force_sig(status);
	}

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

/*
 * Make the FPU usable for the current task: initialize a fresh FP context
 * on first use, otherwise reclaim ownership of the hardware FPU.
 */
static void init_restore_fp(void)
{
	if (!used_math()) {
		/* First time FP context user. */
		init_fpu();
	} else {
		/* This task has formerly used the FP context */
		if (!is_fpu_owner())
			own_fpu_inatomic(1);
	}

	BUG_ON(!is_fp_enabled());
}

/* FPU-disabled exception: lazily enable/restore the FPU for user tasks. */
asmlinkage void noinstr do_fpu(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	die_if_kernel("do_fpu invoked from kernel context!", regs);

	/* FPU ownership must not migrate CPUs mid-restore. */
	preempt_disable();
	init_restore_fp();
	preempt_enable();

	local_irq_disable();
	irqentry_exit(regs, state);
}

/* LSX-disabled exception: LSX is not supported here, so raise SIGILL. */
asmlinkage void noinstr do_lsx(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	force_sig(SIGILL);
	local_irq_disable();

	irqentry_exit(regs, state);
}

/* LASX-disabled exception: LASX is not supported here, so raise SIGILL. */
asmlinkage void noinstr do_lasx(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	force_sig(SIGILL);
	local_irq_disable();

	irqentry_exit(regs, state);
}
/* LBT-disabled exception: binary translation is not supported, raise SIGILL. */
asmlinkage void noinstr do_lbt(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	force_sig(SIGILL);
	local_irq_disable();

	irqentry_exit(regs, state);
}

/* Fallback handler for exception codes that have no dedicated handler. */
asmlinkage void noinstr do_reserved(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	/*
	 * Game over - no way to handle this if it ever occurs. Most probably
	 * caused by a fatal error after another hardware/software error.
	 */
	pr_err("Caught reserved exception %u on pid:%d [%s] - should not happen\n",
		read_csr_excode(), current->pid, current->comm);
	die_if_kernel("do_reserved exception", regs);
	force_sig(SIGUNUSED);

	local_irq_disable();

	irqentry_exit(regs, state);
}

/* Machine/cache error exception: unrecoverable, dump MERR CSRs and panic. */
asmlinkage void cache_parity_error(void)
{
	/* For the moment, report the problem and hang. */
	pr_err("Cache error exception:\n");
	pr_err("csr_merrctl == %08x\n", csr_read32(LOONGARCH_CSR_MERRCTL));
	pr_err("csr_merrera == %016llx\n", csr_read64(LOONGARCH_CSR_MERRERA));
	panic("Can't handle the cache error!");
}

/*
 * Common interrupt dispatch: bracket the arch IRQ handler with RCU irq
 * entry/exit and swap the per-CPU irq_regs pointer around the call.
 */
asmlinkage void noinstr handle_loongarch_irq(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	irq_enter_rcu();
	old_regs = set_irq_regs(regs);
	handle_arch_irq(regs);
	set_irq_regs(old_regs);
	irq_exit_rcu();
}

/*
 * Vectored interrupt entry.  If we are not already on the per-CPU IRQ
 * stack, switch to it (saving the task's sp at its top for the unwinder)
 * before dispatching, then switch back.
 */
asmlinkage void noinstr do_vint(struct pt_regs *regs, unsigned long sp)
{
	register int cpu;
	register unsigned long stack;
	irqentry_state_t state = irqentry_enter(regs);

	cpu = smp_processor_id();

	if (on_irq_stack(cpu, sp))
		handle_loongarch_irq(regs);
	else {
		stack = per_cpu(irq_stack, cpu) + IRQ_STACK_START;

		/* Save task's sp on IRQ stack for unwinding */
		*(unsigned long *)stack = sp;

		__asm__ __volatile__(
		"move	$s0, $sp		\n" /* Preserve sp */
		"move	$sp, %[stk]		\n" /* Switch stack */
		"move	$a0, %[regs]		\n"
		"bl	handle_loongarch_irq	\n"
		"move	$sp, $s0		\n" /* Restore sp */
		: /* No outputs */
		: [stk] "r" (stack), [regs] "r" (regs)
		: "$a0", "$a1", "$a2", "$a3", "$a4", "$a5", "$a6", "$a7", "$s0",
		  "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8",
		  "memory");
	}

	irqentry_exit(regs, state);
}

extern void tlb_init(void);
extern void cache_error_setup(void);

/* Base addresses of the exception and TLB-refill vector tables. */
unsigned long eentry;
unsigned long tlbrentry;

/* Backing storage for 128 vectors of VECSIZE bytes, 64 KiB aligned. */
long exception_handlers[VECSIZE * 128 / sizeof(long)] __aligned(SZ_64K);

/*
 * Point the exception, machine-error and TLB-refill entry CSRs at the
 * exception_handlers table (the TLB-refill vector lives at slot 80).
 */
static void configure_exception_vector(void)
{
	eentry    = (unsigned long)exception_handlers;
	tlbrentry = (unsigned long)exception_handlers + 80*VECSIZE;

	csr_write64(eentry, LOONGARCH_CSR_EENTRY);
	csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
	csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
}

/*
 * Per-CPU trap setup: program vector size and entry CSRs, seed the ASID
 * cache, attach the CPU to init_mm, and (on the boot CPU only) fill every
 * vector slot with handle_reserved before trap_init() installs the real
 * handlers.
 */
void per_cpu_trap_init(int cpu)
{
	unsigned int i;

	setup_vint_size(VECSIZE);

	configure_exception_vector();

	if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = asid_first_version(cpu);

	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	/* Initialise exception handlers */
	if (cpu == 0)
		for (i = 0; i < 64; i++)
			set_handler(i * VECSIZE, handle_reserved, VECSIZE);

	tlb_init();
	cpu_cache_init();
}

/* Install CPU exception handler */
void set_handler(unsigned long offset, void *addr, unsigned long size)
{
	memcpy((void *)(eentry + offset), addr, size);
	/* Copied code must be visible to instruction fetch. */
	local_flush_icache_range(eentry + offset, eentry + offset + size);
}

static const char panic_null_cerr[] =
	"Trying to set NULL cache error exception handler\n";

/*
 * Install uncached CPU exception handler.
 * This is suitable only for the cache error exception which is the only
 * exception handler that is being run uncached.
 */
void set_merr_handler(unsigned long offset, void *addr, unsigned long size)
{
	unsigned long uncached_eentry = TO_UNCACHE(__pa(eentry));

	if (!addr)
		panic(panic_null_cerr);

	memcpy((void *)(uncached_eentry + offset), addr, size);
}

/*
 * Boot-time trap table population: install the interrupt vector stub for
 * all interrupt exception codes and the dedicated handlers for each known
 * exception code, then set up cache-error handling and flush the icache.
 */
void __init trap_init(void)
{
	long i;

	/* Set interrupt vector handler */
	for (i = EXCCODE_INT_START; i < EXCCODE_INT_END; i++)
		set_handler(i * VECSIZE, handle_vint, VECSIZE);

	set_handler(EXCCODE_ADE * VECSIZE, handle_ade, VECSIZE);
	set_handler(EXCCODE_ALE * VECSIZE, handle_ale, VECSIZE);
	set_handler(EXCCODE_SYS * VECSIZE, handle_sys, VECSIZE);
	set_handler(EXCCODE_BP * VECSIZE, handle_bp, VECSIZE);
	set_handler(EXCCODE_INE * VECSIZE, handle_ri, VECSIZE);
	set_handler(EXCCODE_IPE * VECSIZE, handle_ri, VECSIZE);
	set_handler(EXCCODE_FPDIS * VECSIZE, handle_fpu, VECSIZE);
	set_handler(EXCCODE_LSXDIS * VECSIZE, handle_lsx, VECSIZE);
	set_handler(EXCCODE_LASXDIS * VECSIZE, handle_lasx, VECSIZE);
	set_handler(EXCCODE_FPE * VECSIZE, handle_fpe, VECSIZE);
	set_handler(EXCCODE_BTDIS * VECSIZE, handle_lbt, VECSIZE);
	set_handler(EXCCODE_WATCH * VECSIZE, handle_watch, VECSIZE);

	cache_error_setup();

	local_flush_icache_range(eentry, eentry + 0x400);
}