xref: /openbmc/linux/arch/loongarch/kernel/traps.c (revision e50e86dbcabda570fc8a1435fe2fca97e9ab7312)
// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/extable.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/memblock.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cpu.h>
#include <asm/exception.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/inst.h>
#include <asm/kgdb.h>
#include <asm/loongarch.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
#include <asm/stacktrace.h>
#include <asm/tlb.h>
#include <asm/types.h>
#include <asm/unwind.h>
#include <asm/uprobes.h>

#include "access-helper.h"

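/*
 * Walk the call chain with the arch unwinder, printing one symbolized
 * line per return address. @regs supplies the starting frame; a NULL
 * @task means the current task.
 */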
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
			   const char *loglvl, bool user)
{
	unsigned long addr;
	struct unwind_state state;
	struct pt_regs *pregs = (struct pt_regs *)regs;

	if (!task)
		task = current;

	printk("%sCall Trace:", loglvl);
	for (unwind_start(&state, task, pregs);
	      !unwind_done(&state); unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		print_ip_sym(loglvl, addr);
	}
	printk("%s\n", loglvl);
}

static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs, const char *loglvl, bool user)
{
	int i;
	const int field = 2 * sizeof(unsigned long);
	unsigned long stackdata;
	unsigned long *sp = (unsigned long *)regs->regs[3];

	printk("%sStack :", loglvl);
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0)) {
			pr_cont("\n");
			printk("%s       ", loglvl);
		}
		if (i > 39) {
			pr_cont(" ...");
			break;
		}

		if (__get_addr(&stackdata, sp++, user)) {
			pr_cont(" (Bad stack address)");
			break;
		}

		pr_cont(" %0*lx", field, stackdata);
		i++;
	}
	pr_cont("\n");
	show_backtrace(task, regs, loglvl, user);
}

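/*
 * Build a minimal pt_regs just for unwinding: from an explicit stack
 * pointer if one is given, from the live context for the current task
 * (prepare_frametrace), or from the saved thread registers of a
 * sleeping task.
 */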
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	struct pt_regs regs;

	regs.csr_crmd = 0;
	if (sp) {
		regs.csr_era = 0;
		regs.regs[1] = 0;
		regs.regs[3] = (unsigned long)sp;
	} else {
		if (!task || task == current)
			prepare_frametrace(&regs);
		else {
			regs.csr_era = task->thread.reg01;
			regs.regs[1] = 0;
			regs.regs[3] = task->thread.reg03;
			regs.regs[22] = task->thread.reg22;
		}
	}

	show_stacktrace(task, &regs, loglvl, false);
}

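/*
 * Dump the instruction words surrounding the faulting pc (pc - 3 up
 * to pc + 5); the word at pc itself is printed between angle brackets.
 */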
static void show_code(unsigned int *pc, bool user)
{
	long i;
	unsigned int insn;

	printk("Code:");

	for (i = -3; i < 6; i++) {
		if (__get_inst(&insn, pc + i, user)) {
			pr_cont(" (Bad address in era)\n");
			break;
		}
		pr_cont("%c%08x%c", (i ? ' ' : '<'), insn, (i ? ' ' : '>'));
	}
	pr_cont("\n");
}

static void print_bool_fragment(const char *key, unsigned long val, bool first)
{
	/* e.g. "+PG", "-DA" */
	pr_cont("%s%c%s", first ? "" : " ", val ? '+' : '-', key);
}

static void print_plv_fragment(const char *key, int val)
{
	/* e.g. "PLV0", "PPLV3" */
	pr_cont("%s%d", key, val);
}

static void print_memory_type_fragment(const char *key, unsigned long val)
{
	const char *humanized_type;

	switch (val) {
	case 0:
		humanized_type = "SUC";
		break;
	case 1:
		humanized_type = "CC";
		break;
	case 2:
		humanized_type = "WUC";
		break;
	default:
		pr_cont(" %s=Reserved(%lu)", key, val);
		return;
	}

	/* e.g. " DATM=WUC" */
	pr_cont(" %s=%s", key, humanized_type);
}

static void print_intr_fragment(const char *key, unsigned long val)
{
	/* e.g. "LIE=0-1,3,5-7" */
	pr_cont("%s=%*pbl", key, EXCCODE_INT_NUM, &val);
}

static void print_crmd(unsigned long x)
{
	printk(" CRMD: %08lx (", x);
	print_plv_fragment("PLV", (int) FIELD_GET(CSR_CRMD_PLV, x));
	print_bool_fragment("IE", FIELD_GET(CSR_CRMD_IE, x), false);
	print_bool_fragment("DA", FIELD_GET(CSR_CRMD_DA, x), false);
	print_bool_fragment("PG", FIELD_GET(CSR_CRMD_PG, x), false);
	print_memory_type_fragment("DACF", FIELD_GET(CSR_CRMD_DACF, x));
	print_memory_type_fragment("DACM", FIELD_GET(CSR_CRMD_DACM, x));
	print_bool_fragment("WE", FIELD_GET(CSR_CRMD_WE, x), false);
	pr_cont(")\n");
}

static void print_prmd(unsigned long x)
{
	printk(" PRMD: %08lx (", x);
	print_plv_fragment("PPLV", (int) FIELD_GET(CSR_PRMD_PPLV, x));
	print_bool_fragment("PIE", FIELD_GET(CSR_PRMD_PIE, x), false);
	print_bool_fragment("PWE", FIELD_GET(CSR_PRMD_PWE, x), false);
	pr_cont(")\n");
}

static void print_euen(unsigned long x)
{
	printk(" EUEN: %08lx (", x);
	print_bool_fragment("FPE", FIELD_GET(CSR_EUEN_FPEN, x), true);
	print_bool_fragment("SXE", FIELD_GET(CSR_EUEN_LSXEN, x), false);
	print_bool_fragment("ASXE", FIELD_GET(CSR_EUEN_LASXEN, x), false);
	print_bool_fragment("BTE", FIELD_GET(CSR_EUEN_LBTEN, x), false);
	pr_cont(")\n");
}

static void print_ecfg(unsigned long x)
{
	printk(" ECFG: %08lx (", x);
	print_intr_fragment("LIE", FIELD_GET(CSR_ECFG_IM, x));
	pr_cont(" VS=%d)\n", (int) FIELD_GET(CSR_ECFG_VS, x));
}

static const char *humanize_exc_name(unsigned int ecode, unsigned int esubcode)
{
	/*
	 * LoongArch users and developers are probably more familiar with
	 * those names found in the ISA manual, so we are going to print out
	 * the latter. This will require some mapping.
	 */
	switch (ecode) {
	case EXCCODE_RSV: return "INT";
	case EXCCODE_TLBL: return "PIL";
	case EXCCODE_TLBS: return "PIS";
	case EXCCODE_TLBI: return "PIF";
	case EXCCODE_TLBM: return "PME";
	case EXCCODE_TLBNR: return "PNR";
	case EXCCODE_TLBNX: return "PNX";
	case EXCCODE_TLBPE: return "PPI";
	case EXCCODE_ADE:
		switch (esubcode) {
		case EXSUBCODE_ADEF: return "ADEF";
		case EXSUBCODE_ADEM: return "ADEM";
		}
		break;
	case EXCCODE_ALE: return "ALE";
	case EXCCODE_BCE: return "BCE";
	case EXCCODE_SYS: return "SYS";
	case EXCCODE_BP: return "BRK";
	case EXCCODE_INE: return "INE";
	case EXCCODE_IPE: return "IPE";
	case EXCCODE_FPDIS: return "FPD";
	case EXCCODE_LSXDIS: return "SXD";
	case EXCCODE_LASXDIS: return "ASXD";
	case EXCCODE_FPE:
		switch (esubcode) {
		case EXCSUBCODE_FPE: return "FPE";
		case EXCSUBCODE_VFPE: return "VFPE";
		}
		break;
	case EXCCODE_WATCH:
		switch (esubcode) {
		case EXCSUBCODE_WPEF: return "WPEF";
		case EXCSUBCODE_WPEM: return "WPEM";
		}
		break;
	case EXCCODE_BTDIS: return "BTD";
	case EXCCODE_BTE: return "BTE";
	case EXCCODE_GSPR: return "GSPR";
	case EXCCODE_HVC: return "HVC";
	case EXCCODE_GCM:
		switch (esubcode) {
		case EXCSUBCODE_GCSC: return "GCSC";
		case EXCSUBCODE_GCHC: return "GCHC";
		}
		break;
	/*
	 * The manual does not mention the EXCCODE_SE case, but print it
	 * out nevertheless.
	 */
	case EXCCODE_SE: return "SE";
	}

	return "???";
}

static void print_estat(unsigned long x)
{
	unsigned int ecode = FIELD_GET(CSR_ESTAT_EXC, x);
	unsigned int esubcode = FIELD_GET(CSR_ESTAT_ESUBCODE, x);

	printk("ESTAT: %08lx [%s] (", x, humanize_exc_name(ecode, esubcode));
	print_intr_fragment("IS", FIELD_GET(CSR_ESTAT_IS, x));
	pr_cont(" ECode=%d EsubCode=%d)\n", (int) ecode, (int) esubcode);
}

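/*
 * Dump the GPRs under their ABI names, as laid out in the printks
 * below: r1-r3 are ra/tp/sp, r4-r11 are a0-a7, r12-r20 are t0-t8,
 * r21 is u0, r22 is s9 and r23-r31 are s0-s8. The save slot of $zero
 * is reused as the syscall restart flag, hence the extra line printed
 * when it is non-zero.
 */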
static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int exccode = FIELD_GET(CSR_ESTAT_EXC, regs->csr_estat);

	show_regs_print_info(KERN_DEFAULT);

	/* Print saved GPRs except $zero (substituting with PC/ERA) */
#define GPR_FIELD(x) field, regs->regs[x]
	printk("pc %0*lx ra %0*lx tp %0*lx sp %0*lx\n",
	       field, regs->csr_era, GPR_FIELD(1), GPR_FIELD(2), GPR_FIELD(3));
	printk("a0 %0*lx a1 %0*lx a2 %0*lx a3 %0*lx\n",
	       GPR_FIELD(4), GPR_FIELD(5), GPR_FIELD(6), GPR_FIELD(7));
	printk("a4 %0*lx a5 %0*lx a6 %0*lx a7 %0*lx\n",
	       GPR_FIELD(8), GPR_FIELD(9), GPR_FIELD(10), GPR_FIELD(11));
	printk("t0 %0*lx t1 %0*lx t2 %0*lx t3 %0*lx\n",
	       GPR_FIELD(12), GPR_FIELD(13), GPR_FIELD(14), GPR_FIELD(15));
	printk("t4 %0*lx t5 %0*lx t6 %0*lx t7 %0*lx\n",
	       GPR_FIELD(16), GPR_FIELD(17), GPR_FIELD(18), GPR_FIELD(19));
	printk("t8 %0*lx u0 %0*lx s9 %0*lx s0 %0*lx\n",
	       GPR_FIELD(20), GPR_FIELD(21), GPR_FIELD(22), GPR_FIELD(23));
	printk("s1 %0*lx s2 %0*lx s3 %0*lx s4 %0*lx\n",
	       GPR_FIELD(24), GPR_FIELD(25), GPR_FIELD(26), GPR_FIELD(27));
	printk("s5 %0*lx s6 %0*lx s7 %0*lx s8 %0*lx\n",
	       GPR_FIELD(28), GPR_FIELD(29), GPR_FIELD(30), GPR_FIELD(31));

	/* The slot for $zero is reused as the syscall restart flag */
	if (regs->regs[0])
		printk("syscall restart flag: %0*lx\n", GPR_FIELD(0));

	if (user_mode(regs)) {
		printk("   ra: %0*lx\n", GPR_FIELD(1));
		printk("  ERA: %0*lx\n", field, regs->csr_era);
	} else {
		printk("   ra: %0*lx %pS\n", GPR_FIELD(1), (void *) regs->regs[1]);
		printk("  ERA: %0*lx %pS\n", field, regs->csr_era, (void *) regs->csr_era);
	}
#undef GPR_FIELD

	/* Print saved important CSRs */
	print_crmd(regs->csr_crmd);
	print_prmd(regs->csr_prmd);
	print_euen(regs->csr_euen);
	print_ecfg(regs->csr_ecfg);
	print_estat(regs->csr_estat);

	if (exccode >= EXCCODE_TLBL && exccode <= EXCCODE_ALE)
		printk(" BADV: %0*lx\n", field, regs->csr_badvaddr);

	printk(" PRID: %08x (%s, %s)\n", read_cpucfg(LOONGARCH_CPUCFG0),
	       cpu_family_string(), cpu_full_name_string());
}

void show_regs(struct pt_regs *regs)
{
	__show_regs((struct pt_regs *)regs);
	dump_stack();
}

void show_registers(struct pt_regs *regs)
{
	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
	       current->comm, current->pid, current_thread_info(), current);

	show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs));
	show_code((void *)regs->csr_era, user_mode(regs));
	printk("\n");
}

static DEFINE_RAW_SPINLOCK(die_lock);

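/*
 * die() serializes oopses on die_lock, gives DIE_OOPS notifiers a
 * chance to veto the kill (NOTIFY_STOP), and only then decides between
 * crash_kexec(), panic() and make_task_dead().
 */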
void die(const char *str, struct pt_regs *regs)
{
	int ret;
	static int die_counter;

	oops_enter();

	ret = notify_die(DIE_OOPS, str, regs, 0,
			 current->thread.trap_nr, SIGSEGV);

	console_verbose();
	raw_spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	if (ret == NOTIFY_STOP)
		return;

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	make_task_dead(SIGSEGV);
}

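/*
 * ECFG.VS selects the spacing between exception vector entries: each
 * entry spans 2^VS instructions, so a vector slot of @size bytes needs
 * VS = log2(size / 4). VS = 0 (all exceptions sharing a single entry)
 * is rejected here because the handlers are installed at separate slots.
 */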
static inline void setup_vint_size(unsigned int size)
{
	unsigned int vs;

	vs = ilog2(size/4);

	if (vs == 0 || vs > 7)
		panic("vint_size %d not supported yet", vs);

	csr_xchg32(vs << CSR_ECFG_VS_SHIFT, CSR_ECFG_VS, LOONGARCH_CSR_ECFG);
}

/*
 * Send SIGFPE according to FCSR Cause bits, which must have already
 * been masked against Enable bits.  This is important as Inexact can
 * happen together with Overflow or Underflow, and `ptrace' can set
 * any bits.
 */
static void force_fcsr_sig(unsigned long fcsr,
			void __user *fault_addr, struct task_struct *tsk)
{
	int si_code = FPE_FLTUNK;

	if (fcsr & FPU_CSR_INV_X)
		si_code = FPE_FLTINV;
	else if (fcsr & FPU_CSR_DIV_X)
		si_code = FPE_FLTDIV;
	else if (fcsr & FPU_CSR_OVF_X)
		si_code = FPE_FLTOVF;
	else if (fcsr & FPU_CSR_UDF_X)
		si_code = FPE_FLTUND;
	else if (fcsr & FPU_CSR_INE_X)
		si_code = FPE_FLTRES;

	force_sig_fault(SIGFPE, si_code, fault_addr);
}

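/*
 * Turn an FP-emulation verdict into a signal. Returns 0 when no signal
 * was needed, 1 once a signal has been forced on the current task.
 */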
static int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcsr)
{
	int si_code;

	switch (sig) {
	case 0:
		return 0;

	case SIGFPE:
		force_fcsr_sig(fcsr, fault_addr, current);
		return 1;

	case SIGBUS:
		force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
		return 1;

	case SIGSEGV:
		mmap_read_lock(current->mm);
		if (vma_lookup(current->mm, (unsigned long)fault_addr))
			si_code = SEGV_ACCERR;
		else
			si_code = SEGV_MAPERR;
		mmap_read_unlock(current->mm);
		force_sig_fault(SIGSEGV, si_code, fault_addr);
		return 1;

	default:
		force_sig(sig);
		return 1;
	}
}

/*
 * Delayed fp exceptions when doing a lazy ctx switch
 */
asmlinkage void noinstr do_fpe(struct pt_regs *regs, unsigned long fcsr)
{
	int sig;
	void __user *fault_addr;
	irqentry_state_t state = irqentry_enter(regs);

	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
		       SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear FCSR.Cause before enabling interrupts */
	write_fcsr(LOONGARCH_FCSR0, fcsr & ~mask_fcsr_x(fcsr));
	local_irq_enable();

	die_if_kernel("FP exception in kernel code", regs);

	sig = SIGFPE;
	fault_addr = (void __user *) regs->csr_era;

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcsr);

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_ade(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	die_if_kernel("Kernel ade access", regs);
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)regs->csr_badvaddr);

	irqentry_exit(regs, state);
}

/* sysctl hooks */
int unaligned_enabled __read_mostly = 1;	/* Enabled by default */
int no_unaligned_warning __read_mostly = 1;	/* Only 1 warning by default */

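/*
 * Without CONFIG_ARCH_STRICT_ALIGN the hardware is assumed to handle
 * unaligned accesses itself, so any ALE is fatal in the kernel and
 * SIGBUS in userspace. With it, the access is emulated in software,
 * subject to the two sysctl knobs above.
 */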
asmlinkage void noinstr do_ale(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

#ifndef CONFIG_ARCH_STRICT_ALIGN
	die_if_kernel("Kernel ale access", regs);
	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
#else
	unsigned int *pc;

	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);

	/*
	 * Did we catch a fault trying to load an instruction?
	 */
	if (regs->csr_badvaddr == regs->csr_era)
		goto sigbus;
	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
		goto sigbus;
	if (!unaligned_enabled)
		goto sigbus;
	if (!no_unaligned_warning)
		show_registers(regs);

	pc = (unsigned int *)exception_era(regs);

	emulate_load_store_insn(regs, (void __user *)regs->csr_badvaddr, pc);

	goto out;

sigbus:
	die_if_kernel("Kernel ale access", regs);
	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
out:
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_disable();
#endif
	irqentry_exit(regs, state);
}

#ifdef CONFIG_GENERIC_BUG
int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_GENERIC_BUG */

static void bug_handler(struct pt_regs *regs)
{
	switch (report_bug(regs->csr_era, regs)) {
	case BUG_TRAP_TYPE_BUG:
	case BUG_TRAP_TYPE_NONE:
		die_if_kernel("Oops - BUG", regs);
		force_sig(SIGTRAP);
		break;

	case BUG_TRAP_TYPE_WARN:
		/* Skip the BUG instruction and continue */
		regs->csr_era += LOONGARCH_INSN_SIZE;
		break;
	}
}

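/*
 * A bounds-check error reports only the exception itself, so the
 * failing address and the violated bound are recovered by decoding the
 * asrt{le,gt} or bounded load/store instruction at ERA and reading the
 * registers it names, before raising SIGSEGV with BNDERR metadata.
 */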
asmlinkage void noinstr do_bce(struct pt_regs *regs)
{
	bool user = user_mode(regs);
	unsigned long era = exception_era(regs);
	u64 badv = 0, lower = 0, upper = ULONG_MAX;
	union loongarch_instruction insn;
	irqentry_state_t state = irqentry_enter(regs);

	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_enable();

	current->thread.trap_nr = read_csr_excode();

	die_if_kernel("Bounds check error in kernel code", regs);

	/*
	 * Pull out the address that failed bounds checking, and the lower /
	 * upper bound, by minimally looking at the faulting instruction word
	 * and reading from the correct register.
	 */
	if (__get_inst(&insn.word, (u32 *)era, user))
		goto bad_era;

	switch (insn.reg3_format.opcode) {
	case asrtle_op:
		if (insn.reg3_format.rd != 0)
			break;	/* not asrtle */
		badv = regs->regs[insn.reg3_format.rj];
		upper = regs->regs[insn.reg3_format.rk];
		break;

	case asrtgt_op:
		if (insn.reg3_format.rd != 0)
			break;	/* not asrtgt */
		badv = regs->regs[insn.reg3_format.rj];
		lower = regs->regs[insn.reg3_format.rk];
		break;

	case ldleb_op:
	case ldleh_op:
	case ldlew_op:
	case ldled_op:
	case stleb_op:
	case stleh_op:
	case stlew_op:
	case stled_op:
	case fldles_op:
	case fldled_op:
	case fstles_op:
	case fstled_op:
		badv = regs->regs[insn.reg3_format.rj];
		upper = regs->regs[insn.reg3_format.rk];
		break;

	case ldgtb_op:
	case ldgth_op:
	case ldgtw_op:
	case ldgtd_op:
	case stgtb_op:
	case stgth_op:
	case stgtw_op:
	case stgtd_op:
	case fldgts_op:
	case fldgtd_op:
	case fstgts_op:
	case fstgtd_op:
		badv = regs->regs[insn.reg3_format.rj];
		lower = regs->regs[insn.reg3_format.rk];
		break;
	}

	force_sig_bnderr((void __user *)badv, (void __user *)lower, (void __user *)upper);

out:
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_disable();

	irqentry_exit(regs, state);
	return;

bad_era:
	/*
	 * Cannot pull out the instruction word, hence cannot provide more
	 * info than a regular SIGSEGV in this case.
	 */
	force_sig(SIGSEGV);
	goto out;
}

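/*
 * The break code is the low 15 bits of the trapping instruction.
 * Debugger-owned codes (kgdb, kprobes, uprobes) are offered to their
 * handlers first; anything unclaimed falls through to the
 * BUG/DIVZERO/OVERFLOW/SIGTRAP dispatch below.
 */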
asmlinkage void noinstr do_bp(struct pt_regs *regs)
{
	bool user = user_mode(regs);
	unsigned int opcode, bcode;
	unsigned long era = exception_era(regs);
	irqentry_state_t state = irqentry_enter(regs);

	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_enable();

	if (__get_inst(&opcode, (u32 *)era, user))
		goto out_sigsegv;

	bcode = (opcode & 0x7fff);

	/*
	 * Notify the kprobe handlers first if the instruction is likely
	 * to pertain to them.
	 */
	switch (bcode) {
	case BRK_KDB:
		if (kgdb_breakpoint_handler(regs))
			goto out;
		else
			break;
	case BRK_KPROBE_BP:
		if (kprobe_breakpoint_handler(regs))
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (kprobe_singlestep_handler(regs))
			goto out;
		else
			break;
	case BRK_UPROBE_BP:
		if (uprobe_breakpoint_handler(regs))
			goto out;
		else
			break;
	case BRK_UPROBE_XOLBP:
		if (uprobe_singlestep_handler(regs))
			goto out;
		else
			break;
	default:
		current->thread.trap_nr = read_csr_excode();
		if (notify_die(DIE_TRAP, "Break", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	}

	switch (bcode) {
	case BRK_BUG:
		bug_handler(regs);
		break;
	case BRK_DIVZERO:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->csr_era);
		break;
	case BRK_OVERFLOW:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->csr_era);
		break;
	default:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->csr_era);
		break;
	}

out:
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_disable();

	irqentry_exit(regs, state);
	return;

out_sigsegv:
	force_sig(SIGSEGV);
	goto out;
}

asmlinkage void noinstr do_watch(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

#ifndef CONFIG_HAVE_HW_BREAKPOINT
	pr_warn("Hardware watch point handler not implemented!\n");
#else
	if (kgdb_breakpoint_handler(regs))
		goto out;

	if (test_tsk_thread_flag(current, TIF_SINGLESTEP)) {
		int llbit = (csr_read32(LOONGARCH_CSR_LLBCTL) & 0x1);
		unsigned long pc = instruction_pointer(regs);
		union loongarch_instruction *ip = (union loongarch_instruction *)pc;

		if (llbit) {
			/*
			 * When an ll-sc combo is encountered, it is regarded as a
			 * single instruction, so don't clear llbit or reset
			 * CSR.FWPS.Skip until the ll-sc execution is completed.
			 */
			csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS);
			csr_write32(CSR_LLBCTL_KLO, LOONGARCH_CSR_LLBCTL);
			goto out;
		}

		if (pc == current->thread.single_step) {
			/*
			 * Certain instructions are occasionally not skipped even when
			 * CSR.FWPS.Skip is set, such as fld.d/fst.d, so single-stepping
			 * needs to compare csr_era with the single_step value recorded
			 * the last time it was set.
			 */
			if (!is_self_loop_ins(ip, regs)) {
				/*
				 * Check whether the instruction's target pc is equal
				 * to the current pc; if it is, do not set the
				 * CSR.FWPS.Skip bit, as that would break the original
				 * instruction stream.
				 */
				csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS);
				goto out;
			}
		}
	} else {
		breakpoint_handler(regs);
		watchpoint_handler(regs);
	}

	force_sig(SIGTRAP);
out:
#endif
	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_ri(struct pt_regs *regs)
{
	int status = SIGILL;
	unsigned int __maybe_unused opcode;
	unsigned int __user *era = (unsigned int __user *)exception_era(regs);
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	current->thread.trap_nr = read_csr_excode();

	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
		       SIGILL) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(get_user(opcode, era) < 0)) {
		status = SIGSEGV;
		current->thread.error_code = 1;
	}

	force_sig(status);

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

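/*
 * Lazy FP context handling: the first use of FP initializes a fresh
 * context; afterwards the hardware registers only need to be reclaimed
 * if another task owned them in between.
 */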
static void init_restore_fp(void)
{
	if (!used_math()) {
		/* First time FP context user. */
		init_fpu();
	} else {
		/* This task has formerly used the FP context */
		if (!is_fpu_owner())
			own_fpu_inatomic(1);
	}

	BUG_ON(!is_fp_enabled());
}

static void init_restore_lsx(void)
{
	enable_lsx();

	if (!thread_lsx_context_live()) {
		/* First time LSX context user */
		init_restore_fp();
		init_lsx_upper();
		set_thread_flag(TIF_LSX_CTX_LIVE);
	} else {
		if (!is_simd_owner()) {
			if (is_fpu_owner()) {
				restore_lsx_upper(current);
			} else {
				__own_fpu();
				restore_lsx(current);
			}
		}
	}

	set_thread_flag(TIF_USEDSIMD);

	BUG_ON(!is_fp_enabled());
	BUG_ON(!is_lsx_enabled());
}

static void init_restore_lasx(void)
{
	enable_lasx();

	if (!thread_lasx_context_live()) {
		/* First time LASX context user */
		init_restore_lsx();
		init_lasx_upper();
		set_thread_flag(TIF_LASX_CTX_LIVE);
	} else {
		if (is_fpu_owner() || is_simd_owner()) {
			init_restore_lsx();
			restore_lasx_upper(current);
		} else {
			__own_fpu();
			enable_lsx();
			restore_lasx(current);
		}
	}

	set_thread_flag(TIF_USEDSIMD);

	BUG_ON(!is_fp_enabled());
	BUG_ON(!is_lsx_enabled());
	BUG_ON(!is_lasx_enabled());
}

asmlinkage void noinstr do_fpu(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	die_if_kernel("do_fpu invoked from kernel context!", regs);
	BUG_ON(is_lsx_enabled());
	BUG_ON(is_lasx_enabled());

	preempt_disable();
	init_restore_fp();
	preempt_enable();

	local_irq_disable();
	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_lsx(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	if (!cpu_has_lsx) {
		force_sig(SIGILL);
		goto out;
	}

	die_if_kernel("do_lsx invoked from kernel context!", regs);
	BUG_ON(is_lasx_enabled());

	preempt_disable();
	init_restore_lsx();
	preempt_enable();

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_lasx(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	if (!cpu_has_lasx) {
		force_sig(SIGILL);
		goto out;
	}

	die_if_kernel("do_lasx invoked from kernel context!", regs);

	preempt_disable();
	init_restore_lasx();
	preempt_enable();

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

static void init_restore_lbt(void)
{
	if (!thread_lbt_context_live()) {
		/* First time LBT context user */
		init_lbt();
		set_thread_flag(TIF_LBT_CTX_LIVE);
	} else {
		if (!is_lbt_owner())
			own_lbt_inatomic(1);
	}

	BUG_ON(!is_lbt_enabled());
}

asmlinkage void noinstr do_lbt(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	/*
	 * BTD (Binary Translation Disable exception) can be triggered
	 * during FP save/restore if TM (Top Mode) is on, which may
	 * cause irq_enable during 'switch_to'. To avoid this situation
	 * (including the user using 'MOVGR2GCSR' to turn on TM, which
	 * will not trigger the BTE), we need to check PRMD first.
	 */
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_enable();

	if (!cpu_has_lbt) {
		force_sig(SIGILL);
		goto out;
	}
	BUG_ON(is_lbt_enabled());

	preempt_disable();
	init_restore_lbt();
	preempt_enable();

out:
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_disable();

	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_reserved(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	/*
	 * Game over - no way to handle this if it ever occurs. Most probably
	 * caused by a fatal error after another hardware/software error.
	 */
	pr_err("Caught reserved exception %u on pid:%d [%s] - should not happen\n",
		read_csr_excode(), current->pid, current->comm);
	die_if_kernel("do_reserved exception", regs);
	force_sig(SIGUNUSED);

	local_irq_disable();

	irqentry_exit(regs, state);
}

asmlinkage void cache_parity_error(void)
{
	/* For the moment, report the problem and hang. */
	pr_err("Cache error exception:\n");
	pr_err("csr_merrctl == %08x\n", csr_read32(LOONGARCH_CSR_MERRCTL));
	pr_err("csr_merrera == %016lx\n", csr_read64(LOONGARCH_CSR_MERRERA));
	panic("Can't handle the cache error!");
}

asmlinkage void noinstr handle_loongarch_irq(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	irq_enter_rcu();
	old_regs = set_irq_regs(regs);
	handle_arch_irq(regs);
	set_irq_regs(old_regs);
	irq_exit_rcu();
}

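/*
 * Vectored interrupt entry. If we are not already on this CPU's IRQ
 * stack, switch to it around handle_loongarch_irq(); the interrupted
 * task's sp is stashed at IRQ_STACK_START so the unwinder can walk
 * back onto the task stack.
 */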
asmlinkage void noinstr do_vint(struct pt_regs *regs, unsigned long sp)
{
	register int cpu;
	register unsigned long stack;
	irqentry_state_t state = irqentry_enter(regs);

	cpu = smp_processor_id();

	if (on_irq_stack(cpu, sp))
		handle_loongarch_irq(regs);
	else {
		stack = per_cpu(irq_stack, cpu) + IRQ_STACK_START;

		/* Save task's sp on IRQ stack for unwinding */
		*(unsigned long *)stack = sp;

		__asm__ __volatile__(
		"move	$s0, $sp		\n" /* Preserve sp */
		"move	$sp, %[stk]		\n" /* Switch stack */
		"move	$a0, %[regs]		\n"
		"bl	handle_loongarch_irq	\n"
		"move	$sp, $s0		\n" /* Restore sp */
		: /* No outputs */
		: [stk] "r" (stack), [regs] "r" (regs)
		: "$a0", "$a1", "$a2", "$a3", "$a4", "$a5", "$a6", "$a7", "$s0",
		  "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8",
		  "memory");
	}

	irqentry_exit(regs, state);
}

unsigned long eentry;
unsigned long tlbrentry;

long exception_handlers[VECSIZE * 128 / sizeof(long)] __aligned(SZ_64K);

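/*
 * All handlers live in the 64KB-aligned exception_handlers block:
 * regular exceptions and machine errors share its base (EENTRY and
 * MERRENTRY), while the TLB refill handler sits at offset 80 * VECSIZE
 * (TLBRENTRY).
 */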
static void configure_exception_vector(void)
{
	eentry    = (unsigned long)exception_handlers;
	tlbrentry = (unsigned long)exception_handlers + 80*VECSIZE;

	csr_write64(eentry, LOONGARCH_CSR_EENTRY);
	csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
	csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
}

void per_cpu_trap_init(int cpu)
{
	unsigned int i;

	setup_vint_size(VECSIZE);

	configure_exception_vector();

	if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = asid_first_version(cpu);

	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	/* Initialise exception handlers */
	if (cpu == 0)
		for (i = 0; i < 64; i++)
			set_handler(i * VECSIZE, handle_reserved, VECSIZE);

	tlb_init(cpu);
	cpu_cache_init();
}

/* Install CPU exception handler */
void set_handler(unsigned long offset, void *addr, unsigned long size)
{
	memcpy((void *)(eentry + offset), addr, size);
	local_flush_icache_range(eentry + offset, eentry + offset + size);
}

static const char panic_null_cerr[] =
	"Trying to set NULL cache error exception handler\n";

/*
 * Install uncached CPU exception handler.
 * This is suitable only for the cache error exception which is the only
 * exception handler that is being run uncached.
 */
void set_merr_handler(unsigned long offset, void *addr, unsigned long size)
{
	unsigned long uncached_eentry = TO_UNCACHE(__pa(eentry));

	if (!addr)
		panic(panic_null_cerr);

	memcpy((void *)(uncached_eentry + offset), addr, size);
}

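/*
 * Point every interrupt vector at handle_vint and install the specific
 * exception handlers; codes without a dedicated handler keep the
 * handle_reserved stub installed by per_cpu_trap_init().
 */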
void __init trap_init(void)
{
	long i;

	/* Set interrupt vector handler */
	for (i = EXCCODE_INT_START; i <= EXCCODE_INT_END; i++)
		set_handler(i * VECSIZE, handle_vint, VECSIZE);

	set_handler(EXCCODE_ADE * VECSIZE, handle_ade, VECSIZE);
	set_handler(EXCCODE_ALE * VECSIZE, handle_ale, VECSIZE);
	set_handler(EXCCODE_BCE * VECSIZE, handle_bce, VECSIZE);
	set_handler(EXCCODE_SYS * VECSIZE, handle_sys, VECSIZE);
	set_handler(EXCCODE_BP * VECSIZE, handle_bp, VECSIZE);
	set_handler(EXCCODE_INE * VECSIZE, handle_ri, VECSIZE);
	set_handler(EXCCODE_IPE * VECSIZE, handle_ri, VECSIZE);
	set_handler(EXCCODE_FPDIS * VECSIZE, handle_fpu, VECSIZE);
	set_handler(EXCCODE_LSXDIS * VECSIZE, handle_lsx, VECSIZE);
	set_handler(EXCCODE_LASXDIS * VECSIZE, handle_lasx, VECSIZE);
	set_handler(EXCCODE_FPE * VECSIZE, handle_fpe, VECSIZE);
	set_handler(EXCCODE_BTDIS * VECSIZE, handle_lbt, VECSIZE);
	set_handler(EXCCODE_WATCH * VECSIZE, handle_watch, VECSIZE);

	cache_error_setup();

	local_flush_icache_range(eentry, eentry + 0x400);
}
1176