// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/extable.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/memblock.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cpu.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/inst.h>
#include <asm/kgdb.h>
#include <asm/loongarch.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
#include <asm/stacktrace.h>
#include <asm/tlb.h>
#include <asm/types.h>
#include <asm/unwind.h>
#include <asm/uprobes.h>

#include "access-helper.h"

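/*
 * First-level exception entry points, implemented in assembly. They are
 * installed into the vector table by trap_init() below and hand off to
 * the C handlers in this file.
 */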
extern asmlinkage void handle_ade(void);
extern asmlinkage void handle_ale(void);
extern asmlinkage void handle_bce(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_fpu(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_lbt(void);
extern asmlinkage void handle_lsx(void);
extern asmlinkage void handle_lasx(void);
extern asmlinkage void handle_reserved(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_vint(void);

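/*
 * Walk the call chain with the arch unwinder and print one symbolized
 * return address per frame.
 */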
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
			   const char *loglvl, bool user)
{
	unsigned long addr;
	struct unwind_state state;
	struct pt_regs *pregs = (struct pt_regs *)regs;

	if (!task)
		task = current;

	printk("%sCall Trace:", loglvl);
	for (unwind_start(&state, task, pregs);
	      !unwind_done(&state); unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		print_ip_sym(loglvl, addr);
	}
	printk("%s\n", loglvl);
}

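/*
 * Dump the raw stack words upward from $sp to the next page boundary
 * (at most 40 words), then print the symbolic backtrace.
 */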
static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs, const char *loglvl, bool user)
{
	int i;
	const int field = 2 * sizeof(unsigned long);
	unsigned long stackdata;
	unsigned long *sp = (unsigned long *)regs->regs[3];

	printk("%sStack :", loglvl);
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0)) {
			pr_cont("\n");
			printk("%s       ", loglvl);
		}
		if (i > 39) {
			pr_cont(" ...");
			break;
		}

		if (__get_addr(&stackdata, sp++, user)) {
			pr_cont(" (Bad stack address)");
			break;
		}

		pr_cont(" %0*lx", field, stackdata);
		i++;
	}
	pr_cont("\n");
	show_backtrace(task, regs, loglvl, user);
}

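/*
 * Generic show_stack() entry point: synthesize a pt_regs from the given
 * stack pointer, from the current context, or from the target task's
 * saved thread registers, then dump it.
 */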
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	struct pt_regs regs;

	regs.csr_crmd = 0;
	if (sp) {
		regs.csr_era = 0;
		regs.regs[1] = 0;
		regs.regs[3] = (unsigned long)sp;
	} else {
		if (!task || task == current)
			prepare_frametrace(&regs);
		else {
			regs.csr_era = task->thread.reg01;
			regs.regs[1] = 0;
			regs.regs[3] = task->thread.reg03;
			regs.regs[22] = task->thread.reg22;
		}
	}

	show_stacktrace(task, &regs, loglvl, false);
}

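/*
 * Dump the instruction words around the faulting ERA; the faulting word
 * itself is wrapped in '<' and '>'.
 */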
static void show_code(unsigned int *pc, bool user)
{
	long i;
	unsigned int insn;

	printk("Code:");

	for (i = -3; i < 6; i++) {
		if (__get_inst(&insn, pc + i, user)) {
			pr_cont(" (Bad address in era)\n");
			break;
		}
		pr_cont("%c%08x%c", (i ? ' ' : '<'), insn, (i ? ' ' : '>'));
	}
	pr_cont("\n");
}

static void print_bool_fragment(const char *key, unsigned long val, bool first)
{
	/* e.g. "+PG", "-DA" */
	pr_cont("%s%c%s", first ? "" : " ", val ? '+' : '-', key);
}

static void print_plv_fragment(const char *key, int val)
{
	/* e.g. "PLV0", "PPLV3" */
	pr_cont("%s%d", key, val);
}

static void print_memory_type_fragment(const char *key, unsigned long val)
{
	const char *humanized_type;

	switch (val) {
	case 0:
		humanized_type = "SUC";
		break;
	case 1:
		humanized_type = "CC";
		break;
	case 2:
		humanized_type = "WUC";
		break;
	default:
		pr_cont(" %s=Reserved(%lu)", key, val);
		return;
	}

	/* e.g. " DATM=WUC" */
	pr_cont(" %s=%s", key, humanized_type);
}

static void print_intr_fragment(const char *key, unsigned long val)
{
	/* e.g. "LIE=0-1,3,5-7" */
	pr_cont("%s=%*pbl", key, EXCCODE_INT_NUM, &val);
}

static void print_crmd(unsigned long x)
{
	printk(" CRMD: %08lx (", x);
	print_plv_fragment("PLV", (int) FIELD_GET(CSR_CRMD_PLV, x));
	print_bool_fragment("IE", FIELD_GET(CSR_CRMD_IE, x), false);
	print_bool_fragment("DA", FIELD_GET(CSR_CRMD_DA, x), false);
	print_bool_fragment("PG", FIELD_GET(CSR_CRMD_PG, x), false);
	print_memory_type_fragment("DACF", FIELD_GET(CSR_CRMD_DACF, x));
	print_memory_type_fragment("DACM", FIELD_GET(CSR_CRMD_DACM, x));
	print_bool_fragment("WE", FIELD_GET(CSR_CRMD_WE, x), false);
	pr_cont(")\n");
}

static void print_prmd(unsigned long x)
{
	printk(" PRMD: %08lx (", x);
	print_plv_fragment("PPLV", (int) FIELD_GET(CSR_PRMD_PPLV, x));
	print_bool_fragment("PIE", FIELD_GET(CSR_PRMD_PIE, x), false);
	print_bool_fragment("PWE", FIELD_GET(CSR_PRMD_PWE, x), false);
	pr_cont(")\n");
}

static void print_euen(unsigned long x)
{
	printk(" EUEN: %08lx (", x);
	print_bool_fragment("FPE", FIELD_GET(CSR_EUEN_FPEN, x), true);
	print_bool_fragment("SXE", FIELD_GET(CSR_EUEN_LSXEN, x), false);
	print_bool_fragment("ASXE", FIELD_GET(CSR_EUEN_LASXEN, x), false);
	print_bool_fragment("BTE", FIELD_GET(CSR_EUEN_LBTEN, x), false);
	pr_cont(")\n");
}

static void print_ecfg(unsigned long x)
{
	printk(" ECFG: %08lx (", x);
	print_intr_fragment("LIE", FIELD_GET(CSR_ECFG_IM, x));
	pr_cont(" VS=%d)\n", (int) FIELD_GET(CSR_ECFG_VS, x));
}

static const char *humanize_exc_name(unsigned int ecode, unsigned int esubcode)
{
	/*
	 * LoongArch users and developers are probably more familiar with
	 * those names found in the ISA manual, so we are going to print out
	 * the latter. This will require some mapping.
	 */
	switch (ecode) {
	case EXCCODE_RSV: return "INT";
	case EXCCODE_TLBL: return "PIL";
	case EXCCODE_TLBS: return "PIS";
	case EXCCODE_TLBI: return "PIF";
	case EXCCODE_TLBM: return "PME";
	case EXCCODE_TLBNR: return "PNR";
	case EXCCODE_TLBNX: return "PNX";
	case EXCCODE_TLBPE: return "PPI";
	case EXCCODE_ADE:
		switch (esubcode) {
		case EXSUBCODE_ADEF: return "ADEF";
		case EXSUBCODE_ADEM: return "ADEM";
		}
		break;
	case EXCCODE_ALE: return "ALE";
	case EXCCODE_BCE: return "BCE";
	case EXCCODE_SYS: return "SYS";
	case EXCCODE_BP: return "BRK";
	case EXCCODE_INE: return "INE";
	case EXCCODE_IPE: return "IPE";
	case EXCCODE_FPDIS: return "FPD";
	case EXCCODE_LSXDIS: return "SXD";
	case EXCCODE_LASXDIS: return "ASXD";
	case EXCCODE_FPE:
		switch (esubcode) {
		case EXCSUBCODE_FPE: return "FPE";
		case EXCSUBCODE_VFPE: return "VFPE";
		}
		break;
	case EXCCODE_WATCH:
		switch (esubcode) {
		case EXCSUBCODE_WPEF: return "WPEF";
		case EXCSUBCODE_WPEM: return "WPEM";
		}
		break;
	case EXCCODE_BTDIS: return "BTD";
	case EXCCODE_BTE: return "BTE";
	case EXCCODE_GSPR: return "GSPR";
	case EXCCODE_HVC: return "HVC";
	case EXCCODE_GCM:
		switch (esubcode) {
		case EXCSUBCODE_GCSC: return "GCSC";
		case EXCSUBCODE_GCHC: return "GCHC";
		}
		break;
	/*
	 * The manual does not mention the EXCCODE_SE case, but print it
	 * out nevertheless.
	 */
	case EXCCODE_SE: return "SE";
	}

	return "???";
}

static void print_estat(unsigned long x)
{
	unsigned int ecode = FIELD_GET(CSR_ESTAT_EXC, x);
	unsigned int esubcode = FIELD_GET(CSR_ESTAT_ESUBCODE, x);

	printk("ESTAT: %08lx [%s] (", x, humanize_exc_name(ecode, esubcode));
	print_intr_fragment("IS", FIELD_GET(CSR_ESTAT_IS, x));
	pr_cont(" ECode=%d EsubCode=%d)\n", (int) ecode, (int) esubcode);
}

static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int exccode = FIELD_GET(CSR_ESTAT_EXC, regs->csr_estat);

	show_regs_print_info(KERN_DEFAULT);

	/* Print saved GPRs except $zero (substituting with PC/ERA) */
#define GPR_FIELD(x) field, regs->regs[x]
	printk("pc %0*lx ra %0*lx tp %0*lx sp %0*lx\n",
	       field, regs->csr_era, GPR_FIELD(1), GPR_FIELD(2), GPR_FIELD(3));
	printk("a0 %0*lx a1 %0*lx a2 %0*lx a3 %0*lx\n",
	       GPR_FIELD(4), GPR_FIELD(5), GPR_FIELD(6), GPR_FIELD(7));
	printk("a4 %0*lx a5 %0*lx a6 %0*lx a7 %0*lx\n",
	       GPR_FIELD(8), GPR_FIELD(9), GPR_FIELD(10), GPR_FIELD(11));
	printk("t0 %0*lx t1 %0*lx t2 %0*lx t3 %0*lx\n",
	       GPR_FIELD(12), GPR_FIELD(13), GPR_FIELD(14), GPR_FIELD(15));
	printk("t4 %0*lx t5 %0*lx t6 %0*lx t7 %0*lx\n",
	       GPR_FIELD(16), GPR_FIELD(17), GPR_FIELD(18), GPR_FIELD(19));
	printk("t8 %0*lx u0 %0*lx s9 %0*lx s0 %0*lx\n",
	       GPR_FIELD(20), GPR_FIELD(21), GPR_FIELD(22), GPR_FIELD(23));
	printk("s1 %0*lx s2 %0*lx s3 %0*lx s4 %0*lx\n",
	       GPR_FIELD(24), GPR_FIELD(25), GPR_FIELD(26), GPR_FIELD(27));
	printk("s5 %0*lx s6 %0*lx s7 %0*lx s8 %0*lx\n",
	       GPR_FIELD(28), GPR_FIELD(29), GPR_FIELD(30), GPR_FIELD(31));

	/* The slot for $zero is reused as the syscall restart flag */
	if (regs->regs[0])
		printk("syscall restart flag: %0*lx\n", GPR_FIELD(0));

	if (user_mode(regs)) {
		printk("   ra: %0*lx\n", GPR_FIELD(1));
		printk("  ERA: %0*lx\n", field, regs->csr_era);
	} else {
		printk("   ra: %0*lx %pS\n", GPR_FIELD(1), (void *) regs->regs[1]);
		printk("  ERA: %0*lx %pS\n", field, regs->csr_era, (void *) regs->csr_era);
	}
#undef GPR_FIELD

	/* Print saved important CSRs */
	print_crmd(regs->csr_crmd);
	print_prmd(regs->csr_prmd);
	print_euen(regs->csr_euen);
	print_ecfg(regs->csr_ecfg);
	print_estat(regs->csr_estat);

	if (exccode >= EXCCODE_TLBL && exccode <= EXCCODE_ALE)
		printk(" BADV: %0*lx\n", field, regs->csr_badvaddr);

	printk(" PRID: %08x (%s, %s)\n", read_cpucfg(LOONGARCH_CPUCFG0),
	       cpu_family_string(), cpu_full_name_string());
}

void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
	dump_stack();
}

void show_registers(struct pt_regs *regs)
{
	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
	       current->comm, current->pid, current_thread_info(), current);

	show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs));
	show_code((void *)regs->csr_era, user_mode(regs));
	printk("\n");
}

static DEFINE_RAW_SPINLOCK(die_lock);

void die(const char *str, struct pt_regs *regs)
{
	int ret;
	static int die_counter;

	oops_enter();

	ret = notify_die(DIE_OOPS, str, regs, 0,
			 current->thread.trap_nr, SIGSEGV);

	console_verbose();
	raw_spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	if (ret == NOTIFY_STOP)
		return;

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	make_task_dead(SIGSEGV);
}

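/*
 * Program the vector spacing (ECFG.VS) so that each exception/interrupt
 * vector entry occupies 'size' bytes (the spacing is 4 << VS bytes).
 */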
static inline void setup_vint_size(unsigned int size)
{
	unsigned int vs;

	vs = ilog2(size / 4);

	if (vs == 0 || vs > 7)
		panic("vint_size %d not supported yet", vs);

	csr_xchg32(vs << CSR_ECFG_VS_SHIFT, CSR_ECFG_VS, LOONGARCH_CSR_ECFG);
}

/*
 * Send SIGFPE according to FCSR Cause bits, which must have already
 * been masked against Enable bits.  This is important as Inexact can
 * happen together with Overflow or Underflow, and `ptrace' can set
 * any bits.
 */
void force_fcsr_sig(unsigned long fcsr, void __user *fault_addr,
		     struct task_struct *tsk)
{
	int si_code = FPE_FLTUNK;

	if (fcsr & FPU_CSR_INV_X)
		si_code = FPE_FLTINV;
	else if (fcsr & FPU_CSR_DIV_X)
		si_code = FPE_FLTDIV;
	else if (fcsr & FPU_CSR_OVF_X)
		si_code = FPE_FLTOVF;
	else if (fcsr & FPU_CSR_UDF_X)
		si_code = FPE_FLTUND;
	else if (fcsr & FPU_CSR_INE_X)
		si_code = FPE_FLTRES;

	force_sig_fault(SIGFPE, si_code, fault_addr);
}

int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcsr)
{
	int si_code;

	switch (sig) {
	case 0:
		return 0;

	case SIGFPE:
		force_fcsr_sig(fcsr, fault_addr, current);
		return 1;

	case SIGBUS:
		force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
		return 1;

	case SIGSEGV:
		mmap_read_lock(current->mm);
		if (vma_lookup(current->mm, (unsigned long)fault_addr))
			si_code = SEGV_ACCERR;
		else
			si_code = SEGV_MAPERR;
		mmap_read_unlock(current->mm);
		force_sig_fault(SIGSEGV, si_code, fault_addr);
		return 1;

	default:
		force_sig(sig);
		return 1;
	}
}

/*
 * Delayed fp exceptions when doing a lazy ctx switch
 */
asmlinkage void noinstr do_fpe(struct pt_regs *regs, unsigned long fcsr)
{
	int sig;
	void __user *fault_addr;
	irqentry_state_t state = irqentry_enter(regs);

	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
		       SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear FCSR.Cause before enabling interrupts */
	write_fcsr(LOONGARCH_FCSR0, fcsr & ~mask_fcsr_x(fcsr));
	local_irq_enable();

	die_if_kernel("FP exception in kernel code", regs);

	sig = SIGFPE;
	fault_addr = (void __user *) regs->csr_era;

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcsr);

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_ade(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	die_if_kernel("Kernel ade access", regs);
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)regs->csr_badvaddr);

	irqentry_exit(regs, state);
}

/* sysctl hooks */
int unaligned_enabled __read_mostly = 1;	/* Enabled by default */
int no_unaligned_warning __read_mostly = 1;	/* Only 1 warning by default */

asmlinkage void noinstr do_ale(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

#ifndef CONFIG_ARCH_STRICT_ALIGN
	die_if_kernel("Kernel ale access", regs);
	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
#else
	unsigned int *pc;

	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);

	/*
	 * Did we catch a fault trying to load an instruction?
	 */
	if (regs->csr_badvaddr == regs->csr_era)
		goto sigbus;
	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
		goto sigbus;
	if (!unaligned_enabled)
		goto sigbus;
	if (!no_unaligned_warning)
		show_registers(regs);

	pc = (unsigned int *)exception_era(regs);

	emulate_load_store_insn(regs, (void __user *)regs->csr_badvaddr, pc);

	goto out;

sigbus:
	die_if_kernel("Kernel ale access", regs);
	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
out:
#endif
	irqentry_exit(regs, state);
}

#ifdef CONFIG_GENERIC_BUG
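/*
 * Any address is acceptable here: BUG() is implemented as a break
 * instruction, and report_bug() does the real lookup in the bug table.
 */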
int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_GENERIC_BUG */

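/* Dispatch a BRK_BUG break: die on a real BUG(), skip over a WARN(). */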
static void bug_handler(struct pt_regs *regs)
{
	switch (report_bug(regs->csr_era, regs)) {
	case BUG_TRAP_TYPE_BUG:
	case BUG_TRAP_TYPE_NONE:
		die_if_kernel("Oops - BUG", regs);
		force_sig(SIGTRAP);
		break;

	case BUG_TRAP_TYPE_WARN:
		/* Skip the BUG instruction and continue */
		regs->csr_era += LOONGARCH_INSN_SIZE;
		break;
	}
}

asmlinkage void noinstr do_bce(struct pt_regs *regs)
{
	bool user = user_mode(regs);
	unsigned long era = exception_era(regs);
	u64 badv = 0, lower = 0, upper = ULONG_MAX;
	union loongarch_instruction insn;
	irqentry_state_t state = irqentry_enter(regs);

	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_enable();

	current->thread.trap_nr = read_csr_excode();

	die_if_kernel("Bounds check error in kernel code", regs);

	/*
	 * Pull out the address that failed bounds checking, and the lower /
	 * upper bound, by minimally looking at the faulting instruction word
	 * and reading from the correct register.
	 */
	if (__get_inst(&insn.word, (u32 *)era, user))
		goto bad_era;

	switch (insn.reg3_format.opcode) {
	case asrtle_op:
		if (insn.reg3_format.rd != 0)
			break;	/* not asrtle */
		badv = regs->regs[insn.reg3_format.rj];
		upper = regs->regs[insn.reg3_format.rk];
		break;

	case asrtgt_op:
		if (insn.reg3_format.rd != 0)
			break;	/* not asrtgt */
		badv = regs->regs[insn.reg3_format.rj];
		lower = regs->regs[insn.reg3_format.rk];
		break;

	case ldleb_op:
	case ldleh_op:
	case ldlew_op:
	case ldled_op:
	case stleb_op:
	case stleh_op:
	case stlew_op:
	case stled_op:
	case fldles_op:
	case fldled_op:
	case fstles_op:
	case fstled_op:
		badv = regs->regs[insn.reg3_format.rj];
		upper = regs->regs[insn.reg3_format.rk];
		break;

	case ldgtb_op:
	case ldgth_op:
	case ldgtw_op:
	case ldgtd_op:
	case stgtb_op:
	case stgth_op:
	case stgtw_op:
	case stgtd_op:
	case fldgts_op:
	case fldgtd_op:
	case fstgts_op:
	case fstgtd_op:
		badv = regs->regs[insn.reg3_format.rj];
		lower = regs->regs[insn.reg3_format.rk];
		break;
	}

	force_sig_bnderr((void __user *)badv, (void __user *)lower, (void __user *)upper);

out:
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_disable();

	irqentry_exit(regs, state);
	return;

bad_era:
	/*
	 * Cannot pull out the instruction word, hence cannot provide more
	 * info than a regular SIGSEGV in this case.
	 */
	force_sig(SIGSEGV);
	goto out;
}

asmlinkage void noinstr do_bp(struct pt_regs *regs)
{
	bool user = user_mode(regs);
	unsigned int opcode, bcode;
	unsigned long era = exception_era(regs);
	irqentry_state_t state = irqentry_enter(regs);

	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_enable();

	if (__get_inst(&opcode, (u32 *)era, user))
		goto out_sigsegv;

	bcode = (opcode & 0x7fff);

	/*
	 * Notify the kgdb/kprobe/uprobe handlers first if the break code
	 * is likely to pertain to them.
	 */
	switch (bcode) {
	case BRK_KDB:
		if (kgdb_breakpoint_handler(regs))
			goto out;
		else
			break;
	case BRK_KPROBE_BP:
		if (kprobe_breakpoint_handler(regs))
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (kprobe_singlestep_handler(regs))
			goto out;
		else
			break;
	case BRK_UPROBE_BP:
		if (uprobe_breakpoint_handler(regs))
			goto out;
		else
			break;
	case BRK_UPROBE_XOLBP:
		if (uprobe_singlestep_handler(regs))
			goto out;
		else
			break;
	default:
		current->thread.trap_nr = read_csr_excode();
		if (notify_die(DIE_TRAP, "Break", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	}

	switch (bcode) {
	case BRK_BUG:
		bug_handler(regs);
		break;
	case BRK_DIVZERO:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->csr_era);
		break;
	case BRK_OVERFLOW:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->csr_era);
		break;
	default:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->csr_era);
		break;
	}

out:
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_disable();

	irqentry_exit(regs, state);
	return;

out_sigsegv:
	force_sig(SIGSEGV);
	goto out;
}

asmlinkage void noinstr do_watch(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

#ifndef CONFIG_HAVE_HW_BREAKPOINT
	pr_warn("Hardware watch point handler not implemented!\n");
#else
	if (kgdb_breakpoint_handler(regs))
		goto out;

	if (test_tsk_thread_flag(current, TIF_SINGLESTEP)) {
		int llbit = (csr_read32(LOONGARCH_CSR_LLBCTL) & 0x1);
		unsigned long pc = instruction_pointer(regs);
		union loongarch_instruction *ip = (union loongarch_instruction *)pc;

		if (llbit) {
			/*
			 * When an ll-sc combo is encountered, it is regarded as a
			 * single instruction, so don't clear llbit or reset
			 * CSR.FWPS.Skip until the ll-sc sequence has completed.
			 */
			csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS);
			csr_write32(CSR_LLBCTL_KLO, LOONGARCH_CSR_LLBCTL);
			goto out;
		}

		if (pc == current->thread.single_step) {
			/*
			 * Certain instructions, such as fld.d/fst.d, are occasionally
			 * not skipped even when CSR.FWPS.Skip is set. So single-step
			 * needs to compare csr_era with the value it recorded the
			 * last time it was armed.
			 */
			if (!is_self_loop_ins(ip, regs)) {
				/*
				 * Check whether the instruction's target pc equals the
				 * current pc (a self loop). If it does, we must not set
				 * the CSR.FWPS.Skip bit, as that would break the
				 * original instruction stream.
				 */
				csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS);
				goto out;
			}
		}
	} else {
		breakpoint_handler(regs);
		watchpoint_handler(regs);
	}

	force_sig(SIGTRAP);
out:
#endif
	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_ri(struct pt_regs *regs)
{
	int status = SIGILL;
	unsigned int opcode = 0;
	unsigned int __user *era = (unsigned int __user *)exception_era(regs);
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	current->thread.trap_nr = read_csr_excode();

	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
		       SIGILL) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(get_user(opcode, era) < 0)) {
		status = SIGSEGV;
		current->thread.error_code = 1;
	}

	force_sig(status);

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

static void init_restore_fp(void)
{
	if (!used_math()) {
		/* First time FP context user. */
		init_fpu();
	} else {
		/* This task has formerly used the FP context */
		if (!is_fpu_owner())
			own_fpu_inatomic(1);
	}

	BUG_ON(!is_fp_enabled());
}

static void init_restore_lsx(void)
{
	enable_lsx();

	if (!thread_lsx_context_live()) {
		/* First time LSX context user */
		init_restore_fp();
		init_lsx_upper();
		set_thread_flag(TIF_LSX_CTX_LIVE);
	} else {
		if (!is_simd_owner()) {
			if (is_fpu_owner()) {
				restore_lsx_upper(current);
			} else {
				__own_fpu();
				restore_lsx(current);
			}
		}
	}

	set_thread_flag(TIF_USEDSIMD);

	BUG_ON(!is_fp_enabled());
	BUG_ON(!is_lsx_enabled());
}

static void init_restore_lasx(void)
{
	enable_lasx();

	if (!thread_lasx_context_live()) {
		/* First time LASX context user */
		init_restore_lsx();
		init_lasx_upper();
		set_thread_flag(TIF_LASX_CTX_LIVE);
	} else {
		if (is_fpu_owner() || is_simd_owner()) {
			init_restore_lsx();
			restore_lasx_upper(current);
		} else {
			__own_fpu();
			enable_lsx();
			restore_lasx(current);
		}
	}

	set_thread_flag(TIF_USEDSIMD);

	BUG_ON(!is_fp_enabled());
	BUG_ON(!is_lsx_enabled());
	BUG_ON(!is_lasx_enabled());
}

asmlinkage void noinstr do_fpu(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	die_if_kernel("do_fpu invoked from kernel context!", regs);
	BUG_ON(is_lsx_enabled());
	BUG_ON(is_lasx_enabled());

	preempt_disable();
	init_restore_fp();
	preempt_enable();

	local_irq_disable();
	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_lsx(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	if (!cpu_has_lsx) {
		force_sig(SIGILL);
		goto out;
	}

	die_if_kernel("do_lsx invoked from kernel context!", regs);
	BUG_ON(is_lasx_enabled());

	preempt_disable();
	init_restore_lsx();
	preempt_enable();

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_lasx(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	if (!cpu_has_lasx) {
		force_sig(SIGILL);
		goto out;
	}

	die_if_kernel("do_lasx invoked from kernel context!", regs);

	preempt_disable();
	init_restore_lasx();
	preempt_enable();

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

static void init_restore_lbt(void)
{
	if (!thread_lbt_context_live()) {
		/* First time LBT context user */
		init_lbt();
		set_thread_flag(TIF_LBT_CTX_LIVE);
	} else {
		if (!is_lbt_owner())
			own_lbt_inatomic(1);
	}

	BUG_ON(!is_lbt_enabled());
}

asmlinkage void noinstr do_lbt(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	/*
	 * BTD (Binary Translation Disable exception) can be triggered
	 * during FP save/restore if TM (Top Mode) is on, which may
	 * cause irq_enable during 'switch_to'. To avoid this situation
	 * (including the user using 'MOVGR2GCSR' to turn on TM, which
	 * will not trigger the BTE), we need to check PRMD first.
	 */
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_enable();

	if (!cpu_has_lbt) {
		force_sig(SIGILL);
		goto out;
	}
	BUG_ON(is_lbt_enabled());

	preempt_disable();
	init_restore_lbt();
	preempt_enable();

out:
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_disable();

	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_reserved(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	/*
	 * Game over - no way to handle this if it ever occurs. Most probably
	 * caused by a fatal error after another hardware/software error.
	 */
	pr_err("Caught reserved exception %u on pid:%d [%s] - should not happen\n",
		read_csr_excode(), current->pid, current->comm);
	die_if_kernel("do_reserved exception", regs);
	force_sig(SIGUNUSED);

	local_irq_disable();

	irqentry_exit(regs, state);
}

asmlinkage void cache_parity_error(void)
{
	/* For the moment, report the problem and hang. */
	pr_err("Cache error exception:\n");
	pr_err("csr_merrctl == %08x\n", csr_read32(LOONGARCH_CSR_MERRCTL));
	pr_err("csr_merrera == %016lx\n", csr_read64(LOONGARCH_CSR_MERRERA));
	panic("Can't handle the cache error!");
}

asmlinkage void noinstr handle_loongarch_irq(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	irq_enter_rcu();
	old_regs = set_irq_regs(regs);
	handle_arch_irq(regs);
	set_irq_regs(old_regs);
	irq_exit_rcu();
}

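/*
 * Vectored interrupt entry. If we are not already on the per-CPU IRQ
 * stack, switch to it by hand before calling handle_loongarch_irq().
 */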
asmlinkage void noinstr do_vint(struct pt_regs *regs, unsigned long sp)
{
	register int cpu;
	register unsigned long stack;
	irqentry_state_t state = irqentry_enter(regs);

	cpu = smp_processor_id();

	if (on_irq_stack(cpu, sp))
		handle_loongarch_irq(regs);
	else {
		stack = per_cpu(irq_stack, cpu) + IRQ_STACK_START;

		/* Save task's sp on IRQ stack for unwinding */
		*(unsigned long *)stack = sp;

		__asm__ __volatile__(
		"move	$s0, $sp		\n" /* Preserve sp */
		"move	$sp, %[stk]		\n" /* Switch stack */
		"move	$a0, %[regs]		\n"
		"bl	handle_loongarch_irq	\n"
		"move	$sp, $s0		\n" /* Restore sp */
		: /* No outputs */
		: [stk] "r" (stack), [regs] "r" (regs)
		: "$a0", "$a1", "$a2", "$a3", "$a4", "$a5", "$a6", "$a7", "$s0",
		  "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8",
		  "memory");
	}

	irqentry_exit(regs, state);
}

unsigned long eentry;
unsigned long tlbrentry;

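/*
 * Backing storage for the exception vector table: 128 slots of VECSIZE
 * bytes each, 64KB-aligned.
 */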
long exception_handlers[VECSIZE * 128 / sizeof(long)] __aligned(SZ_64K);

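/*
 * Point the CSR entry registers at exception_handlers[]. Note the TLB
 * refill entry lives at a fixed 80 * VECSIZE offset into the block.
 */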
static void configure_exception_vector(void)
{
	eentry    = (unsigned long)exception_handlers;
	tlbrentry = (unsigned long)exception_handlers + 80 * VECSIZE;

	csr_write64(eentry, LOONGARCH_CSR_EENTRY);
	csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
	csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
}

void per_cpu_trap_init(int cpu)
{
	unsigned int i;

	setup_vint_size(VECSIZE);

	configure_exception_vector();

	if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = asid_first_version(cpu);

	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	/* Initialise exception handlers */
	if (cpu == 0)
		for (i = 0; i < 64; i++)
			set_handler(i * VECSIZE, handle_reserved, VECSIZE);

	tlb_init(cpu);
	cpu_cache_init();
}

/* Install CPU exception handler */
void set_handler(unsigned long offset, void *addr, unsigned long size)
{
	memcpy((void *)(eentry + offset), addr, size);
	local_flush_icache_range(eentry + offset, eentry + offset + size);
}

static const char panic_null_cerr[] =
	"Trying to set NULL cache error exception handler\n";

/*
 * Install uncached CPU exception handler.
 * This is suitable only for the cache error exception which is the only
 * exception handler that is being run uncached.
 */
void set_merr_handler(unsigned long offset, void *addr, unsigned long size)
{
	unsigned long uncached_eentry = TO_UNCACHE(__pa(eentry));

	if (!addr)
		panic(panic_null_cerr);

	memcpy((void *)(uncached_eentry + offset), addr, size);
}

void __init trap_init(void)
{
	long i;

	/* Set interrupt vector handler */
	for (i = EXCCODE_INT_START; i <= EXCCODE_INT_END; i++)
		set_handler(i * VECSIZE, handle_vint, VECSIZE);

	set_handler(EXCCODE_ADE * VECSIZE, handle_ade, VECSIZE);
	set_handler(EXCCODE_ALE * VECSIZE, handle_ale, VECSIZE);
	set_handler(EXCCODE_BCE * VECSIZE, handle_bce, VECSIZE);
	set_handler(EXCCODE_SYS * VECSIZE, handle_sys, VECSIZE);
	set_handler(EXCCODE_BP * VECSIZE, handle_bp, VECSIZE);
	set_handler(EXCCODE_INE * VECSIZE, handle_ri, VECSIZE);
	set_handler(EXCCODE_IPE * VECSIZE, handle_ri, VECSIZE);
	set_handler(EXCCODE_FPDIS * VECSIZE, handle_fpu, VECSIZE);
	set_handler(EXCCODE_LSXDIS * VECSIZE, handle_lsx, VECSIZE);
	set_handler(EXCCODE_LASXDIS * VECSIZE, handle_lasx, VECSIZE);
	set_handler(EXCCODE_FPE * VECSIZE, handle_fpe, VECSIZE);
	set_handler(EXCCODE_BTDIS * VECSIZE, handle_lbt, VECSIZE);
	set_handler(EXCCODE_WATCH * VECSIZE, handle_watch, VECSIZE);

	cache_error_setup();

	local_flush_icache_range(eentry, eentry + 0x400);
}