xref: /openbmc/linux/arch/mips/kernel/traps.c (revision b03afaa8)
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
7  * Copyright (C) 1995, 1996 Paul M. Antoine
8  * Copyright (C) 1998 Ulf Carlsson
9  * Copyright (C) 1999 Silicon Graphics, Inc.
10  * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
11  * Copyright (C) 2002, 2003, 2004, 2005, 2007  Maciej W. Rozycki
12  * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
13  * Copyright (C) 2014, Imagination Technologies Ltd.
14  */
15 #include <linux/bitops.h>
16 #include <linux/bug.h>
17 #include <linux/compiler.h>
18 #include <linux/context_tracking.h>
19 #include <linux/cpu_pm.h>
20 #include <linux/kexec.h>
21 #include <linux/init.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/extable.h>
25 #include <linux/mm.h>
26 #include <linux/sched/mm.h>
27 #include <linux/sched/debug.h>
28 #include <linux/smp.h>
29 #include <linux/spinlock.h>
30 #include <linux/kallsyms.h>
31 #include <linux/memblock.h>
32 #include <linux/interrupt.h>
33 #include <linux/ptrace.h>
34 #include <linux/kgdb.h>
35 #include <linux/kdebug.h>
36 #include <linux/kprobes.h>
37 #include <linux/notifier.h>
38 #include <linux/kdb.h>
39 #include <linux/irq.h>
40 #include <linux/perf_event.h>
41 
42 #include <asm/addrspace.h>
43 #include <asm/bootinfo.h>
44 #include <asm/branch.h>
45 #include <asm/break.h>
46 #include <asm/cop2.h>
47 #include <asm/cpu.h>
48 #include <asm/cpu-type.h>
49 #include <asm/dsp.h>
50 #include <asm/fpu.h>
51 #include <asm/fpu_emulator.h>
52 #include <asm/idle.h>
53 #include <asm/isa-rev.h>
54 #include <asm/mips-cps.h>
55 #include <asm/mips-r2-to-r6-emul.h>
56 #include <asm/mipsregs.h>
57 #include <asm/mipsmtregs.h>
58 #include <asm/module.h>
59 #include <asm/msa.h>
60 #include <asm/ptrace.h>
61 #include <asm/sections.h>
62 #include <asm/siginfo.h>
63 #include <asm/tlbdebug.h>
64 #include <asm/traps.h>
65 #include <linux/uaccess.h>
66 #include <asm/watch.h>
67 #include <asm/mmu_context.h>
68 #include <asm/types.h>
69 #include <asm/stacktrace.h>
70 #include <asm/tlbex.h>
71 #include <asm/uasm.h>
72 
73 #include <asm/mach-loongson64/cpucfg-emul.h>
74 
75 extern void check_wait(void);
76 extern asmlinkage void rollback_handle_int(void);
77 extern asmlinkage void handle_int(void);
78 extern asmlinkage void handle_adel(void);
79 extern asmlinkage void handle_ades(void);
80 extern asmlinkage void handle_ibe(void);
81 extern asmlinkage void handle_dbe(void);
82 extern asmlinkage void handle_sys(void);
83 extern asmlinkage void handle_bp(void);
84 extern asmlinkage void handle_ri(void);
85 extern asmlinkage void handle_ri_rdhwr_tlbp(void);
86 extern asmlinkage void handle_ri_rdhwr(void);
87 extern asmlinkage void handle_cpu(void);
88 extern asmlinkage void handle_ov(void);
89 extern asmlinkage void handle_tr(void);
90 extern asmlinkage void handle_msa_fpe(void);
91 extern asmlinkage void handle_fpe(void);
92 extern asmlinkage void handle_ftlb(void);
93 extern asmlinkage void handle_msa(void);
94 extern asmlinkage void handle_mdmx(void);
95 extern asmlinkage void handle_watch(void);
96 extern asmlinkage void handle_mt(void);
97 extern asmlinkage void handle_dsp(void);
98 extern asmlinkage void handle_mcheck(void);
99 extern asmlinkage void handle_reserved(void);
100 extern void tlb_do_page_fault_0(void);
101 
102 void (*board_be_init)(void);
103 int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
104 void (*board_nmi_handler_setup)(void);
105 void (*board_ejtag_handler_setup)(void);
106 void (*board_bind_eic_interrupt)(int irq, int regset);
107 void (*board_ebase_setup)(void);
108 void (*board_cache_error_setup)(void);
109 
110 static void show_raw_backtrace(unsigned long reg29, const char *loglvl)
111 {
112 	unsigned long *sp = (unsigned long *)(reg29 & ~3);
113 	unsigned long addr;
114 
115 	printk("%sCall Trace:", loglvl);
116 #ifdef CONFIG_KALLSYMS
117 	printk("%s\n", loglvl);
118 #endif
119 	while (!kstack_end(sp)) {
120 		unsigned long __user *p =
121 			(unsigned long __user *)(unsigned long)sp++;
122 		if (__get_user(addr, p)) {
123 			printk("%s (Bad stack address)", loglvl);
124 			break;
125 		}
126 		if (__kernel_text_address(addr))
127 			print_ip_sym(loglvl, addr);
128 	}
129 	printk("%s\n", loglvl);
130 }
131 
132 #ifdef CONFIG_KALLSYMS
133 int raw_show_trace;
134 static int __init set_raw_show_trace(char *str)
135 {
136 	raw_show_trace = 1;
137 	return 1;
138 }
139 __setup("raw_show_trace", set_raw_show_trace);
140 #endif
141 
142 static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
143 			   const char *loglvl)
144 {
145 	unsigned long sp = regs->regs[29];
146 	unsigned long ra = regs->regs[31];
147 	unsigned long pc = regs->cp0_epc;
148 
149 	if (!task)
150 		task = current;
151 
152 	if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
153 		show_raw_backtrace(sp, loglvl);
154 		return;
155 	}
156 	printk("%sCall Trace:\n", loglvl);
157 	do {
158 		print_ip_sym(loglvl, pc);
159 		pc = unwind_stack(task, &sp, pc, &ra);
160 	} while (pc);
161 	pr_cont("\n");
162 }
163 
164 /*
165  * This routine abuses get_user()/put_user() to reference pointers
166  * with at least a bit of error checking ...
167  */
168 static void show_stacktrace(struct task_struct *task,
169 	const struct pt_regs *regs, const char *loglvl)
170 {
171 	const int field = 2 * sizeof(unsigned long);
172 	long stackdata;
173 	int i;
174 	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];
175 
176 	printk("%sStack :", loglvl);
177 	i = 0;
178 	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
179 		if (i && ((i % (64 / field)) == 0)) {
180 			pr_cont("\n");
181 			printk("%s       ", loglvl);
182 		}
183 		if (i > 39) {
184 			pr_cont(" ...");
185 			break;
186 		}
187 
188 		if (__get_user(stackdata, sp++)) {
189 			pr_cont(" (Bad stack address)");
190 			break;
191 		}
192 
193 		pr_cont(" %0*lx", field, stackdata);
194 		i++;
195 	}
196 	pr_cont("\n");
197 	show_backtrace(task, regs, loglvl);
198 }
199 
200 void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
201 {
202 	struct pt_regs regs;
203 	mm_segment_t old_fs = get_fs();
204 
205 	regs.cp0_status = KSU_KERNEL;
206 	if (sp) {
207 		regs.regs[29] = (unsigned long)sp;
208 		regs.regs[31] = 0;
209 		regs.cp0_epc = 0;
210 	} else {
211 		if (task && task != current) {
212 			regs.regs[29] = task->thread.reg29;
213 			regs.regs[31] = 0;
214 			regs.cp0_epc = task->thread.reg31;
215 		} else {
216 			prepare_frametrace(&regs);
217 		}
218 	}
219 	/*
220 	 * show_stack() deals exclusively with kernel mode, so be sure to access
221 	 * the stack in the kernel (not user) address space.
222 	 */
223 	set_fs(KERNEL_DS);
224 	show_stacktrace(task, &regs, loglvl);
225 	set_fs(old_fs);
226 }
227 
228 static void show_code(unsigned int __user *pc)
229 {
230 	long i;
231 	unsigned short __user *pc16 = NULL;
232 
233 	printk("Code:");
234 
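	/*
	 * If bit 0 of EPC is set, the faulting code is MIPS16e or
	 * microMIPS, so fetch and print 16-bit units rather than
	 * 32-bit words.
	 */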
235 	if ((unsigned long)pc & 1)
236 		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
237 	for (i = -3; i < 6; i++) {
238 		unsigned int insn;
239 		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
240 			pr_cont(" (Bad address in epc)\n");
241 			break;
242 		}
243 		pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
244 	}
245 	pr_cont("\n");
246 }
247 
248 static void __show_regs(const struct pt_regs *regs)
249 {
250 	const int field = 2 * sizeof(unsigned long);
251 	unsigned int cause = regs->cp0_cause;
252 	unsigned int exccode;
253 	int i;
254 
255 	show_regs_print_info(KERN_DEFAULT);
256 
257 	/*
258 	 * Saved main processor registers
259 	 */
260 	for (i = 0; i < 32; ) {
261 		if ((i % 4) == 0)
262 			printk("$%2d   :", i);
263 		if (i == 0)
264 			pr_cont(" %0*lx", field, 0UL);
265 		else if (i == 26 || i == 27)
266 			pr_cont(" %*s", field, "");
267 		else
268 			pr_cont(" %0*lx", field, regs->regs[i]);
269 
270 		i++;
271 		if ((i % 4) == 0)
272 			pr_cont("\n");
273 	}
274 
275 #ifdef CONFIG_CPU_HAS_SMARTMIPS
276 	printk("Acx    : %0*lx\n", field, regs->acx);
277 #endif
278 	if (MIPS_ISA_REV < 6) {
279 		printk("Hi    : %0*lx\n", field, regs->hi);
280 		printk("Lo    : %0*lx\n", field, regs->lo);
281 	}
282 
283 	/*
284 	 * Saved cp0 registers
285 	 */
286 	printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
287 	       (void *) regs->cp0_epc);
288 	printk("ra    : %0*lx %pS\n", field, regs->regs[31],
289 	       (void *) regs->regs[31]);
290 
291 	printk("Status: %08x	", (uint32_t) regs->cp0_status);
292 
293 	if (cpu_has_3kex) {
294 		if (regs->cp0_status & ST0_KUO)
295 			pr_cont("KUo ");
296 		if (regs->cp0_status & ST0_IEO)
297 			pr_cont("IEo ");
298 		if (regs->cp0_status & ST0_KUP)
299 			pr_cont("KUp ");
300 		if (regs->cp0_status & ST0_IEP)
301 			pr_cont("IEp ");
302 		if (regs->cp0_status & ST0_KUC)
303 			pr_cont("KUc ");
304 		if (regs->cp0_status & ST0_IEC)
305 			pr_cont("IEc ");
306 	} else if (cpu_has_4kex) {
307 		if (regs->cp0_status & ST0_KX)
308 			pr_cont("KX ");
309 		if (regs->cp0_status & ST0_SX)
310 			pr_cont("SX ");
311 		if (regs->cp0_status & ST0_UX)
312 			pr_cont("UX ");
313 		switch (regs->cp0_status & ST0_KSU) {
314 		case KSU_USER:
315 			pr_cont("USER ");
316 			break;
317 		case KSU_SUPERVISOR:
318 			pr_cont("SUPERVISOR ");
319 			break;
320 		case KSU_KERNEL:
321 			pr_cont("KERNEL ");
322 			break;
323 		default:
324 			pr_cont("BAD_MODE ");
325 			break;
326 		}
327 		if (regs->cp0_status & ST0_ERL)
328 			pr_cont("ERL ");
329 		if (regs->cp0_status & ST0_EXL)
330 			pr_cont("EXL ");
331 		if (regs->cp0_status & ST0_IE)
332 			pr_cont("IE ");
333 	}
334 	pr_cont("\n");
335 
336 	exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
337 	printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);
338 
339 	if (1 <= exccode && exccode <= 5)
340 		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
341 
342 	printk("PrId  : %08x (%s)\n", read_c0_prid(),
343 	       cpu_name_string());
344 }
345 
346 /*
347  * FIXME: really the generic show_regs should take a const pointer argument.
348  */
349 void show_regs(struct pt_regs *regs)
350 {
351 	__show_regs(regs);
352 	dump_stack();
353 }
354 
355 void show_registers(struct pt_regs *regs)
356 {
357 	const int field = 2 * sizeof(unsigned long);
358 	mm_segment_t old_fs = get_fs();
359 
360 	__show_regs(regs);
361 	print_modules();
362 	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
363 	       current->comm, current->pid, current_thread_info(), current,
364 	      field, current_thread_info()->tp_value);
365 	if (cpu_has_userlocal) {
366 		unsigned long tls;
367 
368 		tls = read_c0_userlocal();
369 		if (tls != current_thread_info()->tp_value)
370 			printk("*HwTLS: %0*lx\n", field, tls);
371 	}
372 
373 	if (!user_mode(regs))
374 		/* Necessary for getting the correct stack content */
375 		set_fs(KERNEL_DS);
376 	show_stacktrace(current, regs, KERN_DEFAULT);
377 	show_code((unsigned int __user *) regs->cp0_epc);
378 	printk("\n");
379 	set_fs(old_fs);
380 }
381 
382 static DEFINE_RAW_SPINLOCK(die_lock);
383 
384 void __noreturn die(const char *str, struct pt_regs *regs)
385 {
386 	static int die_counter;
387 	int sig = SIGSEGV;
388 
389 	oops_enter();
390 
391 	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
392 		       SIGSEGV) == NOTIFY_STOP)
393 		sig = 0;
394 
395 	console_verbose();
396 	raw_spin_lock_irq(&die_lock);
397 	bust_spinlocks(1);
398 
399 	printk("%s[#%d]:\n", str, ++die_counter);
400 	show_registers(regs);
401 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
402 	raw_spin_unlock_irq(&die_lock);
403 
404 	oops_exit();
405 
406 	if (in_interrupt())
407 		panic("Fatal exception in interrupt");
408 
409 	if (panic_on_oops)
410 		panic("Fatal exception");
411 
412 	if (regs && kexec_should_crash(current))
413 		crash_kexec(regs);
414 
415 	do_exit(sig);
416 }
417 
418 extern struct exception_table_entry __start___dbe_table[];
419 extern struct exception_table_entry __stop___dbe_table[];
420 
421 __asm__(
422 "	.section	__dbe_table, \"a\"\n"
423 "	.previous			\n");
424 
425 /* Given an address, look for it in the exception tables. */
426 static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
427 {
428 	const struct exception_table_entry *e;
429 
430 	e = search_extable(__start___dbe_table,
431 			   __stop___dbe_table - __start___dbe_table, addr);
432 	if (!e)
433 		e = search_module_dbetables(addr);
434 	return e;
435 }
436 
437 asmlinkage void do_be(struct pt_regs *regs)
438 {
439 	const int field = 2 * sizeof(unsigned long);
440 	const struct exception_table_entry *fixup = NULL;
441 	int data = regs->cp0_cause & 4;
442 	int action = MIPS_BE_FATAL;
443 	enum ctx_state prev_state;
444 
445 	prev_state = exception_enter();
446 	/* XXX For now.	 Fixme, this searches the wrong table ...  */
447 	if (data && !user_mode(regs))
448 		fixup = search_dbe_tables(exception_epc(regs));
449 
450 	if (fixup)
451 		action = MIPS_BE_FIXUP;
452 
453 	if (board_be_handler)
454 		action = board_be_handler(regs, fixup != NULL);
455 	else
456 		mips_cm_error_report();
457 
458 	switch (action) {
459 	case MIPS_BE_DISCARD:
460 		goto out;
461 	case MIPS_BE_FIXUP:
462 		if (fixup) {
463 			regs->cp0_epc = fixup->nextinsn;
464 			goto out;
465 		}
466 		break;
467 	default:
468 		break;
469 	}
470 
471 	/*
472 	 * Assume it would be too dangerous to continue ...
473 	 */
474 	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
475 	       data ? "Data" : "Instruction",
476 	       field, regs->cp0_epc, field, regs->regs[31]);
477 	if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
478 		       SIGBUS) == NOTIFY_STOP)
479 		goto out;
480 
481 	die_if_kernel("Oops", regs);
482 	force_sig(SIGBUS);
483 
484 out:
485 	exception_exit(prev_state);
486 }
487 
488 /*
489  * ll/sc, rdhwr, sync emulation
490  */
491 
492 #define OPCODE 0xfc000000
493 #define BASE   0x03e00000
494 #define RT     0x001f0000
495 #define OFFSET 0x0000ffff
496 #define LL     0xc0000000
497 #define SC     0xe0000000
498 #define SPEC0  0x00000000
499 #define SPEC3  0x7c000000
500 #define RD     0x0000f800
501 #define FUNC   0x0000003f
502 #define SYNC   0x0000000f
503 #define RDHWR  0x0000003b
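/*
 * These masks follow the classic MIPS I-type layout: opcode in
 * [31:26], base/rs in [25:21], rt in [20:16] and offset in [15:0];
 * SPEC0/SPEC3 (R-type) encodings additionally place rd in [15:11]
 * and the function code in [5:0].
 */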
504 
505 /*  microMIPS definitions   */
506 #define MM_POOL32A_FUNC 0xfc00ffff
507 #define MM_RDHWR        0x00006b3c
508 #define MM_RS           0x001f0000
509 #define MM_RT           0x03e00000
510 
511 /*
512  * The ll_bit is cleared by r*_switch.S
513  */
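/*
 * Clearing ll_bit on every context switch makes an emulated sc fail
 * whenever another task has run between the ll and the sc, which is
 * what preserves the atomicity contract of the pair.
 */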
514 
515 unsigned int ll_bit;
516 struct task_struct *ll_task;
517 
518 static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
519 {
520 	unsigned long value, __user *vaddr;
521 	long offset;
522 
523 	/*
524 	 * Analyse the ll instruction that just caused an RI exception
525 	 * and work out the address it references.
526 	 */
527 
528 	/* sign extend offset */
529 	offset = opcode & OFFSET;
530 	offset <<= 16;
531 	offset >>= 16;
532 
533 	vaddr = (unsigned long __user *)
534 		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
535 
536 	if ((unsigned long)vaddr & 3)
537 		return SIGBUS;
538 	if (get_user(value, vaddr))
539 		return SIGSEGV;
540 
541 	preempt_disable();
542 
543 	if (ll_task == NULL || ll_task == current) {
544 		ll_bit = 1;
545 	} else {
546 		ll_bit = 0;
547 	}
548 	ll_task = current;
549 
550 	preempt_enable();
551 
552 	regs->regs[(opcode & RT) >> 16] = value;
553 
554 	return 0;
555 }
556 
557 static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
558 {
559 	unsigned long __user *vaddr;
560 	unsigned long reg;
561 	long offset;
562 
563 	/*
564 	 * Analyse the sc instruction that just caused an RI exception
565 	 * and work out the address it references.
566 	 */
567 
568 	/* sign extend offset */
569 	offset = opcode & OFFSET;
570 	offset <<= 16;
571 	offset >>= 16;
572 
573 	vaddr = (unsigned long __user *)
574 		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
575 	reg = (opcode & RT) >> 16;
576 
577 	if ((unsigned long)vaddr & 3)
578 		return SIGBUS;
579 
580 	preempt_disable();
581 
582 	if (ll_bit == 0 || ll_task != current) {
583 		regs->regs[reg] = 0;
584 		preempt_enable();
585 		return 0;
586 	}
587 
588 	preempt_enable();
589 
590 	if (put_user(regs->regs[reg], vaddr))
591 		return SIGSEGV;
592 
593 	regs->regs[reg] = 1;
594 
595 	return 0;
596 }
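/*
 * For reference, the user-space pattern being emulated is the classic
 * atomic read-modify-write loop - a sketch, not lifted from any
 * particular binary:
 *
 *	1:	ll	t0, 0(a0)	# load linked, sets ll_bit
 *		addu	t0, t0, a1
 *		sc	t0, 0(a0)	# store conditional
 *		beqz	t0, 1b		# retry if the link was lost
 */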
597 
598 /*
599  * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is, both
600  * opcodes are supposed to result in coprocessor unusable exceptions if
601  * executed on ll/sc-less processors.  That's the theory.  In practice a
602  * few processors such as NEC's VR4100 throw reserved instruction exceptions
603  * instead, so we're doing the emulation thing in both exception handlers.
604  */
605 static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
606 {
607 	if ((opcode & OPCODE) == LL) {
608 		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
609 				1, regs, 0);
610 		return simulate_ll(regs, opcode);
611 	}
612 	if ((opcode & OPCODE) == SC) {
613 		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
614 				1, regs, 0);
615 		return simulate_sc(regs, opcode);
616 	}
617 
618 	return -1;			/* Must be something else ... */
619 }
620 
621 /*
622  * Simulate trapping 'rdhwr' instructions to provide user accessible
623  * registers not implemented in hardware.
624  */
625 static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
626 {
627 	struct thread_info *ti = task_thread_info(current);
628 
629 	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
630 			1, regs, 0);
631 	switch (rd) {
632 	case MIPS_HWR_CPUNUM:		/* CPU number */
633 		regs->regs[rt] = smp_processor_id();
634 		return 0;
635 	case MIPS_HWR_SYNCISTEP:	/* SYNCI length */
636 		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
637 				     current_cpu_data.icache.linesz);
638 		return 0;
639 	case MIPS_HWR_CC:		/* Read count register */
640 		regs->regs[rt] = read_c0_count();
641 		return 0;
642 	case MIPS_HWR_CCRES:		/* Count register resolution */
643 		switch (current_cpu_type()) {
644 		case CPU_20KC:
645 		case CPU_25KF:
646 			regs->regs[rt] = 1;
647 			break;
648 		default:
649 			regs->regs[rt] = 2;
650 		}
651 		return 0;
652 	case MIPS_HWR_ULR:		/* Read UserLocal register */
653 		regs->regs[rt] = ti->tp_value;
654 		return 0;
655 	default:
656 		return -1;
657 	}
658 }
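/*
 * The usual trigger for this path is TLS: user code reads the thread
 * pointer with "rdhwr $3, $29" (HWR 29 is UserLocal), which raises a
 * reserved instruction exception on cores lacking RDHWR or the
 * UserLocal register and is handled by the MIPS_HWR_ULR case above.
 */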
659 
660 static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
661 {
662 	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
663 		int rd = (opcode & RD) >> 11;
664 		int rt = (opcode & RT) >> 16;
665 
666 		simulate_rdhwr(regs, rd, rt);
667 		return 0;
668 	}
669 
670 	/* Not ours.  */
671 	return -1;
672 }
673 
674 static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
675 {
676 	if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
677 		int rd = (opcode & MM_RS) >> 16;
678 		int rt = (opcode & MM_RT) >> 21;
679 		simulate_rdhwr(regs, rd, rt);
680 		return 0;
681 	}
682 
683 	/* Not ours.  */
684 	return -1;
685 }
686 
687 static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
688 {
689 	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
690 		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
691 				1, regs, 0);
692 		return 0;
693 	}
694 
695 	return -1;			/* Must be something else ... */
696 }
697 
698 /*
699  * Loongson-3 CSR instructions emulation
700  */
701 
702 #ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION
703 
704 #define LWC2             0xc8000000
705 #define RS               BASE
706 #define CSR_OPCODE2      0x00000118
707 #define CSR_OPCODE2_MASK 0x000007ff
708 #define CSR_FUNC_MASK    RT
709 #define CSR_FUNC_CPUCFG  0x8
710 
711 static int simulate_loongson3_cpucfg(struct pt_regs *regs,
712 				     unsigned int opcode)
713 {
714 	int op = opcode & OPCODE;
715 	int op2 = opcode & CSR_OPCODE2_MASK;
716 	int csr_func = (opcode & CSR_FUNC_MASK) >> 16;
717 
718 	if (op == LWC2 && op2 == CSR_OPCODE2 && csr_func == CSR_FUNC_CPUCFG) {
719 		int rd = (opcode & RD) >> 11;
720 		int rs = (opcode & RS) >> 21;
721 		__u64 sel = regs->regs[rs];
722 
723 		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
724 
725 		/* Do not emulate on unsupported core models. */
726 		preempt_disable();
727 		if (!loongson3_cpucfg_emulation_enabled(&current_cpu_data)) {
728 			preempt_enable();
729 			return -1;
730 		}
731 		regs->regs[rd] = loongson3_cpucfg_read_synthesized(
732 			&current_cpu_data, sel);
733 		preempt_enable();
734 		return 0;
735 	}
736 
737 	/* Not ours.  */
738 	return -1;
739 }
740 #endif /* CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION */
741 
742 asmlinkage void do_ov(struct pt_regs *regs)
743 {
744 	enum ctx_state prev_state;
745 
746 	prev_state = exception_enter();
747 	die_if_kernel("Integer overflow", regs);
748 
749 	force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->cp0_epc);
750 	exception_exit(prev_state);
751 }
752 
753 #ifdef CONFIG_MIPS_FP_SUPPORT
754 
755 /*
756  * Send SIGFPE according to FCSR Cause bits, which must have already
757  * been masked against Enable bits.  This is important as Inexact can
758  * happen together with Overflow or Underflow, and `ptrace' can set
759  * any bits.
760  */
761 void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
762 		     struct task_struct *tsk)
763 {
764 	int si_code = FPE_FLTUNK;
765 
766 	if (fcr31 & FPU_CSR_INV_X)
767 		si_code = FPE_FLTINV;
768 	else if (fcr31 & FPU_CSR_DIV_X)
769 		si_code = FPE_FLTDIV;
770 	else if (fcr31 & FPU_CSR_OVF_X)
771 		si_code = FPE_FLTOVF;
772 	else if (fcr31 & FPU_CSR_UDF_X)
773 		si_code = FPE_FLTUND;
774 	else if (fcr31 & FPU_CSR_INE_X)
775 		si_code = FPE_FLTRES;
776 
777 	force_sig_fault_to_task(SIGFPE, si_code, fault_addr, tsk);
778 }
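/*
 * For orientation: FCSR keeps the Flags in bits 6:2, the Enables in
 * bits 11:7 and the Cause bits in bits 17:12; the extra Unimplemented
 * Operation (E) Cause bit at bit 17 has no matching Enable and always
 * traps.
 */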
779 
780 int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
781 {
782 	int si_code;
783 	struct vm_area_struct *vma;
784 
785 	switch (sig) {
786 	case 0:
787 		return 0;
788 
789 	case SIGFPE:
790 		force_fcr31_sig(fcr31, fault_addr, current);
791 		return 1;
792 
793 	case SIGBUS:
794 		force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
795 		return 1;
796 
797 	case SIGSEGV:
798 		mmap_read_lock(current->mm);
799 		vma = find_vma(current->mm, (unsigned long)fault_addr);
800 		if (vma && (vma->vm_start <= (unsigned long)fault_addr))
801 			si_code = SEGV_ACCERR;
802 		else
803 			si_code = SEGV_MAPERR;
804 		mmap_read_unlock(current->mm);
805 		force_sig_fault(SIGSEGV, si_code, fault_addr);
806 		return 1;
807 
808 	default:
809 		force_sig(sig);
810 		return 1;
811 	}
812 }
813 
814 static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
815 		       unsigned long old_epc, unsigned long old_ra)
816 {
817 	union mips_instruction inst = { .word = opcode };
818 	void __user *fault_addr;
819 	unsigned long fcr31;
820 	int sig;
821 
822 	/* If it's obviously not an FP instruction, skip it */
823 	switch (inst.i_format.opcode) {
824 	case cop1_op:
825 	case cop1x_op:
826 	case lwc1_op:
827 	case ldc1_op:
828 	case swc1_op:
829 	case sdc1_op:
830 		break;
831 
832 	default:
833 		return -1;
834 	}
835 
836 	/*
837 	 * do_ri skipped over the instruction via compute_return_epc, undo
838 	 * that for the FPU emulator.
839 	 */
840 	regs->cp0_epc = old_epc;
841 	regs->regs[31] = old_ra;
842 
843 	/* Run the emulator */
844 	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
845 				       &fault_addr);
846 
847 	/*
848 	 * We can't allow the emulated instruction to leave any
849 	 * enabled Cause bits set in $fcr31.
850 	 */
851 	fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
852 	current->thread.fpu.fcr31 &= ~fcr31;
853 
854 	/* Restore the hardware register state */
855 	own_fpu(1);
856 
857 	/* Send a signal if required.  */
858 	process_fpemu_return(sig, fault_addr, fcr31);
859 
860 	return 0;
861 }
862 
863 /*
864  * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
865  */
866 asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
867 {
868 	enum ctx_state prev_state;
869 	void __user *fault_addr;
870 	int sig;
871 
872 	prev_state = exception_enter();
873 	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
874 		       SIGFPE) == NOTIFY_STOP)
875 		goto out;
876 
877 	/* Clear FCSR.Cause before enabling interrupts */
878 	write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
879 	local_irq_enable();
880 
881 	die_if_kernel("FP exception in kernel code", regs);
882 
883 	if (fcr31 & FPU_CSR_UNI_X) {
884 		/*
885 		 * Unimplemented operation exception.  If we've got the full
886 		 * software emulator on-board, let's use it...
887 		 *
888 		 * Force FPU to dump state into task/thread context.  We're
889 		 * moving a lot of data here for what is probably a single
890 		 * instruction, but the alternative is to pre-decode the FP
891 		 * register operands before invoking the emulator, which seems
892 		 * a bit extreme for what should be an infrequent event.
893 		 */
894 
895 		/* Run the emulator */
896 		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
897 					       &fault_addr);
898 
899 		/*
900 		 * We can't allow the emulated instruction to leave any
901 		 * enabled Cause bits set in $fcr31.
902 		 */
903 		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
904 		current->thread.fpu.fcr31 &= ~fcr31;
905 
906 		/* Restore the hardware register state */
907 		own_fpu(1);	/* Using the FPU again.	 */
908 	} else {
909 		sig = SIGFPE;
910 		fault_addr = (void __user *) regs->cp0_epc;
911 	}
912 
913 	/* Send a signal if required.  */
914 	process_fpemu_return(sig, fault_addr, fcr31);
915 
916 out:
917 	exception_exit(prev_state);
918 }
919 
920 /*
921  * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
922  * emulated more than some threshold number of instructions, force migration to
923  * a "CPU" that has FP support.
924  */
925 static void mt_ase_fp_affinity(void)
926 {
927 #ifdef CONFIG_MIPS_MT_FPAFF
928 	if (mt_fpemul_threshold > 0 &&
929 	     ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
930 		/*
931 		 * If there's no FPU present, or if the application has already
932 		 * restricted the allowed set to exclude any CPUs with FPUs,
933 		 * we'll skip the procedure.
934 		 */
935 		if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) {
936 			cpumask_t tmask;
937 
938 			current->thread.user_cpus_allowed
939 				= current->cpus_mask;
940 			cpumask_and(&tmask, &current->cpus_mask,
941 				    &mt_fpu_cpumask);
942 			set_cpus_allowed_ptr(current, &tmask);
943 			set_thread_flag(TIF_FPUBOUND);
944 		}
945 	}
946 #endif /* CONFIG_MIPS_MT_FPAFF */
947 }
948 
949 #else /* !CONFIG_MIPS_FP_SUPPORT */
950 
951 static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
952 		       unsigned long old_epc, unsigned long old_ra)
953 {
954 	return -1;
955 }
956 
957 #endif /* !CONFIG_MIPS_FP_SUPPORT */
958 
959 void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
960 	const char *str)
961 {
962 	char b[40];
963 
964 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
965 	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
966 			 SIGTRAP) == NOTIFY_STOP)
967 		return;
968 #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
969 
970 	if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
971 		       SIGTRAP) == NOTIFY_STOP)
972 		return;
973 
974 	/*
975 	 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
976 	 * insns, even for trap and break codes that indicate arithmetic
977 	 * failures.  Weird ...
978 	 * But should we continue the brokenness???  --macro
979 	 */
980 	switch (code) {
981 	case BRK_OVERFLOW:
982 	case BRK_DIVZERO:
983 		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
984 		die_if_kernel(b, regs);
985 		force_sig_fault(SIGFPE,
986 				code == BRK_DIVZERO ? FPE_INTDIV : FPE_INTOVF,
987 				(void __user *) regs->cp0_epc);
988 		break;
989 	case BRK_BUG:
990 		die_if_kernel("Kernel bug detected", regs);
991 		force_sig(SIGTRAP);
992 		break;
993 	case BRK_MEMU:
994 		/*
995 		 * This breakpoint code is used by the FPU emulator to retake
996 		 * control of the CPU after executing the instruction from the
997 		 * delay slot of an emulated branch.
998 		 *
999 		 * Terminate if the exception was recognized as a delay-slot
1000 		 * return; otherwise handle as normal.
1001 		 */
1002 		if (do_dsemulret(regs))
1003 			return;
1004 
1005 		die_if_kernel("Math emu break/trap", regs);
1006 		force_sig(SIGTRAP);
1007 		break;
1008 	default:
1009 		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
1010 		die_if_kernel(b, regs);
1011 		if (si_code) {
1012 			force_sig_fault(SIGTRAP, si_code, NULL);
1013 		} else {
1014 			force_sig(SIGTRAP);
1015 		}
1016 	}
1017 }
1018 
1019 asmlinkage void do_bp(struct pt_regs *regs)
1020 {
1021 	unsigned long epc = msk_isa16_mode(exception_epc(regs));
1022 	unsigned int opcode, bcode;
1023 	enum ctx_state prev_state;
1024 	mm_segment_t seg;
1025 
1026 	seg = get_fs();
1027 	if (!user_mode(regs))
1028 		set_fs(KERNEL_DS);
1029 
1030 	prev_state = exception_enter();
1031 	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1032 	if (get_isa16_mode(regs->cp0_epc)) {
1033 		u16 instr[2];
1034 
1035 		if (__get_user(instr[0], (u16 __user *)epc))
1036 			goto out_sigsegv;
1037 
1038 		if (!cpu_has_mmips) {
1039 			/* MIPS16e mode */
1040 			bcode = (instr[0] >> 5) & 0x3f;
1041 		} else if (mm_insn_16bit(instr[0])) {
1042 			/* 16-bit microMIPS BREAK */
1043 			bcode = instr[0] & 0xf;
1044 		} else {
1045 			/* 32-bit microMIPS BREAK */
1046 			if (__get_user(instr[1], (u16 __user *)(epc + 2)))
1047 				goto out_sigsegv;
1048 			opcode = (instr[0] << 16) | instr[1];
1049 			bcode = (opcode >> 6) & ((1 << 20) - 1);
1050 		}
1051 	} else {
1052 		if (__get_user(opcode, (unsigned int __user *)epc))
1053 			goto out_sigsegv;
1054 		bcode = (opcode >> 6) & ((1 << 20) - 1);
1055 	}
1056 
1057 	/*
1058 	 * There is an ancient bug in MIPS assemblers that makes the break
1059 	 * code start at bit 16 instead of bit 6 in the opcode.
1060 	 * Gas is bug-compatible, but not always, grrr...
1061 	 * We handle both cases with a simple heuristic.  --macro
1062 	 */
1063 	if (bcode >= (1 << 10))
1064 		bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);
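	/*
	 * Worked example: a mis-assembled "break 1" puts the code at
	 * bit 16, so the 20-bit field reads as 1 << 10 == 0x400 and
	 * the swap above turns it back into 1.
	 */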
1065 
1066 	/*
1067 	 * Notify the kprobe handlers if the instruction is likely to
1068 	 * pertain to them.
1069 	 */
1070 	switch (bcode) {
1071 	case BRK_UPROBE:
1072 		if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
1073 			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1074 			goto out;
1075 		else
1076 			break;
1077 	case BRK_UPROBE_XOL:
1078 		if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
1079 			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1080 			goto out;
1081 		else
1082 			break;
1083 	case BRK_KPROBE_BP:
1084 		if (notify_die(DIE_BREAK, "debug", regs, bcode,
1085 			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1086 			goto out;
1087 		else
1088 			break;
1089 	case BRK_KPROBE_SSTEPBP:
1090 		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
1091 			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1092 			goto out;
1093 		else
1094 			break;
1095 	default:
1096 		break;
1097 	}
1098 
1099 	do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");
1100 
1101 out:
1102 	set_fs(seg);
1103 	exception_exit(prev_state);
1104 	return;
1105 
1106 out_sigsegv:
1107 	force_sig(SIGSEGV);
1108 	goto out;
1109 }
1110 
1111 asmlinkage void do_tr(struct pt_regs *regs)
1112 {
1113 	u32 opcode, tcode = 0;
1114 	enum ctx_state prev_state;
1115 	u16 instr[2];
1116 	mm_segment_t seg;
1117 	unsigned long epc = msk_isa16_mode(exception_epc(regs));
1118 
1119 	seg = get_fs();
1120 	if (!user_mode(regs))
1121 		set_fs(KERNEL_DS);
1122 
1123 	prev_state = exception_enter();
1124 	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1125 	if (get_isa16_mode(regs->cp0_epc)) {
1126 		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
1127 		    __get_user(instr[1], (u16 __user *)(epc + 2)))
1128 			goto out_sigsegv;
1129 		opcode = (instr[0] << 16) | instr[1];
1130 		/* Immediate versions don't provide a code.  */
1131 		if (!(opcode & OPCODE))
1132 			tcode = (opcode >> 12) & ((1 << 4) - 1);
1133 	} else {
1134 		if (__get_user(opcode, (u32 __user *)epc))
1135 			goto out_sigsegv;
1136 		/* Immediate versions don't provide a code.  */
1137 		if (!(opcode & OPCODE))
1138 			tcode = (opcode >> 6) & ((1 << 10) - 1);
1139 	}
1140 
1141 	do_trap_or_bp(regs, tcode, 0, "Trap");
1142 
1143 out:
1144 	set_fs(seg);
1145 	exception_exit(prev_state);
1146 	return;
1147 
1148 out_sigsegv:
1149 	force_sig(SIGSEGV);
1150 	goto out;
1151 }
1152 
1153 asmlinkage void do_ri(struct pt_regs *regs)
1154 {
1155 	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
1156 	unsigned long old_epc = regs->cp0_epc;
1157 	unsigned long old31 = regs->regs[31];
1158 	enum ctx_state prev_state;
1159 	unsigned int opcode = 0;
1160 	int status = -1;
1161 
1162 	/*
1163 	 * Avoid any kernel code. Just emulate the R2 instruction
1164 	 * as quickly as possible.
1165 	 */
1166 	if (mipsr2_emulation && cpu_has_mips_r6 &&
1167 	    likely(user_mode(regs)) &&
1168 	    likely(get_user(opcode, epc) >= 0)) {
1169 		unsigned long fcr31 = 0;
1170 
1171 		status = mipsr2_decoder(regs, opcode, &fcr31);
1172 		switch (status) {
1173 		case 0:
1174 		case SIGEMT:
1175 			return;
1176 		case SIGILL:
1177 			goto no_r2_instr;
1178 		default:
1179 			process_fpemu_return(status,
1180 					     &current->thread.cp0_baduaddr,
1181 					     fcr31);
1182 			return;
1183 		}
1184 	}
1185 
1186 no_r2_instr:
1187 
1188 	prev_state = exception_enter();
1189 	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1190 
1191 	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
1192 		       SIGILL) == NOTIFY_STOP)
1193 		goto out;
1194 
1195 	die_if_kernel("Reserved instruction in kernel code", regs);
1196 
1197 	if (unlikely(compute_return_epc(regs) < 0))
1198 		goto out;
1199 
1200 	if (!get_isa16_mode(regs->cp0_epc)) {
1201 		if (unlikely(get_user(opcode, epc) < 0))
1202 			status = SIGSEGV;
1203 
1204 		if (!cpu_has_llsc && status < 0)
1205 			status = simulate_llsc(regs, opcode);
1206 
1207 		if (status < 0)
1208 			status = simulate_rdhwr_normal(regs, opcode);
1209 
1210 		if (status < 0)
1211 			status = simulate_sync(regs, opcode);
1212 
1213 		if (status < 0)
1214 			status = simulate_fp(regs, opcode, old_epc, old31);
1215 
1216 #ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION
1217 		if (status < 0)
1218 			status = simulate_loongson3_cpucfg(regs, opcode);
1219 #endif
1220 	} else if (cpu_has_mmips) {
1221 		unsigned short mmop[2] = { 0 };
1222 
1223 		if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
1224 			status = SIGSEGV;
1225 		if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
1226 			status = SIGSEGV;
1227 		opcode = mmop[0];
1228 		opcode = (opcode << 16) | mmop[1];
1229 
1230 		if (status < 0)
1231 			status = simulate_rdhwr_mm(regs, opcode);
1232 	}
1233 
1234 	if (status < 0)
1235 		status = SIGILL;
1236 
1237 	if (unlikely(status > 0)) {
1238 		regs->cp0_epc = old_epc;		/* Undo skip-over.  */
1239 		regs->regs[31] = old31;
1240 		force_sig(status);
1241 	}
1242 
1243 out:
1244 	exception_exit(prev_state);
1245 }
1246 
1247 /*
1248  * No lock; only written during early bootup by CPU 0.
1249  */
1250 static RAW_NOTIFIER_HEAD(cu2_chain);
1251 
1252 int __ref register_cu2_notifier(struct notifier_block *nb)
1253 {
1254 	return raw_notifier_chain_register(&cu2_chain, nb);
1255 }
1256 
1257 int cu2_notifier_call_chain(unsigned long val, void *v)
1258 {
1259 	return raw_notifier_call_chain(&cu2_chain, val, v);
1260 }
1261 
1262 static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
1263 	void *data)
1264 {
1265 	struct pt_regs *regs = data;
1266 
1267 	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
1268 			      "instruction", regs);
1269 	force_sig(SIGILL);
1270 
1271 	return NOTIFY_OK;
1272 }
1273 
1274 #ifdef CONFIG_MIPS_FP_SUPPORT
1275 
1276 static int enable_restore_fp_context(int msa)
1277 {
1278 	int err, was_fpu_owner, prior_msa;
1279 	bool first_fp;
1280 
1281 	/* Initialize context if it hasn't been used already */
1282 	first_fp = init_fp_ctx(current);
1283 
1284 	if (first_fp) {
1285 		preempt_disable();
1286 		err = own_fpu_inatomic(1);
1287 		if (msa && !err) {
1288 			enable_msa();
1289 			set_thread_flag(TIF_USEDMSA);
1290 			set_thread_flag(TIF_MSA_CTX_LIVE);
1291 		}
1292 		preempt_enable();
1293 		return err;
1294 	}
1295 
1296 	/*
1297 	 * This task has formerly used the FP context.
1298 	 *
1299 	 * If this thread has no live MSA vector context then we can simply
1300 	 * restore the scalar FP context. If it has live MSA vector context
1301 	 * (that is, it has or may have used MSA since last performing a
1302 	 * function call) then we'll need to restore the vector context. This
1303 	 * applies even if we're currently only executing a scalar FP
1304 	 * instruction. This is because if we were to later execute an MSA
1305 	 * instruction then we'd either have to:
1306 	 *
1307 	 *  - Restore the vector context & clobber any registers modified by
1308 	 *    scalar FP instructions between now & then.
1309 	 *
1310 	 * or
1311 	 *
1312 	 *  - Not restore the vector context & lose the most significant bits
1313 	 *    of all vector registers.
1314 	 *
1315 	 * Neither of those options is acceptable. We cannot restore the least
1316 	 * significant bits of the registers now & only restore the most
1317 	 * significant bits later because the most significant bits of any
1318 	 * vector registers whose aliased FP register is modified now will have
1319 	 * been zeroed. We'd have no way to know that when restoring the vector
1320 	 * context & thus may load an outdated value for the most significant
1321 	 * bits of a vector register.
1322 	 */
1323 	if (!msa && !thread_msa_context_live())
1324 		return own_fpu(1);
1325 
1326 	/*
1327 	 * This task is using or has previously used MSA. Thus we require
1328 	 * that Status.FR == 1.
1329 	 */
1330 	preempt_disable();
1331 	was_fpu_owner = is_fpu_owner();
1332 	err = own_fpu_inatomic(0);
1333 	if (err)
1334 		goto out;
1335 
1336 	enable_msa();
1337 	write_msa_csr(current->thread.fpu.msacsr);
1338 	set_thread_flag(TIF_USEDMSA);
1339 
1340 	/*
1341 	 * If this is the first time that the task is using MSA and it has
1342  * previously used scalar FP in this time slice then we already have
1343 	 * FP context which we shouldn't clobber. We do however need to clear
1344 	 * the upper 64b of each vector register so that this task has no
1345 	 * opportunity to see data left behind by another.
1346 	 */
1347 	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
1348 	if (!prior_msa && was_fpu_owner) {
1349 		init_msa_upper();
1350 
1351 		goto out;
1352 	}
1353 
1354 	if (!prior_msa) {
1355 		/*
1356 		 * Restore the least significant 64b of each vector register
1357 		 * from the existing scalar FP context.
1358 		 */
1359 		_restore_fp(current);
1360 
1361 		/*
1362 		 * The task has not formerly used MSA, so clear the upper 64b
1363 		 * of each vector register such that it cannot see data left
1364 		 * behind by another task.
1365 		 */
1366 		init_msa_upper();
1367 	} else {
1368 		/* We need to restore the vector context. */
1369 		restore_msa(current);
1370 
1371 		/* Restore the scalar FP control & status register */
1372 		if (!was_fpu_owner)
1373 			write_32bit_cp1_register(CP1_STATUS,
1374 						 current->thread.fpu.fcr31);
1375 	}
1376 
1377 out:
1378 	preempt_enable();
1379 
1380 	return 0;
1381 }
1382 
1383 #else /* !CONFIG_MIPS_FP_SUPPORT */
1384 
1385 static int enable_restore_fp_context(int msa)
1386 {
1387 	return SIGILL;
1388 }
1389 
1390 #endif /* CONFIG_MIPS_FP_SUPPORT */
1391 
1392 asmlinkage void do_cpu(struct pt_regs *regs)
1393 {
1394 	enum ctx_state prev_state;
1395 	unsigned int __user *epc;
1396 	unsigned long old_epc, old31;
1397 	unsigned int opcode;
1398 	unsigned int cpid;
1399 	int status;
1400 
1401 	prev_state = exception_enter();
1402 	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
1403 
1404 	if (cpid != 2)
1405 		die_if_kernel("do_cpu invoked from kernel context!", regs);
1406 
1407 	switch (cpid) {
1408 	case 0:
1409 		epc = (unsigned int __user *)exception_epc(regs);
1410 		old_epc = regs->cp0_epc;
1411 		old31 = regs->regs[31];
1412 		opcode = 0;
1413 		status = -1;
1414 
1415 		if (unlikely(compute_return_epc(regs) < 0))
1416 			break;
1417 
1418 		if (!get_isa16_mode(regs->cp0_epc)) {
1419 			if (unlikely(get_user(opcode, epc) < 0))
1420 				status = SIGSEGV;
1421 
1422 			if (!cpu_has_llsc && status < 0)
1423 				status = simulate_llsc(regs, opcode);
1424 		}
1425 
1426 		if (status < 0)
1427 			status = SIGILL;
1428 
1429 		if (unlikely(status > 0)) {
1430 			regs->cp0_epc = old_epc;	/* Undo skip-over.  */
1431 			regs->regs[31] = old31;
1432 			force_sig(status);
1433 		}
1434 
1435 		break;
1436 
1437 #ifdef CONFIG_MIPS_FP_SUPPORT
1438 	case 3:
1439 		/*
1440 		 * The COP3 opcode space and consequently the CP0.Status.CU3
1441 		 * bit and the CP0.Cause.CE=3 encoding have been removed as
1442 		 * of the MIPS III ISA.  From the MIPS IV and MIPS32r2 ISAs
1443 		 * up the space has been reused for COP1X instructions, that
1444 		 * are enabled by the CP0.Status.CU1 bit and consequently
1445 		 * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable
1446 		 * exceptions.  Some FPU-less processors that implement one
1447 		 * of these ISAs however use this code erroneously for COP1X
1448 		 * instructions.  Therefore we redirect this trap to the FP
1449 		 * emulator too.
1450 		 */
1451 		if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
1452 			force_sig(SIGILL);
1453 			break;
1454 		}
1455 		fallthrough;
1456 	case 1: {
1457 		void __user *fault_addr;
1458 		unsigned long fcr31;
1459 		int err, sig;
1460 
1461 		err = enable_restore_fp_context(0);
1462 
1463 		if (raw_cpu_has_fpu && !err)
1464 			break;
1465 
1466 		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
1467 					       &fault_addr);
1468 
1469 		/*
1470 		 * We can't allow the emulated instruction to leave
1471 		 * any enabled Cause bits set in $fcr31.
1472 		 */
1473 		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
1474 		current->thread.fpu.fcr31 &= ~fcr31;
1475 
1476 		/* Send a signal if required.  */
1477 		if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
1478 			mt_ase_fp_affinity();
1479 
1480 		break;
1481 	}
1482 #else /* CONFIG_MIPS_FP_SUPPORT */
1483 	case 1:
1484 	case 3:
1485 		force_sig(SIGILL);
1486 		break;
1487 #endif /* CONFIG_MIPS_FP_SUPPORT */
1488 
1489 	case 2:
1490 		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
1491 		break;
1492 	}
1493 
1494 	exception_exit(prev_state);
1495 }
1496 
1497 asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
1498 {
1499 	enum ctx_state prev_state;
1500 
1501 	prev_state = exception_enter();
1502 	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1503 	if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
1504 		       current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
1505 		goto out;
1506 
1507 	/* Clear MSACSR.Cause before enabling interrupts */
1508 	write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
1509 	local_irq_enable();
1510 
1511 	die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
1512 	force_sig(SIGFPE);
1513 out:
1514 	exception_exit(prev_state);
1515 }
1516 
1517 asmlinkage void do_msa(struct pt_regs *regs)
1518 {
1519 	enum ctx_state prev_state;
1520 	int err;
1521 
1522 	prev_state = exception_enter();
1523 
1524 	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
1525 		force_sig(SIGILL);
1526 		goto out;
1527 	}
1528 
1529 	die_if_kernel("do_msa invoked from kernel context!", regs);
1530 
1531 	err = enable_restore_fp_context(1);
1532 	if (err)
1533 		force_sig(SIGILL);
1534 out:
1535 	exception_exit(prev_state);
1536 }
1537 
1538 asmlinkage void do_mdmx(struct pt_regs *regs)
1539 {
1540 	enum ctx_state prev_state;
1541 
1542 	prev_state = exception_enter();
1543 	force_sig(SIGILL);
1544 	exception_exit(prev_state);
1545 }
1546 
1547 /*
1548  * Called with interrupts disabled.
1549  */
1550 asmlinkage void do_watch(struct pt_regs *regs)
1551 {
1552 	enum ctx_state prev_state;
1553 
1554 	prev_state = exception_enter();
1555 	/*
1556 	 * Clear the WP bit (bit 22) of the Cause register so we don't loop
1557 	 * forever.
1558 	 */
1559 	clear_c0_cause(CAUSEF_WP);
1560 
1561 	/*
1562 	 * If the current thread has the watch registers loaded, save
1563 	 * their values and send SIGTRAP.  Otherwise another thread
1564 	 * left the registers set, clear them and continue.
1565 	 */
1566 	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
1567 		mips_read_watch_registers();
1568 		local_irq_enable();
1569 		force_sig_fault(SIGTRAP, TRAP_HWBKPT, NULL);
1570 	} else {
1571 		mips_clear_watch_registers();
1572 		local_irq_enable();
1573 	}
1574 	exception_exit(prev_state);
1575 }
1576 
1577 asmlinkage void do_mcheck(struct pt_regs *regs)
1578 {
1579 	int multi_match = regs->cp0_status & ST0_TS;
1580 	enum ctx_state prev_state;
1581 	mm_segment_t old_fs = get_fs();
1582 
1583 	prev_state = exception_enter();
1584 	show_regs(regs);
1585 
1586 	if (multi_match) {
1587 		dump_tlb_regs();
1588 		pr_info("\n");
1589 		dump_tlb_all();
1590 	}
1591 
1592 	if (!user_mode(regs))
1593 		set_fs(KERNEL_DS);
1594 
1595 	show_code((unsigned int __user *) regs->cp0_epc);
1596 
1597 	set_fs(old_fs);
1598 
1599 	/*
1600 	 * Some chips may have other causes of machine check (e.g. SB1
1601 	 * graduation timer)
1602 	 */
1603 	panic("Caught Machine Check exception - %scaused by multiple "
1604 	      "matching entries in the TLB.",
1605 	      (multi_match) ? "" : "not ");
1606 }
1607 
1608 asmlinkage void do_mt(struct pt_regs *regs)
1609 {
1610 	int subcode;
1611 
1612 	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
1613 			>> VPECONTROL_EXCPT_SHIFT;
1614 	switch (subcode) {
1615 	case 0:
1616 		printk(KERN_DEBUG "Thread Underflow\n");
1617 		break;
1618 	case 1:
1619 		printk(KERN_DEBUG "Thread Overflow\n");
1620 		break;
1621 	case 2:
1622 		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
1623 		break;
1624 	case 3:
1625 		printk(KERN_DEBUG "Gating Storage Exception\n");
1626 		break;
1627 	case 4:
1628 		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
1629 		break;
1630 	case 5:
1631 		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
1632 		break;
1633 	default:
1634 		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
1635 			subcode);
1636 		break;
1637 	}
1638 	die_if_kernel("MIPS MT Thread exception in kernel", regs);
1639 
1640 	force_sig(SIGILL);
1641 }
1642 
1643 
1644 asmlinkage void do_dsp(struct pt_regs *regs)
1645 {
1646 	if (cpu_has_dsp)
1647 		panic("Unexpected DSP exception");
1648 
1649 	force_sig(SIGILL);
1650 }
1651 
1652 asmlinkage void do_reserved(struct pt_regs *regs)
1653 {
1654 	/*
1655 	 * Game over - no way to handle this if it ever occurs.  Most probably
1656 	 * caused by a new, unknown CPU type or by another deadly
1657 	 * hardware/software error.
1658 	 */
1659 	show_regs(regs);
1660 	panic("Caught reserved exception %ld - should not happen.",
1661 	      (regs->cp0_cause & 0x7f) >> 2);
1662 }
1663 
1664 static int __initdata l1parity = 1;
1665 static int __init nol1parity(char *s)
1666 {
1667 	l1parity = 0;
1668 	return 1;
1669 }
1670 __setup("nol1par", nol1parity);
1671 static int __initdata l2parity = 1;
1672 static int __init nol2parity(char *s)
1673 {
1674 	l2parity = 0;
1675 	return 1;
1676 }
1677 __setup("nol2par", nol2parity);
1678 
1679 /*
1680  * Some MIPS CPUs can enable/disable cache parity detection, but they
1681  * do it in different ways.
1682  */
1683 static inline void parity_protection_init(void)
1684 {
1685 #define ERRCTL_PE	0x80000000
1686 #define ERRCTL_L2P	0x00800000
1687 
1688 	if (mips_cm_revision() >= CM_REV_CM3) {
1689 		ulong gcr_ectl, cp0_ectl;
1690 
1691 		/*
1692 		 * With CM3 systems we need to ensure that the L1 & L2
1693 		 * parity enables are set to the same value, since this
1694 		 * is presumed by the hardware engineers.
1695 		 *
1696 		 * If the user disabled either of L1 or L2 ECC checking,
1697 		 * disable both.
1698 		 */
1699 		l1parity &= l2parity;
1700 		l2parity &= l1parity;
1701 
1702 		/* Probe L1 ECC support */
1703 		cp0_ectl = read_c0_ecc();
1704 		write_c0_ecc(cp0_ectl | ERRCTL_PE);
1705 		back_to_back_c0_hazard();
1706 		cp0_ectl = read_c0_ecc();
1707 
1708 		/* Probe L2 ECC support */
1709 		gcr_ectl = read_gcr_err_control();
1710 
1711 		if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT) ||
1712 		    !(cp0_ectl & ERRCTL_PE)) {
1713 			/*
1714 			 * One of L1 or L2 ECC checking isn't supported,
1715 			 * so we cannot enable either.
1716 			 */
1717 			l1parity = l2parity = 0;
1718 		}
1719 
1720 		/* Configure L1 ECC checking */
1721 		if (l1parity)
1722 			cp0_ectl |= ERRCTL_PE;
1723 		else
1724 			cp0_ectl &= ~ERRCTL_PE;
1725 		write_c0_ecc(cp0_ectl);
1726 		back_to_back_c0_hazard();
1727 		WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity);
1728 
1729 		/* Configure L2 ECC checking */
1730 		if (l2parity)
1731 			gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN;
1732 		else
1733 			gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN;
1734 		write_gcr_err_control(gcr_ectl);
1735 		gcr_ectl = read_gcr_err_control();
1736 		gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN;
1737 		WARN_ON(!!gcr_ectl != l2parity);
1738 
1739 		pr_info("Cache parity protection %sabled\n",
1740 			l1parity ? "en" : "dis");
1741 		return;
1742 	}
1743 
1744 	switch (current_cpu_type()) {
1745 	case CPU_24K:
1746 	case CPU_34K:
1747 	case CPU_74K:
1748 	case CPU_1004K:
1749 	case CPU_1074K:
1750 	case CPU_INTERAPTIV:
1751 	case CPU_PROAPTIV:
1752 	case CPU_P5600:
1753 	case CPU_QEMU_GENERIC:
1754 	case CPU_P6600:
1755 		{
1756 			unsigned long errctl;
1757 			unsigned int l1parity_present, l2parity_present;
1758 
1759 			errctl = read_c0_ecc();
1760 			errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
1761 
1762 			/* probe L1 parity support */
1763 			write_c0_ecc(errctl | ERRCTL_PE);
1764 			back_to_back_c0_hazard();
1765 			l1parity_present = (read_c0_ecc() & ERRCTL_PE);
1766 
1767 			/* probe L2 parity support */
1768 			write_c0_ecc(errctl|ERRCTL_L2P);
1769 			back_to_back_c0_hazard();
1770 			l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
1771 
1772 			if (l1parity_present && l2parity_present) {
1773 				if (l1parity)
1774 					errctl |= ERRCTL_PE;
1775 				if (l1parity ^ l2parity)
1776 					errctl |= ERRCTL_L2P;
1777 			} else if (l1parity_present) {
1778 				if (l1parity)
1779 					errctl |= ERRCTL_PE;
1780 			} else if (l2parity_present) {
1781 				if (l2parity)
1782 					errctl |= ERRCTL_L2P;
1783 			} else {
1784 				/* No parity available */
1785 			}
1786 
1787 			printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
1788 
1789 			write_c0_ecc(errctl);
1790 			back_to_back_c0_hazard();
1791 			errctl = read_c0_ecc();
1792 			printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
1793 
1794 			if (l1parity_present)
1795 				printk(KERN_INFO "Cache parity protection %sabled\n",
1796 				       (errctl & ERRCTL_PE) ? "en" : "dis");
1797 
1798 			if (l2parity_present) {
1799 				if (l1parity_present && l1parity)
1800 					errctl ^= ERRCTL_L2P;
1801 				printk(KERN_INFO "L2 cache parity protection %sabled\n",
1802 				       (errctl & ERRCTL_L2P) ? "en" : "dis");
1803 			}
1804 		}
1805 		break;
1806 
1807 	case CPU_5KC:
1808 	case CPU_5KE:
1809 	case CPU_LOONGSON32:
1810 		/* Set the PE bit (bit 31) in the c0_errctl register. */
1811 		write_c0_ecc(0x80000000);
1812 		back_to_back_c0_hazard();
1813 		printk(KERN_INFO "Cache parity protection %sabled\n",
1814 		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
1815 		break;
1816 	case CPU_20KC:
1817 	case CPU_25KF:
1818 		/* Clear the DE bit (bit 16) in the c0_status register. */
1819 		printk(KERN_INFO "Enable cache parity protection for "
1820 		       "MIPS 20KC/25KF CPUs.\n");
1821 		clear_c0_status(ST0_DE);
1822 		break;
1823 	default:
1824 		break;
1825 	}
1826 }
1827 
1828 asmlinkage void cache_parity_error(void)
1829 {
1830 	const int field = 2 * sizeof(unsigned long);
1831 	unsigned int reg_val;
1832 
1833 	/* For the moment, report the problem and hang. */
1834 	printk("Cache error exception:\n");
1835 	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
1836 	reg_val = read_c0_cacheerr();
1837 	printk("c0_cacheerr == %08x\n", reg_val);
1838 
1839 	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1840 	       reg_val & (1<<30) ? "secondary" : "primary",
1841 	       reg_val & (1<<31) ? "data" : "insn");
1842 	if ((cpu_has_mips_r2_r6) &&
1843 	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
1844 		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
1845 			reg_val & (1<<29) ? "ED " : "",
1846 			reg_val & (1<<28) ? "ET " : "",
1847 			reg_val & (1<<27) ? "ES " : "",
1848 			reg_val & (1<<26) ? "EE " : "",
1849 			reg_val & (1<<25) ? "EB " : "",
1850 			reg_val & (1<<24) ? "EI " : "",
1851 			reg_val & (1<<23) ? "E1 " : "",
1852 			reg_val & (1<<22) ? "E0 " : "");
1853 	} else {
1854 		pr_err("Error bits: %s%s%s%s%s%s%s\n",
1855 			reg_val & (1<<29) ? "ED " : "",
1856 			reg_val & (1<<28) ? "ET " : "",
1857 			reg_val & (1<<26) ? "EE " : "",
1858 			reg_val & (1<<25) ? "EB " : "",
1859 			reg_val & (1<<24) ? "EI " : "",
1860 			reg_val & (1<<23) ? "E1 " : "",
1861 			reg_val & (1<<22) ? "E0 " : "");
1862 	}
1863 	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
1864 
1865 #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
1866 	if (reg_val & (1<<22))
1867 		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());
1868 
1869 	if (reg_val & (1<<23))
1870 		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
1871 #endif
1872 
1873 	panic("Can't handle the cache error!");
1874 }
1875 
1876 asmlinkage void do_ftlb(void)
1877 {
1878 	const int field = 2 * sizeof(unsigned long);
1879 	unsigned int reg_val;
1880 
1881 	/* For the moment, report the problem and hang. */
1882 	if ((cpu_has_mips_r2_r6) &&
1883 	    (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
1884 	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
1885 		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
1886 		       read_c0_ecc());
1887 		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
1888 		reg_val = read_c0_cacheerr();
1889 		pr_err("c0_cacheerr == %08x\n", reg_val);
1890 
1891 		if ((reg_val & 0xc0000000) == 0xc0000000) {
1892 			pr_err("Decoded c0_cacheerr: FTLB parity error\n");
1893 		} else {
1894 			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1895 			       reg_val & (1<<30) ? "secondary" : "primary",
1896 			       reg_val & (1<<31) ? "data" : "insn");
1897 		}
1898 	} else {
1899 		pr_err("FTLB error exception\n");
1900 	}
1901 	/* Just print the cacheerr bits for now */
1902 	cache_parity_error();
1903 }
1904 
1905 /*
1906  * SDBBP EJTAG debug exception handler.
1907  * We skip the instruction and continue at the next one.
1908  */
1909 void ejtag_exception_handler(struct pt_regs *regs)
1910 {
1911 	const int field = 2 * sizeof(unsigned long);
1912 	unsigned long depc, old_epc, old_ra;
1913 	unsigned int debug;
1914 
1915 	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
1916 	depc = read_c0_depc();
1917 	debug = read_c0_debug();
1918 	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
1919 	if (debug & 0x80000000) {
1920 		/*
1921 		 * In branch delay slot.
1922 		 * We cheat a little bit here and use EPC to calculate the
1923 		 * debug return address (DEPC). EPC is restored after the
1924 		 * calculation.
1925 		 */
1926 		old_epc = regs->cp0_epc;
1927 		old_ra = regs->regs[31];
1928 		regs->cp0_epc = depc;
1929 		compute_return_epc(regs);
1930 		depc = regs->cp0_epc;
1931 		regs->cp0_epc = old_epc;
1932 		regs->regs[31] = old_ra;
1933 	} else
1934 		depc += 4;
1935 	write_c0_depc(depc);
1936 
1937 #if 0
1938 	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
1939 	write_c0_debug(debug | 0x100);
1940 #endif
1941 }
1942 
1943 /*
1944  * NMI exception handler.
1945  * No lock; only written during early bootup by CPU 0.
1946  */
1947 static RAW_NOTIFIER_HEAD(nmi_chain);
1948 
1949 int register_nmi_notifier(struct notifier_block *nb)
1950 {
1951 	return raw_notifier_chain_register(&nmi_chain, nb);
1952 }
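
/*
 * Illustrative only (hypothetical names): a platform driver wanting to
 * observe NMIs would hang a callback on this chain during early boot:
 *
 *	static int my_nmi_cb(struct notifier_block *nb, unsigned long action,
 *			     void *data)
 *	{
 *		struct pt_regs *regs = data;
 *
 *		pr_crit("NMI at EPC %lx\n", regs->cp0_epc);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nmi_nb = { .notifier_call = my_nmi_cb };
 *
 *	register_nmi_notifier(&my_nmi_nb);
 *
 * The chain is unlocked (see above), so registration must be complete
 * before secondary CPUs can take NMIs.
 */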
1953 
1954 void __noreturn nmi_exception_handler(struct pt_regs *regs)
1955 {
1956 	char str[100];
1957 
1958 	nmi_enter();
1959 	raw_notifier_call_chain(&nmi_chain, 0, regs);
1960 	bust_spinlocks(1);
1961 	snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
1962 		 smp_processor_id(), regs->cp0_epc);
1963 	regs->cp0_epc = read_c0_errorepc();
1964 	die(str, regs);
1965 	nmi_exit();
1966 }
1967 
1968 #define VECTORSPACING 0x100	/* for EI/VI mode */
1969 
1970 unsigned long ebase;
1971 EXPORT_SYMBOL_GPL(ebase);
1972 unsigned long exception_handlers[32];
1973 unsigned long vi_handlers[64];
1974 
1975 void __init *set_except_vector(int n, void *addr)
1976 {
1977 	unsigned long handler = (unsigned long) addr;
1978 	unsigned long old_handler;
1979 
1980 #ifdef CONFIG_CPU_MICROMIPS
1981 	/*
1982 	 * Only the TLB handlers are cache aligned with an even
1983 	 * address. All other handlers are on an odd address and
1984 	 * require no modification. Otherwise, MIPS32 mode will
1985 	 * be entered when handling any TLB exceptions. That
1986 	 * would be bad...since we must stay in microMIPS mode.
1987 	 */
1988 	if (!(handler & 0x1))
1989 		handler |= 1;
1990 #endif
1991 	old_handler = xchg(&exception_handlers[n], handler);
1992 
1993 	if (n == 0 && cpu_has_divec) {
1994 #ifdef CONFIG_CPU_MICROMIPS
1995 		unsigned long jump_mask = ~((1 << 27) - 1);
1996 #else
1997 		unsigned long jump_mask = ~((1 << 28) - 1);
1998 #endif
1999 		u32 *buf = (u32 *)(ebase + 0x200);
2000 		unsigned int k0 = 26;
2001 		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
2002 			uasm_i_j(&buf, handler & ~jump_mask);
2003 			uasm_i_nop(&buf);
2004 		} else {
2005 			UASM_i_LA(&buf, k0, handler);
2006 			uasm_i_jr(&buf, k0);
2007 			uasm_i_nop(&buf);
2008 		}
2009 		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
2010 	}
2011 	return (void *)old_handler;
2012 }
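
/*
 * Typical use, as in trap_init() below: install a handler for one of the
 * 32 Cause.ExcCode values, optionally keeping the old one for chaining:
 *
 *	void *old = set_except_vector(EXCCODE_BP, handle_bp);
 *
 * For ExcCode 0 the function additionally patches the jump at ebase + 0x200
 * used when dedicated interrupt vectors (cpu_has_divec) are enabled.
 */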
2013 
2014 static void do_default_vi(void)
2015 {
2016 	show_regs(get_irq_regs());
2017 	panic("Caught unexpected vectored interrupt.");
2018 }
2019 
2020 static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
2021 {
2022 	unsigned long handler;
2023 	unsigned long old_handler = vi_handlers[n];
2024 	int srssets = current_cpu_data.srsets;
2025 	u16 *h;
2026 	unsigned char *b;
2027 
2028 	BUG_ON(!cpu_has_veic && !cpu_has_vint);
2029 
2030 	if (addr == NULL) {
2031 		handler = (unsigned long) do_default_vi;
2032 		srs = 0;
2033 	} else
2034 		handler = (unsigned long) addr;
2035 	vi_handlers[n] = handler;
2036 
2037 	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
2038 
2039 	if (srs >= srssets)
2040 		panic("Shadow register set %d not supported", srs);
2041 
2042 	if (cpu_has_veic) {
2043 		if (board_bind_eic_interrupt)
2044 			board_bind_eic_interrupt(n, srs);
2045 	} else if (cpu_has_vint) {
2046 		/* SRSMap is only defined if shadow sets are implemented */
2047 		if (srssets > 1)
2048 			change_c0_srsmap(0xf << n*4, srs << n*4);
2049 	}
2050 
2051 	if (srs == 0) {
2052 		/*
2053 		 * If no shadow set is selected then use the default handler
2054 		 * that does normal register saving and standard interrupt exit.
2055 		 */
2056 		extern char except_vec_vi, except_vec_vi_lui;
2057 		extern char except_vec_vi_ori, except_vec_vi_end;
2058 		extern char rollback_except_vec_vi;
2059 		char *vec_start = using_rollback_handler() ?
2060 			&rollback_except_vec_vi : &except_vec_vi;
2061 #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
2062 		const int lui_offset = &except_vec_vi_lui - vec_start + 2;
2063 		const int ori_offset = &except_vec_vi_ori - vec_start + 2;
2064 #else
2065 		const int lui_offset = &except_vec_vi_lui - vec_start;
2066 		const int ori_offset = &except_vec_vi_ori - vec_start;
2067 #endif
2068 		const int handler_len = &except_vec_vi_end - vec_start;
2069 
2070 		if (handler_len > VECTORSPACING) {
2071 			/*
2072 			 * Sigh... panicking won't help as the console
2073 			 * is probably not configured :(
2074 			 */
2075 			panic("VECTORSPACING too small");
2076 		}
2077 
2078 		set_handler(((unsigned long)b - ebase), vec_start,
2079 #ifdef CONFIG_CPU_MICROMIPS
2080 				(handler_len - 1));
2081 #else
2082 				handler_len);
2083 #endif
2084 		h = (u16 *)(b + lui_offset);
2085 		*h = (handler >> 16) & 0xffff;
2086 		h = (u16 *)(b + ori_offset);
2087 		*h = (handler & 0xffff);
2088 		local_flush_icache_range((unsigned long)b,
2089 					 (unsigned long)(b+handler_len));
2090 	}
2091 	else {
2092 		/*
2093 		 * In other cases jump directly to the interrupt handler. It
2094 		 * is the handler's responsibility to save registers if required
2095 		 * (e.g. hi/lo) and return from the exception using "eret".
2096 		 */
2097 		u32 insn;
2098 
2099 		h = (u16 *)b;
2100 		/* j handler */
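		/*
		 * Hand-assembled absolute jump: the classic encoding is
		 * opcode 000010b in bits 31:26 (0x08000000) with a 26-bit
		 * word-index target; microMIPS instead uses major opcode
		 * 0xd4000000 with a halfword-index target.
		 */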
2101 #ifdef CONFIG_CPU_MICROMIPS
2102 		insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
2103 #else
2104 		insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
2105 #endif
2106 		h[0] = (insn >> 16) & 0xffff;
2107 		h[1] = insn & 0xffff;
2108 		h[2] = 0;
2109 		h[3] = 0;
2110 		local_flush_icache_range((unsigned long)b,
2111 					 (unsigned long)(b+8));
2112 	}
2113 
2114 	return (void *)old_handler;
2115 }
2116 
2117 void *set_vi_handler(int n, vi_handler_t addr)
2118 {
2119 	return set_vi_srs_handler(n, addr, 0);
2120 }
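
/*
 * Illustrative only (hypothetical handler name): a board using vectored
 * interrupts would install its dispatch routines with e.g.
 *
 *	set_vi_handler(MY_VI_VECTOR, my_plat_irqdispatch);
 *
 * Passing a NULL handler reinstalls do_default_vi(), which panics on any
 * unexpected vectored interrupt.
 */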
2121 
2122 extern void tlb_init(void);
2123 
2124 /*
2125  * Timer interrupt
2126  */
2127 int cp0_compare_irq;
2128 EXPORT_SYMBOL_GPL(cp0_compare_irq);
2129 int cp0_compare_irq_shift;
2130 
2131 /*
2132  * Performance counter IRQ or -1 if shared with timer
2133  */
2134 int cp0_perfcount_irq;
2135 EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
2136 
2137 /*
2138  * Fast debug channel IRQ or -1 if not present
2139  */
2140 int cp0_fdc_irq;
2141 EXPORT_SYMBOL_GPL(cp0_fdc_irq);
2142 
2143 static int noulri;
2144 
2145 static int __init ulri_disable(char *s)
2146 {
2147 	pr_info("Disabling ulri\n");
2148 	noulri = 1;
2149 
2150 	return 1;
2151 }
2152 __setup("noulri", ulri_disable);
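
/*
 * Booting with "noulri" on the kernel command line keeps HWRENA.ULR clear,
 * so user-mode reads of the UserLocal register trap as Reserved Instruction
 * and are emulated by the RDHWR fixup path instead.
 */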
2153 
2154 /* configure STATUS register */
2155 static void configure_status(void)
2156 {
2157 	/*
2158 	 * Disable coprocessors and select 32-bit or 64-bit addressing
2159 	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
2160 	 * flag that some firmware may have left set and the TS bit (for
2161 	 * IP27).  Set XX for ISA IV code to work.
2162 	 */
2163 	unsigned int status_set = ST0_CU0;
2164 #ifdef CONFIG_64BIT
2165 	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
2166 #endif
2167 	if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
2168 		status_set |= ST0_XX;
2169 	if (cpu_has_dsp)
2170 		status_set |= ST0_MX;
2171 
2172 	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
2173 			 status_set);
2174 	back_to_back_c0_hazard();
2175 }
2176 
2177 unsigned int hwrena;
2178 EXPORT_SYMBOL_GPL(hwrena);
2179 
2180 /* configure HWRENA register */
2181 static void configure_hwrena(void)
2182 {
2183 	hwrena = cpu_hwrena_impl_bits;
2184 
2185 	if (cpu_has_mips_r2_r6)
2186 		hwrena |= MIPS_HWRENA_CPUNUM |
2187 			  MIPS_HWRENA_SYNCISTEP |
2188 			  MIPS_HWRENA_CC |
2189 			  MIPS_HWRENA_CCRES;
2190 
2191 	if (!noulri && cpu_has_userlocal)
2192 		hwrena |= MIPS_HWRENA_ULR;
2193 
2194 	if (hwrena)
2195 		write_c0_hwrena(hwrena);
2196 }
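
/*
 * With MIPS_HWRENA_ULR set above, user space can read the UserLocal (TLS
 * pointer) register directly instead of trapping, e.g. (illustrative):
 *
 *	rdhwr	$3, $29		# v1 = UserLocal
 */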
2197 
2198 static void configure_exception_vector(void)
2199 {
2200 	if (cpu_has_mips_r2_r6) {
2201 		unsigned long sr = set_c0_status(ST0_BEV);
2202 		/* If available, use WG to set top bits of EBASE */
2203 		if (cpu_has_ebase_wg) {
2204 #ifdef CONFIG_64BIT
2205 			write_c0_ebase_64(ebase | MIPS_EBASE_WG);
2206 #else
2207 			write_c0_ebase(ebase | MIPS_EBASE_WG);
2208 #endif
2209 		}
2210 		write_c0_ebase(ebase);
2211 		write_c0_status(sr);
2212 	}
2213 	if (cpu_has_veic || cpu_has_vint) {
2214 		/* Setting vector spacing enables EI/VI mode */
2215 		change_c0_intctl(0x3e0, VECTORSPACING);
2216 	}
2217 	if (cpu_has_divec) {
2218 		if (cpu_has_mipsmt) {
2219 			unsigned int vpflags = dvpe();
2220 			set_c0_cause(CAUSEF_IV);
2221 			evpe(vpflags);
2222 		} else
2223 			set_c0_cause(CAUSEF_IV);
2224 	}
2225 }
2226 
2227 void per_cpu_trap_init(bool is_boot_cpu)
2228 {
2229 	unsigned int cpu = smp_processor_id();
2230 
2231 	configure_status();
2232 	configure_hwrena();
2233 
2234 	configure_exception_vector();
2235 
2236 	/*
2237 	 * Before R2 these interrupt numbers were fixed to 7, so on R2 only:
2238 	 *
2239 	 *  o read IntCtl.IPTI to determine the timer interrupt
2240 	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
2241 	 *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
2242 	 */
2243 	if (cpu_has_mips_r2_r6) {
2244 		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
2245 		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
2246 		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
2247 		cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
2248 		if (!cp0_fdc_irq)
2249 			cp0_fdc_irq = -1;
2250 
2251 	} else {
2252 		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
2253 		cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
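		/*
		 * Note: CP0_LEGACY_PERFCNT_IRQ is also 7, matching the
		 * fixed pre-R2 Cause.IP7 timer interrupt bit.
		 */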
2254 		cp0_perfcount_irq = -1;
2255 		cp0_fdc_irq = -1;
2256 	}
2257 
2258 	if (cpu_has_mmid)
2259 		cpu_data[cpu].asid_cache = 0;
2260 	else if (!cpu_data[cpu].asid_cache)
2261 		cpu_data[cpu].asid_cache = asid_first_version(cpu);
2262 
2263 	mmgrab(&init_mm);
2264 	current->active_mm = &init_mm;
2265 	BUG_ON(current->mm);
2266 	enter_lazy_tlb(&init_mm, current);
2267 
2268 	/* The boot CPU's caches were set up in setup_arch(). */
2269 	if (!is_boot_cpu)
2270 		cpu_cache_init();
2271 	tlb_init();
2272 	TLBMISS_HANDLER_SETUP();
2273 }
2274 
2275 /* Install CPU exception handler */
2276 void set_handler(unsigned long offset, void *addr, unsigned long size)
2277 {
2278 #ifdef CONFIG_CPU_MICROMIPS
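	/*
	 * microMIPS handler symbols carry the ISA mode bit in bit 0;
	 * subtract it to copy from the real start of the code.
	 */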
2279 	memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
2280 #else
2281 	memcpy((void *)(ebase + offset), addr, size);
2282 #endif
2283 	local_flush_icache_range(ebase + offset, ebase + offset + size);
2284 }
2285 
2286 static const char panic_null_cerr[] =
2287 	"Trying to set NULL cache error exception handler\n";
2288 
2289 /*
2290  * Install uncached CPU exception handler.
2291  * This is suitable only for the cache error exception which is the only
2292  * exception handler that runs uncached.
2293  */
2294 void set_uncached_handler(unsigned long offset, void *addr,
2295 	unsigned long size)
2296 {
2297 	unsigned long uncached_ebase = CKSEG1ADDR(ebase);
2298 
2299 	if (!addr)
2300 		panic(panic_null_cerr);
2301 
2302 	memcpy((void *)(uncached_ebase + offset), addr, size);
2303 }
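
/*
 * A typical caller (e.g. the r4k cache error setup) copies the generic
 * cache error vector to the uncached mirror of ebase:
 *
 *	set_uncached_handler(0x100, &except_vec2_generic, 0x80);
 *
 * 0x100 is the cache error vector offset from ebase.
 */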
2304 
2305 static int __initdata rdhwr_noopt;
2306 static int __init set_rdhwr_noopt(char *str)
2307 {
2308 	rdhwr_noopt = 1;
2309 	return 1;
2310 }
2311 
2312 __setup("rdhwr_noopt", set_rdhwr_noopt);
2313 
2314 void __init trap_init(void)
2315 {
2316 	extern char except_vec3_generic;
2317 	extern char except_vec4;
2318 	extern char except_vec3_r4000;
2319 	unsigned long i, vec_size;
2320 	phys_addr_t ebase_pa;
2321 
2322 	check_wait();
2323 
2324 	if (!cpu_has_mips_r2_r6) {
2325 		ebase = CAC_BASE;
2326 		ebase_pa = virt_to_phys((void *)ebase);
2327 		vec_size = 0x400;
2328 
2329 		memblock_reserve(ebase_pa, vec_size);
2330 	} else {
2331 		if (cpu_has_veic || cpu_has_vint)
2332 			vec_size = 0x200 + VECTORSPACING*64;
2333 		else
2334 			vec_size = PAGE_SIZE;
2335 
2336 		ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size));
2337 		if (!ebase_pa)
2338 			panic("%s: Failed to allocate %lu bytes align=0x%x\n",
2339 			      __func__, vec_size, 1 << fls(vec_size));
2340 
2341 		/*
2342 		 * Try to ensure that ebase resides in KSeg0.
2343 		 *
2344 		 * It shouldn't generally be in XKPhys on MIPS64 to avoid
2345 		 * hitting a poorly defined exception base for Cache Errors.
2346 		 * The allocation is likely to be in the low 512MB of physical,
2347 		 * in which case we should be able to convert to KSeg0.
2348 		 *
2349 		 * EVA is special though as it allows segments to be rearranged
2350 		 * and to become uncached during cache error handling.
2351 		 */
2352 		if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
2353 			ebase = CKSEG0ADDR(ebase_pa);
2354 		else
2355 			ebase = (unsigned long)phys_to_virt(ebase_pa);
2356 	}
2357 
2358 	if (cpu_has_mmips) {
2359 		unsigned int config3 = read_c0_config3();
2360 
2361 		if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
2362 			write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
2363 		else
2364 			write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
2365 	}
2366 
2367 	if (board_ebase_setup)
2368 		board_ebase_setup();
2369 	per_cpu_trap_init(true);
2370 	memblock_set_bottom_up(false);
2371 
2372 	/*
2373 	 * Copy the generic exception handlers to their final destination.
2374 	 * This will be overridden later as suitable for a particular
2375 	 * configuration.
2376 	 */
2377 	set_handler(0x180, &except_vec3_generic, 0x80);
2378 
2379 	/*
2380 	 * Set up the default vectors
2381 	 */
2382 	for (i = 0; i <= 31; i++)
2383 		set_except_vector(i, handle_reserved);
2384 
2385 	/*
2386 	 * Copy the EJTAG debug exception vector handler code to its final
2387 	 * destination.
2388 	 */
2389 	if (cpu_has_ejtag && board_ejtag_handler_setup)
2390 		board_ejtag_handler_setup();
2391 
2392 	/*
2393 	 * Only some CPUs have the watch exceptions.
2394 	 */
2395 	if (cpu_has_watch)
2396 		set_except_vector(EXCCODE_WATCH, handle_watch);
2397 
2398 	/*
2399 	 * Initialise interrupt handlers
2400 	 */
2401 	if (cpu_has_veic || cpu_has_vint) {
2402 		int nvec = cpu_has_veic ? 64 : 8;
2403 		for (i = 0; i < nvec; i++)
2404 			set_vi_handler(i, NULL);
2405 	}
2406 	else if (cpu_has_divec)
2407 		set_handler(0x200, &except_vec4, 0x8);
2408 
2409 	/*
2410 	 * Some CPUs can enable/disable cache parity detection, but they do
2411 	 * it in different ways.
2412 	 */
2413 	parity_protection_init();
2414 
2415 	/*
2416 	 * The Data Bus Errors / Instruction Bus Errors are signaled
2417 	 * by external hardware.  Therefore these two exceptions
2418 	 * may have board specific handlers.
2419 	 */
2420 	if (board_be_init)
2421 		board_be_init();
2422 
2423 	set_except_vector(EXCCODE_INT, using_rollback_handler() ?
2424 					rollback_handle_int : handle_int);
2425 	set_except_vector(EXCCODE_MOD, handle_tlbm);
2426 	set_except_vector(EXCCODE_TLBL, handle_tlbl);
2427 	set_except_vector(EXCCODE_TLBS, handle_tlbs);
2428 
2429 	set_except_vector(EXCCODE_ADEL, handle_adel);
2430 	set_except_vector(EXCCODE_ADES, handle_ades);
2431 
2432 	set_except_vector(EXCCODE_IBE, handle_ibe);
2433 	set_except_vector(EXCCODE_DBE, handle_dbe);
2434 
2435 	set_except_vector(EXCCODE_SYS, handle_sys);
2436 	set_except_vector(EXCCODE_BP, handle_bp);
2437 
2438 	if (rdhwr_noopt)
2439 		set_except_vector(EXCCODE_RI, handle_ri);
2440 	else {
2441 		if (cpu_has_vtag_icache)
2442 			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
2443 		else if (current_cpu_type() == CPU_LOONGSON64)
2444 			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
2445 		else
2446 			set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
2447 	}
2448 
2449 	set_except_vector(EXCCODE_CPU, handle_cpu);
2450 	set_except_vector(EXCCODE_OV, handle_ov);
2451 	set_except_vector(EXCCODE_TR, handle_tr);
2452 	set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);
2453 
2454 	if (board_nmi_handler_setup)
2455 		board_nmi_handler_setup();
2456 
2457 	if (cpu_has_fpu && !cpu_has_nofpuex)
2458 		set_except_vector(EXCCODE_FPE, handle_fpe);
2459 
2460 	set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);
2461 
2462 	if (cpu_has_rixiex) {
2463 		set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
2464 		set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0);
2465 	}
2466 
2467 	set_except_vector(EXCCODE_MSADIS, handle_msa);
2468 	set_except_vector(EXCCODE_MDMX, handle_mdmx);
2469 
2470 	if (cpu_has_mcheck)
2471 		set_except_vector(EXCCODE_MCHECK, handle_mcheck);
2472 
2473 	if (cpu_has_mipsmt)
2474 		set_except_vector(EXCCODE_THREAD, handle_mt);
2475 
2476 	set_except_vector(EXCCODE_DSPDIS, handle_dsp);
2477 
2478 	if (board_cache_error_setup)
2479 		board_cache_error_setup();
2480 
2481 	if (cpu_has_vce)
2482 		/* Special exception: R4[04]00 also uses the divec space. */
2483 		set_handler(0x180, &except_vec3_r4000, 0x100);
2484 	else if (cpu_has_4kex)
2485 		set_handler(0x180, &except_vec3_generic, 0x80);
2486 	else
2487 		set_handler(0x080, &except_vec3_generic, 0x80);
2488 
2489 	local_flush_icache_range(ebase, ebase + vec_size);
2490 
2491 	sort_extable(__start___dbe_table, __stop___dbe_table);
2492 
2493 	cu2_notifier(default_cu2_call, 0x80000000);	/* Run last  */
2494 }
2495 
2496 static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
2497 			    void *v)
2498 {
2499 	switch (cmd) {
2500 	case CPU_PM_ENTER_FAILED:
2501 	case CPU_PM_EXIT:
2502 		configure_status();
2503 		configure_hwrena();
2504 		configure_exception_vector();
2505 
2506 		/* Restore register with CPU number for TLB handlers */
2507 		TLBMISS_HANDLER_RESTORE();
2508 
2509 		break;
2510 	}
2511 
2512 	return NOTIFY_OK;
2513 }
2514 
2515 static struct notifier_block trap_pm_notifier_block = {
2516 	.notifier_call = trap_pm_notifier,
2517 };
2518 
2519 static int __init trap_pm_init(void)
2520 {
2521 	return cpu_pm_register_notifier(&trap_pm_notifier_block);
2522 }
2523 arch_initcall(trap_pm_init);
2524