xref: /openbmc/linux/arch/mips/kernel/traps.c (revision a34a3ed7)
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
7  * Copyright (C) 1995, 1996 Paul M. Antoine
8  * Copyright (C) 1998 Ulf Carlsson
9  * Copyright (C) 1999 Silicon Graphics, Inc.
10  * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
11  * Copyright (C) 2002, 2003, 2004, 2005, 2007  Maciej W. Rozycki
12  * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
13  * Copyright (C) 2014, Imagination Technologies Ltd.
14  */
15 #include <linux/bitops.h>
16 #include <linux/bug.h>
17 #include <linux/compiler.h>
18 #include <linux/context_tracking.h>
19 #include <linux/cpu_pm.h>
20 #include <linux/kexec.h>
21 #include <linux/init.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/extable.h>
25 #include <linux/mm.h>
26 #include <linux/sched/mm.h>
27 #include <linux/sched/debug.h>
28 #include <linux/smp.h>
29 #include <linux/spinlock.h>
30 #include <linux/kallsyms.h>
31 #include <linux/bootmem.h>
32 #include <linux/interrupt.h>
33 #include <linux/ptrace.h>
34 #include <linux/kgdb.h>
35 #include <linux/kdebug.h>
36 #include <linux/kprobes.h>
37 #include <linux/notifier.h>
38 #include <linux/kdb.h>
39 #include <linux/irq.h>
40 #include <linux/perf_event.h>
41 
42 #include <asm/addrspace.h>
43 #include <asm/bootinfo.h>
44 #include <asm/branch.h>
45 #include <asm/break.h>
46 #include <asm/cop2.h>
47 #include <asm/cpu.h>
48 #include <asm/cpu-type.h>
49 #include <asm/dsp.h>
50 #include <asm/fpu.h>
51 #include <asm/fpu_emulator.h>
52 #include <asm/idle.h>
53 #include <asm/mips-cps.h>
54 #include <asm/mips-r2-to-r6-emul.h>
55 #include <asm/mipsregs.h>
56 #include <asm/mipsmtregs.h>
57 #include <asm/module.h>
58 #include <asm/msa.h>
59 #include <asm/pgtable.h>
60 #include <asm/ptrace.h>
61 #include <asm/sections.h>
62 #include <asm/siginfo.h>
63 #include <asm/tlbdebug.h>
64 #include <asm/traps.h>
65 #include <linux/uaccess.h>
66 #include <asm/watch.h>
67 #include <asm/mmu_context.h>
68 #include <asm/types.h>
69 #include <asm/stacktrace.h>
70 #include <asm/uasm.h>
71 
72 extern void check_wait(void);
73 extern asmlinkage void rollback_handle_int(void);
74 extern asmlinkage void handle_int(void);
75 extern u32 handle_tlbl[];
76 extern u32 handle_tlbs[];
77 extern u32 handle_tlbm[];
78 extern asmlinkage void handle_adel(void);
79 extern asmlinkage void handle_ades(void);
80 extern asmlinkage void handle_ibe(void);
81 extern asmlinkage void handle_dbe(void);
82 extern asmlinkage void handle_sys(void);
83 extern asmlinkage void handle_bp(void);
84 extern asmlinkage void handle_ri(void);
85 extern asmlinkage void handle_ri_rdhwr_tlbp(void);
86 extern asmlinkage void handle_ri_rdhwr(void);
87 extern asmlinkage void handle_cpu(void);
88 extern asmlinkage void handle_ov(void);
89 extern asmlinkage void handle_tr(void);
90 extern asmlinkage void handle_msa_fpe(void);
91 extern asmlinkage void handle_fpe(void);
92 extern asmlinkage void handle_ftlb(void);
93 extern asmlinkage void handle_msa(void);
94 extern asmlinkage void handle_mdmx(void);
95 extern asmlinkage void handle_watch(void);
96 extern asmlinkage void handle_mt(void);
97 extern asmlinkage void handle_dsp(void);
98 extern asmlinkage void handle_mcheck(void);
99 extern asmlinkage void handle_reserved(void);
100 extern void tlb_do_page_fault_0(void);
101 
102 void (*board_be_init)(void);
103 int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
104 void (*board_nmi_handler_setup)(void);
105 void (*board_ejtag_handler_setup)(void);
106 void (*board_bind_eic_interrupt)(int irq, int regset);
107 void (*board_ebase_setup)(void);
108 void(*board_cache_error_setup)(void);
109 
110 static void show_raw_backtrace(unsigned long reg29)
111 {
112 	unsigned long *sp = (unsigned long *)(reg29 & ~3);
113 	unsigned long addr;
114 
115 	printk("Call Trace:");
116 #ifdef CONFIG_KALLSYMS
117 	printk("\n");
118 #endif
119 	while (!kstack_end(sp)) {
120 		unsigned long __user *p =
121 			(unsigned long __user *)(unsigned long)sp++;
122 		if (__get_user(addr, p)) {
123 			printk(" (Bad stack address)");
124 			break;
125 		}
126 		if (__kernel_text_address(addr))
127 			print_ip_sym(addr);
128 	}
129 	printk("\n");
130 }
131 
132 #ifdef CONFIG_KALLSYMS
133 int raw_show_trace;
134 static int __init set_raw_show_trace(char *str)
135 {
136 	raw_show_trace = 1;
137 	return 1;
138 }
139 __setup("raw_show_trace", set_raw_show_trace);
140 #endif
141 
142 static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
143 {
144 	unsigned long sp = regs->regs[29];
145 	unsigned long ra = regs->regs[31];
146 	unsigned long pc = regs->cp0_epc;
147 
148 	if (!task)
149 		task = current;
150 
151 	if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
152 		show_raw_backtrace(sp);
153 		return;
154 	}
155 	printk("Call Trace:\n");
156 	do {
157 		print_ip_sym(pc);
158 		pc = unwind_stack(task, &sp, pc, &ra);
159 	} while (pc);
160 	pr_cont("\n");
161 }
162 
163 /*
164  * This routine abuses get_user()/put_user() to reference pointers
165  * with at least a bit of error checking ...
166  */
167 static void show_stacktrace(struct task_struct *task,
168 	const struct pt_regs *regs)
169 {
170 	const int field = 2 * sizeof(unsigned long);
171 	long stackdata;
172 	int i;
173 	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];
174 
175 	printk("Stack :");
176 	i = 0;
177 	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
178 		if (i && ((i % (64 / field)) == 0)) {
179 			pr_cont("\n");
180 			printk("       ");
181 		}
182 		if (i > 39) {
183 			pr_cont(" ...");
184 			break;
185 		}
186 
187 		if (__get_user(stackdata, sp++)) {
188 			pr_cont(" (Bad stack address)");
189 			break;
190 		}
191 
192 		pr_cont(" %0*lx", field, stackdata);
193 		i++;
194 	}
195 	pr_cont("\n");
196 	show_backtrace(task, regs);
197 }
198 
199 void show_stack(struct task_struct *task, unsigned long *sp)
200 {
201 	struct pt_regs regs;
202 	mm_segment_t old_fs = get_fs();
203 
204 	regs.cp0_status = KSU_KERNEL;
205 	if (sp) {
206 		regs.regs[29] = (unsigned long)sp;
207 		regs.regs[31] = 0;
208 		regs.cp0_epc = 0;
209 	} else {
210 		if (task && task != current) {
211 			regs.regs[29] = task->thread.reg29;
212 			regs.regs[31] = 0;
213 			regs.cp0_epc = task->thread.reg31;
214 #ifdef CONFIG_KGDB_KDB
215 		} else if (atomic_read(&kgdb_active) != -1 &&
216 			   kdb_current_regs) {
217 			memcpy(&regs, kdb_current_regs, sizeof(regs));
218 #endif /* CONFIG_KGDB_KDB */
219 		} else {
220 			prepare_frametrace(&regs);
221 		}
222 	}
223 	/*
224 	 * show_stack() deals exclusively with kernel mode, so be sure to access
225 	 * the stack in the kernel (not user) address space.
226 	 */
227 	set_fs(KERNEL_DS);
228 	show_stacktrace(task, &regs);
229 	set_fs(old_fs);
230 }
231 
232 static void show_code(unsigned int __user *pc)
233 {
234 	long i;
235 	unsigned short __user *pc16 = NULL;
236 
237 	printk("Code:");
238 
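	/*
	 * An odd EPC means the faulting code is MIPS16e or microMIPS:
	 * strip the ISA-mode bit and dump 16-bit halfwords; otherwise
	 * dump full 32-bit words.
	 */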
239 	if ((unsigned long)pc & 1)
240 		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
241 	for(i = -3 ; i < 6 ; i++) {
242 		unsigned int insn;
243 		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
244 			pr_cont(" (Bad address in epc)\n");
245 			break;
246 		}
247 		pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
248 	}
249 	pr_cont("\n");
250 }
251 
252 static void __show_regs(const struct pt_regs *regs)
253 {
254 	const int field = 2 * sizeof(unsigned long);
255 	unsigned int cause = regs->cp0_cause;
256 	unsigned int exccode;
257 	int i;
258 
259 	show_regs_print_info(KERN_DEFAULT);
260 
261 	/*
262 	 * Saved main processor registers
263 	 */
264 	for (i = 0; i < 32; ) {
265 		if ((i % 4) == 0)
266 			printk("$%2d   :", i);
267 		if (i == 0)
268 			pr_cont(" %0*lx", field, 0UL);
269 		else if (i == 26 || i == 27)
270 			pr_cont(" %*s", field, "");
271 		else
272 			pr_cont(" %0*lx", field, regs->regs[i]);
273 
274 		i++;
275 		if ((i % 4) == 0)
276 			pr_cont("\n");
277 	}
278 
279 #ifdef CONFIG_CPU_HAS_SMARTMIPS
280 	printk("Acx    : %0*lx\n", field, regs->acx);
281 #endif
282 	printk("Hi    : %0*lx\n", field, regs->hi);
283 	printk("Lo    : %0*lx\n", field, regs->lo);
284 
285 	/*
286 	 * Saved cp0 registers
287 	 */
288 	printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
289 	       (void *) regs->cp0_epc);
290 	printk("ra    : %0*lx %pS\n", field, regs->regs[31],
291 	       (void *) regs->regs[31]);
292 
293 	printk("Status: %08x	", (uint32_t) regs->cp0_status);
294 
295 	if (cpu_has_3kex) {
296 		if (regs->cp0_status & ST0_KUO)
297 			pr_cont("KUo ");
298 		if (regs->cp0_status & ST0_IEO)
299 			pr_cont("IEo ");
300 		if (regs->cp0_status & ST0_KUP)
301 			pr_cont("KUp ");
302 		if (regs->cp0_status & ST0_IEP)
303 			pr_cont("IEp ");
304 		if (regs->cp0_status & ST0_KUC)
305 			pr_cont("KUc ");
306 		if (regs->cp0_status & ST0_IEC)
307 			pr_cont("IEc ");
308 	} else if (cpu_has_4kex) {
309 		if (regs->cp0_status & ST0_KX)
310 			pr_cont("KX ");
311 		if (regs->cp0_status & ST0_SX)
312 			pr_cont("SX ");
313 		if (regs->cp0_status & ST0_UX)
314 			pr_cont("UX ");
315 		switch (regs->cp0_status & ST0_KSU) {
316 		case KSU_USER:
317 			pr_cont("USER ");
318 			break;
319 		case KSU_SUPERVISOR:
320 			pr_cont("SUPERVISOR ");
321 			break;
322 		case KSU_KERNEL:
323 			pr_cont("KERNEL ");
324 			break;
325 		default:
326 			pr_cont("BAD_MODE ");
327 			break;
328 		}
329 		if (regs->cp0_status & ST0_ERL)
330 			pr_cont("ERL ");
331 		if (regs->cp0_status & ST0_EXL)
332 			pr_cont("EXL ");
333 		if (regs->cp0_status & ST0_IE)
334 			pr_cont("IE ");
335 	}
336 	pr_cont("\n");
337 
338 	exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
339 	printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);
340 
341 	if (1 <= exccode && exccode <= 5)
342 		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
343 
344 	printk("PrId  : %08x (%s)\n", read_c0_prid(),
345 	       cpu_name_string());
346 }
347 
348 /*
349  * FIXME: really the generic show_regs should take a const pointer argument.
350  */
351 void show_regs(struct pt_regs *regs)
352 {
353 	__show_regs((struct pt_regs *)regs);
354 }
355 
356 void show_registers(struct pt_regs *regs)
357 {
358 	const int field = 2 * sizeof(unsigned long);
359 	mm_segment_t old_fs = get_fs();
360 
361 	__show_regs(regs);
362 	print_modules();
363 	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
364 	       current->comm, current->pid, current_thread_info(), current,
365 	      field, current_thread_info()->tp_value);
366 	if (cpu_has_userlocal) {
367 		unsigned long tls;
368 
369 		tls = read_c0_userlocal();
370 		if (tls != current_thread_info()->tp_value)
371 			printk("*HwTLS: %0*lx\n", field, tls);
372 	}
373 
374 	if (!user_mode(regs))
375 		/* Necessary for getting the correct stack content */
376 		set_fs(KERNEL_DS);
377 	show_stacktrace(current, regs);
378 	show_code((unsigned int __user *) regs->cp0_epc);
379 	printk("\n");
380 	set_fs(old_fs);
381 }
382 
383 static DEFINE_RAW_SPINLOCK(die_lock);
384 
385 void __noreturn die(const char *str, struct pt_regs *regs)
386 {
387 	static int die_counter;
388 	int sig = SIGSEGV;
389 
390 	oops_enter();
391 
392 	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
393 		       SIGSEGV) == NOTIFY_STOP)
394 		sig = 0;
395 
396 	console_verbose();
397 	raw_spin_lock_irq(&die_lock);
398 	bust_spinlocks(1);
399 
400 	printk("%s[#%d]:\n", str, ++die_counter);
401 	show_registers(regs);
402 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
403 	raw_spin_unlock_irq(&die_lock);
404 
405 	oops_exit();
406 
407 	if (in_interrupt())
408 		panic("Fatal exception in interrupt");
409 
410 	if (panic_on_oops)
411 		panic("Fatal exception");
412 
413 	if (regs && kexec_should_crash(current))
414 		crash_kexec(regs);
415 
416 	do_exit(sig);
417 }
418 
419 extern struct exception_table_entry __start___dbe_table[];
420 extern struct exception_table_entry __stop___dbe_table[];
421 
422 __asm__(
423 "	.section	__dbe_table, \"a\"\n"
424 "	.previous			\n");
425 
426 /* Given an address, look for it in the exception tables. */
427 static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
428 {
429 	const struct exception_table_entry *e;
430 
431 	e = search_extable(__start___dbe_table,
432 			   __stop___dbe_table - __start___dbe_table, addr);
433 	if (!e)
434 		e = search_module_dbetables(addr);
435 	return e;
436 }
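/*
 * Illustrative note: each __dbe_table entry pairs the address of a
 * potentially-faulting access with a fixup address, in the same spirit
 * as the __ex_table consulted by get_user().  do_be() below uses the
 * matching entry's nextinsn as the resume address, so kernel code that
 * probes possibly-absent hardware can survive a bus error.
 */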
437 
438 asmlinkage void do_be(struct pt_regs *regs)
439 {
440 	const int field = 2 * sizeof(unsigned long);
441 	const struct exception_table_entry *fixup = NULL;
442 	int data = regs->cp0_cause & 4;
443 	int action = MIPS_BE_FATAL;
444 	enum ctx_state prev_state;
445 
446 	prev_state = exception_enter();
447 	/* XXX For now.	 Fixme, this searches the wrong table ...  */
448 	if (data && !user_mode(regs))
449 		fixup = search_dbe_tables(exception_epc(regs));
450 
451 	if (fixup)
452 		action = MIPS_BE_FIXUP;
453 
454 	if (board_be_handler)
455 		action = board_be_handler(regs, fixup != NULL);
456 	else
457 		mips_cm_error_report();
458 
459 	switch (action) {
460 	case MIPS_BE_DISCARD:
461 		goto out;
462 	case MIPS_BE_FIXUP:
463 		if (fixup) {
464 			regs->cp0_epc = fixup->nextinsn;
465 			goto out;
466 		}
467 		break;
468 	default:
469 		break;
470 	}
471 
472 	/*
473 	 * Assume it would be too dangerous to continue ...
474 	 */
475 	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
476 	       data ? "Data" : "Instruction",
477 	       field, regs->cp0_epc, field, regs->regs[31]);
478 	if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
479 		       SIGBUS) == NOTIFY_STOP)
480 		goto out;
481 
482 	die_if_kernel("Oops", regs);
483 	force_sig(SIGBUS, current);
484 
485 out:
486 	exception_exit(prev_state);
487 }
488 
489 /*
490  * ll/sc, rdhwr, sync emulation
491  */
492 
493 #define OPCODE 0xfc000000
494 #define BASE   0x03e00000
495 #define RT     0x001f0000
496 #define OFFSET 0x0000ffff
497 #define LL     0xc0000000
498 #define SC     0xe0000000
499 #define SPEC0  0x00000000
500 #define SPEC3  0x7c000000
501 #define RD     0x0000f800
502 #define FUNC   0x0000003f
503 #define SYNC   0x0000000f
504 #define RDHWR  0x0000003b
505 
506 /*  microMIPS definitions   */
507 #define MM_POOL32A_FUNC 0xfc00ffff
508 #define MM_RDHWR        0x00006b3c
509 #define MM_RS           0x001f0000
510 #define MM_RT           0x03e00000
511 
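/*
 * Illustrative worked example (not part of the original file): the
 * toolchain's TLS-pointer read "rdhwr $3, $29" encodes to 0x7c03e83b,
 * and the masks above take it apart as follows:
 *
 *   (0x7c03e83b & OPCODE) == SPEC3        major opcode SPECIAL3
 *   (0x7c03e83b & FUNC)   == RDHWR        function field
 *   (0x7c03e83b & RT) >> 16 == 3          destination GPR ($v1)
 *   (0x7c03e83b & RD) >> 11 == 29         hw register 29 (MIPS_HWR_ULR)
 *
 * simulate_rdhwr_normal() below performs exactly this decode.
 */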
512 /*
513  * The ll_bit is cleared by r*_switch.S
514  */
515 
516 unsigned int ll_bit;
517 struct task_struct *ll_task;
518 
519 static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
520 {
521 	unsigned long value, __user *vaddr;
522 	long offset;
523 
524 	/*
525 	 * Analyse the ll instruction that just caused an RI exception
526 	 * and compute the address it references.
527 	 */
528 
529 	/* sign extend offset */
530 	offset = opcode & OFFSET;
531 	offset <<= 16;
532 	offset >>= 16;
533 
534 	vaddr = (unsigned long __user *)
535 		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
536 
537 	if ((unsigned long)vaddr & 3)
538 		return SIGBUS;
539 	if (get_user(value, vaddr))
540 		return SIGSEGV;
541 
542 	preempt_disable();
543 
544 	if (ll_task == NULL || ll_task == current) {
545 		ll_bit = 1;
546 	} else {
547 		ll_bit = 0;
548 	}
549 	ll_task = current;
550 
551 	preempt_enable();
552 
553 	regs->regs[(opcode & RT) >> 16] = value;
554 
555 	return 0;
556 }
557 
558 static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
559 {
560 	unsigned long __user *vaddr;
561 	unsigned long reg;
562 	long offset;
563 
564 	/*
565 	 * Analyse the sc instruction that just caused an RI exception
566 	 * and compute the address it references.
567 	 */
568 
569 	/* sign extend offset */
570 	offset = opcode & OFFSET;
571 	offset <<= 16;
572 	offset >>= 16;
573 
574 	vaddr = (unsigned long __user *)
575 		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
576 	reg = (opcode & RT) >> 16;
577 
578 	if ((unsigned long)vaddr & 3)
579 		return SIGBUS;
580 
581 	preempt_disable();
582 
583 	if (ll_bit == 0 || ll_task != current) {
584 		regs->regs[reg] = 0;
585 		preempt_enable();
586 		return 0;
587 	}
588 
589 	preempt_enable();
590 
591 	if (put_user(regs->regs[reg], vaddr))
592 		return SIGSEGV;
593 
594 	regs->regs[reg] = 1;
595 
596 	return 0;
597 }
598 
599 /*
600  * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is, both
601  * opcodes are supposed to result in coprocessor unusable exceptions if
602  * executed on ll/sc-less processors.  That's the theory.  In practice a
603  * few processors such as NEC's VR4100 throw reserved instruction exceptions
604  * instead, so we're doing the emulation thing in both exception handlers.
605  */
606 static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
607 {
608 	if ((opcode & OPCODE) == LL) {
609 		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
610 				1, regs, 0);
611 		return simulate_ll(regs, opcode);
612 	}
613 	if ((opcode & OPCODE) == SC) {
614 		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
615 				1, regs, 0);
616 		return simulate_sc(regs, opcode);
617 	}
618 
619 	return -1;			/* Must be something else ... */
620 }
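/*
 * For context, an illustrative (assumed) user-level sequence that the
 * helpers above emulate: the classic LL/SC atomic-increment retry
 * loop,
 *
 *   1:  ll    $t0, 0($a0)      # load-linked, establishes the link
 *       addiu $t0, $t0, 1
 *       sc    $t0, 0($a0)      # store-conditional, $t0 := 1 on success
 *       beqz  $t0, 1b          # link was lost -> retry
 *
 * simulate_ll() records the link in ll_bit/ll_task, and simulate_sc()
 * performs the store only while the current task still holds that
 * link, mirroring the hardware semantics on a single CPU.
 */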
621 
622 /*
623  * Simulate trapping 'rdhwr' instructions to provide user-accessible
624  * registers not implemented in hardware.
625  */
626 static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
627 {
628 	struct thread_info *ti = task_thread_info(current);
629 
630 	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
631 			1, regs, 0);
632 	switch (rd) {
633 	case MIPS_HWR_CPUNUM:		/* CPU number */
634 		regs->regs[rt] = smp_processor_id();
635 		return 0;
636 	case MIPS_HWR_SYNCISTEP:	/* SYNCI length */
637 		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
638 				     current_cpu_data.icache.linesz);
639 		return 0;
640 	case MIPS_HWR_CC:		/* Read count register */
641 		regs->regs[rt] = read_c0_count();
642 		return 0;
643 	case MIPS_HWR_CCRES:		/* Count register resolution */
644 		switch (current_cpu_type()) {
645 		case CPU_20KC:
646 		case CPU_25KF:
647 			regs->regs[rt] = 1;
648 			break;
649 		default:
650 			regs->regs[rt] = 2;
651 		}
652 		return 0;
653 	case MIPS_HWR_ULR:		/* Read UserLocal register */
654 		regs->regs[rt] = ti->tp_value;
655 		return 0;
656 	default:
657 		return -1;
658 	}
659 }
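/*
 * In practice the most frequent visitor here is the TLS-pointer read
 * that compilers emit for __thread accesses: on cores lacking the
 * UserLocal register, "rdhwr" traps as a reserved instruction and the
 * MIPS_HWR_ULR case above supplies ti->tp_value instead.
 */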
660 
661 static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
662 {
663 	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
664 		int rd = (opcode & RD) >> 11;
665 		int rt = (opcode & RT) >> 16;
666 
667 		simulate_rdhwr(regs, rd, rt);
668 		return 0;
669 	}
670 
671 	/* Not ours.  */
672 	return -1;
673 }
674 
675 static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
676 {
677 	if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
678 		int rd = (opcode & MM_RS) >> 16;
679 		int rt = (opcode & MM_RT) >> 21;
680 		simulate_rdhwr(regs, rd, rt);
681 		return 0;
682 	}
683 
684 	/* Not ours.  */
685 	return -1;
686 }
687 
688 static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
689 {
690 	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
691 		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
692 				1, regs, 0);
693 		return 0;
694 	}
695 
696 	return -1;			/* Must be something else ... */
697 }
698 
699 asmlinkage void do_ov(struct pt_regs *regs)
700 {
701 	enum ctx_state prev_state;
702 	siginfo_t info;
703 
704 	clear_siginfo(&info);
705 	info.si_signo = SIGFPE;
706 	info.si_code = FPE_INTOVF;
707 	info.si_addr = (void __user *)regs->cp0_epc;
708 
709 	prev_state = exception_enter();
710 	die_if_kernel("Integer overflow", regs);
711 
712 	force_sig_info(SIGFPE, &info, current);
713 	exception_exit(prev_state);
714 }
715 
716 /*
717  * Send SIGFPE according to FCSR Cause bits, which must have already
718  * been masked against Enable bits.  This is important as Inexact can
719  * happen together with Overflow or Underflow, and `ptrace' can set
720  * any bits.
721  */
722 void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
723 		     struct task_struct *tsk)
724 {
725 	struct siginfo si;
726 
727 	clear_siginfo(&si);
728 	si.si_addr = fault_addr;
729 	si.si_signo = SIGFPE;
730 
731 	if (fcr31 & FPU_CSR_INV_X)
732 		si.si_code = FPE_FLTINV;
733 	else if (fcr31 & FPU_CSR_DIV_X)
734 		si.si_code = FPE_FLTDIV;
735 	else if (fcr31 & FPU_CSR_OVF_X)
736 		si.si_code = FPE_FLTOVF;
737 	else if (fcr31 & FPU_CSR_UDF_X)
738 		si.si_code = FPE_FLTUND;
739 	else if (fcr31 & FPU_CSR_INE_X)
740 		si.si_code = FPE_FLTRES;
741 
742 	force_sig_info(SIGFPE, &si, tsk);
743 }
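/*
 * Example (illustrative): if fcr31 arrives with both FPU_CSR_OVF_X and
 * FPU_CSR_INE_X set, the chain above reports FPE_FLTOVF; the more
 * specific exception wins, so Inexact never masks a concurrent
 * Overflow or Underflow.
 */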
744 
745 int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
746 {
747 	struct siginfo si;
748 	struct vm_area_struct *vma;
749 
750 	clear_siginfo(&si);
751 	switch (sig) {
752 	case 0:
753 		return 0;
754 
755 	case SIGFPE:
756 		force_fcr31_sig(fcr31, fault_addr, current);
757 		return 1;
758 
759 	case SIGBUS:
760 		si.si_addr = fault_addr;
761 		si.si_signo = sig;
762 		si.si_code = BUS_ADRERR;
763 		force_sig_info(sig, &si, current);
764 		return 1;
765 
766 	case SIGSEGV:
767 		si.si_addr = fault_addr;
768 		si.si_signo = sig;
769 		down_read(&current->mm->mmap_sem);
770 		vma = find_vma(current->mm, (unsigned long)fault_addr);
771 		if (vma && (vma->vm_start <= (unsigned long)fault_addr))
772 			si.si_code = SEGV_ACCERR;
773 		else
774 			si.si_code = SEGV_MAPERR;
775 		up_read(&current->mm->mmap_sem);
776 		force_sig_info(sig, &si, current);
777 		return 1;
778 
779 	default:
780 		force_sig(sig, current);
781 		return 1;
782 	}
783 }
784 
785 static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
786 		       unsigned long old_epc, unsigned long old_ra)
787 {
788 	union mips_instruction inst = { .word = opcode };
789 	void __user *fault_addr;
790 	unsigned long fcr31;
791 	int sig;
792 
793 	/* If it's obviously not an FP instruction, skip it */
794 	switch (inst.i_format.opcode) {
795 	case cop1_op:
796 	case cop1x_op:
797 	case lwc1_op:
798 	case ldc1_op:
799 	case swc1_op:
800 	case sdc1_op:
801 		break;
802 
803 	default:
804 		return -1;
805 	}
806 
807 	/*
808 	 * do_ri skipped over the instruction via compute_return_epc, undo
809 	 * that for the FPU emulator.
810 	 */
811 	regs->cp0_epc = old_epc;
812 	regs->regs[31] = old_ra;
813 
814 	/* Save the FP context to struct thread_struct */
815 	lose_fpu(1);
816 
817 	/* Run the emulator */
818 	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
819 				       &fault_addr);
820 
821 	/*
822 	 * We can't allow the emulated instruction to leave any
823 	 * enabled Cause bits set in $fcr31.
824 	 */
825 	fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
826 	current->thread.fpu.fcr31 &= ~fcr31;
827 
828 	/* Restore the hardware register state */
829 	own_fpu(1);
830 
831 	/* Send a signal if required.  */
832 	process_fpemu_return(sig, fault_addr, fcr31);
833 
834 	return 0;
835 }
836 
837 /*
838  * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
839  */
840 asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
841 {
842 	enum ctx_state prev_state;
843 	void __user *fault_addr;
844 	int sig;
845 
846 	prev_state = exception_enter();
847 	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
848 		       SIGFPE) == NOTIFY_STOP)
849 		goto out;
850 
851 	/* Clear FCSR.Cause before enabling interrupts */
852 	write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
853 	local_irq_enable();
854 
855 	die_if_kernel("FP exception in kernel code", regs);
856 
857 	if (fcr31 & FPU_CSR_UNI_X) {
858 		/*
859 		 * Unimplemented operation exception.  If we've got the full
860 		 * software emulator on-board, let's use it...
861 		 *
862 		 * Force FPU to dump state into task/thread context.  We're
863 		 * moving a lot of data here for what is probably a single
864 		 * instruction, but the alternative is to pre-decode the FP
865 		 * register operands before invoking the emulator, which seems
866 		 * a bit extreme for what should be an infrequent event.
867 		 */
868 		/* Ensure 'resume' does not overwrite the saved FP context again. */
869 		lose_fpu(1);
870 
871 		/* Run the emulator */
872 		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
873 					       &fault_addr);
874 
875 		/*
876 		 * We can't allow the emulated instruction to leave any
877 		 * enabled Cause bits set in $fcr31.
878 		 */
879 		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
880 		current->thread.fpu.fcr31 &= ~fcr31;
881 
882 		/* Restore the hardware register state */
883 		own_fpu(1);	/* Using the FPU again.	 */
884 	} else {
885 		sig = SIGFPE;
886 		fault_addr = (void __user *) regs->cp0_epc;
887 	}
888 
889 	/* Send a signal if required.  */
890 	process_fpemu_return(sig, fault_addr, fcr31);
891 
892 out:
893 	exception_exit(prev_state);
894 }
895 
896 void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
897 	const char *str)
898 {
899 	siginfo_t info;
900 	char b[40];
901 
902 	clear_siginfo(&info);
903 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
904 	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
905 			 SIGTRAP) == NOTIFY_STOP)
906 		return;
907 #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
908 
909 	if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
910 		       SIGTRAP) == NOTIFY_STOP)
911 		return;
912 
913 	/*
914 	 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
915 	 * insns, even for trap and break codes that indicate arithmetic
916 	 * failures.  Weird ...
917 	 * But should we continue the brokenness???  --macro
918 	 */
919 	switch (code) {
920 	case BRK_OVERFLOW:
921 	case BRK_DIVZERO:
922 		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
923 		die_if_kernel(b, regs);
924 		if (code == BRK_DIVZERO)
925 			info.si_code = FPE_INTDIV;
926 		else
927 			info.si_code = FPE_INTOVF;
928 		info.si_signo = SIGFPE;
929 		info.si_addr = (void __user *) regs->cp0_epc;
930 		force_sig_info(SIGFPE, &info, current);
931 		break;
932 	case BRK_BUG:
933 		die_if_kernel("Kernel bug detected", regs);
934 		force_sig(SIGTRAP, current);
935 		break;
936 	case BRK_MEMU:
937 		/*
938 		 * This breakpoint code is used by the FPU emulator to retake
939 		 * control of the CPU after executing the instruction from the
940 		 * delay slot of an emulated branch.
941 		 *
942 		 * Terminate if the exception was recognized as a delay-slot
943 		 * return; otherwise handle as normal.
944 		 */
945 		if (do_dsemulret(regs))
946 			return;
947 
948 		die_if_kernel("Math emu break/trap", regs);
949 		force_sig(SIGTRAP, current);
950 		break;
951 	default:
952 		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
953 		die_if_kernel(b, regs);
954 		if (si_code) {
955 			info.si_signo = SIGTRAP;
956 			info.si_code = si_code;
957 			force_sig_info(SIGTRAP, &info, current);
958 		} else {
959 			force_sig(SIGTRAP, current);
960 		}
961 	}
962 }
963 
964 asmlinkage void do_bp(struct pt_regs *regs)
965 {
966 	unsigned long epc = msk_isa16_mode(exception_epc(regs));
967 	unsigned int opcode, bcode;
968 	enum ctx_state prev_state;
969 	mm_segment_t seg;
970 
971 	seg = get_fs();
972 	if (!user_mode(regs))
973 		set_fs(KERNEL_DS);
974 
975 	prev_state = exception_enter();
976 	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
977 	if (get_isa16_mode(regs->cp0_epc)) {
978 		u16 instr[2];
979 
980 		if (__get_user(instr[0], (u16 __user *)epc))
981 			goto out_sigsegv;
982 
983 		if (!cpu_has_mmips) {
984 			/* MIPS16e mode */
985 			bcode = (instr[0] >> 5) & 0x3f;
986 		} else if (mm_insn_16bit(instr[0])) {
987 			/* 16-bit microMIPS BREAK */
988 			bcode = instr[0] & 0xf;
989 		} else {
990 			/* 32-bit microMIPS BREAK */
991 			if (__get_user(instr[1], (u16 __user *)(epc + 2)))
992 				goto out_sigsegv;
993 			opcode = (instr[0] << 16) | instr[1];
994 			bcode = (opcode >> 6) & ((1 << 20) - 1);
995 		}
996 	} else {
997 		if (__get_user(opcode, (unsigned int __user *)epc))
998 			goto out_sigsegv;
999 		bcode = (opcode >> 6) & ((1 << 20) - 1);
1000 	}
1001 
1002 	/*
1003 	 * There is an ancient bug in the MIPS assemblers whereby the break
1004 	 * code starts at bit 16 instead of bit 6 in the opcode.
1005 	 * Gas is bug-compatible, but not always, grrr...
1006 	 * We handle both cases with a simple heuristic.  --macro
1007 	 */
1008 	if (bcode >= (1 << 10))
1009 		bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);
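	/*
	 * Worked example (illustrative): "break 7" assembled with the
	 * buggy convention carries the code in bits 25..16, so bcode
	 * arrives here as 7 << 10 == 0x1c00.  That is >= 1 << 10, and
	 * the swap above yields
	 * ((0x1c00 & 0x3ff) << 10) | (0x1c00 >> 10) == 7 == BRK_DIVZERO.
	 */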
1010 
1011 	/*
1012 	 * Notify the kprobe handlers if the instruction is likely to
1013 	 * pertain to them.
1014 	 */
1015 	switch (bcode) {
1016 	case BRK_UPROBE:
1017 		if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
1018 			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1019 			goto out;
1020 		else
1021 			break;
1022 	case BRK_UPROBE_XOL:
1023 		if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
1024 			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1025 			goto out;
1026 		else
1027 			break;
1028 	case BRK_KPROBE_BP:
1029 		if (notify_die(DIE_BREAK, "debug", regs, bcode,
1030 			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1031 			goto out;
1032 		else
1033 			break;
1034 	case BRK_KPROBE_SSTEPBP:
1035 		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
1036 			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1037 			goto out;
1038 		else
1039 			break;
1040 	default:
1041 		break;
1042 	}
1043 
1044 	do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");
1045 
1046 out:
1047 	set_fs(seg);
1048 	exception_exit(prev_state);
1049 	return;
1050 
1051 out_sigsegv:
1052 	force_sig(SIGSEGV, current);
1053 	goto out;
1054 }
1055 
1056 asmlinkage void do_tr(struct pt_regs *regs)
1057 {
1058 	u32 opcode, tcode = 0;
1059 	enum ctx_state prev_state;
1060 	u16 instr[2];
1061 	mm_segment_t seg;
1062 	unsigned long epc = msk_isa16_mode(exception_epc(regs));
1063 
1064 	seg = get_fs();
1065 	if (!user_mode(regs))
1066 		set_fs(get_ds());
1067 
1068 	prev_state = exception_enter();
1069 	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1070 	if (get_isa16_mode(regs->cp0_epc)) {
1071 		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
1072 		    __get_user(instr[1], (u16 __user *)(epc + 2)))
1073 			goto out_sigsegv;
1074 		opcode = (instr[0] << 16) | instr[1];
1075 		/* Immediate versions don't provide a code.  */
1076 		if (!(opcode & OPCODE))
1077 			tcode = (opcode >> 12) & ((1 << 4) - 1);
1078 	} else {
1079 		if (__get_user(opcode, (u32 __user *)epc))
1080 			goto out_sigsegv;
1081 		/* Immediate versions don't provide a code.  */
1082 		if (!(opcode & OPCODE))
1083 			tcode = (opcode >> 6) & ((1 << 10) - 1);
1084 	}
1085 
1086 	do_trap_or_bp(regs, tcode, 0, "Trap");
1087 
1088 out:
1089 	set_fs(seg);
1090 	exception_exit(prev_state);
1091 	return;
1092 
1093 out_sigsegv:
1094 	force_sig(SIGSEGV, current);
1095 	goto out;
1096 }
1097 
1098 asmlinkage void do_ri(struct pt_regs *regs)
1099 {
1100 	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
1101 	unsigned long old_epc = regs->cp0_epc;
1102 	unsigned long old31 = regs->regs[31];
1103 	enum ctx_state prev_state;
1104 	unsigned int opcode = 0;
1105 	int status = -1;
1106 
1107 	/*
1108 	 * Avoid any kernel code. Just emulate the R2 instruction
1109 	 * as quickly as possible.
1110 	 */
1111 	if (mipsr2_emulation && cpu_has_mips_r6 &&
1112 	    likely(user_mode(regs)) &&
1113 	    likely(get_user(opcode, epc) >= 0)) {
1114 		unsigned long fcr31 = 0;
1115 
1116 		status = mipsr2_decoder(regs, opcode, &fcr31);
1117 		switch (status) {
1118 		case 0:
1119 		case SIGEMT:
1120 			return;
1121 		case SIGILL:
1122 			goto no_r2_instr;
1123 		default:
1124 			process_fpemu_return(status,
1125 					     &current->thread.cp0_baduaddr,
1126 					     fcr31);
1127 			return;
1128 		}
1129 	}
1130 
1131 no_r2_instr:
1132 
1133 	prev_state = exception_enter();
1134 	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1135 
1136 	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
1137 		       SIGILL) == NOTIFY_STOP)
1138 		goto out;
1139 
1140 	die_if_kernel("Reserved instruction in kernel code", regs);
1141 
1142 	if (unlikely(compute_return_epc(regs) < 0))
1143 		goto out;
1144 
1145 	if (!get_isa16_mode(regs->cp0_epc)) {
1146 		if (unlikely(get_user(opcode, epc) < 0))
1147 			status = SIGSEGV;
1148 
1149 		if (!cpu_has_llsc && status < 0)
1150 			status = simulate_llsc(regs, opcode);
1151 
1152 		if (status < 0)
1153 			status = simulate_rdhwr_normal(regs, opcode);
1154 
1155 		if (status < 0)
1156 			status = simulate_sync(regs, opcode);
1157 
1158 		if (status < 0)
1159 			status = simulate_fp(regs, opcode, old_epc, old31);
1160 	} else if (cpu_has_mmips) {
1161 		unsigned short mmop[2] = { 0 };
1162 
1163 		if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
1164 			status = SIGSEGV;
1165 		if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
1166 			status = SIGSEGV;
1167 		opcode = mmop[0];
1168 		opcode = (opcode << 16) | mmop[1];
1169 
1170 		if (status < 0)
1171 			status = simulate_rdhwr_mm(regs, opcode);
1172 	}
1173 
1174 	if (status < 0)
1175 		status = SIGILL;
1176 
1177 	if (unlikely(status > 0)) {
1178 		regs->cp0_epc = old_epc;		/* Undo skip-over.  */
1179 		regs->regs[31] = old31;
1180 		force_sig(status, current);
1181 	}
1182 
1183 out:
1184 	exception_exit(prev_state);
1185 }
1186 
1187 /*
1188  * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
1189  * emulated more than some threshold number of instructions, force migration to
1190  * a "CPU" that has FP support.
1191  */
1192 static void mt_ase_fp_affinity(void)
1193 {
1194 #ifdef CONFIG_MIPS_MT_FPAFF
1195 	if (mt_fpemul_threshold > 0 &&
1196 	     ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
1197 		/*
1198 		 * If there's no FPU present, or if the application has already
1199 		 * restricted the allowed set to exclude any CPUs with FPUs,
1200 		 * we'll skip the procedure.
1201 		 */
1202 		if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
1203 			cpumask_t tmask;
1204 
1205 			current->thread.user_cpus_allowed
1206 				= current->cpus_allowed;
1207 			cpumask_and(&tmask, &current->cpus_allowed,
1208 				    &mt_fpu_cpumask);
1209 			set_cpus_allowed_ptr(current, &tmask);
1210 			set_thread_flag(TIF_FPUBOUND);
1211 		}
1212 	}
1213 #endif /* CONFIG_MIPS_MT_FPAFF */
1214 }
1215 
1216 /*
1217  * No lock; only written during early bootup by CPU 0.
1218  */
1219 static RAW_NOTIFIER_HEAD(cu2_chain);
1220 
1221 int __ref register_cu2_notifier(struct notifier_block *nb)
1222 {
1223 	return raw_notifier_chain_register(&cu2_chain, nb);
1224 }
1225 
1226 int cu2_notifier_call_chain(unsigned long val, void *v)
1227 {
1228 	return raw_notifier_call_chain(&cu2_chain, val, v);
1229 }
1230 
1231 static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
1232 	void *data)
1233 {
1234 	struct pt_regs *regs = data;
1235 
1236 	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
1237 			      "instruction", regs);
1238 	force_sig(SIGILL, current);
1239 
1240 	return NOTIFY_OK;
1241 }
1242 
1243 static int enable_restore_fp_context(int msa)
1244 {
1245 	int err, was_fpu_owner, prior_msa;
1246 
1247 	/*
1248 	 * If an FP mode switch is currently underway, wait for it to
1249 	 * complete before proceeding.
1250 	 */
1251 	wait_on_atomic_t(&current->mm->context.fp_mode_switching,
1252 			 atomic_t_wait, TASK_KILLABLE);
1253 
1254 	if (!used_math()) {
1255 		/* First time FP context user. */
1256 		preempt_disable();
1257 		err = init_fpu();
1258 		if (msa && !err) {
1259 			enable_msa();
1260 			init_msa_upper();
1261 			set_thread_flag(TIF_USEDMSA);
1262 			set_thread_flag(TIF_MSA_CTX_LIVE);
1263 		}
1264 		preempt_enable();
1265 		if (!err)
1266 			set_used_math();
1267 		return err;
1268 	}
1269 
1270 	/*
1271 	 * This task has formerly used the FP context.
1272 	 *
1273 	 * If this thread has no live MSA vector context then we can simply
1274 	 * restore the scalar FP context. If it has live MSA vector context
1275 	 * (that is, it has or may have used MSA since last performing a
1276 	 * function call) then we'll need to restore the vector context. This
1277 	 * applies even if we're currently only executing a scalar FP
1278 	 * instruction. This is because if we were to later execute an MSA
1279 	 * instruction then we'd either have to:
1280 	 *
1281 	 *  - Restore the vector context & clobber any registers modified by
1282 	 *    scalar FP instructions between now & then.
1283 	 *
1284 	 * or
1285 	 *
1286 	 *  - Not restore the vector context & lose the most significant bits
1287 	 *    of all vector registers.
1288 	 *
1289 	 * Neither of those options is acceptable. We cannot restore the least
1290 	 * significant bits of the registers now & only restore the most
1291 	 * significant bits later because the most significant bits of any
1292 	 * vector registers whose aliased FP register is modified now will have
1293 	 * been zeroed. We'd have no way to know that when restoring the vector
1294 	 * context & thus may load an outdated value for the most significant
1295 	 * bits of a vector register.
1296 	 */
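	/*
	 * Concretely (illustrative): each scalar FP register aliases the
	 * low 64 bits of the corresponding vector register ($f0 is
	 * $w0[63:0]), and a scalar write to $f0 zeroes $w0[127:64],
	 * which is why a partial restore cannot be patched up later.
	 */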
1297 	if (!msa && !thread_msa_context_live())
1298 		return own_fpu(1);
1299 
1300 	/*
1301 	 * This task is using or has previously used MSA. Thus we require
1302 	 * that Status.FR == 1.
1303 	 */
1304 	preempt_disable();
1305 	was_fpu_owner = is_fpu_owner();
1306 	err = own_fpu_inatomic(0);
1307 	if (err)
1308 		goto out;
1309 
1310 	enable_msa();
1311 	write_msa_csr(current->thread.fpu.msacsr);
1312 	set_thread_flag(TIF_USEDMSA);
1313 
1314 	/*
1315 	 * If this is the first time that the task is using MSA and it has
1316 	 * previously used scalar FP in this time slice then we already have
1317 	 * FP context which we shouldn't clobber. We do however need to clear
1318 	 * the upper 64b of each vector register so that this task has no
1319 	 * opportunity to see data left behind by another.
1320 	 */
1321 	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
1322 	if (!prior_msa && was_fpu_owner) {
1323 		init_msa_upper();
1324 
1325 		goto out;
1326 	}
1327 
1328 	if (!prior_msa) {
1329 		/*
1330 		 * Restore the least significant 64b of each vector register
1331 		 * from the existing scalar FP context.
1332 		 */
1333 		_restore_fp(current);
1334 
1335 		/*
1336 		 * The task has not formerly used MSA, so clear the upper 64b
1337 		 * of each vector register such that it cannot see data left
1338 		 * behind by another task.
1339 		 */
1340 		init_msa_upper();
1341 	} else {
1342 		/* We need to restore the vector context. */
1343 		restore_msa(current);
1344 
1345 		/* Restore the scalar FP control & status register */
1346 		if (!was_fpu_owner)
1347 			write_32bit_cp1_register(CP1_STATUS,
1348 						 current->thread.fpu.fcr31);
1349 	}
1350 
1351 out:
1352 	preempt_enable();
1353 
1354 	return 0;
1355 }
1356 
1357 asmlinkage void do_cpu(struct pt_regs *regs)
1358 {
1359 	enum ctx_state prev_state;
1360 	unsigned int __user *epc;
1361 	unsigned long old_epc, old31;
1362 	void __user *fault_addr;
1363 	unsigned int opcode;
1364 	unsigned long fcr31;
1365 	unsigned int cpid;
1366 	int status, err;
1367 	int sig;
1368 
1369 	prev_state = exception_enter();
1370 	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
1371 
1372 	if (cpid != 2)
1373 		die_if_kernel("do_cpu invoked from kernel context!", regs);
1374 
1375 	switch (cpid) {
1376 	case 0:
1377 		epc = (unsigned int __user *)exception_epc(regs);
1378 		old_epc = regs->cp0_epc;
1379 		old31 = regs->regs[31];
1380 		opcode = 0;
1381 		status = -1;
1382 
1383 		if (unlikely(compute_return_epc(regs) < 0))
1384 			break;
1385 
1386 		if (!get_isa16_mode(regs->cp0_epc)) {
1387 			if (unlikely(get_user(opcode, epc) < 0))
1388 				status = SIGSEGV;
1389 
1390 			if (!cpu_has_llsc && status < 0)
1391 				status = simulate_llsc(regs, opcode);
1392 		}
1393 
1394 		if (status < 0)
1395 			status = SIGILL;
1396 
1397 		if (unlikely(status > 0)) {
1398 			regs->cp0_epc = old_epc;	/* Undo skip-over.  */
1399 			regs->regs[31] = old31;
1400 			force_sig(status, current);
1401 		}
1402 
1403 		break;
1404 
1405 	case 3:
1406 		/*
1407 		 * The COP3 opcode space and consequently the CP0.Status.CU3
1408 		 * bit and the CP0.Cause.CE=3 encoding have been removed as
1409 		 * of the MIPS III ISA.  From the MIPS IV and MIPS32r2 ISAs
1410 		 * up the space has been reused for COP1X instructions, which
1411 		 * are enabled by the CP0.Status.CU1 bit and consequently
1412 		 * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable
1413 		 * exceptions.  Some FPU-less processors that implement one
1414 		 * of these ISAs however use this code erroneously for COP1X
1415 		 * instructions.  Therefore we redirect this trap to the FP
1416 		 * emulator too.
1417 		 */
1418 		if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
1419 			force_sig(SIGILL, current);
1420 			break;
1421 		}
1422 		/* Fall through.  */
1423 
1424 	case 1:
1425 		err = enable_restore_fp_context(0);
1426 
1427 		if (raw_cpu_has_fpu && !err)
1428 			break;
1429 
1430 		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
1431 					       &fault_addr);
1432 
1433 		/*
1434 		 * We can't allow the emulated instruction to leave
1435 		 * any enabled Cause bits set in $fcr31.
1436 		 */
1437 		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
1438 		current->thread.fpu.fcr31 &= ~fcr31;
1439 
1440 		/* Send a signal if required.  */
1441 		if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
1442 			mt_ase_fp_affinity();
1443 
1444 		break;
1445 
1446 	case 2:
1447 		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
1448 		break;
1449 	}
1450 
1451 	exception_exit(prev_state);
1452 }
1453 
1454 asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
1455 {
1456 	enum ctx_state prev_state;
1457 
1458 	prev_state = exception_enter();
1459 	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1460 	if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
1461 		       current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
1462 		goto out;
1463 
1464 	/* Clear MSACSR.Cause before enabling interrupts */
1465 	write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
1466 	local_irq_enable();
1467 
1468 	die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
1469 	force_sig(SIGFPE, current);
1470 out:
1471 	exception_exit(prev_state);
1472 }
1473 
1474 asmlinkage void do_msa(struct pt_regs *regs)
1475 {
1476 	enum ctx_state prev_state;
1477 	int err;
1478 
1479 	prev_state = exception_enter();
1480 
1481 	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
1482 		force_sig(SIGILL, current);
1483 		goto out;
1484 	}
1485 
1486 	die_if_kernel("do_msa invoked from kernel context!", regs);
1487 
1488 	err = enable_restore_fp_context(1);
1489 	if (err)
1490 		force_sig(SIGILL, current);
1491 out:
1492 	exception_exit(prev_state);
1493 }
1494 
1495 asmlinkage void do_mdmx(struct pt_regs *regs)
1496 {
1497 	enum ctx_state prev_state;
1498 
1499 	prev_state = exception_enter();
1500 	force_sig(SIGILL, current);
1501 	exception_exit(prev_state);
1502 }
1503 
1504 /*
1505  * Called with interrupts disabled.
1506  */
1507 asmlinkage void do_watch(struct pt_regs *regs)
1508 {
1509 	siginfo_t info;
1510 	enum ctx_state prev_state;
1511 
1512 	clear_siginfo(&info);
1513 	info.si_signo = SIGTRAP;
1514 	info.si_code = TRAP_HWBKPT;
1515 
1516 	prev_state = exception_enter();
1517 	/*
1518 	 * Clear the WP bit (bit 22) of the cause register so we don't
1519 	 * loop forever.
1520 	 */
1521 	clear_c0_cause(CAUSEF_WP);
1522 
1523 	/*
1524 	 * If the current thread has the watch registers loaded, save
1525 	 * their values and send SIGTRAP.  Otherwise another thread
1526 	 * left the registers set, clear them and continue.
1527 	 */
1528 	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
1529 		mips_read_watch_registers();
1530 		local_irq_enable();
1531 		force_sig_info(SIGTRAP, &info, current);
1532 	} else {
1533 		mips_clear_watch_registers();
1534 		local_irq_enable();
1535 	}
1536 	exception_exit(prev_state);
1537 }
1538 
1539 asmlinkage void do_mcheck(struct pt_regs *regs)
1540 {
1541 	int multi_match = regs->cp0_status & ST0_TS;
1542 	enum ctx_state prev_state;
1543 	mm_segment_t old_fs = get_fs();
1544 
1545 	prev_state = exception_enter();
1546 	show_regs(regs);
1547 
1548 	if (multi_match) {
1549 		dump_tlb_regs();
1550 		pr_info("\n");
1551 		dump_tlb_all();
1552 	}
1553 
1554 	if (!user_mode(regs))
1555 		set_fs(KERNEL_DS);
1556 
1557 	show_code((unsigned int __user *) regs->cp0_epc);
1558 
1559 	set_fs(old_fs);
1560 
1561 	/*
1562 	 * Some chips may have other causes of machine check (e.g. SB1
1563 	 * graduation timer)
1564 	 */
1565 	panic("Caught Machine Check exception - %scaused by multiple "
1566 	      "matching entries in the TLB.",
1567 	      (multi_match) ? "" : "not ");
1568 }
1569 
1570 asmlinkage void do_mt(struct pt_regs *regs)
1571 {
1572 	int subcode;
1573 
1574 	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
1575 			>> VPECONTROL_EXCPT_SHIFT;
1576 	switch (subcode) {
1577 	case 0:
1578 		printk(KERN_DEBUG "Thread Underflow\n");
1579 		break;
1580 	case 1:
1581 		printk(KERN_DEBUG "Thread Overflow\n");
1582 		break;
1583 	case 2:
1584 		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
1585 		break;
1586 	case 3:
1587 		printk(KERN_DEBUG "Gating Storage Exception\n");
1588 		break;
1589 	case 4:
1590 		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
1591 		break;
1592 	case 5:
1593 		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
1594 		break;
1595 	default:
1596 		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
1597 			subcode);
1598 		break;
1599 	}
1600 	die_if_kernel("MIPS MT Thread exception in kernel", regs);
1601 
1602 	force_sig(SIGILL, current);
1603 }
1604 
1605 
1606 asmlinkage void do_dsp(struct pt_regs *regs)
1607 {
1608 	if (cpu_has_dsp)
1609 		panic("Unexpected DSP exception");
1610 
1611 	force_sig(SIGILL, current);
1612 }
1613 
1614 asmlinkage void do_reserved(struct pt_regs *regs)
1615 {
1616 	/*
1617 	 * Game over - no way to handle this if it ever occurs.  Most probably
1618 	 * caused by a new, unknown CPU type or by another deadly
1619 	 * hardware/software error.
1620 	 */
1621 	show_regs(regs);
1622 	panic("Caught reserved exception %ld - should not happen.",
1623 	      (regs->cp0_cause & 0x7f) >> 2);
1624 }
1625 
1626 static int __initdata l1parity = 1;
1627 static int __init nol1parity(char *s)
1628 {
1629 	l1parity = 0;
1630 	return 1;
1631 }
1632 __setup("nol1par", nol1parity);
1633 static int __initdata l2parity = 1;
1634 static int __init nol2parity(char *s)
1635 {
1636 	l2parity = 0;
1637 	return 1;
1638 }
1639 __setup("nol2par", nol2parity);
1640 
1641 /*
1642  * Some MIPS CPUs can enable/disable cache parity detection, but they
1643  * do it in different ways.
1644  */
1645 static inline void parity_protection_init(void)
1646 {
1647 #define ERRCTL_PE	0x80000000
1648 #define ERRCTL_L2P	0x00800000
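/*
 * ErrCtl.PE (bit 31) enables parity/ECC checking; ErrCtl.L2P (bit 23)
 * controls L2 parity on the cores handled below.
 */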
1649 
1650 	if (mips_cm_revision() >= CM_REV_CM3) {
1651 		ulong gcr_ectl, cp0_ectl;
1652 
1653 		/*
1654 		 * With CM3 systems we need to ensure that the L1 & L2
1655 		 * parity enables are set to the same value, since this
1656 		 * is presumed by the hardware engineers.
1657 		 *
1658 		 * If the user disabled either of L1 or L2 ECC checking,
1659 		 * disable both.
1660 		 */
1661 		l1parity &= l2parity;
1662 		l2parity &= l1parity;
1663 
1664 		/* Probe L1 ECC support */
1665 		cp0_ectl = read_c0_ecc();
1666 		write_c0_ecc(cp0_ectl | ERRCTL_PE);
1667 		back_to_back_c0_hazard();
1668 		cp0_ectl = read_c0_ecc();
1669 
1670 		/* Probe L2 ECC support */
1671 		gcr_ectl = read_gcr_err_control();
1672 
1673 		if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT) ||
1674 		    !(cp0_ectl & ERRCTL_PE)) {
1675 			/*
1676 			 * One of L1 or L2 ECC checking isn't supported,
1677 			 * so we cannot enable either.
1678 			 */
1679 			l1parity = l2parity = 0;
1680 		}
1681 
1682 		/* Configure L1 ECC checking */
1683 		if (l1parity)
1684 			cp0_ectl |= ERRCTL_PE;
1685 		else
1686 			cp0_ectl &= ~ERRCTL_PE;
1687 		write_c0_ecc(cp0_ectl);
1688 		back_to_back_c0_hazard();
1689 		WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity);
1690 
1691 		/* Configure L2 ECC checking */
1692 		if (l2parity)
1693 			gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN;
1694 		else
1695 			gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN;
1696 		write_gcr_err_control(gcr_ectl);
1697 		gcr_ectl = read_gcr_err_control();
1698 		gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN;
1699 		WARN_ON(!!gcr_ectl != l2parity);
1700 
1701 		pr_info("Cache parity protection %sabled\n",
1702 			l1parity ? "en" : "dis");
1703 		return;
1704 	}
1705 
1706 	switch (current_cpu_type()) {
1707 	case CPU_24K:
1708 	case CPU_34K:
1709 	case CPU_74K:
1710 	case CPU_1004K:
1711 	case CPU_1074K:
1712 	case CPU_INTERAPTIV:
1713 	case CPU_PROAPTIV:
1714 	case CPU_P5600:
1715 	case CPU_QEMU_GENERIC:
1716 	case CPU_P6600:
1717 		{
1718 			unsigned long errctl;
1719 			unsigned int l1parity_present, l2parity_present;
1720 
1721 			errctl = read_c0_ecc();
1722 			errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
1723 
1724 			/* probe L1 parity support */
1725 			write_c0_ecc(errctl | ERRCTL_PE);
1726 			back_to_back_c0_hazard();
1727 			l1parity_present = (read_c0_ecc() & ERRCTL_PE);
1728 
1729 			/* probe L2 parity support */
1730 			write_c0_ecc(errctl|ERRCTL_L2P);
1731 			back_to_back_c0_hazard();
1732 			l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
1733 
1734 			if (l1parity_present && l2parity_present) {
1735 				if (l1parity)
1736 					errctl |= ERRCTL_PE;
1737 				if (l1parity ^ l2parity)
1738 					errctl |= ERRCTL_L2P;
1739 			} else if (l1parity_present) {
1740 				if (l1parity)
1741 					errctl |= ERRCTL_PE;
1742 			} else if (l2parity_present) {
1743 				if (l2parity)
1744 					errctl |= ERRCTL_L2P;
1745 			} else {
1746 				/* No parity available */
1747 			}
1748 
1749 			printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
1750 
1751 			write_c0_ecc(errctl);
1752 			back_to_back_c0_hazard();
1753 			errctl = read_c0_ecc();
1754 			printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
1755 
1756 			if (l1parity_present)
1757 				printk(KERN_INFO "Cache parity protection %sabled\n",
1758 				       (errctl & ERRCTL_PE) ? "en" : "dis");
1759 
1760 			if (l2parity_present) {
1761 				if (l1parity_present && l1parity)
1762 					errctl ^= ERRCTL_L2P;
1763 				printk(KERN_INFO "L2 cache parity protection %sabled\n",
1764 				       (errctl & ERRCTL_L2P) ? "en" : "dis");
1765 			}
1766 		}
1767 		break;
1768 
1769 	case CPU_5KC:
1770 	case CPU_5KE:
1771 	case CPU_LOONGSON1:
1772 		write_c0_ecc(0x80000000);
1773 		back_to_back_c0_hazard();
1774 		/* Set the PE bit (bit 31) in the c0_errctl register. */
1775 		printk(KERN_INFO "Cache parity protection %sabled\n",
1776 		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
1777 		break;
1778 	case CPU_20KC:
1779 	case CPU_25KF:
1780 		/* Clear the DE bit (bit 16) in the c0_status register. */
1781 		printk(KERN_INFO "Enable cache parity protection for "
1782 		       "MIPS 20KC/25KF CPUs.\n");
1783 		clear_c0_status(ST0_DE);
1784 		break;
1785 	default:
1786 		break;
1787 	}
1788 }
1789 
1790 asmlinkage void cache_parity_error(void)
1791 {
1792 	const int field = 2 * sizeof(unsigned long);
1793 	unsigned int reg_val;
1794 
1795 	/* For the moment, report the problem and hang. */
1796 	printk("Cache error exception:\n");
1797 	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
1798 	reg_val = read_c0_cacheerr();
1799 	printk("c0_cacheerr == %08x\n", reg_val);
1800 
1801 	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1802 	       reg_val & (1<<30) ? "secondary" : "primary",
1803 	       reg_val & (1<<31) ? "data" : "insn");
1804 	if ((cpu_has_mips_r2_r6) &&
1805 	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
1806 		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
1807 			reg_val & (1<<29) ? "ED " : "",
1808 			reg_val & (1<<28) ? "ET " : "",
1809 			reg_val & (1<<27) ? "ES " : "",
1810 			reg_val & (1<<26) ? "EE " : "",
1811 			reg_val & (1<<25) ? "EB " : "",
1812 			reg_val & (1<<24) ? "EI " : "",
1813 			reg_val & (1<<23) ? "E1 " : "",
1814 			reg_val & (1<<22) ? "E0 " : "");
1815 	} else {
1816 		pr_err("Error bits: %s%s%s%s%s%s%s\n",
1817 			reg_val & (1<<29) ? "ED " : "",
1818 			reg_val & (1<<28) ? "ET " : "",
1819 			reg_val & (1<<26) ? "EE " : "",
1820 			reg_val & (1<<25) ? "EB " : "",
1821 			reg_val & (1<<24) ? "EI " : "",
1822 			reg_val & (1<<23) ? "E1 " : "",
1823 			reg_val & (1<<22) ? "E0 " : "");
1824 	}
1825 	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
1826 
1827 #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
1828 	if (reg_val & (1<<22))
1829 		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());
1830 
1831 	if (reg_val & (1<<23))
1832 		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
1833 #endif
1834 
1835 	panic("Can't handle the cache error!");
1836 }
1837 
1838 asmlinkage void do_ftlb(void)
1839 {
1840 	const int field = 2 * sizeof(unsigned long);
1841 	unsigned int reg_val;
1842 
1843 	/* For the moment, report the problem and hang. */
1844 	if ((cpu_has_mips_r2_r6) &&
1845 	    (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
1846 	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
1847 		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
1848 		       read_c0_ecc());
1849 		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
1850 		reg_val = read_c0_cacheerr();
1851 		pr_err("c0_cacheerr == %08x\n", reg_val);
1852 
1853 		if ((reg_val & 0xc0000000) == 0xc0000000) {
1854 			pr_err("Decoded c0_cacheerr: FTLB parity error\n");
1855 		} else {
1856 			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1857 			       reg_val & (1<<30) ? "secondary" : "primary",
1858 			       reg_val & (1<<31) ? "data" : "insn");
1859 		}
1860 	} else {
1861 		pr_err("FTLB error exception\n");
1862 	}
1863 	/* Just print the cacheerr bits for now */
1864 	cache_parity_error();
1865 }
1866 
1867 /*
1868  * SDBBP EJTAG debug exception handler.
1869  * We skip the SDBBP instruction and return to the one following it.
1870  */
1871 void ejtag_exception_handler(struct pt_regs *regs)
1872 {
1873 	const int field = 2 * sizeof(unsigned long);
1874 	unsigned long depc, old_epc, old_ra;
1875 	unsigned int debug;
1876 
1877 	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
1878 	depc = read_c0_depc();
1879 	debug = read_c0_debug();
1880 	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
1881 	if (debug & 0x80000000) {
1882 		/*
1883 		 * In branch delay slot.
1884 		 * We cheat a little bit here and use EPC to calculate the
1885 		 * debug return address (DEPC). EPC is restored after the
1886 		 * calculation.
1887 		 */
1888 		old_epc = regs->cp0_epc;
1889 		old_ra = regs->regs[31];
1890 		regs->cp0_epc = depc;
1891 		compute_return_epc(regs);
1892 		depc = regs->cp0_epc;
1893 		regs->cp0_epc = old_epc;
1894 		regs->regs[31] = old_ra;
1895 	} else
1896 		depc += 4;
1897 	write_c0_depc(depc);
1898 
1899 #if 0
1900 	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
1901 	write_c0_debug(debug | 0x100);
1902 #endif
1903 }
1904 
1905 /*
1906  * NMI exception handler.
1907  * No lock; only written during early bootup by CPU 0.
1908  */
1909 static RAW_NOTIFIER_HEAD(nmi_chain);
1910 
1911 int register_nmi_notifier(struct notifier_block *nb)
1912 {
1913 	return raw_notifier_chain_register(&nmi_chain, nb);
1914 }
1915 
1916 void __noreturn nmi_exception_handler(struct pt_regs *regs)
1917 {
1918 	char str[100];
1919 
1920 	nmi_enter();
1921 	raw_notifier_call_chain(&nmi_chain, 0, regs);
1922 	bust_spinlocks(1);
1923 	snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
1924 		 smp_processor_id(), regs->cp0_epc);
1925 	regs->cp0_epc = read_c0_errorepc();
1926 	die(str, regs);
1927 	nmi_exit();
1928 }
1929 
1930 #define VECTORSPACING 0x100	/* for EI/VI mode */
1931 
1932 unsigned long ebase;
1933 EXPORT_SYMBOL_GPL(ebase);
1934 unsigned long exception_handlers[32];
1935 unsigned long vi_handlers[64];
1936 
1937 void __init *set_except_vector(int n, void *addr)
1938 {
1939 	unsigned long handler = (unsigned long) addr;
1940 	unsigned long old_handler;
1941 
1942 #ifdef CONFIG_CPU_MICROMIPS
1943 	/*
1944 	 * Only the TLB handlers are cache aligned with an even
1945 	 * address. All other handlers are on an odd address and
1946 	 * require no modification. Otherwise, MIPS32 mode will
1947 	 * be entered when handling any TLB exceptions. That
1948 	 * would be bad...since we must stay in microMIPS mode.
1949 	 */
1950 	if (!(handler & 0x1))
1951 		handler |= 1;
1952 #endif
1953 	old_handler = xchg(&exception_handlers[n], handler);
1954 
1955 	if (n == 0 && cpu_has_divec) {
1956 #ifdef CONFIG_CPU_MICROMIPS
1957 		unsigned long jump_mask = ~((1 << 27) - 1);
1958 #else
1959 		unsigned long jump_mask = ~((1 << 28) - 1);
1960 #endif
1961 		u32 *buf = (u32 *)(ebase + 0x200);
1962 		unsigned int k0 = 26;
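		/*
		 * A direct "j" can only reach targets within the same
		 * 256MB (128MB for microMIPS) region, so fall back to
		 * loading the handler address into k0 and using "jr"
		 * when the handler lies outside it.
		 */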
1963 		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
1964 			uasm_i_j(&buf, handler & ~jump_mask);
1965 			uasm_i_nop(&buf);
1966 		} else {
1967 			UASM_i_LA(&buf, k0, handler);
1968 			uasm_i_jr(&buf, k0);
1969 			uasm_i_nop(&buf);
1970 		}
1971 		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
1972 	}
1973 	return (void *)old_handler;
1974 }
1975 
1976 static void do_default_vi(void)
1977 {
1978 	show_regs(get_irq_regs());
1979 	panic("Caught unexpected vectored interrupt.");
1980 }
1981 
1982 static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1983 {
1984 	unsigned long handler;
1985 	unsigned long old_handler = vi_handlers[n];
1986 	int srssets = current_cpu_data.srsets;
1987 	u16 *h;
1988 	unsigned char *b;
1989 
1990 	BUG_ON(!cpu_has_veic && !cpu_has_vint);
1991 
1992 	if (addr == NULL) {
1993 		handler = (unsigned long) do_default_vi;
1994 		srs = 0;
1995 	} else
1996 		handler = (unsigned long) addr;
1997 	vi_handlers[n] = handler;
1998 
1999 	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
2000 
2001 	if (srs >= srssets)
2002 		panic("Shadow register set %d not supported", srs);
2003 
2004 	if (cpu_has_veic) {
2005 		if (board_bind_eic_interrupt)
2006 			board_bind_eic_interrupt(n, srs);
2007 	} else if (cpu_has_vint) {
2008 		/* SRSMap is only defined if shadow sets are implemented */
2009 		if (srssets > 1)
			change_c0_srsmap(0xf << n*4, srs << n*4);	/* 4-bit SSV field per vector */
2011 	}
2012 
2013 	if (srs == 0) {
2014 		/*
2015 		 * If no shadow set is selected then use the default handler
2016 		 * that does normal register saving and standard interrupt exit
2017 		 */
2018 		extern char except_vec_vi, except_vec_vi_lui;
2019 		extern char except_vec_vi_ori, except_vec_vi_end;
2020 		extern char rollback_except_vec_vi;
2021 		char *vec_start = using_rollback_handler() ?
2022 			&rollback_except_vec_vi : &except_vec_vi;
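		/*
		 * Byte offsets of the lui/ori immediates patched below; the
		 * +2 accounts for the 16-bit immediate sitting in the second
		 * halfword of the instruction in memory.
		 */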
2023 #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
2024 		const int lui_offset = &except_vec_vi_lui - vec_start + 2;
2025 		const int ori_offset = &except_vec_vi_ori - vec_start + 2;
2026 #else
2027 		const int lui_offset = &except_vec_vi_lui - vec_start;
2028 		const int ori_offset = &except_vec_vi_ori - vec_start;
2029 #endif
2030 		const int handler_len = &except_vec_vi_end - vec_start;
2031 
2032 		if (handler_len > VECTORSPACING) {
2033 			/*
			 * Sigh... panicking won't help as the console
2035 			 * is probably not configured :(
2036 			 */
2037 			panic("VECTORSPACING too small");
2038 		}
2039 
2040 		set_handler(((unsigned long)b - ebase), vec_start,
2041 #ifdef CONFIG_CPU_MICROMIPS
2042 				(handler_len - 1));
2043 #else
2044 				handler_len);
2045 #endif
		/* patch the handler's upper 16 bits into the lui immediate */
		h = (u16 *)(b + lui_offset);
		*h = (handler >> 16) & 0xffff;
		/* ...and its lower 16 bits into the ori immediate */
		h = (u16 *)(b + ori_offset);
		*h = (handler & 0xffff);
2050 		local_flush_icache_range((unsigned long)b,
2051 					 (unsigned long)(b+handler_len));
2052 	}
2053 	else {
2054 		/*
2055 		 * In other cases jump directly to the interrupt handler. It
2056 		 * is the handler's responsibility to save registers if required
2057 		 * (eg hi/lo) and return from the exception using "eret".
		 * (e.g. hi/lo) and return from the exception using "eret".
2059 		u32 insn;
2060 
2061 		h = (u16 *)b;
2062 		/* j handler */
		/* encode "j handler": opcode plus the target's in-region index */
2064 		insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
2065 #else
2066 		insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
2067 #endif
2068 		h[0] = (insn >> 16) & 0xffff;
2069 		h[1] = insn & 0xffff;
2070 		h[2] = 0;
2071 		h[3] = 0;
2072 		local_flush_icache_range((unsigned long)b,
2073 					 (unsigned long)(b+8));
2074 	}
2075 
2076 	return (void *)old_handler;
2077 }
2078 
2079 void *set_vi_handler(int n, vi_handler_t addr)
2080 {
2081 	return set_vi_srs_handler(n, addr, 0);
2082 }
2083 
2084 extern void tlb_init(void);
2085 
2086 /*
2087  * Timer interrupt
2088  */
2089 int cp0_compare_irq;
2090 EXPORT_SYMBOL_GPL(cp0_compare_irq);
2091 int cp0_compare_irq_shift;
2092 
2093 /*
2094  * Performance counter IRQ or -1 if shared with timer
2095  */
2096 int cp0_perfcount_irq;
2097 EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
2098 
2099 /*
2100  * Fast debug channel IRQ or -1 if not present
2101  */
2102 int cp0_fdc_irq;
2103 EXPORT_SYMBOL_GPL(cp0_fdc_irq);
2104 
2105 static int noulri;
2106 
2107 static int __init ulri_disable(char *s)
2108 {
2109 	pr_info("Disabling ulri\n");
2110 	noulri = 1;
2111 
2112 	return 1;
2113 }
2114 __setup("noulri", ulri_disable);
2115 
2116 /* configure STATUS register */
2117 static void configure_status(void)
2118 {
2119 	/*
2120 	 * Disable coprocessors and select 32-bit or 64-bit addressing
2121 	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
2122 	 * flag that some firmware may have left set and the TS bit (for
2123 	 * IP27).  Set XX for ISA IV code to work.
2124 	 */
2125 	unsigned int status_set = ST0_CU0;
2126 #ifdef CONFIG_64BIT
2127 	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
2128 #endif
2129 	if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
2130 		status_set |= ST0_XX;
2131 	if (cpu_has_dsp)
2132 		status_set |= ST0_MX;
2133 
2134 	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
2135 			 status_set);
2136 }
2137 
2138 unsigned int hwrena;
2139 EXPORT_SYMBOL_GPL(hwrena);
2140 
/* configure HWRENA register, which gates user-mode RDHWR access */
2142 static void configure_hwrena(void)
2143 {
2144 	hwrena = cpu_hwrena_impl_bits;
2145 
2146 	if (cpu_has_mips_r2_r6)
2147 		hwrena |= MIPS_HWRENA_CPUNUM |
2148 			  MIPS_HWRENA_SYNCISTEP |
2149 			  MIPS_HWRENA_CC |
2150 			  MIPS_HWRENA_CCRES;
2151 
2152 	if (!noulri && cpu_has_userlocal)
2153 		hwrena |= MIPS_HWRENA_ULR;
2154 
2155 	if (hwrena)
2156 		write_c0_hwrena(hwrena);
2157 }
2158 
2159 static void configure_exception_vector(void)
2160 {
2161 	if (cpu_has_veic || cpu_has_vint) {
2162 		unsigned long sr = set_c0_status(ST0_BEV);
2163 		/* If available, use WG to set top bits of EBASE */
2164 		if (cpu_has_ebase_wg) {
2165 #ifdef CONFIG_64BIT
2166 			write_c0_ebase_64(ebase | MIPS_EBASE_WG);
2167 #else
2168 			write_c0_ebase(ebase | MIPS_EBASE_WG);
2169 #endif
2170 		}
2171 		write_c0_ebase(ebase);
2172 		write_c0_status(sr);
		/* A non-zero vector spacing (IntCtl.VS, bits 9:5) enables EI/VI mode */
2174 		change_c0_intctl(0x3e0, VECTORSPACING);
2175 	}
2176 	if (cpu_has_divec) {
2177 		if (cpu_has_mipsmt) {
2178 			unsigned int vpflags = dvpe();
2179 			set_c0_cause(CAUSEF_IV);
2180 			evpe(vpflags);
2181 		} else
2182 			set_c0_cause(CAUSEF_IV);
2183 	}
2184 }
2185 
2186 void per_cpu_trap_init(bool is_boot_cpu)
2187 {
2188 	unsigned int cpu = smp_processor_id();
2189 
2190 	configure_status();
2191 	configure_hwrena();
2192 
2193 	configure_exception_vector();
2194 
2195 	/*
2196 	 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
2197 	 *
2198 	 *  o read IntCtl.IPTI to determine the timer interrupt
2199 	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
2200 	 *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
2201 	 */
2202 	if (cpu_has_mips_r2_r6) {
2203 		/*
		 * We shouldn't trust that a secondary core has a sane EBASE
		 * register, so use the one calculated by the boot CPU.
2206 		 */
2207 		if (!is_boot_cpu) {
2208 			/* If available, use WG to set top bits of EBASE */
2209 			if (cpu_has_ebase_wg) {
2210 #ifdef CONFIG_64BIT
2211 				write_c0_ebase_64(ebase | MIPS_EBASE_WG);
2212 #else
2213 				write_c0_ebase(ebase | MIPS_EBASE_WG);
2214 #endif
2215 			}
2216 			write_c0_ebase(ebase);
2217 		}
2218 
2219 		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
2220 		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
2221 		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
2222 		cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
2223 		if (!cp0_fdc_irq)
2224 			cp0_fdc_irq = -1;
2225 
2226 	} else {
2227 		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
2228 		cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
2229 		cp0_perfcount_irq = -1;
2230 		cp0_fdc_irq = -1;
2231 	}
2232 
2233 	if (!cpu_data[cpu].asid_cache)
2234 		cpu_data[cpu].asid_cache = asid_first_version(cpu);
2235 
2236 	mmgrab(&init_mm);
2237 	current->active_mm = &init_mm;
2238 	BUG_ON(current->mm);
2239 	enter_lazy_tlb(&init_mm, current);
2240 
2241 	/* Boot CPU's cache setup in setup_arch(). */
2242 	if (!is_boot_cpu)
2243 		cpu_cache_init();
2244 	tlb_init();
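	/* set up the per-CPU state (e.g. the PGD pointer) used by the TLB handlers */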
2245 	TLBMISS_HANDLER_SETUP();
2246 }
2247 
2248 /* Install CPU exception handler */
2249 void set_handler(unsigned long offset, void *addr, unsigned long size)
2250 {
2251 #ifdef CONFIG_CPU_MICROMIPS
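	/*
	 * On microMIPS the handler address has the ISA mode bit (bit 0)
	 * set; the code itself starts one byte lower.
	 */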
2252 	memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
2253 #else
2254 	memcpy((void *)(ebase + offset), addr, size);
2255 #endif
2256 	local_flush_icache_range(ebase + offset, ebase + offset + size);
2257 }
2258 
2259 static const char panic_null_cerr[] =
2260 	"Trying to set NULL cache error exception handler\n";
2261 
2262 /*
2263  * Install uncached CPU exception handler.
2264  * This is suitable only for the cache error exception which is the only
2265  * exception handler that is being run uncached.
2266  */
2267 void set_uncached_handler(unsigned long offset, void *addr,
2268 	unsigned long size)
2269 {
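	/* CKSEG1 gives an uncached alias of the (KSeg0-based) ebase */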
2270 	unsigned long uncached_ebase = CKSEG1ADDR(ebase);
2271 
2272 	if (!addr)
2273 		panic(panic_null_cerr);
2274 
2275 	memcpy((void *)(uncached_ebase + offset), addr, size);
2276 }
2277 
2278 static int __initdata rdhwr_noopt;
2279 static int __init set_rdhwr_noopt(char *str)
2280 {
2281 	rdhwr_noopt = 1;
2282 	return 1;
2283 }
2284 
2285 __setup("rdhwr_noopt", set_rdhwr_noopt);
2286 
2287 void __init trap_init(void)
2288 {
2289 	extern char except_vec3_generic;
2290 	extern char except_vec4;
2291 	extern char except_vec3_r4000;
2292 	unsigned long i;
2293 
2294 	check_wait();
2295 
2296 	if (cpu_has_veic || cpu_has_vint) {
2297 		unsigned long size = 0x200 + VECTORSPACING*64;
2298 		phys_addr_t ebase_pa;
2299 
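		/*
		 * 1 << fls(size) rounds the alignment up to the next power
		 * of two no smaller than size, keeping the vector area
		 * suitably aligned for use as an exception base.
		 */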
2300 		ebase = (unsigned long)
2301 			__alloc_bootmem(size, 1 << fls(size), 0);
2302 
2303 		/*
2304 		 * Try to ensure ebase resides in KSeg0 if possible.
2305 		 *
2306 		 * It shouldn't generally be in XKPhys on MIPS64 to avoid
2307 		 * hitting a poorly defined exception base for Cache Errors.
2308 		 * The allocation is likely to be in the low 512MB of physical,
2309 		 * in which case we should be able to convert to KSeg0.
2310 		 *
2311 		 * EVA is special though as it allows segments to be rearranged
2312 		 * and to become uncached during cache error handling.
2313 		 */
2314 		ebase_pa = __pa(ebase);
2315 		if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
2316 			ebase = CKSEG0ADDR(ebase_pa);
2317 	} else {
2318 		ebase = CAC_BASE;
2319 
2320 		if (cpu_has_mips_r2_r6) {
2321 			if (cpu_has_ebase_wg) {
2322 #ifdef CONFIG_64BIT
2323 				ebase = (read_c0_ebase_64() & ~0xfff);
2324 #else
2325 				ebase = (read_c0_ebase() & ~0xfff);
2326 #endif
2327 			} else {
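				/* without WG, EBASE exposes only bits 29:12 */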
2328 				ebase += (read_c0_ebase() & 0x3ffff000);
2329 			}
2330 		}
2331 	}
2332 
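	/*
	 * Config3.ISAOnExc selects whether exceptions are taken in
	 * microMIPS or classic MIPS32 mode.
	 */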
2333 	if (cpu_has_mmips) {
2334 		unsigned int config3 = read_c0_config3();
2335 
2336 		if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
2337 			write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
2338 		else
2339 			write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
2340 	}
2341 
2342 	if (board_ebase_setup)
2343 		board_ebase_setup();
2344 	per_cpu_trap_init(true);
2345 
2346 	/*
2347 	 * Copy the generic exception handlers to their final destination.
2348 	 * This will be overridden later as suitable for a particular
2349 	 * configuration.
2350 	 */
2351 	set_handler(0x180, &except_vec3_generic, 0x80);
2352 
2353 	/*
2354 	 * Setup default vectors
2355 	 */
2356 	for (i = 0; i <= 31; i++)
2357 		set_except_vector(i, handle_reserved);
2358 
2359 	/*
	 * Copy the EJTAG debug exception vector handler code to its final
2361 	 * destination.
2362 	 */
2363 	if (cpu_has_ejtag && board_ejtag_handler_setup)
2364 		board_ejtag_handler_setup();
2365 
2366 	/*
2367 	 * Only some CPUs have the watch exceptions.
2368 	 */
2369 	if (cpu_has_watch)
2370 		set_except_vector(EXCCODE_WATCH, handle_watch);
2371 
2372 	/*
2373 	 * Initialise interrupt handlers
2374 	 */
2375 	if (cpu_has_veic || cpu_has_vint) {
2376 		int nvec = cpu_has_veic ? 64 : 8;
2377 		for (i = 0; i < nvec; i++)
2378 			set_vi_handler(i, NULL);
2379 	}
2380 	else if (cpu_has_divec)
2381 		set_handler(0x200, &except_vec4, 0x8);
2382 
2383 	/*
	 * Some CPUs can enable/disable cache parity detection, but they
	 * do it in different ways.
2386 	 */
2387 	parity_protection_init();
2388 
2389 	/*
2390 	 * The Data Bus Errors / Instruction Bus Errors are signaled
2391 	 * by external hardware.  Therefore these two exceptions
2392 	 * may have board specific handlers.
2393 	 */
2394 	if (board_be_init)
2395 		board_be_init();
2396 
2397 	set_except_vector(EXCCODE_INT, using_rollback_handler() ?
2398 					rollback_handle_int : handle_int);
2399 	set_except_vector(EXCCODE_MOD, handle_tlbm);
2400 	set_except_vector(EXCCODE_TLBL, handle_tlbl);
2401 	set_except_vector(EXCCODE_TLBS, handle_tlbs);
2402 
2403 	set_except_vector(EXCCODE_ADEL, handle_adel);
2404 	set_except_vector(EXCCODE_ADES, handle_ades);
2405 
2406 	set_except_vector(EXCCODE_IBE, handle_ibe);
2407 	set_except_vector(EXCCODE_DBE, handle_dbe);
2408 
2409 	set_except_vector(EXCCODE_SYS, handle_sys);
2410 	set_except_vector(EXCCODE_BP, handle_bp);
2411 
2412 	if (rdhwr_noopt)
2413 		set_except_vector(EXCCODE_RI, handle_ri);
2414 	else {
2415 		if (cpu_has_vtag_icache)
2416 			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
2417 		else if (current_cpu_type() == CPU_LOONGSON3)
2418 			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
2419 		else
2420 			set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
2421 	}
2422 
2423 	set_except_vector(EXCCODE_CPU, handle_cpu);
2424 	set_except_vector(EXCCODE_OV, handle_ov);
2425 	set_except_vector(EXCCODE_TR, handle_tr);
2426 	set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);
2427 
2428 	if (board_nmi_handler_setup)
2429 		board_nmi_handler_setup();
2430 
2431 	if (cpu_has_fpu && !cpu_has_nofpuex)
2432 		set_except_vector(EXCCODE_FPE, handle_fpe);
2433 
2434 	set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);
2435 
2436 	if (cpu_has_rixiex) {
2437 		set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
2438 		set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0);
2439 	}
2440 
2441 	set_except_vector(EXCCODE_MSADIS, handle_msa);
2442 	set_except_vector(EXCCODE_MDMX, handle_mdmx);
2443 
2444 	if (cpu_has_mcheck)
2445 		set_except_vector(EXCCODE_MCHECK, handle_mcheck);
2446 
2447 	if (cpu_has_mipsmt)
2448 		set_except_vector(EXCCODE_THREAD, handle_mt);
2449 
2450 	set_except_vector(EXCCODE_DSPDIS, handle_dsp);
2451 
2452 	if (board_cache_error_setup)
2453 		board_cache_error_setup();
2454 
2455 	if (cpu_has_vce)
		/* Special exception: R4[04]00 also uses the divec space. */
2457 		set_handler(0x180, &except_vec3_r4000, 0x100);
2458 	else if (cpu_has_4kex)
2459 		set_handler(0x180, &except_vec3_generic, 0x80);
2460 	else
2461 		set_handler(0x080, &except_vec3_generic, 0x80);
2462 
2463 	local_flush_icache_range(ebase, ebase + 0x400);
2464 
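	/* sort the __dbe_table so bus error fixups can be binary-searched */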
2465 	sort_extable(__start___dbe_table, __stop___dbe_table);
2466 
2467 	cu2_notifier(default_cu2_call, 0x80000000);	/* Run last  */
2468 }
2469 
2470 static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
2471 			    void *v)
2472 {
2473 	switch (cmd) {
2474 	case CPU_PM_ENTER_FAILED:
2475 	case CPU_PM_EXIT:
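		/* CP0 state may have been lost across power-down; reprogram it */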
2476 		configure_status();
2477 		configure_hwrena();
2478 		configure_exception_vector();
2479 
2480 		/* Restore register with CPU number for TLB handlers */
2481 		TLBMISS_HANDLER_RESTORE();
2482 
2483 		break;
2484 	}
2485 
2486 	return NOTIFY_OK;
2487 }
2488 
2489 static struct notifier_block trap_pm_notifier_block = {
2490 	.notifier_call = trap_pm_notifier,
2491 };
2492 
2493 static int __init trap_pm_init(void)
2494 {
2495 	return cpu_pm_register_notifier(&trap_pm_notifier_block);
2496 }
2497 arch_initcall(trap_pm_init);
2498