xref: /openbmc/linux/arch/mips/kernel/traps.c (revision cd4d09ec)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
 * Copyright (C) 1995, 1996 Paul M. Antoine
 * Copyright (C) 1998 Ulf Carlsson
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002, 2003, 2004, 2005, 2007  Maciej W. Rozycki
 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/cpu_pm.h>
#include <linux/kexec.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/kdb.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cop2.h>
#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/idle.h>
#include <asm/mips-r2-to-r6-emul.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/watch.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
#include <asm/uasm.h>

extern void check_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern u32 handle_tlbl[];
extern u32 handle_tlbs[];
extern u32 handle_tlbm[];
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_vivt(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_msa_fpe(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_ftlb(void);
extern asmlinkage void handle_msa(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
extern void tlb_do_page_fault_0(void);

void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
void (*board_ebase_setup)(void);
void (*board_cache_error_setup)(void);

static void show_raw_backtrace(unsigned long reg29)
{
	unsigned long *sp = (unsigned long *)(reg29 & ~3);
	unsigned long addr;

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif
	while (!kstack_end(sp)) {
		unsigned long __user *p =
			(unsigned long __user *)(unsigned long)sp++;
		if (__get_user(addr, p)) {
			printk(" (Bad stack address)");
			break;
		}
		if (__kernel_text_address(addr))
			print_ip_sym(addr);
	}
	printk("\n");
}

#ifdef CONFIG_KALLSYMS
int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
	raw_show_trace = 1;
	return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
#endif

static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
{
	unsigned long sp = regs->regs[29];
	unsigned long ra = regs->regs[31];
	unsigned long pc = regs->cp0_epc;

	if (!task)
		task = current;

	if (raw_show_trace || !__kernel_text_address(pc)) {
		show_raw_backtrace(sp);
		return;
	}
	printk("Call Trace:\n");
	do {
		print_ip_sym(pc);
		pc = unwind_stack(task, &sp, pc, &ra);
	} while (pc);
	printk("\n");
}

/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */
static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	long stackdata;
	int i;
	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];

	printk("Stack :");
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0))
			printk("\n	 ");
		if (i > 39) {
			printk(" ...");
			break;
		}

		if (__get_user(stackdata, sp++)) {
			printk(" (Bad stack address)");
			break;
		}

		printk(" %0*lx", field, stackdata);
		i++;
	}
	printk("\n");
	show_backtrace(task, regs);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	struct pt_regs regs;
	mm_segment_t old_fs = get_fs();
	if (sp) {
		regs.regs[29] = (unsigned long)sp;
		regs.regs[31] = 0;
		regs.cp0_epc = 0;
	} else {
		if (task && task != current) {
			regs.regs[29] = task->thread.reg29;
			regs.regs[31] = 0;
			regs.cp0_epc = task->thread.reg31;
#ifdef CONFIG_KGDB_KDB
		} else if (atomic_read(&kgdb_active) != -1 &&
			   kdb_current_regs) {
			memcpy(&regs, kdb_current_regs, sizeof(regs));
#endif /* CONFIG_KGDB_KDB */
		} else {
			prepare_frametrace(&regs);
		}
	}
	/*
	 * show_stack() deals exclusively with kernel mode, so be sure to access
	 * the stack in the kernel (not user) address space.
	 */
	set_fs(KERNEL_DS);
	show_stacktrace(task, &regs);
	set_fs(old_fs);
}

static void show_code(unsigned int __user *pc)
{
	long i;
	unsigned short __user *pc16 = NULL;

	printk("\nCode:");

	if ((unsigned long)pc & 1)
		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
	for (i = -3; i < 6; i++) {
		unsigned int insn;
		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
			printk(" (Bad address in epc)\n");
			break;
		}
		printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
	}
}

static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int cause = regs->cp0_cause;
	unsigned int exccode;
	int i;

	show_regs_print_info(KERN_DEFAULT);

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d   :", i);
		if (i == 0)
			printk(" %0*lx", field, 0UL);
		else if (i == 26 || i == 27)
			printk(" %*s", field, "");
		else
			printk(" %0*lx", field, regs->regs[i]);

		i++;
		if ((i % 4) == 0)
			printk("\n");
	}

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	printk("Acx    : %0*lx\n", field, regs->acx);
#endif
	printk("Hi    : %0*lx\n", field, regs->hi);
	printk("Lo    : %0*lx\n", field, regs->lo);

	/*
	 * Saved cp0 registers
	 */
	printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
	       (void *) regs->cp0_epc);
	printk("ra    : %0*lx %pS\n", field, regs->regs[31],
	       (void *) regs->regs[31]);

	printk("Status: %08x	", (uint32_t) regs->cp0_status);

	if (cpu_has_3kex) {
		if (regs->cp0_status & ST0_KUO)
			printk("KUo ");
		if (regs->cp0_status & ST0_IEO)
			printk("IEo ");
		if (regs->cp0_status & ST0_KUP)
			printk("KUp ");
		if (regs->cp0_status & ST0_IEP)
			printk("IEp ");
		if (regs->cp0_status & ST0_KUC)
			printk("KUc ");
		if (regs->cp0_status & ST0_IEC)
			printk("IEc ");
	} else if (cpu_has_4kex) {
		if (regs->cp0_status & ST0_KX)
			printk("KX ");
		if (regs->cp0_status & ST0_SX)
			printk("SX ");
		if (regs->cp0_status & ST0_UX)
			printk("UX ");
		switch (regs->cp0_status & ST0_KSU) {
		case KSU_USER:
			printk("USER ");
			break;
		case KSU_SUPERVISOR:
			printk("SUPERVISOR ");
			break;
		case KSU_KERNEL:
			printk("KERNEL ");
			break;
		default:
			printk("BAD_MODE ");
			break;
		}
		if (regs->cp0_status & ST0_ERL)
			printk("ERL ");
		if (regs->cp0_status & ST0_EXL)
			printk("EXL ");
		if (regs->cp0_status & ST0_IE)
			printk("IE ");
	}
	printk("\n");

	exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);

	if (1 <= exccode && exccode <= 5)
		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

	printk("PrId  : %08x (%s)\n", read_c0_prid(),
	       cpu_name_string());
}

/*
 * FIXME: really the generic show_regs should take a const pointer argument.
 */
void show_regs(struct pt_regs *regs)
{
	__show_regs((struct pt_regs *)regs);
}

void show_registers(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	mm_segment_t old_fs = get_fs();

	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
	       current->comm, current->pid, current_thread_info(), current,
	       field, current_thread_info()->tp_value);
	if (cpu_has_userlocal) {
		unsigned long tls;

		tls = read_c0_userlocal();
		if (tls != current_thread_info()->tp_value)
			printk("*HwTLS: %0*lx\n", field, tls);
	}

	if (!user_mode(regs))
		/* Necessary for getting the correct stack content */
		set_fs(KERNEL_DS);
	show_stacktrace(current, regs);
	show_code((unsigned int __user *) regs->cp0_epc);
	printk("\n");
	set_fs(old_fs);
}

static DEFINE_RAW_SPINLOCK(die_lock);

void __noreturn die(const char *str, struct pt_regs *regs)
{
	static int die_counter;
	int sig = SIGSEGV;

	oops_enter();

	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
		       SIGSEGV) == NOTIFY_STOP)
		sig = 0;

	console_verbose();
	raw_spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		printk(KERN_EMERG "Fatal exception: panic in 5 seconds");
		ssleep(5);
		panic("Fatal exception");
	}

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	do_exit(sig);
}

extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];

__asm__(
"	.section	__dbe_table, \"a\"\n"
"	.previous			\n");

/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
	if (!e)
		e = search_module_dbetables(addr);
	return e;
}

asmlinkage void do_be(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	const struct exception_table_entry *fixup = NULL;
	int data = regs->cp0_cause & 4;
	int action = MIPS_BE_FATAL;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	/* XXX For now.	 Fixme, this searches the wrong table ...  */
	if (data && !user_mode(regs))
		fixup = search_dbe_tables(exception_epc(regs));

	if (fixup)
		action = MIPS_BE_FIXUP;

	if (board_be_handler)
		action = board_be_handler(regs, fixup != NULL);

	switch (action) {
	case MIPS_BE_DISCARD:
		goto out;
	case MIPS_BE_FIXUP:
		if (fixup) {
			regs->cp0_epc = fixup->nextinsn;
			goto out;
		}
		break;
	default:
		break;
	}

	/*
	 * Assume it would be too dangerous to continue ...
	 */
	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
	       data ? "Data" : "Instruction",
	       field, regs->cp0_epc, field, regs->regs[31]);
	if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
		       SIGBUS) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Oops", regs);
	force_sig(SIGBUS, current);

out:
	exception_exit(prev_state);
}

/*
 * ll/sc, rdhwr, sync emulation
 */

#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define SYNC   0x0000000f
#define RDHWR  0x0000003b
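
/*
 * For illustration, these masks decode the standard MIPS I/R-type
 * layout: opcode[31:26], base/rs[25:21], rt[20:16], rd[15:11],
 * offset[15:0] and funct[5:0].  E.g. "ll $9, 4($8)" assembles to
 * 0xc1090004: (insn & OPCODE) == LL, BASE extracts register 8, RT
 * register 9 and OFFSET the displacement 4.
 */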

/*  microMIPS definitions   */
#define MM_POOL32A_FUNC 0xfc00ffff
#define MM_RDHWR        0x00006b3c
#define MM_RS           0x001f0000
#define MM_RT           0x03e00000

/*
 * The ll_bit is cleared by r*_switch.S
 */

unsigned int ll_bit;
struct task_struct *ll_task;

static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long value, __user *vaddr;
	long offset;

	/*
	 * Analyse the ll instruction that just caused an RI exception
	 * and store the referenced address in vaddr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;
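	/*
	 * With a 32-bit long this shift pair is the usual 16-bit
	 * immediate sign extension: 0x0004 stays +4, while 0xfffc
	 * becomes -4 after the arithmetic right shift.
	 */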

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

	if ((unsigned long)vaddr & 3)
		return SIGBUS;
	if (get_user(value, vaddr))
		return SIGSEGV;

	preempt_disable();

	if (ll_task == NULL || ll_task == current) {
		ll_bit = 1;
	} else {
		ll_bit = 0;
	}
	ll_task = current;

	preempt_enable();

	regs->regs[(opcode & RT) >> 16] = value;

	return 0;
}

static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long __user *vaddr;
	unsigned long reg;
	long offset;

	/*
	 * Analyse the sc instruction that just caused an RI exception
	 * and store the referenced address in vaddr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
	reg = (opcode & RT) >> 16;

	if ((unsigned long)vaddr & 3)
		return SIGBUS;

	preempt_disable();

	if (ll_bit == 0 || ll_task != current) {
		regs->regs[reg] = 0;
		preempt_enable();
		return 0;
	}

	preempt_enable();

	if (put_user(regs->regs[reg], vaddr))
		return SIGSEGV;

	regs->regs[reg] = 1;

	return 0;
}

/*
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is,
 * both opcodes are supposed to result in coprocessor unusable exceptions
 * if executed on ll/sc-less processors.  That's the theory.  In practice
 * a few processors such as NEC's VR4100 throw reserved instruction
 * exceptions instead, so we're doing the emulation thing in both
 * exception handlers.
 */
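/*
 * The sequence being emulated is the classic atomic read-modify-write
 * loop, for example:
 *
 *	1:	ll	t0, (a0)
 *		addu	t0, t0, a1
 *		sc	t0, (a0)
 *		beqz	t0, 1b
 *
 * simulate_ll() records the "link" in ll_bit/ll_task; simulate_sc()
 * reports success (stores 1 in rt) only if the current task still
 * holds that link, and failure (0) otherwise.
 */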
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == LL) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return simulate_ll(regs, opcode);
	}
	if ((opcode & OPCODE) == SC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return simulate_sc(regs, opcode);
	}

	return -1;			/* Must be something else ... */
}

/*
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware.
 */
static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
{
	struct thread_info *ti = task_thread_info(current);

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
			1, regs, 0);
	switch (rd) {
	case 0:		/* CPU number */
		regs->regs[rt] = smp_processor_id();
		return 0;
	case 1:		/* SYNCI length */
		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
				     current_cpu_data.icache.linesz);
		return 0;
	case 2:		/* Read count register */
		regs->regs[rt] = read_c0_count();
		return 0;
	case 3:		/* Count register resolution */
		switch (current_cpu_type()) {
		case CPU_20KC:
		case CPU_25KF:
			regs->regs[rt] = 1;
			break;
		default:
			regs->regs[rt] = 2;
		}
		return 0;
	case 29:
		regs->regs[rt] = ti->tp_value;
		return 0;
	default:
		return -1;
	}
}
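
/*
 * For example, userland TLS access compiles to "rdhwr $3, $29"
 * (0x7c03e83b): SPEC3 | RDHWR with rd == 29 and rt == 3, so the
 * case 29 above hands back the thread pointer from tp_value.
 */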

static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
		int rd = (opcode & RD) >> 11;
		int rt = (opcode & RT) >> 16;

		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	/* Not ours.  */
	return -1;
}

static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode)
{
	if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
		int rd = (opcode & MM_RS) >> 16;
		int rt = (opcode & MM_RT) >> 21;
		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	/* Not ours.  */
	return -1;
}

static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return 0;
	}

	return -1;			/* Must be something else ... */
}

asmlinkage void do_ov(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	siginfo_t info;

	prev_state = exception_enter();
	die_if_kernel("Integer overflow", regs);

	info.si_code = FPE_INTOVF;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *) regs->cp0_epc;
	force_sig_info(SIGFPE, &info, current);
	exception_exit(prev_state);
}

int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
	struct siginfo si = { 0 };

	switch (sig) {
	case 0:
		return 0;

	case SIGFPE:
		si.si_addr = fault_addr;
		si.si_signo = sig;
		/*
		 * Inexact can happen together with Overflow or Underflow.
		 * Respect the mask to deliver the correct exception.
		 */
		fcr31 &= (fcr31 & FPU_CSR_ALL_E) <<
			 (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E));
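		/*
		 * In the FCSR the Enable bits sit in 11:7 and the Cause
		 * bits in 17:12, so the shift above is 5; each enable is
		 * lined up with its cause bit and only unmasked
		 * exceptions survive for the tests below.
		 */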
		if (fcr31 & FPU_CSR_INV_X)
			si.si_code = FPE_FLTINV;
		else if (fcr31 & FPU_CSR_DIV_X)
			si.si_code = FPE_FLTDIV;
		else if (fcr31 & FPU_CSR_OVF_X)
			si.si_code = FPE_FLTOVF;
		else if (fcr31 & FPU_CSR_UDF_X)
			si.si_code = FPE_FLTUND;
		else if (fcr31 & FPU_CSR_INE_X)
			si.si_code = FPE_FLTRES;
		else
			si.si_code = __SI_FAULT;
		force_sig_info(sig, &si, current);
		return 1;

	case SIGBUS:
		si.si_addr = fault_addr;
		si.si_signo = sig;
		si.si_code = BUS_ADRERR;
		force_sig_info(sig, &si, current);
		return 1;

	case SIGSEGV:
		si.si_addr = fault_addr;
		si.si_signo = sig;
		down_read(&current->mm->mmap_sem);
		if (find_vma(current->mm, (unsigned long)fault_addr))
			si.si_code = SEGV_ACCERR;
		else
			si.si_code = SEGV_MAPERR;
		up_read(&current->mm->mmap_sem);
		force_sig_info(sig, &si, current);
		return 1;

	default:
		force_sig(sig, current);
		return 1;
	}
}

static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
		       unsigned long old_epc, unsigned long old_ra)
{
	union mips_instruction inst = { .word = opcode };
	void __user *fault_addr;
	unsigned long fcr31;
	int sig;

	/* If it's obviously not an FP instruction, skip it */
	switch (inst.i_format.opcode) {
	case cop1_op:
	case cop1x_op:
	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
		break;

	default:
		return -1;
	}

	/*
	 * do_ri skipped over the instruction via compute_return_epc, undo
	 * that for the FPU emulator.
	 */
	regs->cp0_epc = old_epc;
	regs->regs[31] = old_ra;

	/* Save the FP context to struct thread_struct */
	lose_fpu(1);

	/* Run the emulator */
	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
				       &fault_addr);
	fcr31 = current->thread.fpu.fcr31;

	/*
	 * We can't allow the emulated instruction to leave any of
	 * the cause bits set in $fcr31.
	 */
	current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

	/* Restore the hardware register state */
	own_fpu(1);

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcr31);

	return 0;
}

/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
	enum ctx_state prev_state;
	void __user *fault_addr;
	int sig;

	prev_state = exception_enter();
	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
		       SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear FCSR.Cause before enabling interrupts */
	write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X);
	local_irq_enable();

	die_if_kernel("FP exception in kernel code", regs);

	if (fcr31 & FPU_CSR_UNI_X) {
		/*
		 * Unimplemented operation exception.  If we've got the full
		 * software emulator on-board, let's use it...
		 *
		 * Force FPU to dump state into task/thread context.  We're
		 * moving a lot of data here for what is probably a single
		 * instruction, but the alternative is to pre-decode the FP
		 * register operands before invoking the emulator, which seems
		 * a bit extreme for what should be an infrequent event.
		 */
		/* Ensure 'resume' does not overwrite the saved FP context again. */
		lose_fpu(1);

		/* Run the emulator */
		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);
		fcr31 = current->thread.fpu.fcr31;

		/*
		 * We can't allow the emulated instruction to leave any of
		 * the cause bits set in $fcr31.
		 */
		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

		/* Restore the hardware register state */
		own_fpu(1);	/* Using the FPU again.	 */
	} else {
		sig = SIGFPE;
		fault_addr = (void __user *) regs->cp0_epc;
	}

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcr31);

out:
	exception_exit(prev_state);
}

void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
	const char *str)
{
	siginfo_t info;
	char b[40];

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
			 SIGTRAP) == NOTIFY_STOP)
		return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

	if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
		       SIGTRAP) == NOTIFY_STOP)
		return;

	/*
	 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
	 * insns, even for trap and break codes that indicate arithmetic
	 * failures.  Weird ...
	 * But should we continue the brokenness???  --macro
	 */
	switch (code) {
	case BRK_OVERFLOW:
	case BRK_DIVZERO:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		if (code == BRK_DIVZERO)
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void __user *) regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		break;
	case BRK_BUG:
		die_if_kernel("Kernel bug detected", regs);
		force_sig(SIGTRAP, current);
		break;
	case BRK_MEMU:
		/*
		 * This breakpoint code is used by the FPU emulator to retake
		 * control of the CPU after executing the instruction from the
		 * delay slot of an emulated branch.
		 *
		 * Terminate if the exception was recognized as a delay slot
		 * return, otherwise handle as normal.
		 */
		if (do_dsemulret(regs))
			return;

		die_if_kernel("Math emu break/trap", regs);
		force_sig(SIGTRAP, current);
		break;
	default:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		force_sig(SIGTRAP, current);
	}
}

asmlinkage void do_bp(struct pt_regs *regs)
{
	unsigned long epc = msk_isa16_mode(exception_epc(regs));
	unsigned int opcode, bcode;
	enum ctx_state prev_state;
	mm_segment_t seg;

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		u16 instr[2];

		if (__get_user(instr[0], (u16 __user *)epc))
			goto out_sigsegv;

		if (!cpu_has_mmips) {
			/* MIPS16e mode */
			bcode = (instr[0] >> 5) & 0x3f;
		} else if (mm_insn_16bit(instr[0])) {
			/* 16-bit microMIPS BREAK */
			bcode = instr[0] & 0xf;
		} else {
			/* 32-bit microMIPS BREAK */
			if (__get_user(instr[1], (u16 __user *)(epc + 2)))
				goto out_sigsegv;
			opcode = (instr[0] << 16) | instr[1];
			bcode = (opcode >> 6) & ((1 << 20) - 1);
		}
	} else {
		if (__get_user(opcode, (unsigned int __user *)epc))
			goto out_sigsegv;
		bcode = (opcode >> 6) & ((1 << 20) - 1);
	}

	/*
	 * There is an ancient bug in MIPS assemblers: the break code
	 * starts at bit 16 instead of bit 6 in the opcode.  GAS is
	 * bug-compatible, but not always, grrr...
	 * We handle both cases with a simple heuristic.  --macro
	 */
	if (bcode >= (1 << 10))
		bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);
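	/*
	 * Example: "break 7" correctly assembled carries 7 in bits 15:6,
	 * so bcode is already 7 here.  A bug-compatible assembler puts
	 * it in bits 25:16 instead, yielding 7 << 10; the swap above
	 * recovers 7 in either case.
	 */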

	/*
	 * Notify the kprobe handlers, if the instruction is likely to
	 * pertain to them.
	 */
	switch (bcode) {
	case BRK_UPROBE:
		if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_UPROBE_XOL:
		if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_BP:
		if (notify_die(DIE_BREAK, "debug", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	default:
		break;
	}

	do_trap_or_bp(regs, bcode, "Break");

out:
	set_fs(seg);
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
	goto out;
}

asmlinkage void do_tr(struct pt_regs *regs)
{
	u32 opcode, tcode = 0;
	enum ctx_state prev_state;
	u16 instr[2];
	mm_segment_t seg;
	unsigned long epc = msk_isa16_mode(exception_epc(regs));

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(get_ds());

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
		    __get_user(instr[1], (u16 __user *)(epc + 2)))
			goto out_sigsegv;
		opcode = (instr[0] << 16) | instr[1];
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 12) & ((1 << 4) - 1);
	} else {
		if (__get_user(opcode, (u32 __user *)epc))
			goto out_sigsegv;
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 6) & ((1 << 10) - 1);
	}
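	/*
	 * E.g. "teq $4, $0, 7" (0x008001f4) is a register trap with
	 * major opcode SPEC0, so the code field in bits 15:6 gives
	 * tcode == 7, while "teqi" and friends carry no code field
	 * and leave tcode at 0.
	 */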

	do_trap_or_bp(regs, tcode, "Trap");

out:
	set_fs(seg);
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
	goto out;
}

asmlinkage void do_ri(struct pt_regs *regs)
{
	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
	unsigned long old_epc = regs->cp0_epc;
	unsigned long old31 = regs->regs[31];
	enum ctx_state prev_state;
	unsigned int opcode = 0;
	int status = -1;

	/*
	 * Avoid any kernel code. Just emulate the R2 instruction
	 * as quickly as possible.
	 */
	if (mipsr2_emulation && cpu_has_mips_r6 &&
	    likely(user_mode(regs)) &&
	    likely(get_user(opcode, epc) >= 0)) {
		unsigned long fcr31 = 0;

		status = mipsr2_decoder(regs, opcode, &fcr31);
		switch (status) {
		case 0:
		case SIGEMT:
			task_thread_info(current)->r2_emul_return = 1;
			return;
		case SIGILL:
			goto no_r2_instr;
		default:
			process_fpemu_return(status,
					     &current->thread.cp0_baduaddr,
					     fcr31);
			task_thread_info(current)->r2_emul_return = 1;
			return;
		}
	}

no_r2_instr:

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;

	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
		       SIGILL) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(compute_return_epc(regs) < 0))
		goto out;

	if (get_isa16_mode(regs->cp0_epc)) {
		unsigned short mmop[2] = { 0 };

		if (unlikely(get_user(mmop[0], epc) < 0))
			status = SIGSEGV;
		if (unlikely(get_user(mmop[1], epc) < 0))
			status = SIGSEGV;
		opcode = (mmop[0] << 16) | mmop[1];

		if (status < 0)
			status = simulate_rdhwr_mm(regs, opcode);
	} else {
		if (unlikely(get_user(opcode, epc) < 0))
			status = SIGSEGV;

		if (!cpu_has_llsc && status < 0)
			status = simulate_llsc(regs, opcode);

		if (status < 0)
			status = simulate_rdhwr_normal(regs, opcode);

		if (status < 0)
			status = simulate_sync(regs, opcode);

		if (status < 0)
			status = simulate_fp(regs, opcode, old_epc, old31);
	}

	if (status < 0)
		status = SIGILL;

	if (unlikely(status > 0)) {
		regs->cp0_epc = old_epc;		/* Undo skip-over.  */
		regs->regs[31] = old31;
		force_sig(status, current);
	}

out:
	exception_exit(prev_state);
}

/*
 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
 * emulated more than some threshold number of instructions, force migration to
 * a "CPU" that has FP support.
 */
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
	if (mt_fpemul_threshold > 0 &&
	     ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
		/*
		 * If there's no FPU present, or if the application has already
		 * restricted the allowed set to exclude any CPUs with FPUs,
		 * we'll skip the procedure.
		 */
		if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
			cpumask_t tmask;

			current->thread.user_cpus_allowed
				= current->cpus_allowed;
			cpumask_and(&tmask, &current->cpus_allowed,
				    &mt_fpu_cpumask);
			set_cpus_allowed_ptr(current, &tmask);
			set_thread_flag(TIF_FPUBOUND);
		}
	}
#endif /* CONFIG_MIPS_MT_FPAFF */
}

/*
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(cu2_chain);

int __ref register_cu2_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cu2_chain, nb);
}

int cu2_notifier_call_chain(unsigned long val, void *v)
{
	return raw_notifier_call_chain(&cu2_chain, val, v);
}
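
/*
 * Boards with a real coprocessor 2 (Cavium Octeon, for instance)
 * register their own notifier on this chain so a CU2 exception can
 * lazily restore the COP2 context instead of falling through to
 * default_cu2_call() below.
 */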

static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
	void *data)
{
	struct pt_regs *regs = data;

	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
			      "instruction", regs);
	force_sig(SIGILL, current);

	return NOTIFY_OK;
}

static int wait_on_fp_mode_switch(atomic_t *p)
{
	/*
	 * The FP mode for this task is currently being switched. That may
	 * involve modifications to the format of this task's FP context
	 * which make it unsafe to proceed with execution for the moment.
	 * Instead, schedule some other task.
	 */
	schedule();
	return 0;
}

static int enable_restore_fp_context(int msa)
{
	int err, was_fpu_owner, prior_msa;

	/*
	 * If an FP mode switch is currently underway, wait for it to
	 * complete before proceeding.
	 */
	wait_on_atomic_t(&current->mm->context.fp_mode_switching,
			 wait_on_fp_mode_switch, TASK_KILLABLE);

	if (!used_math()) {
		/* First time FP context user. */
		preempt_disable();
		err = init_fpu();
		if (msa && !err) {
			enable_msa();
			_init_msa_upper();
			set_thread_flag(TIF_USEDMSA);
			set_thread_flag(TIF_MSA_CTX_LIVE);
		}
		preempt_enable();
		if (!err)
			set_used_math();
		return err;
	}

	/*
	 * This task has formerly used the FP context.
	 *
	 * If this thread has no live MSA vector context then we can simply
	 * restore the scalar FP context. If it has live MSA vector context
	 * (that is, it has or may have used MSA since last performing a
	 * function call) then we'll need to restore the vector context. This
	 * applies even if we're currently only executing a scalar FP
	 * instruction. This is because if we were to later execute an MSA
	 * instruction then we'd either have to:
	 *
	 *  - Restore the vector context & clobber any registers modified by
	 *    scalar FP instructions between now & then.
	 *
	 * or
	 *
	 *  - Not restore the vector context & lose the most significant bits
	 *    of all vector registers.
	 *
	 * Neither of those options is acceptable. We cannot restore the least
	 * significant bits of the registers now & only restore the most
	 * significant bits later because the most significant bits of any
	 * vector registers whose aliased FP register is modified now will have
	 * been zeroed. We'd have no way to know that when restoring the vector
	 * context & thus may load an outdated value for the most significant
	 * bits of a vector register.
	 */
	if (!msa && !thread_msa_context_live())
		return own_fpu(1);

	/*
	 * This task is using or has previously used MSA. Thus we require
	 * that Status.FR == 1.
	 */
	preempt_disable();
	was_fpu_owner = is_fpu_owner();
	err = own_fpu_inatomic(0);
	if (err)
		goto out;

	enable_msa();
	write_msa_csr(current->thread.fpu.msacsr);
	set_thread_flag(TIF_USEDMSA);

	/*
	 * If this is the first time that the task is using MSA and it has
	 * previously used scalar FP in this time slice then we already have
	 * FP context which we shouldn't clobber. We do however need to clear
	 * the upper 64b of each vector register so that this task has no
	 * opportunity to see data left behind by another.
	 */
	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
	if (!prior_msa && was_fpu_owner) {
		_init_msa_upper();

		goto out;
	}

	if (!prior_msa) {
		/*
		 * Restore the least significant 64b of each vector register
		 * from the existing scalar FP context.
		 */
		_restore_fp(current);

		/*
		 * The task has not formerly used MSA, so clear the upper 64b
		 * of each vector register such that it cannot see data left
		 * behind by another task.
		 */
		_init_msa_upper();
	} else {
		/* We need to restore the vector context. */
		restore_msa(current);

		/* Restore the scalar FP control & status register */
		if (!was_fpu_owner)
			write_32bit_cp1_register(CP1_STATUS,
						 current->thread.fpu.fcr31);
	}

out:
	preempt_enable();

	return 0;
}

asmlinkage void do_cpu(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	unsigned int __user *epc;
	unsigned long old_epc, old31;
	void __user *fault_addr;
	unsigned int opcode;
	unsigned long fcr31;
	unsigned int cpid;
	int status, err;
	unsigned long __maybe_unused flags;
	int sig;

	prev_state = exception_enter();
	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

	if (cpid != 2)
		die_if_kernel("do_cpu invoked from kernel context!", regs);

	switch (cpid) {
	case 0:
		epc = (unsigned int __user *)exception_epc(regs);
		old_epc = regs->cp0_epc;
		old31 = regs->regs[31];
		opcode = 0;
		status = -1;

		if (unlikely(compute_return_epc(regs) < 0))
			break;

		if (get_isa16_mode(regs->cp0_epc)) {
			unsigned short mmop[2] = { 0 };

			if (unlikely(get_user(mmop[0], epc) < 0))
				status = SIGSEGV;
			if (unlikely(get_user(mmop[1], epc) < 0))
				status = SIGSEGV;
			opcode = (mmop[0] << 16) | mmop[1];

			if (status < 0)
				status = simulate_rdhwr_mm(regs, opcode);
		} else {
			if (unlikely(get_user(opcode, epc) < 0))
				status = SIGSEGV;

			if (!cpu_has_llsc && status < 0)
				status = simulate_llsc(regs, opcode);

			if (status < 0)
				status = simulate_rdhwr_normal(regs, opcode);
		}

		if (status < 0)
			status = SIGILL;

		if (unlikely(status > 0)) {
			regs->cp0_epc = old_epc;	/* Undo skip-over.  */
			regs->regs[31] = old31;
			force_sig(status, current);
		}

		break;

	case 3:
		/*
		 * The COP3 opcode space and consequently the CP0.Status.CU3
		 * bit and the CP0.Cause.CE=3 encoding have been removed as
		 * of the MIPS III ISA.  From the MIPS IV and MIPS32r2 ISAs
		 * up the space has been reused for COP1X instructions, that
		 * are enabled by the CP0.Status.CU1 bit and consequently
		 * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable
		 * exceptions.  Some FPU-less processors that implement one
		 * of these ISAs however use this code erroneously for COP1X
		 * instructions.  Therefore we redirect this trap to the FP
		 * emulator too.
		 */
		if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
			force_sig(SIGILL, current);
			break;
		}
		/* Fall through.  */

	case 1:
		err = enable_restore_fp_context(0);

		if (raw_cpu_has_fpu && !err)
			break;

		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
					       &fault_addr);
		fcr31 = current->thread.fpu.fcr31;

		/*
		 * We can't allow the emulated instruction to leave
		 * any of the cause bits set in $fcr31.
		 */
		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

		/* Send a signal if required.  */
		if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
			mt_ase_fp_affinity();

		break;

	case 2:
		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
		break;
	}

	exception_exit(prev_state);
}

asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
		       current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear MSACSR.Cause before enabling interrupts */
	write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
	local_irq_enable();

	die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
	force_sig(SIGFPE, current);
out:
	exception_exit(prev_state);
}

asmlinkage void do_msa(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	int err;

	prev_state = exception_enter();

	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
		force_sig(SIGILL, current);
		goto out;
	}

	die_if_kernel("do_msa invoked from kernel context!", regs);

	err = enable_restore_fp_context(1);
	if (err)
		force_sig(SIGILL, current);
out:
	exception_exit(prev_state);
}

asmlinkage void do_mdmx(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	force_sig(SIGILL, current);
	exception_exit(prev_state);
}

/*
 * Called with interrupts disabled.
 */
asmlinkage void do_watch(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	u32 cause;

	prev_state = exception_enter();
	/*
	 * Clear the WP bit (bit 22) of the cause register so we don't
	 * loop forever.
	 */
	cause = read_c0_cause();
	cause &= ~(1 << 22);
	write_c0_cause(cause);

	/*
	 * If the current thread has the watch registers loaded, save
	 * their values and send SIGTRAP.  Otherwise another thread
	 * left the registers set, clear them and continue.
	 */
	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
		mips_read_watch_registers();
		local_irq_enable();
		force_sig(SIGTRAP, current);
	} else {
		mips_clear_watch_registers();
		local_irq_enable();
	}
	exception_exit(prev_state);
}

asmlinkage void do_mcheck(struct pt_regs *regs)
{
	int multi_match = regs->cp0_status & ST0_TS;
	enum ctx_state prev_state;
	mm_segment_t old_fs = get_fs();

	prev_state = exception_enter();
	show_regs(regs);

	if (multi_match) {
		dump_tlb_regs();
		pr_info("\n");
		dump_tlb_all();
	}

	if (!user_mode(regs))
		set_fs(KERNEL_DS);

	show_code((unsigned int __user *) regs->cp0_epc);

	set_fs(old_fs);

	/*
	 * Some chips may have other causes of machine check (e.g. SB1
	 * graduation timer)
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (multi_match) ? "" : "not ");
}

asmlinkage void do_mt(struct pt_regs *regs)
{
	int subcode;

	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
			>> VPECONTROL_EXCPT_SHIFT;
	switch (subcode) {
	case 0:
		printk(KERN_DEBUG "Thread Underflow\n");
		break;
	case 1:
		printk(KERN_DEBUG "Thread Overflow\n");
		break;
	case 2:
		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
		break;
	case 3:
		printk(KERN_DEBUG "Gating Storage Exception\n");
		break;
	case 4:
		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
		break;
	case 5:
		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
		break;
	default:
		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
			subcode);
		break;
	}
	die_if_kernel("MIPS MT Thread exception in kernel", regs);

	force_sig(SIGILL, current);
}


asmlinkage void do_dsp(struct pt_regs *regs)
{
	if (cpu_has_dsp)
		panic("Unexpected DSP exception");

	force_sig(SIGILL, current);
}

asmlinkage void do_reserved(struct pt_regs *regs)
{
	/*
	 * Game over - there is no way to handle this if it ever occurs.
	 * Most probably caused by a new, unknown CPU type or by another
	 * deadly hardware/software error.
	 */
	show_regs(regs);
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
}

static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
	l1parity = 0;
	return 1;
}
__setup("nol1par", nol1parity);
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
	l2parity = 0;
	return 1;
}
__setup("nol2par", nol2parity);

/*
 * Some MIPS CPUs can enable/disable cache parity detection, but they
 * do it in different ways.
 */
static inline void parity_protection_init(void)
{
	switch (current_cpu_type()) {
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
	case CPU_1074K:
	case CPU_INTERAPTIV:
	case CPU_PROAPTIV:
	case CPU_P5600:
	case CPU_QEMU_GENERIC:
	case CPU_I6400:
		{
#define ERRCTL_PE	0x80000000
#define ERRCTL_L2P	0x00800000
			unsigned long errctl;
			unsigned int l1parity_present, l2parity_present;

			errctl = read_c0_ecc();
			errctl &= ~(ERRCTL_PE|ERRCTL_L2P);

			/* probe L1 parity support */
			write_c0_ecc(errctl | ERRCTL_PE);
			back_to_back_c0_hazard();
			l1parity_present = (read_c0_ecc() & ERRCTL_PE);

			/* probe L2 parity support */
			write_c0_ecc(errctl|ERRCTL_L2P);
			back_to_back_c0_hazard();
			l2parity_present = (read_c0_ecc() & ERRCTL_L2P);

			if (l1parity_present && l2parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
				if (l1parity ^ l2parity)
					errctl |= ERRCTL_L2P;
			} else if (l1parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
			} else if (l2parity_present) {
				if (l2parity)
					errctl |= ERRCTL_L2P;
			} else {
				/* No parity available */
			}

			printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);

			write_c0_ecc(errctl);
			back_to_back_c0_hazard();
			errctl = read_c0_ecc();
			printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);

			if (l1parity_present)
				printk(KERN_INFO "Cache parity protection %sabled\n",
				       (errctl & ERRCTL_PE) ? "en" : "dis");

			if (l2parity_present) {
				if (l1parity_present && l1parity)
					errctl ^= ERRCTL_L2P;
				printk(KERN_INFO "L2 cache parity protection %sabled\n",
				       (errctl & ERRCTL_L2P) ? "en" : "dis");
			}
		}
		break;

	case CPU_5KC:
	case CPU_5KE:
	case CPU_LOONGSON1:
		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		/* Set the PE bit (bit 31) in the c0_errctl register. */
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
		break;
	case CPU_20KC:
	case CPU_25KF:
		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
		break;
	default:
		break;
	}
}

asmlinkage void cache_parity_error(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	if ((cpu_has_mips_r2_r6) &&
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<27) ? "ES " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	} else {
		pr_err("Error bits: %s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	}
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

	panic("Can't handle the cache error!");
}

asmlinkage void do_ftlb(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	if ((cpu_has_mips_r2_r6) &&
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
		       read_c0_ecc());
		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
		reg_val = read_c0_cacheerr();
		pr_err("c0_cacheerr == %08x\n", reg_val);

		if ((reg_val & 0xc0000000) == 0xc0000000) {
			pr_err("Decoded c0_cacheerr: FTLB parity error\n");
		} else {
			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
			       reg_val & (1<<30) ? "secondary" : "primary",
			       reg_val & (1<<31) ? "data" : "insn");
		}
	} else {
		pr_err("FTLB error exception\n");
	}
	/* Just print the cacheerr bits for now */
	cache_parity_error();
}

/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long depc, old_epc, old_ra;
	unsigned int debug;

	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
	depc = read_c0_depc();
	debug = read_c0_debug();
	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
	if (debug & 0x80000000) {
		/*
		 * In branch delay slot.
		 * We cheat a little bit here and use EPC to calculate the
		 * debug return address (DEPC). EPC is restored after the
		 * calculation.
		 */
		old_epc = regs->cp0_epc;
		old_ra = regs->regs[31];
		regs->cp0_epc = depc;
		compute_return_epc(regs);
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;
		regs->regs[31] = old_ra;
	} else
		depc += 4;
	write_c0_depc(depc);

#if 0
	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
	write_c0_debug(debug | 0x100);
#endif
}

/*
 * NMI exception handler.
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(nmi_chain);

int register_nmi_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&nmi_chain, nb);
}

void __noreturn nmi_exception_handler(struct pt_regs *regs)
{
	char str[100];

	nmi_enter();
	raw_notifier_call_chain(&nmi_chain, 0, regs);
	bust_spinlocks(1);
	snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
		 smp_processor_id(), regs->cp0_epc);
	regs->cp0_epc = read_c0_errorepc();
	die(str, regs);
	nmi_exit();
}

#define VECTORSPACING 0x100	/* for EI/VI mode */

unsigned long ebase;
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];

void __init *set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler;

#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * Only the TLB handlers are cache aligned with an even
	 * address. All other handlers are on an odd address and
	 * require no modification. Otherwise, MIPS32 mode will
	 * be entered when handling any TLB exceptions. That
	 * would be bad...since we must stay in microMIPS mode.
	 */
	if (!(handler & 0x1))
		handler |= 1;
#endif
	old_handler = xchg(&exception_handlers[n], handler);

	if (n == 0 && cpu_has_divec) {
#ifdef CONFIG_CPU_MICROMIPS
		unsigned long jump_mask = ~((1 << 27) - 1);
#else
		unsigned long jump_mask = ~((1 << 28) - 1);
#endif
		u32 *buf = (u32 *)(ebase + 0x200);
		unsigned int k0 = 26;
		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
			uasm_i_j(&buf, handler & ~jump_mask);
			uasm_i_nop(&buf);
		} else {
			UASM_i_LA(&buf, k0, handler);
			uasm_i_jr(&buf, k0);
			uasm_i_nop(&buf);
		}
		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
	}
	return (void *)old_handler;
}

static void do_default_vi(void)
{
	show_regs(get_irq_regs());
	panic("Caught unexpected vectored interrupt.");
}

static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
	unsigned long handler;
	unsigned long old_handler = vi_handlers[n];
	int srssets = current_cpu_data.srsets;
	u16 *h;
	unsigned char *b;

	BUG_ON(!cpu_has_veic && !cpu_has_vint);

	if (addr == NULL) {
		handler = (unsigned long) do_default_vi;
		srs = 0;
	} else
		handler = (unsigned long) addr;
	vi_handlers[n] = handler;

	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

	if (srs >= srssets)
		panic("Shadow register set %d not supported", srs);

	if (cpu_has_veic) {
		if (board_bind_eic_interrupt)
			board_bind_eic_interrupt(n, srs);
	} else if (cpu_has_vint) {
		/* SRSMap is only defined if shadow sets are implemented */
		if (srssets > 1)
			change_c0_srsmap(0xf << n*4, srs << n*4);
	}

	if (srs == 0) {
		/*
		 * If no shadow set is selected then use the default handler
		 * that does normal register saving and standard interrupt exit
		 */
		extern char except_vec_vi, except_vec_vi_lui;
		extern char except_vec_vi_ori, except_vec_vi_end;
		extern char rollback_except_vec_vi;
		char *vec_start = using_rollback_handler() ?
			&rollback_except_vec_vi : &except_vec_vi;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
		const int lui_offset = &except_vec_vi_lui - vec_start + 2;
		const int ori_offset = &except_vec_vi_ori - vec_start + 2;
#else
		const int lui_offset = &except_vec_vi_lui - vec_start;
		const int ori_offset = &except_vec_vi_ori - vec_start;
#endif
		const int handler_len = &except_vec_vi_end - vec_start;
1971 		if (handler_len > VECTORSPACING) {
1972 			/*
1973 			 * Sigh... panicking won't help as the console
1974 			 * is probably not configured :(
1975 			 */
1976 			panic("VECTORSPACING too small");
1977 		}
1978 
1979 		set_handler(((unsigned long)b - ebase), vec_start,
1980 #ifdef CONFIG_CPU_MICROMIPS
1981 				(handler_len - 1));
1982 #else
1983 				handler_len);
1984 #endif
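		/*
		 * Patch the 16-bit immediate fields of the lui/ori pair
		 * in the stub just copied so that, at run time, it loads
		 * the handler address before jumping to it.
		 */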
1985 		h = (u16 *)(b + lui_offset);
1986 		*h = (handler >> 16) & 0xffff;
1987 		h = (u16 *)(b + ori_offset);
1988 		*h = (handler & 0xffff);
1989 		local_flush_icache_range((unsigned long)b,
1990 					 (unsigned long)(b+handler_len));
1991 	}
1992 	else {
1993 		/*
1994 		 * In other cases jump directly to the interrupt handler. It
1995 		 * is the handler's responsibility to save registers if required
1996 		 * (e.g. hi/lo) and return from the exception using "eret".
1997 		 */
1998 		u32 insn;
1999 
2000 		h = (u16 *)b;
2001 		/* j handler */
2002 #ifdef CONFIG_CPU_MICROMIPS
2003 		insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
2004 #else
2005 		insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
2006 #endif
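		/*
		 * Worked example for the MIPS32 encoding above: a handler
		 * at 0x80101234 gives 0x08000000 | (0x00101234 >> 2), i.e.
		 * the instruction word 0x0804048d ("j 0x80101234"), which
		 * works because the vector lies in the same 256 MB region.
		 */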
2007 		h[0] = (insn >> 16) & 0xffff;
2008 		h[1] = insn & 0xffff;
2009 		h[2] = 0;
2010 		h[3] = 0;
2011 		local_flush_icache_range((unsigned long)b,
2012 					 (unsigned long)(b+8));
2013 	}
2014 
2015 	return (void *)old_handler;
2016 }
2017 
2018 void *set_vi_handler(int n, vi_handler_t addr)
2019 {
2020 	return set_vi_srs_handler(n, addr, 0);
2021 }
2022 
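/*
 * Example (illustrative sketch; the dispatcher name is hypothetical):
 * a platform with vectored interrupts could route vector 3 to its own
 * dispatcher with
 *
 *	set_vi_handler(3, my_plat_irqdispatch);
 *
 * Passing NULL instead restores do_default_vi() for that vector.
 */
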
2023 extern void tlb_init(void);
2024 
2025 /*
2026  * Timer interrupt
2027  */
2028 int cp0_compare_irq;
2029 EXPORT_SYMBOL_GPL(cp0_compare_irq);
2030 int cp0_compare_irq_shift;
2031 
2032 /*
2033  * Performance counter IRQ or -1 if shared with timer
2034  */
2035 int cp0_perfcount_irq;
2036 EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
2037 
2038 /*
2039  * Fast debug channel IRQ or -1 if not present
2040  */
2041 int cp0_fdc_irq;
2042 EXPORT_SYMBOL_GPL(cp0_fdc_irq);
2043 
2044 static int noulri;
2045 
2046 static int __init ulri_disable(char *s)
2047 {
2048 	pr_info("Disabling ulri\n");
2049 	noulri = 1;
2050 
2051 	return 1;
2052 }
2053 __setup("noulri", ulri_disable);
2054 
2055 /* configure STATUS register */
2056 static void configure_status(void)
2057 {
2058 	/*
2059 	 * Disable coprocessors and select 32-bit or 64-bit addressing
2060 	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
2061 	 * flag that some firmware may have left set and the TS bit (for
2062 	 * IP27).  Set XX for ISA IV code to work.
2063 	 */
2064 	unsigned int status_set = ST0_CU0;
2065 #ifdef CONFIG_64BIT
2066 	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
2067 #endif
2068 	if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
2069 		status_set |= ST0_XX;
2070 	if (cpu_has_dsp)
2071 		status_set |= ST0_MX;
2072 
2073 	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
2074 			 status_set);
2075 }
2076 
2077 /* configure HWRENA register */
2078 static void configure_hwrena(void)
2079 {
2080 	unsigned int hwrena = cpu_hwrena_impl_bits;
2081 
2082 	if (cpu_has_mips_r2_r6)
2083 		hwrena |= 0x0000000f;
2084 
2085 	if (!noulri && cpu_has_userlocal)
2086 		hwrena |= (1 << 29);
2087 
2088 	if (hwrena)
2089 		write_c0_hwrena(hwrena);
2090 }
2091 
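/*
 * Illustrative note: the low four HWRENA bits expose the CPUNum,
 * SYNCI_Step, CC and CCRes hardware registers to user mode, and bit 29
 * (ULR) lets user space read UserLocal directly, e.g. for TLS:
 *
 *	unsigned long tls;
 *	asm volatile(".set push; .set mips32r2; "
 *		     "rdhwr %0, $29; .set pop" : "=r" (tls));
 *
 * Without the relevant HWRENA bit the rdhwr traps and is emulated via
 * the RI handler (see handle_ri_rdhwr).
 */
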
2092 static void configure_exception_vector(void)
2093 {
2094 	if (cpu_has_veic || cpu_has_vint) {
2095 		unsigned long sr = set_c0_status(ST0_BEV);
2096 		write_c0_ebase(ebase);
2097 		write_c0_status(sr);
2098 		/* Setting vector spacing enables EI/VI mode  */
2099 		change_c0_intctl(0x3e0, VECTORSPACING);
2100 	}
2101 	if (cpu_has_divec) {
2102 		if (cpu_has_mipsmt) {
2103 			unsigned int vpflags = dvpe();
2104 			set_c0_cause(CAUSEF_IV);
2105 			evpe(vpflags);
2106 		} else
2107 			set_c0_cause(CAUSEF_IV);
2108 	}
2109 }
2110 
2111 void per_cpu_trap_init(bool is_boot_cpu)
2112 {
2113 	unsigned int cpu = smp_processor_id();
2114 
2115 	configure_status();
2116 	configure_hwrena();
2117 
2118 	configure_exception_vector();
2119 
2120 	/*
2121 	 * Before R2 these interrupt numbers were all fixed to 7, so on R2 only:
2122 	 *
2123 	 *  o read IntCtl.IPTI to determine the timer interrupt
2124 	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
2125 	 *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
2126 	 */
2127 	if (cpu_has_mips_r2_r6) {
2128 		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
2129 		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
2130 		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
2131 		cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
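		/* An IPFDC field of 0 means no FDC interrupt is wired up */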
2132 		if (!cp0_fdc_irq)
2133 			cp0_fdc_irq = -1;
2134 
2135 	} else {
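		/*
		 * Note: CP0_LEGACY_PERFCNT_IRQ is reused for the compare
		 * shift below; both legacy constants are expected to be 7,
		 * so the reuse should be harmless.
		 */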
2136 		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
2137 		cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
2138 		cp0_perfcount_irq = -1;
2139 		cp0_fdc_irq = -1;
2140 	}
2141 
2142 	if (!cpu_data[cpu].asid_cache)
2143 		cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
2144 
2145 	atomic_inc(&init_mm.mm_count);
2146 	current->active_mm = &init_mm;
2147 	BUG_ON(current->mm);
2148 	enter_lazy_tlb(&init_mm, current);
2149 
2150 	/* Boot CPU's cache setup in setup_arch(). */
2151 	if (!is_boot_cpu)
2152 		cpu_cache_init();
2153 	tlb_init();
2154 	TLBMISS_HANDLER_SETUP();
2155 }
2156 
2157 /* Install CPU exception handler */
2158 void set_handler(unsigned long offset, void *addr, unsigned long size)
2159 {
2160 #ifdef CONFIG_CPU_MICROMIPS
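	/*
	 * microMIPS handler symbols carry the ISA bit (bit 0) set;
	 * subtract it to get the real byte address of the code being
	 * copied.
	 */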
2161 	memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
2162 #else
2163 	memcpy((void *)(ebase + offset), addr, size);
2164 #endif
2165 	local_flush_icache_range(ebase + offset, ebase + offset + size);
2166 }
2167 
2168 static char panic_null_cerr[] =
2169 	"Trying to set NULL cache error exception handler";
2170 
2171 /*
2172  * Install uncached CPU exception handler.
2173  * This is suitable only for the cache error exception which is the only
2174  * exception handler that is being run uncached.
2175  */
2176 void set_uncached_handler(unsigned long offset, void *addr,
2177 	unsigned long size)
2178 {
2179 	unsigned long uncached_ebase = CKSEG1ADDR(ebase);
2180 
2181 	if (!addr)
2182 		panic(panic_null_cerr);
2183 
2184 	memcpy((void *)(uncached_ebase + offset), addr, size);
2185 }
2186 
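/*
 * Example (hedged sketch): cache setup code such as the r4k variant
 * typically installs its cache error vector through this helper,
 * roughly as
 *
 *	extern char except_vec2_generic;
 *	set_uncached_handler(0x100, &except_vec2_generic, 0x80);
 *
 * so the vector is written through the uncached KSEG1 alias of ebase.
 */
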
2187 static int __initdata rdhwr_noopt;
2188 static int __init set_rdhwr_noopt(char *str)
2189 {
2190 	rdhwr_noopt = 1;
2191 	return 1;
2192 }
2193 
2194 __setup("rdhwr_noopt", set_rdhwr_noopt);
2195 
2196 void __init trap_init(void)
2197 {
2198 	extern char except_vec3_generic;
2199 	extern char except_vec4;
2200 	extern char except_vec3_r4000;
2201 	unsigned long i;
2202 
2203 	check_wait();
2204 
2205 	if (cpu_has_veic || cpu_has_vint) {
2206 		unsigned long size = 0x200 + VECTORSPACING*64;
2207 		ebase = (unsigned long)
2208 			__alloc_bootmem(size, 1 << fls(size), 0);
2209 	} else {
2210 		ebase = CAC_BASE;
2211 
2212 		if (cpu_has_mips_r2_r6)
2213 			ebase += (read_c0_ebase() & 0x3ffff000);
2214 	}
2215 
2216 	if (cpu_has_mmips) {
2217 		unsigned int config3 = read_c0_config3();
2218 
2219 		if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
2220 			write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
2221 		else
2222 			write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
2223 	}
2224 
2225 	if (board_ebase_setup)
2226 		board_ebase_setup();
2227 	per_cpu_trap_init(true);
2228 
2229 	/*
2230 	 * Copy the generic exception handlers to their final destination.
2231 	 * This will be overridden later as suitable for a particular
2232 	 * configuration.
2233 	 */
2234 	set_handler(0x180, &except_vec3_generic, 0x80);
2235 
2236 	/*
2237 	 * Set up default vectors
2238 	 */
2239 	for (i = 0; i <= 31; i++)
2240 		set_except_vector(i, handle_reserved);
2241 
2242 	/*
2243 	 * Copy the EJTAG debug exception vector handler code to its final
2244 	 * destination.
2245 	 */
2246 	if (cpu_has_ejtag && board_ejtag_handler_setup)
2247 		board_ejtag_handler_setup();
2248 
2249 	/*
2250 	 * Only some CPUs have the watch exceptions.
2251 	 */
2252 	if (cpu_has_watch)
2253 		set_except_vector(EXCCODE_WATCH, handle_watch);
2254 
2255 	/*
2256 	 * Initialise interrupt handlers
2257 	 */
2258 	if (cpu_has_veic || cpu_has_vint) {
2259 		int nvec = cpu_has_veic ? 64 : 8;
2260 		for (i = 0; i < nvec; i++)
2261 			set_vi_handler(i, NULL);
2262 	}
2263 	else if (cpu_has_divec)
2264 		set_handler(0x200, &except_vec4, 0x8);
2265 
2266 	/*
2267 	 * Some CPUs can enable/disable cache parity detection, but they
2268 	 * do it in different ways.
2269 	 */
2270 	parity_protection_init();
2271 
2272 	/*
2273 	 * The Data Bus Errors / Instruction Bus Errors are signaled
2274 	 * by external hardware.  Therefore these two exceptions
2275 	 * may have board specific handlers.
2276 	 */
2277 	if (board_be_init)
2278 		board_be_init();
2279 
2280 	set_except_vector(EXCCODE_INT, using_rollback_handler() ?
2281 					rollback_handle_int : handle_int);
2282 	set_except_vector(EXCCODE_MOD, handle_tlbm);
2283 	set_except_vector(EXCCODE_TLBL, handle_tlbl);
2284 	set_except_vector(EXCCODE_TLBS, handle_tlbs);
2285 
2286 	set_except_vector(EXCCODE_ADEL, handle_adel);
2287 	set_except_vector(EXCCODE_ADES, handle_ades);
2288 
2289 	set_except_vector(EXCCODE_IBE, handle_ibe);
2290 	set_except_vector(EXCCODE_DBE, handle_dbe);
2291 
2292 	set_except_vector(EXCCODE_SYS, handle_sys);
2293 	set_except_vector(EXCCODE_BP, handle_bp);
2294 	set_except_vector(EXCCODE_RI, rdhwr_noopt ? handle_ri :
2295 			  (cpu_has_vtag_icache ?
2296 			   handle_ri_rdhwr_vivt : handle_ri_rdhwr));
2297 	set_except_vector(EXCCODE_CPU, handle_cpu);
2298 	set_except_vector(EXCCODE_OV, handle_ov);
2299 	set_except_vector(EXCCODE_TR, handle_tr);
2300 	set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);
2301 
2302 	if (current_cpu_type() == CPU_R6000 ||
2303 	    current_cpu_type() == CPU_R6000A) {
2304 		/*
2305 		 * The R6000 is the only R-series CPU that features a machine
2306 		 * check exception (similar to the R4000 cache error) and
2307 		 * unaligned ldc1/sdc1 exception.  The handlers have not been
2308 		 * written yet.  Well, anyway there is no R6000 machine on the
2309 		 * current list of targets for Linux/MIPS.
2310 		 * (Duh, crap, there is someone with a triple R6k machine)
2311 		 */
2312 		//set_except_vector(14, handle_mc);
2313 		//set_except_vector(15, handle_ndc);
2314 	}
2315 
2316 
2317 	if (board_nmi_handler_setup)
2318 		board_nmi_handler_setup();
2319 
2320 	if (cpu_has_fpu && !cpu_has_nofpuex)
2321 		set_except_vector(EXCCODE_FPE, handle_fpe);
2322 
2323 	set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);
2324 
2325 	if (cpu_has_rixiex) {
2326 		set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
2327 		set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0);
2328 	}
2329 
2330 	set_except_vector(EXCCODE_MSADIS, handle_msa);
2331 	set_except_vector(EXCCODE_MDMX, handle_mdmx);
2332 
2333 	if (cpu_has_mcheck)
2334 		set_except_vector(EXCCODE_MCHECK, handle_mcheck);
2335 
2336 	if (cpu_has_mipsmt)
2337 		set_except_vector(EXCCODE_THREAD, handle_mt);
2338 
2339 	set_except_vector(EXCCODE_DSPDIS, handle_dsp);
2340 
2341 	if (board_cache_error_setup)
2342 		board_cache_error_setup();
2343 
2344 	if (cpu_has_vce)
2345 		/* Special exception: R4[04]00 also uses the divec space. */
2346 		set_handler(0x180, &except_vec3_r4000, 0x100);
2347 	else if (cpu_has_4kex)
2348 		set_handler(0x180, &except_vec3_generic, 0x80);
2349 	else
2350 		set_handler(0x080, &except_vec3_generic, 0x80);
2351 
2352 	local_flush_icache_range(ebase, ebase + 0x400);
2353 
2354 	sort_extable(__start___dbe_table, __stop___dbe_table);
2355 
2356 	cu2_notifier(default_cu2_call, 0x80000000);	/* Run last  */
2357 }
2358 
2359 static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
2360 			    void *v)
2361 {
2362 	switch (cmd) {
2363 	case CPU_PM_ENTER_FAILED:
2364 	case CPU_PM_EXIT:
2365 		configure_status();
2366 		configure_hwrena();
2367 		configure_exception_vector();
2368 
2369 		/* Restore register with CPU number for TLB handlers */
2370 		TLBMISS_HANDLER_RESTORE();
2371 
2372 		break;
2373 	}
2374 
2375 	return NOTIFY_OK;
2376 }
2377 
2378 static struct notifier_block trap_pm_notifier_block = {
2379 	.notifier_call = trap_pm_notifier,
2380 };
2381 
2382 static int __init trap_pm_init(void)
2383 {
2384 	return cpu_pm_register_notifier(&trap_pm_notifier_block);
2385 }
2386 arch_initcall(trap_pm_init);
2387