xref: /openbmc/linux/arch/mips/kernel/traps.c (revision 3e26a691)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
 * Copyright (C) 1995, 1996 Paul M. Antoine
 * Copyright (C) 1998 Ulf Carlsson
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002, 2003, 2004, 2005, 2007  Maciej W. Rozycki
 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/cpu_pm.h>
#include <linux/kexec.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/kdb.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cop2.h>
#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/idle.h>
#include <asm/mips-r2-to-r6-emul.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/watch.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
#include <asm/uasm.h>

extern void check_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern u32 handle_tlbl[];
extern u32 handle_tlbs[];
extern u32 handle_tlbm[];
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_vivt(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_msa_fpe(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_ftlb(void);
extern asmlinkage void handle_msa(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
extern void tlb_do_page_fault_0(void);

void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
void (*board_ebase_setup)(void);
void (*board_cache_error_setup)(void);

static void show_raw_backtrace(unsigned long reg29)
{
	unsigned long *sp = (unsigned long *)(reg29 & ~3);
	unsigned long addr;

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif
	while (!kstack_end(sp)) {
		unsigned long __user *p =
			(unsigned long __user *)(unsigned long)sp++;
		if (__get_user(addr, p)) {
			printk(" (Bad stack address)");
			break;
		}
		if (__kernel_text_address(addr))
			print_ip_sym(addr);
	}
	printk("\n");
}

#ifdef CONFIG_KALLSYMS
int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
	raw_show_trace = 1;
	return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
#endif

static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
{
	unsigned long sp = regs->regs[29];
	unsigned long ra = regs->regs[31];
	unsigned long pc = regs->cp0_epc;

	if (!task)
		task = current;

	if (raw_show_trace || !__kernel_text_address(pc)) {
		show_raw_backtrace(sp);
		return;
	}
	printk("Call Trace:\n");
	do {
		print_ip_sym(pc);
		pc = unwind_stack(task, &sp, pc, &ra);
	} while (pc);
	printk("\n");
}

/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */
static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	long stackdata;
	int i;
	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];

	printk("Stack :");
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0))
			printk("\n	 ");
		if (i > 39) {
			printk(" ...");
			break;
		}

		if (__get_user(stackdata, sp++)) {
			printk(" (Bad stack address)");
			break;
		}

		printk(" %0*lx", field, stackdata);
		i++;
	}
	printk("\n");
	show_backtrace(task, regs);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	struct pt_regs regs;
	mm_segment_t old_fs = get_fs();
	if (sp) {
		regs.regs[29] = (unsigned long)sp;
		regs.regs[31] = 0;
		regs.cp0_epc = 0;
	} else {
		if (task && task != current) {
			regs.regs[29] = task->thread.reg29;
			regs.regs[31] = 0;
			regs.cp0_epc = task->thread.reg31;
#ifdef CONFIG_KGDB_KDB
		} else if (atomic_read(&kgdb_active) != -1 &&
			   kdb_current_regs) {
			memcpy(&regs, kdb_current_regs, sizeof(regs));
#endif /* CONFIG_KGDB_KDB */
		} else {
			prepare_frametrace(&regs);
		}
	}
	/*
	 * show_stack() deals exclusively with kernel mode, so be sure to access
	 * the stack in the kernel (not user) address space.
	 */
	set_fs(KERNEL_DS);
	show_stacktrace(task, &regs);
	set_fs(old_fs);
}

static void show_code(unsigned int __user *pc)
{
	long i;
	unsigned short __user *pc16 = NULL;

	printk("\nCode:");

	if ((unsigned long)pc & 1)
		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
	for (i = -3; i < 6; i++) {
		unsigned int insn;
		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
			printk(" (Bad address in epc)\n");
			break;
		}
		printk("%c%0*x%c", (i ? ' ' : '<'), pc16 ? 4 : 8, insn, (i ? ' ' : '>'));
	}
}
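
/*
 * Example output (illustrative, not captured from a real crash): the
 * dump brackets the word at the exception PC with '<' and '>', e.g.
 *
 *	Code: 8fbf0014  27bd0018  03e00008 <0000000d> 00000000 ...
 *
 * with 4-digit halfwords instead of 8-digit words when the faulting
 * code was MIPS16e/microMIPS.
 */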

static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int cause = regs->cp0_cause;
	unsigned int exccode;
	int i;

	show_regs_print_info(KERN_DEFAULT);

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d   :", i);
		if (i == 0)
			printk(" %0*lx", field, 0UL);
		else if (i == 26 || i == 27)
			printk(" %*s", field, "");
		else
			printk(" %0*lx", field, regs->regs[i]);

		i++;
		if ((i % 4) == 0)
			printk("\n");
	}

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	printk("Acx    : %0*lx\n", field, regs->acx);
#endif
	printk("Hi    : %0*lx\n", field, regs->hi);
	printk("Lo    : %0*lx\n", field, regs->lo);

	/*
	 * Saved cp0 registers
	 */
	printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
	       (void *) regs->cp0_epc);
	printk("ra    : %0*lx %pS\n", field, regs->regs[31],
	       (void *) regs->regs[31]);

	printk("Status: %08x	", (uint32_t) regs->cp0_status);

	if (cpu_has_3kex) {
		if (regs->cp0_status & ST0_KUO)
			printk("KUo ");
		if (regs->cp0_status & ST0_IEO)
			printk("IEo ");
		if (regs->cp0_status & ST0_KUP)
			printk("KUp ");
		if (regs->cp0_status & ST0_IEP)
			printk("IEp ");
		if (regs->cp0_status & ST0_KUC)
			printk("KUc ");
		if (regs->cp0_status & ST0_IEC)
			printk("IEc ");
	} else if (cpu_has_4kex) {
		if (regs->cp0_status & ST0_KX)
			printk("KX ");
		if (regs->cp0_status & ST0_SX)
			printk("SX ");
		if (regs->cp0_status & ST0_UX)
			printk("UX ");
		switch (regs->cp0_status & ST0_KSU) {
		case KSU_USER:
			printk("USER ");
			break;
		case KSU_SUPERVISOR:
			printk("SUPERVISOR ");
			break;
		case KSU_KERNEL:
			printk("KERNEL ");
			break;
		default:
			printk("BAD_MODE ");
			break;
		}
		if (regs->cp0_status & ST0_ERL)
			printk("ERL ");
		if (regs->cp0_status & ST0_EXL)
			printk("EXL ");
		if (regs->cp0_status & ST0_IE)
			printk("IE ");
	}
	printk("\n");

	exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);

	if (1 <= exccode && exccode <= 5)
		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

	printk("PrId  : %08x (%s)\n", read_c0_prid(),
	       cpu_name_string());
}

/*
 * FIXME: really the generic show_regs should take a const pointer argument.
 */
void show_regs(struct pt_regs *regs)
{
	__show_regs((struct pt_regs *)regs);
}

void show_registers(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	mm_segment_t old_fs = get_fs();

	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
	       current->comm, current->pid, current_thread_info(), current,
	       field, current_thread_info()->tp_value);
	if (cpu_has_userlocal) {
		unsigned long tls;

		tls = read_c0_userlocal();
		if (tls != current_thread_info()->tp_value)
			printk("*HwTLS: %0*lx\n", field, tls);
	}

	if (!user_mode(regs))
		/* Necessary for getting the correct stack content */
		set_fs(KERNEL_DS);
	show_stacktrace(current, regs);
	show_code((unsigned int __user *) regs->cp0_epc);
	printk("\n");
	set_fs(old_fs);
}

static DEFINE_RAW_SPINLOCK(die_lock);

void __noreturn die(const char *str, struct pt_regs *regs)
{
	static int die_counter;
	int sig = SIGSEGV;

	oops_enter();

	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
		       SIGSEGV) == NOTIFY_STOP)
		sig = 0;

	console_verbose();
	raw_spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		printk(KERN_EMERG "Fatal exception: panic in 5 seconds");
		ssleep(5);
		panic("Fatal exception");
	}

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	do_exit(sig);
}

extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];

__asm__(
"	.section	__dbe_table, \"a\"\n"
"	.previous			\n");

/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
	if (!e)
		e = search_module_dbetables(addr);
	return e;
}

asmlinkage void do_be(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	const struct exception_table_entry *fixup = NULL;
	int data = regs->cp0_cause & 4;
	int action = MIPS_BE_FATAL;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	/* XXX For now.	 Fixme, this searches the wrong table ...  */
	if (data && !user_mode(regs))
		fixup = search_dbe_tables(exception_epc(regs));

	if (fixup)
		action = MIPS_BE_FIXUP;

	if (board_be_handler)
		action = board_be_handler(regs, fixup != NULL);

	switch (action) {
	case MIPS_BE_DISCARD:
		goto out;
	case MIPS_BE_FIXUP:
		if (fixup) {
			regs->cp0_epc = fixup->nextinsn;
			goto out;
		}
		break;
	default:
		break;
	}

	/*
	 * Assume it would be too dangerous to continue ...
	 */
	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
	       data ? "Data" : "Instruction",
	       field, regs->cp0_epc, field, regs->regs[31]);
	if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
		       SIGBUS) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Oops", regs);
	force_sig(SIGBUS, current);

out:
	exception_exit(prev_state);
}

/*
 * ll/sc, rdhwr, sync emulation
 */

#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define SYNC   0x0000000f
#define RDHWR  0x0000003b

/*  microMIPS definitions   */
#define MM_POOL32A_FUNC 0xfc00ffff
#define MM_RDHWR        0x00006b3c
#define MM_RS           0x001f0000
#define MM_RT           0x03e00000

/*
 * The ll_bit is cleared by r*_switch.S
 */

unsigned int ll_bit;
struct task_struct *ll_task;

static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long value, __user *vaddr;
	long offset;

	/*
	 * Analyse the ll instruction that just caused an RI exception
	 * and compute the referenced address.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;
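
	/*
	 * Worked example (illustrative): an encoded offset of 0xfffc
	 * becomes 0xfffc0000 after the shift up and -4 after the
	 * arithmetic shift back down.  This relies on "long" being
	 * 32 bits, which holds on the ll/sc-less CPUs this path serves.
	 */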

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

	if ((unsigned long)vaddr & 3)
		return SIGBUS;
	if (get_user(value, vaddr))
		return SIGSEGV;

	preempt_disable();

	if (ll_task == NULL || ll_task == current) {
		ll_bit = 1;
	} else {
		ll_bit = 0;
	}
	ll_task = current;

	preempt_enable();

	regs->regs[(opcode & RT) >> 16] = value;

	return 0;
}

static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long __user *vaddr;
	unsigned long reg;
	long offset;

	/*
	 * Analyse the sc instruction that just caused an RI exception
	 * and compute the referenced address.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
	reg = (opcode & RT) >> 16;

	if ((unsigned long)vaddr & 3)
		return SIGBUS;

	preempt_disable();

	if (ll_bit == 0 || ll_task != current) {
		regs->regs[reg] = 0;
		preempt_enable();
		return 0;
	}

	preempt_enable();

	if (put_user(regs->regs[reg], vaddr))
		return SIGSEGV;

	regs->regs[reg] = 1;

	return 0;
}

/*
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is, both
 * opcodes are supposed to result in coprocessor unusable exceptions if
 * executed on ll/sc-less processors.  That's the theory.  In practice a
 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 * instead, so we're doing the emulation thing in both exception handlers.
 */
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == LL) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return simulate_ll(regs, opcode);
	}
	if ((opcode & OPCODE) == SC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return simulate_sc(regs, opcode);
	}

	return -1;			/* Must be something else ... */
}
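
/*
 * Illustrative user code (an assumption, not taken from this file):
 * what the two helpers above keep working on ll/sc-less CPUs is the
 * classic load-linked/store-conditional retry loop, e.g.
 *
 *	1:	ll	t0, 0(a0)
 *		addiu	t0, t0, 1
 *		sc	t0, 0(a0)
 *		beqz	t0, 1b
 *		 nop
 *
 * simulate_ll() performs the load and records (ll_task, ll_bit);
 * simulate_sc() completes the store only if no other task has issued
 * an ll since, otherwise it makes the sc appear to fail.
 */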

/*
 * Simulate trapping 'rdhwr' instructions to provide user-accessible
 * registers not implemented in hardware.
 */
static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
{
	struct thread_info *ti = task_thread_info(current);

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
			1, regs, 0);
	switch (rd) {
	case 0:		/* CPU number */
		regs->regs[rt] = smp_processor_id();
		return 0;
	case 1:		/* SYNCI length */
		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
				     current_cpu_data.icache.linesz);
		return 0;
	case 2:		/* Read count register */
		regs->regs[rt] = read_c0_count();
		return 0;
	case 3:		/* Count register resolution */
		switch (current_cpu_type()) {
		case CPU_20KC:
		case CPU_25KF:
			regs->regs[rt] = 1;
			break;
		default:
			regs->regs[rt] = 2;
		}
		return 0;
	case 29:
		regs->regs[rt] = ti->tp_value;
		return 0;
	default:
		return -1;
	}
}
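
/*
 * Illustrative usage (an assumption, not taken from this file): the
 * common consumer of hardware register 29 is userland TLS code, e.g.
 *
 *	rdhwr	v1, $29
 *
 * On CPUs lacking RDHWR, or with ULRI unavailable, that traps as a
 * reserved instruction and reaches simulate_rdhwr() with rd == 29,
 * which substitutes the saved tp_value.
 */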

static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
		int rd = (opcode & RD) >> 11;
		int rt = (opcode & RT) >> 16;

		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	/* Not ours.  */
	return -1;
}

static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
		int rd = (opcode & MM_RS) >> 16;
		int rt = (opcode & MM_RT) >> 21;
		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	/* Not ours.  */
	return -1;
}

static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return 0;
	}

	return -1;			/* Must be something else ... */
}

asmlinkage void do_ov(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	siginfo_t info = {
		.si_signo = SIGFPE,
		.si_code = FPE_INTOVF,
		.si_addr = (void __user *)regs->cp0_epc,
	};

	prev_state = exception_enter();
	die_if_kernel("Integer overflow", regs);

	force_sig_info(SIGFPE, &info, current);
	exception_exit(prev_state);
}

int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
	struct siginfo si = { 0 };

	switch (sig) {
	case 0:
		return 0;

	case SIGFPE:
		si.si_addr = fault_addr;
		si.si_signo = sig;
		/*
		 * Inexact can happen together with Overflow or Underflow.
		 * Respect the mask to deliver the correct exception.
		 */
		fcr31 &= (fcr31 & FPU_CSR_ALL_E) <<
			 (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E));
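		/*
		 * Worked example (assuming the standard FCSR layout:
		 * Enables in bits 11:7 and the matching Cause bits in
		 * 16:12, so the ffs() difference above is 5): with only
		 * Overflow enabled, the mask keeps FPU_CSR_OVF_X and
		 * discards a simultaneous Inexact cause bit.
		 */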
		if (fcr31 & FPU_CSR_INV_X)
			si.si_code = FPE_FLTINV;
		else if (fcr31 & FPU_CSR_DIV_X)
			si.si_code = FPE_FLTDIV;
		else if (fcr31 & FPU_CSR_OVF_X)
			si.si_code = FPE_FLTOVF;
		else if (fcr31 & FPU_CSR_UDF_X)
			si.si_code = FPE_FLTUND;
		else if (fcr31 & FPU_CSR_INE_X)
			si.si_code = FPE_FLTRES;
		else
			si.si_code = __SI_FAULT;
		force_sig_info(sig, &si, current);
		return 1;

	case SIGBUS:
		si.si_addr = fault_addr;
		si.si_signo = sig;
		si.si_code = BUS_ADRERR;
		force_sig_info(sig, &si, current);
		return 1;

	case SIGSEGV:
		si.si_addr = fault_addr;
		si.si_signo = sig;
		down_read(&current->mm->mmap_sem);
		if (find_vma(current->mm, (unsigned long)fault_addr))
			si.si_code = SEGV_ACCERR;
		else
			si.si_code = SEGV_MAPERR;
		up_read(&current->mm->mmap_sem);
		force_sig_info(sig, &si, current);
		return 1;

	default:
		force_sig(sig, current);
		return 1;
	}
}

static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
		       unsigned long old_epc, unsigned long old_ra)
{
	union mips_instruction inst = { .word = opcode };
	void __user *fault_addr;
	unsigned long fcr31;
	int sig;

	/* If it's obviously not an FP instruction, skip it */
	switch (inst.i_format.opcode) {
	case cop1_op:
	case cop1x_op:
	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
		break;

	default:
		return -1;
	}

	/*
	 * do_ri skipped over the instruction via compute_return_epc, undo
	 * that for the FPU emulator.
	 */
	regs->cp0_epc = old_epc;
	regs->regs[31] = old_ra;

	/* Save the FP context to struct thread_struct */
	lose_fpu(1);

	/* Run the emulator */
	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
				       &fault_addr);
	fcr31 = current->thread.fpu.fcr31;

	/*
	 * We can't allow the emulated instruction to leave any of
	 * the cause bits set in $fcr31.
	 */
	current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

	/* Restore the hardware register state */
	own_fpu(1);

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcr31);

	return 0;
}

/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
	enum ctx_state prev_state;
	void __user *fault_addr;
	int sig;

	prev_state = exception_enter();
	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
		       SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear FCSR.Cause before enabling interrupts */
	write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X);
	local_irq_enable();

	die_if_kernel("FP exception in kernel code", regs);

	if (fcr31 & FPU_CSR_UNI_X) {
		/*
		 * Unimplemented operation exception.  If we've got the full
		 * software emulator on-board, let's use it...
		 *
		 * Force FPU to dump state into task/thread context.  We're
		 * moving a lot of data here for what is probably a single
		 * instruction, but the alternative is to pre-decode the FP
		 * register operands before invoking the emulator, which seems
		 * a bit extreme for what should be an infrequent event.
		 */
		/* Make sure 'resume' does not overwrite the saved FP context. */
		lose_fpu(1);

		/* Run the emulator */
		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);
		fcr31 = current->thread.fpu.fcr31;

		/*
		 * We can't allow the emulated instruction to leave any of
		 * the cause bits set in $fcr31.
		 */
		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

		/* Restore the hardware register state */
		own_fpu(1);	/* Using the FPU again.	 */
	} else {
		sig = SIGFPE;
		fault_addr = (void __user *) regs->cp0_epc;
	}

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcr31);

out:
	exception_exit(prev_state);
}

void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
	const char *str)
{
	siginfo_t info = { 0 };
	char b[40];

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
			 SIGTRAP) == NOTIFY_STOP)
		return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

	if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
		       SIGTRAP) == NOTIFY_STOP)
		return;

	/*
	 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
	 * insns, even for trap and break codes that indicate arithmetic
	 * failures.  Weird ...
	 * But should we continue the brokenness???  --macro
	 */
	switch (code) {
	case BRK_OVERFLOW:
	case BRK_DIVZERO:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		if (code == BRK_DIVZERO)
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_addr = (void __user *) regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		break;
	case BRK_BUG:
		die_if_kernel("Kernel bug detected", regs);
		force_sig(SIGTRAP, current);
		break;
	case BRK_MEMU:
		/*
		 * This breakpoint code is used by the FPU emulator to retake
		 * control of the CPU after executing the instruction from the
		 * delay slot of an emulated branch.
		 *
		 * If the exception was recognized as a delay-slot return,
		 * we are done; otherwise handle it as normal.
		 */
		if (do_dsemulret(regs))
			return;

		die_if_kernel("Math emu break/trap", regs);
		force_sig(SIGTRAP, current);
		break;
	default:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		force_sig(SIGTRAP, current);
	}
}

asmlinkage void do_bp(struct pt_regs *regs)
{
	unsigned long epc = msk_isa16_mode(exception_epc(regs));
	unsigned int opcode, bcode;
	enum ctx_state prev_state;
	mm_segment_t seg;

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		u16 instr[2];

		if (__get_user(instr[0], (u16 __user *)epc))
			goto out_sigsegv;

		if (!cpu_has_mmips) {
			/* MIPS16e mode */
			bcode = (instr[0] >> 5) & 0x3f;
		} else if (mm_insn_16bit(instr[0])) {
			/* 16-bit microMIPS BREAK */
			bcode = instr[0] & 0xf;
		} else {
			/* 32-bit microMIPS BREAK */
			if (__get_user(instr[1], (u16 __user *)(epc + 2)))
				goto out_sigsegv;
			opcode = (instr[0] << 16) | instr[1];
			bcode = (opcode >> 6) & ((1 << 20) - 1);
		}
	} else {
		if (__get_user(opcode, (unsigned int __user *)epc))
			goto out_sigsegv;
		bcode = (opcode >> 6) & ((1 << 20) - 1);
	}

	/*
	 * There is an ancient bug in MIPS assemblers that causes the
	 * break code to start at bit 16 instead of bit 6 in the opcode.
	 * GAS is bug-compatible, but not always, grrr...
	 * We handle both cases with a simple heuristic.  --macro
	 */
	if (bcode >= (1 << 10))
		bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);
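	/*
	 * Worked example (illustrative): "break 7" assembled with the
	 * buggy convention carries the code in bits 25..16, so it is
	 * read as bcode == 7 << 10 == 0x1c00 above; the swap recovers
	 * the intended value 7.
	 */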

	/*
	 * notify the kprobe handlers, if instruction is likely to
	 * pertain to them.
	 */
	switch (bcode) {
	case BRK_UPROBE:
		if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_UPROBE_XOL:
		if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_BP:
		if (notify_die(DIE_BREAK, "debug", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	default:
		break;
	}

	do_trap_or_bp(regs, bcode, "Break");

out:
	set_fs(seg);
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
	goto out;
}

asmlinkage void do_tr(struct pt_regs *regs)
{
	u32 opcode, tcode = 0;
	enum ctx_state prev_state;
	u16 instr[2];
	mm_segment_t seg;
	unsigned long epc = msk_isa16_mode(exception_epc(regs));

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(get_ds());

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
		    __get_user(instr[1], (u16 __user *)(epc + 2)))
			goto out_sigsegv;
		opcode = (instr[0] << 16) | instr[1];
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 12) & ((1 << 4) - 1);
	} else {
		if (__get_user(opcode, (u32 __user *)epc))
			goto out_sigsegv;
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 6) & ((1 << 10) - 1);
	}

	do_trap_or_bp(regs, tcode, "Trap");

out:
	set_fs(seg);
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
	goto out;
}

asmlinkage void do_ri(struct pt_regs *regs)
{
	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
	unsigned long old_epc = regs->cp0_epc;
	unsigned long old31 = regs->regs[31];
	enum ctx_state prev_state;
	unsigned int opcode = 0;
	int status = -1;

	/*
	 * Avoid any kernel code. Just emulate the R2 instruction
	 * as quickly as possible.
	 */
	if (mipsr2_emulation && cpu_has_mips_r6 &&
	    likely(user_mode(regs)) &&
	    likely(get_user(opcode, epc) >= 0)) {
		unsigned long fcr31 = 0;

		status = mipsr2_decoder(regs, opcode, &fcr31);
		switch (status) {
		case 0:
		case SIGEMT:
			task_thread_info(current)->r2_emul_return = 1;
			return;
		case SIGILL:
			goto no_r2_instr;
		default:
			process_fpemu_return(status,
					     &current->thread.cp0_baduaddr,
					     fcr31);
			task_thread_info(current)->r2_emul_return = 1;
			return;
		}
	}

no_r2_instr:

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;

	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
		       SIGILL) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(compute_return_epc(regs) < 0))
		goto out;

	if (get_isa16_mode(regs->cp0_epc)) {
		unsigned short mmop[2] = { 0 };

		if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
			status = SIGSEGV;
		if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
			status = SIGSEGV;
		opcode = mmop[0];
		opcode = (opcode << 16) | mmop[1];

		if (status < 0)
			status = simulate_rdhwr_mm(regs, opcode);
	} else {
		if (unlikely(get_user(opcode, epc) < 0))
			status = SIGSEGV;

		if (!cpu_has_llsc && status < 0)
			status = simulate_llsc(regs, opcode);

		if (status < 0)
			status = simulate_rdhwr_normal(regs, opcode);

		if (status < 0)
			status = simulate_sync(regs, opcode);

		if (status < 0)
			status = simulate_fp(regs, opcode, old_epc, old31);
	}

	if (status < 0)
		status = SIGILL;

	if (unlikely(status > 0)) {
		regs->cp0_epc = old_epc;		/* Undo skip-over.  */
		regs->regs[31] = old31;
		force_sig(status, current);
	}

out:
	exception_exit(prev_state);
}

/*
 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
 * emulated more than some threshold number of instructions, force migration to
 * a "CPU" that has FP support.
 */
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
	if (mt_fpemul_threshold > 0 &&
	     ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
		/*
		 * If there's no FPU present, or if the application has already
		 * restricted the allowed set to exclude any CPUs with FPUs,
		 * we'll skip the procedure.
		 */
		if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
			cpumask_t tmask;

			current->thread.user_cpus_allowed
				= current->cpus_allowed;
			cpumask_and(&tmask, &current->cpus_allowed,
				    &mt_fpu_cpumask);
			set_cpus_allowed_ptr(current, &tmask);
			set_thread_flag(TIF_FPUBOUND);
		}
	}
#endif /* CONFIG_MIPS_MT_FPAFF */
}

/*
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(cu2_chain);

int __ref register_cu2_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cu2_chain, nb);
}

int cu2_notifier_call_chain(unsigned long val, void *v)
{
	return raw_notifier_call_chain(&cu2_chain, val, v);
}

static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
	void *data)
{
	struct pt_regs *regs = data;

	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
			      "instruction", regs);
	force_sig(SIGILL, current);

	return NOTIFY_OK;
}

static int wait_on_fp_mode_switch(atomic_t *p)
{
	/*
	 * The FP mode for this task is currently being switched. That may
	 * involve modifications to the format of this task's FP context, which
	 * make it unsafe to proceed with execution for the moment. Instead,
	 * schedule some other task.
	 */
	schedule();
	return 0;
}

static int enable_restore_fp_context(int msa)
{
	int err, was_fpu_owner, prior_msa;

	/*
	 * If an FP mode switch is currently underway, wait for it to
	 * complete before proceeding.
	 */
	wait_on_atomic_t(&current->mm->context.fp_mode_switching,
			 wait_on_fp_mode_switch, TASK_KILLABLE);

	if (!used_math()) {
		/* First time FP context user. */
		preempt_disable();
		err = init_fpu();
		if (msa && !err) {
			enable_msa();
			_init_msa_upper();
			set_thread_flag(TIF_USEDMSA);
			set_thread_flag(TIF_MSA_CTX_LIVE);
		}
		preempt_enable();
		if (!err)
			set_used_math();
		return err;
	}

	/*
	 * This task has formerly used the FP context.
	 *
	 * If this thread has no live MSA vector context then we can simply
	 * restore the scalar FP context. If it has live MSA vector context
	 * (that is, it has or may have used MSA since last performing a
	 * function call) then we'll need to restore the vector context. This
	 * applies even if we're currently only executing a scalar FP
	 * instruction. This is because if we were to later execute an MSA
	 * instruction then we'd either have to:
	 *
	 *  - Restore the vector context & clobber any registers modified by
	 *    scalar FP instructions between now & then.
	 *
	 * or
	 *
	 *  - Not restore the vector context & lose the most significant bits
	 *    of all vector registers.
	 *
	 * Neither of those options is acceptable. We cannot restore the least
	 * significant bits of the registers now & only restore the most
	 * significant bits later because the most significant bits of any
	 * vector registers whose aliased FP register is modified now will have
	 * been zeroed. We'd have no way to know that when restoring the vector
	 * context & thus may load an outdated value for the most significant
	 * bits of a vector register.
	 */
	if (!msa && !thread_msa_context_live())
		return own_fpu(1);

	/*
	 * This task is using or has previously used MSA. Thus we require
	 * that Status.FR == 1.
	 */
	preempt_disable();
	was_fpu_owner = is_fpu_owner();
	err = own_fpu_inatomic(0);
	if (err)
		goto out;

	enable_msa();
	write_msa_csr(current->thread.fpu.msacsr);
	set_thread_flag(TIF_USEDMSA);

	/*
	 * If this is the first time that the task is using MSA and it has
	 * previously used scalar FP in this time slice then we already have
	 * FP context which we shouldn't clobber. We do however need to clear
	 * the upper 64b of each vector register so that this task has no
	 * opportunity to see data left behind by another.
	 */
	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
	if (!prior_msa && was_fpu_owner) {
		_init_msa_upper();

		goto out;
	}

	if (!prior_msa) {
		/*
		 * Restore the least significant 64b of each vector register
		 * from the existing scalar FP context.
		 */
		_restore_fp(current);

		/*
		 * The task has not formerly used MSA, so clear the upper 64b
		 * of each vector register such that it cannot see data left
		 * behind by another task.
		 */
		_init_msa_upper();
	} else {
		/* We need to restore the vector context. */
		restore_msa(current);

		/* Restore the scalar FP control & status register */
		if (!was_fpu_owner)
			write_32bit_cp1_register(CP1_STATUS,
						 current->thread.fpu.fcr31);
	}

out:
	preempt_enable();

	return 0;
}
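
/*
 * Summary of the decisions above (a condensed restatement, not new
 * behaviour):
 *
 *	!used_math()                    -> init_fpu(), plus MSA setup
 *					   if requested
 *	!msa && no live MSA context     -> own_fpu(1), scalar FP only
 *	first MSA use, was FP owner     -> keep FP regs, zero vector
 *					   upper 64b
 *	first MSA use, not FP owner     -> restore FP low 64b, zero
 *					   upper 64b
 *	MSA context live                -> restore_msa() and FCSR
 */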

asmlinkage void do_cpu(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	unsigned int __user *epc;
	unsigned long old_epc, old31;
	void __user *fault_addr;
	unsigned int opcode;
	unsigned long fcr31;
	unsigned int cpid;
	int status, err;
	unsigned long __maybe_unused flags;
	int sig;

	prev_state = exception_enter();
	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

	if (cpid != 2)
		die_if_kernel("do_cpu invoked from kernel context!", regs);

	switch (cpid) {
	case 0:
		epc = (unsigned int __user *)exception_epc(regs);
		old_epc = regs->cp0_epc;
		old31 = regs->regs[31];
		opcode = 0;
		status = -1;

		if (unlikely(compute_return_epc(regs) < 0))
			break;

		if (!get_isa16_mode(regs->cp0_epc)) {
			if (unlikely(get_user(opcode, epc) < 0))
				status = SIGSEGV;

			if (!cpu_has_llsc && status < 0)
				status = simulate_llsc(regs, opcode);
		}

		if (status < 0)
			status = SIGILL;

		if (unlikely(status > 0)) {
			regs->cp0_epc = old_epc;	/* Undo skip-over.  */
			regs->regs[31] = old31;
			force_sig(status, current);
		}

		break;

	case 3:
		/*
		 * The COP3 opcode space and consequently the CP0.Status.CU3
		 * bit and the CP0.Cause.CE=3 encoding have been removed as
		 * of the MIPS III ISA.  From the MIPS IV and MIPS32r2 ISAs
		 * up the space has been reused for COP1X instructions, that
		 * are enabled by the CP0.Status.CU1 bit and consequently
		 * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable
		 * exceptions.  Some FPU-less processors that implement one
		 * of these ISAs however use this code erroneously for COP1X
		 * instructions.  Therefore we redirect this trap to the FP
		 * emulator too.
		 */
		if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
			force_sig(SIGILL, current);
			break;
		}
		/* Fall through.  */

	case 1:
		err = enable_restore_fp_context(0);

		if (raw_cpu_has_fpu && !err)
			break;

		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
					       &fault_addr);
		fcr31 = current->thread.fpu.fcr31;

		/*
		 * We can't allow the emulated instruction to leave
		 * any of the cause bits set in $fcr31.
		 */
		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

		/* Send a signal if required.  */
		if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
			mt_ase_fp_affinity();

		break;

	case 2:
		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
		break;
	}

	exception_exit(prev_state);
}

asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
		       current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear MSACSR.Cause before enabling interrupts */
	write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
	local_irq_enable();

	die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
	force_sig(SIGFPE, current);
out:
	exception_exit(prev_state);
}

asmlinkage void do_msa(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	int err;

	prev_state = exception_enter();

	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
		force_sig(SIGILL, current);
		goto out;
	}

	die_if_kernel("do_msa invoked from kernel context!", regs);

	err = enable_restore_fp_context(1);
	if (err)
		force_sig(SIGILL, current);
out:
	exception_exit(prev_state);
}

asmlinkage void do_mdmx(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	force_sig(SIGILL, current);
	exception_exit(prev_state);
}

/*
 * Called with interrupts disabled.
 */
asmlinkage void do_watch(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	u32 cause;

	prev_state = exception_enter();
	/*
	 * Clear the WP bit (bit 22) of the cause register so we don't
	 * loop forever.
	 */
	cause = read_c0_cause();
	cause &= ~(1 << 22);
	write_c0_cause(cause);

	/*
	 * If the current thread has the watch registers loaded, save
	 * their values and send SIGTRAP.  Otherwise another thread
	 * left the registers set, clear them and continue.
	 */
	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
		mips_read_watch_registers();
		local_irq_enable();
		force_sig(SIGTRAP, current);
	} else {
		mips_clear_watch_registers();
		local_irq_enable();
	}
	exception_exit(prev_state);
}

asmlinkage void do_mcheck(struct pt_regs *regs)
{
	int multi_match = regs->cp0_status & ST0_TS;
	enum ctx_state prev_state;
	mm_segment_t old_fs = get_fs();

	prev_state = exception_enter();
	show_regs(regs);

	if (multi_match) {
		dump_tlb_regs();
		pr_info("\n");
		dump_tlb_all();
	}

	if (!user_mode(regs))
		set_fs(KERNEL_DS);

	show_code((unsigned int __user *) regs->cp0_epc);

	set_fs(old_fs);

	/*
	 * Some chips may have other causes of machine check (e.g. SB1
	 * graduation timer)
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (multi_match) ? "" : "not ");
}

asmlinkage void do_mt(struct pt_regs *regs)
{
	int subcode;

	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
			>> VPECONTROL_EXCPT_SHIFT;
	switch (subcode) {
	case 0:
		printk(KERN_DEBUG "Thread Underflow\n");
		break;
	case 1:
		printk(KERN_DEBUG "Thread Overflow\n");
		break;
	case 2:
		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
		break;
	case 3:
		printk(KERN_DEBUG "Gating Storage Exception\n");
		break;
	case 4:
		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
		break;
	case 5:
		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
		break;
	default:
		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
			subcode);
		break;
	}
	die_if_kernel("MIPS MT Thread exception in kernel", regs);

	force_sig(SIGILL, current);
}


asmlinkage void do_dsp(struct pt_regs *regs)
{
	if (cpu_has_dsp)
		panic("Unexpected DSP exception");

	force_sig(SIGILL, current);
}

asmlinkage void do_reserved(struct pt_regs *regs)
{
	/*
	 * Game over - no way to handle this if it ever occurs.  Most
	 * probably caused by a new unknown CPU type or after another
	 * deadly hardware or software error.
	 */
	show_regs(regs);
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
}

static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
	l1parity = 0;
	return 1;
}
__setup("nol1par", nol1parity);
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
	l2parity = 0;
	return 1;
}
__setup("nol2par", nol2parity);

/*
 * Some MIPS CPUs can enable/disable cache parity detection, but they
 * do it in different ways.
 */
static inline void parity_protection_init(void)
{
	switch (current_cpu_type()) {
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
	case CPU_1074K:
	case CPU_INTERAPTIV:
	case CPU_PROAPTIV:
	case CPU_P5600:
	case CPU_QEMU_GENERIC:
	case CPU_I6400:
		{
#define ERRCTL_PE	0x80000000
#define ERRCTL_L2P	0x00800000
			unsigned long errctl;
			unsigned int l1parity_present, l2parity_present;

			errctl = read_c0_ecc();
			errctl &= ~(ERRCTL_PE|ERRCTL_L2P);

			/* probe L1 parity support */
			write_c0_ecc(errctl | ERRCTL_PE);
			back_to_back_c0_hazard();
			l1parity_present = (read_c0_ecc() & ERRCTL_PE);

			/* probe L2 parity support */
			write_c0_ecc(errctl|ERRCTL_L2P);
			back_to_back_c0_hazard();
			l2parity_present = (read_c0_ecc() & ERRCTL_L2P);

			if (l1parity_present && l2parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
				if (l1parity ^ l2parity)
					errctl |= ERRCTL_L2P;
			} else if (l1parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
			} else if (l2parity_present) {
				if (l2parity)
					errctl |= ERRCTL_L2P;
			} else {
				/* No parity available */
			}

			printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);

			write_c0_ecc(errctl);
			back_to_back_c0_hazard();
			errctl = read_c0_ecc();
			printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);

			if (l1parity_present)
				printk(KERN_INFO "Cache parity protection %sabled\n",
				       (errctl & ERRCTL_PE) ? "en" : "dis");

			if (l2parity_present) {
				if (l1parity_present && l1parity)
					errctl ^= ERRCTL_L2P;
				printk(KERN_INFO "L2 cache parity protection %sabled\n",
				       (errctl & ERRCTL_L2P) ? "en" : "dis");
			}
		}
		break;

	case CPU_5KC:
	case CPU_5KE:
	case CPU_LOONGSON1:
		/* Set the PE bit (bit 31) in the c0_errctl register. */
		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
		break;
	case CPU_20KC:
	case CPU_25KF:
		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
		break;
	default:
		break;
	}
}

asmlinkage void cache_parity_error(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	if ((cpu_has_mips_r2_r6) &&
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<27) ? "ES " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	} else {
		pr_err("Error bits: %s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	}
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

	panic("Can't handle the cache error!");
}

asmlinkage void do_ftlb(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	if ((cpu_has_mips_r2_r6) &&
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
		       read_c0_ecc());
		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
		reg_val = read_c0_cacheerr();
		pr_err("c0_cacheerr == %08x\n", reg_val);

		if ((reg_val & 0xc0000000) == 0xc0000000) {
			pr_err("Decoded c0_cacheerr: FTLB parity error\n");
		} else {
			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
			       reg_val & (1<<30) ? "secondary" : "primary",
			       reg_val & (1<<31) ? "data" : "insn");
		}
	} else {
		pr_err("FTLB error exception\n");
	}
	/* Just print the cacheerr bits for now */
	cache_parity_error();
}

/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long depc, old_epc, old_ra;
	unsigned int debug;

	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
	depc = read_c0_depc();
	debug = read_c0_debug();
	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
	if (debug & 0x80000000) {
		/*
		 * In branch delay slot.
		 * We cheat a little bit here and use EPC to calculate the
		 * debug return address (DEPC). EPC is restored after the
		 * calculation.
		 */
		old_epc = regs->cp0_epc;
		old_ra = regs->regs[31];
		regs->cp0_epc = depc;
		compute_return_epc(regs);
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;
		regs->regs[31] = old_ra;
	} else
		depc += 4;
	write_c0_depc(depc);

#if 0
	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
	write_c0_debug(debug | 0x100);
#endif
}

/*
 * NMI exception handler.
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(nmi_chain);

int register_nmi_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&nmi_chain, nb);
}

void __noreturn nmi_exception_handler(struct pt_regs *regs)
{
	char str[100];

	nmi_enter();
	raw_notifier_call_chain(&nmi_chain, 0, regs);
	bust_spinlocks(1);
	snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
		 smp_processor_id(), regs->cp0_epc);
	regs->cp0_epc = read_c0_errorepc();
	die(str, regs);
	nmi_exit();
}

#define VECTORSPACING 0x100	/* for EI/VI mode */

unsigned long ebase;
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];

void __init *set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler;

#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * Only the TLB handlers are cache aligned with an even
	 * address. All other handlers are on an odd address and
	 * require no modification. Otherwise, MIPS32 mode will
	 * be entered when handling any TLB exceptions. That
	 * would be bad...since we must stay in microMIPS mode.
	 */
	if (!(handler & 0x1))
		handler |= 1;
#endif
	old_handler = xchg(&exception_handlers[n], handler);

	if (n == 0 && cpu_has_divec) {
#ifdef CONFIG_CPU_MICROMIPS
		unsigned long jump_mask = ~((1 << 27) - 1);
#else
		unsigned long jump_mask = ~((1 << 28) - 1);
#endif
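		/*
		 * A MIPS "j" can only reach within the current 256 MB
		 * (28-bit) segment, or 128 MB (27-bit) on microMIPS,
		 * hence the masks above: in-range handlers get a short
		 * j/nop stub, anything else a full load-address + jr.
		 */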
1886 		u32 *buf = (u32 *)(ebase + 0x200);
1887 		unsigned int k0 = 26;
1888 		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
1889 			uasm_i_j(&buf, handler & ~jump_mask);
1890 			uasm_i_nop(&buf);
1891 		} else {
1892 			UASM_i_LA(&buf, k0, handler);
1893 			uasm_i_jr(&buf, k0);
1894 			uasm_i_nop(&buf);
1895 		}
1896 		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
1897 	}
1898 	return (void *)old_handler;
1899 }
1900 
1901 static void do_default_vi(void)
1902 {
1903 	show_regs(get_irq_regs());
1904 	panic("Caught unexpected vectored interrupt.");
1905 }
1906 
1907 static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1908 {
1909 	unsigned long handler;
1910 	unsigned long old_handler = vi_handlers[n];
1911 	int srssets = current_cpu_data.srsets;
1912 	u16 *h;
1913 	unsigned char *b;
1914 
1915 	BUG_ON(!cpu_has_veic && !cpu_has_vint);
1916 
1917 	if (addr == NULL) {
1918 		handler = (unsigned long) do_default_vi;
1919 		srs = 0;
1920 	} else
1921 		handler = (unsigned long) addr;
1922 	vi_handlers[n] = handler;
1923 
1924 	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
1925 
1926 	if (srs >= srssets)
1927 		panic("Shadow register set %d not supported", srs);
1928 
1929 	if (cpu_has_veic) {
1930 		if (board_bind_eic_interrupt)
1931 			board_bind_eic_interrupt(n, srs);
1932 	} else if (cpu_has_vint) {
1933 		/* SRSMap is only defined if shadow sets are implemented */
1934 		if (srssets > 1)
1935 			change_c0_srsmap(0xf << n*4, srs << n*4);
1936 	}
1937 
1938 	if (srs == 0) {
1939 		/*
1940 		 * If no shadow set is selected then use the default handler
1941 		 * that does normal register saving and standard interrupt exit
1942 		 */
1943 		extern char except_vec_vi, except_vec_vi_lui;
1944 		extern char except_vec_vi_ori, except_vec_vi_end;
1945 		extern char rollback_except_vec_vi;
1946 		char *vec_start = using_rollback_handler() ?
1947 			&rollback_except_vec_vi : &except_vec_vi;
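		/*
		 * On big-endian and microMIPS kernels the 16-bit immediate
		 * is in the second halfword of the lui/ori instructions,
		 * hence the extra 2-byte offset.
		 */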
1948 #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
1949 		const int lui_offset = &except_vec_vi_lui - vec_start + 2;
1950 		const int ori_offset = &except_vec_vi_ori - vec_start + 2;
1951 #else
1952 		const int lui_offset = &except_vec_vi_lui - vec_start;
1953 		const int ori_offset = &except_vec_vi_ori - vec_start;
1954 #endif
1955 		const int handler_len = &except_vec_vi_end - vec_start;
1956 
1957 		if (handler_len > VECTORSPACING) {
1958 			/*
1959 			 * Sigh... panicking won't help as the console
1960 			 * is probably not configured :(
1961 			 */
1962 			panic("VECTORSPACING too small");
1963 		}
1964 
1965 		set_handler(((unsigned long)b - ebase), vec_start,
1966 #ifdef CONFIG_CPU_MICROMIPS
1967 				(handler_len - 1));
1968 #else
1969 				handler_len);
1970 #endif
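		/*
		 * Patch the 16-bit immediates of the copied stub's lui/ori
		 * pair so that together they form the handler address.
		 */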
1971 		h = (u16 *)(b + lui_offset);
1972 		*h = (handler >> 16) & 0xffff;
1973 		h = (u16 *)(b + ori_offset);
1974 		*h = (handler & 0xffff);
1975 		local_flush_icache_range((unsigned long)b,
1976 					 (unsigned long)(b+handler_len));
1977 	}
1978 	else {
1979 		/*
1980 		 * In other cases jump directly to the interrupt handler. It
1981 		 * is the handler's responsibility to save registers if required
1982 		 * (eg hi/lo) and return from the exception using "eret".
1983 		 */
1984 		u32 insn;
1985 
1986 		h = (u16 *)b;
1987 		/* j handler */
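		/*
		 * Hand-assembled jump: the top six bits hold the major
		 * opcode (0x35 for microMIPS j32, 0x02 for classic j), the
		 * low 26 bits the target as a halfword (microMIPS) or word
		 * (MIPS32) index within the current region.
		 */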
1988 #ifdef CONFIG_CPU_MICROMIPS
1989 		insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
1990 #else
1991 		insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
1992 #endif
1993 		h[0] = (insn >> 16) & 0xffff;
1994 		h[1] = insn & 0xffff;
1995 		h[2] = 0;	/* zero word: nop filling the jump's delay slot */
1996 		h[3] = 0;
1997 		local_flush_icache_range((unsigned long)b,
1998 					 (unsigned long)(b+8));
1999 	}
2000 
2001 	return (void *)old_handler;
2002 }
2003 
2004 void *set_vi_handler(int n, vi_handler_t addr)
2005 {
2006 	return set_vi_srs_handler(n, addr, 0);
2007 }
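
/*
 * Example (illustrative; my_vec_dispatch is hypothetical): platform code
 * on a core with vectored interrupts could route hardware interrupt 2
 * to its own low-level dispatch routine with
 *
 *	set_vi_handler(2, my_vec_dispatch);
 *
 * Shadow register set 0 is used, i.e. the default stub that saves the
 * full register context before calling the handler.
 */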
2008 
2009 extern void tlb_init(void);
2010 
2011 /*
2012  * Timer interrupt
2013  */
2014 int cp0_compare_irq;
2015 EXPORT_SYMBOL_GPL(cp0_compare_irq);
2016 int cp0_compare_irq_shift;
2017 
2018 /*
2019  * Performance counter IRQ or -1 if shared with timer
2020  */
2021 int cp0_perfcount_irq;
2022 EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
2023 
2024 /*
2025  * Fast debug channel IRQ or -1 if not present
2026  */
2027 int cp0_fdc_irq;
2028 EXPORT_SYMBOL_GPL(cp0_fdc_irq);
2029 
2030 static int noulri;
2031 
2032 static int __init ulri_disable(char *s)
2033 {
2034 	pr_info("Disabling ulri\n");
2035 	noulri = 1;
2036 
2037 	return 1;
2038 }
2039 __setup("noulri", ulri_disable);
2040 
2041 /* configure STATUS register */
2042 static void configure_status(void)
2043 {
2044 	/*
2045 	 * Disable coprocessors and select 32-bit or 64-bit addressing
2046 	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
2047 	 * flag that some firmware may have left set and the TS bit (for
2048 	 * IP27).  Set XX for ISA IV code to work.
2049 	 */
2050 	unsigned int status_set = ST0_CU0;
2051 #ifdef CONFIG_64BIT
2052 	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
2053 #endif
2054 	if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
2055 		status_set |= ST0_XX;
2056 	if (cpu_has_dsp)
2057 		status_set |= ST0_MX;
2058 
2059 	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
2060 			 status_set);
2061 }
2062 
2063 /* configure HWRENA register */
2064 static void configure_hwrena(void)
2065 {
2066 	unsigned int hwrena = cpu_hwrena_impl_bits;
2067 
2068 	if (cpu_has_mips_r2_r6)
2069 		hwrena |= 0x0000000f;	/* HW registers 0-3: CPUNum, SYNCI_Step, CC, CCRes */
2070 
2071 	if (!noulri && cpu_has_userlocal)
2072 		hwrena |= (1 << 29);	/* HWREna.ULR: expose UserLocal to rdhwr */
2073 
2074 	if (hwrena)
2075 		write_c0_hwrena(hwrena);
2076 }
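
/*
 * With HWREna.ULR set, a user-space thread can read its TLS pointer
 * directly, e.g. (illustrative):
 *
 *	rdhwr	$3, $29		# v1 = UserLocal (TLS pointer)
 *
 * With the bit clear (or with "noulri"), the same instruction raises
 * an RI exception and falls back to kernel emulation in the RI handler.
 */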
2077 
2078 static void configure_exception_vector(void)
2079 {
2080 	if (cpu_has_veic || cpu_has_vint) {
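		/*
		 * Write EBase with Status.BEV set, so that an exception
		 * taken mid-update still uses the fixed bootstrap vectors
		 * rather than a half-configured ebase.
		 */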
2081 		unsigned long sr = set_c0_status(ST0_BEV);
2082 		write_c0_ebase(ebase);
2083 		write_c0_status(sr);
2084 		/* Setting vector spacing enables EI/VI mode  */
2085 		change_c0_intctl(0x3e0, VECTORSPACING);
2086 	}
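	/* Cause.IV selects the dedicated interrupt vector at ebase + 0x200 */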
2087 	if (cpu_has_divec) {
2088 		if (cpu_has_mipsmt) {
2089 			unsigned int vpflags = dvpe();
2090 			set_c0_cause(CAUSEF_IV);
2091 			evpe(vpflags);
2092 		} else
2093 			set_c0_cause(CAUSEF_IV);
2094 	}
2095 }
2096 
2097 void per_cpu_trap_init(bool is_boot_cpu)
2098 {
2099 	unsigned int cpu = smp_processor_id();
2100 
2101 	configure_status();
2102 	configure_hwrena();
2103 
2104 	configure_exception_vector();
2105 
2106 	/*
2107 	 * Before R2 these interrupt numbers were all fixed to 7, so on R2 only:
2108 	 *
2109 	 *  o read IntCtl.IPTI to determine the timer interrupt
2110 	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
2111 	 *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
2112 	 */
2113 	if (cpu_has_mips_r2_r6) {
2114 		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
2115 		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
2116 		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
2117 		cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
2118 		if (!cp0_fdc_irq)
2119 			cp0_fdc_irq = -1;	/* IntCtl.IPFDC == 0: no FDC interrupt */
2120 
2121 	} else {
2122 		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
2123 		cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;	/* likewise IRQ 7 */
2124 		cp0_perfcount_irq = -1;
2125 		cp0_fdc_irq = -1;
2126 	}
2127 
2128 	if (!cpu_data[cpu].asid_cache)
2129 		cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
2130 
2131 	atomic_inc(&init_mm.mm_count);
2132 	current->active_mm = &init_mm;
2133 	BUG_ON(current->mm);
2134 	enter_lazy_tlb(&init_mm, current);
2135 
2136 	/* Boot CPU's cache setup in setup_arch(). */
2137 	if (!is_boot_cpu)
2138 		cpu_cache_init();
2139 	tlb_init();
2140 	TLBMISS_HANDLER_SETUP();
2141 }
2142 
2143 /* Install CPU exception handler */
2144 void set_handler(unsigned long offset, void *addr, unsigned long size)
2145 {
2146 #ifdef CONFIG_CPU_MICROMIPS
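	/* microMIPS handler symbols have the ISA bit set: code starts at addr - 1 */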
2147 	memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
2148 #else
2149 	memcpy((void *)(ebase + offset), addr, size);
2150 #endif
2151 	local_flush_icache_range(ebase + offset, ebase + offset + size);
2152 }
2153 
2154 static char panic_null_cerr[] =
2155 	"Trying to set NULL cache error exception handler";
2156 
2157 /*
2158  * Install uncached CPU exception handler.
2159  * This is suitable only for the cache error exception which is the only
2160  * exception handler that is being run uncached.
2161  */
2162 void set_uncached_handler(unsigned long offset, void *addr,
2163 	unsigned long size)
2164 {
2165 	unsigned long uncached_ebase = CKSEG1ADDR(ebase);
2166 
2167 	if (!addr)
2168 		panic(panic_null_cerr);
2169 
2170 	memcpy((void *)(uncached_ebase + offset), addr, size);
2171 }
2172 
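/*
 * "rdhwr_noopt" on the kernel command line makes trap_init() install
 * the plain handle_ri handler for RI exceptions, bypassing the
 * optimised RDHWR emulation fast paths below.
 */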
2173 static int __initdata rdhwr_noopt;
2174 static int __init set_rdhwr_noopt(char *str)
2175 {
2176 	rdhwr_noopt = 1;
2177 	return 1;
2178 }
2179 
2180 __setup("rdhwr_noopt", set_rdhwr_noopt);
2181 
2182 void __init trap_init(void)
2183 {
2184 	extern char except_vec3_generic;
2185 	extern char except_vec4;
2186 	extern char except_vec3_r4000;
2187 	unsigned long i;
2188 
2189 	check_wait();
2190 
2191 	if (cpu_has_veic || cpu_has_vint) {
2192 		unsigned long size = 0x200 + VECTORSPACING*64;
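		/* aligned to a power of two no smaller than size */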
2193 		ebase = (unsigned long)
2194 			__alloc_bootmem(size, 1 << fls(size), 0);
2195 	} else {
2196 		ebase = CAC_BASE;
2197 
2198 		if (cpu_has_mips_r2_r6)
2199 			ebase += (read_c0_ebase() & 0x3ffff000);	/* EBase[29:12] */
2200 	}
2201 
2202 	if (cpu_has_mmips) {
2203 		unsigned int config3 = read_c0_config3();
2204 
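		/* Config3.ISAOnExc: the ISA mode in which exceptions are taken */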
2205 		if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
2206 			write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
2207 		else
2208 			write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
2209 	}
2210 
2211 	if (board_ebase_setup)
2212 		board_ebase_setup();
2213 	per_cpu_trap_init(true);
2214 
2215 	/*
2216 	 * Copy the generic exception handlers to their final destination.
2217 	 * This will be overridden later as suitable for a particular
2218 	 * configuration.
2219 	 */
2220 	set_handler(0x180, &except_vec3_generic, 0x80);
2221 
2222 	/*
2223 	 * Set up default vectors
2224 	 */
2225 	for (i = 0; i <= 31; i++)
2226 		set_except_vector(i, handle_reserved);
2227 
2228 	/*
2229 	 * Copy the EJTAG debug exception vector handler code to its final
2230 	 * destination.
2231 	 */
2232 	if (cpu_has_ejtag && board_ejtag_handler_setup)
2233 		board_ejtag_handler_setup();
2234 
2235 	/*
2236 	 * Only some CPUs have the watch exceptions.
2237 	 */
2238 	if (cpu_has_watch)
2239 		set_except_vector(EXCCODE_WATCH, handle_watch);
2240 
2241 	/*
2242 	 * Initialise interrupt handlers
2243 	 */
2244 	if (cpu_has_veic || cpu_has_vint) {
2245 		int nvec = cpu_has_veic ? 64 : 8;
2246 		for (i = 0; i < nvec; i++)
2247 			set_vi_handler(i, NULL);
2248 	}
2249 	else if (cpu_has_divec)
2250 		set_handler(0x200, &except_vec4, 0x8);
2251 
2252 	/*
2253 	 * Some CPUs can enable/disable cache parity detection, but they
2254 	 * do it in different ways.
2255 	 */
2256 	parity_protection_init();
2257 
2258 	/*
2259 	 * The Data Bus Errors / Instruction Bus Errors are signaled
2260 	 * by external hardware.  Therefore these two exceptions
2261 	 * may have board specific handlers.
2262 	 */
2263 	if (board_be_init)
2264 		board_be_init();
2265 
2266 	set_except_vector(EXCCODE_INT, using_rollback_handler() ?
2267 					rollback_handle_int : handle_int);
2268 	set_except_vector(EXCCODE_MOD, handle_tlbm);
2269 	set_except_vector(EXCCODE_TLBL, handle_tlbl);
2270 	set_except_vector(EXCCODE_TLBS, handle_tlbs);
2271 
2272 	set_except_vector(EXCCODE_ADEL, handle_adel);
2273 	set_except_vector(EXCCODE_ADES, handle_ades);
2274 
2275 	set_except_vector(EXCCODE_IBE, handle_ibe);
2276 	set_except_vector(EXCCODE_DBE, handle_dbe);
2277 
2278 	set_except_vector(EXCCODE_SYS, handle_sys);
2279 	set_except_vector(EXCCODE_BP, handle_bp);
2280 	set_except_vector(EXCCODE_RI, rdhwr_noopt ? handle_ri :
2281 			  (cpu_has_vtag_icache ?
2282 			   handle_ri_rdhwr_vivt : handle_ri_rdhwr));
2283 	set_except_vector(EXCCODE_CPU, handle_cpu);
2284 	set_except_vector(EXCCODE_OV, handle_ov);
2285 	set_except_vector(EXCCODE_TR, handle_tr);
2286 	set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);
2287 
2288 	if (current_cpu_type() == CPU_R6000 ||
2289 	    current_cpu_type() == CPU_R6000A) {
2290 		/*
2291 		 * The R6000 is the only R-series CPU that features a machine
2292 		 * check exception (similar to the R4000 cache error) and
2293 		 * unaligned ldc1/sdc1 exception.  The handlers have not been
2294 		 * written yet.	 Well, anyway there is no R6000 machine on the
2295 		 * current list of targets for Linux/MIPS.
2296 		 * (Duh, crap, there is someone with a triple R6k machine)
2297 		 */
2298 		//set_except_vector(14, handle_mc);
2299 		//set_except_vector(15, handle_ndc);
2300 	}
2301 
2302 
2303 	if (board_nmi_handler_setup)
2304 		board_nmi_handler_setup();
2305 
2306 	if (cpu_has_fpu && !cpu_has_nofpuex)
2307 		set_except_vector(EXCCODE_FPE, handle_fpe);
2308 
2309 	set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);
2310 
2311 	if (cpu_has_rixiex) {
2312 		set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
2313 		set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0);
2314 	}
2315 
2316 	set_except_vector(EXCCODE_MSADIS, handle_msa);
2317 	set_except_vector(EXCCODE_MDMX, handle_mdmx);
2318 
2319 	if (cpu_has_mcheck)
2320 		set_except_vector(EXCCODE_MCHECK, handle_mcheck);
2321 
2322 	if (cpu_has_mipsmt)
2323 		set_except_vector(EXCCODE_THREAD, handle_mt);
2324 
2325 	set_except_vector(EXCCODE_DSPDIS, handle_dsp);
2326 
2327 	if (board_cache_error_setup)
2328 		board_cache_error_setup();
2329 
2330 	if (cpu_has_vce)
2331 		/* Special exception: R4[04]00 also uses the divec space. */
2332 		set_handler(0x180, &except_vec3_r4000, 0x100);
2333 	else if (cpu_has_4kex)
2334 		set_handler(0x180, &except_vec3_generic, 0x80);
2335 	else
2336 		set_handler(0x080, &except_vec3_generic, 0x80);
2337 
2338 	local_flush_icache_range(ebase, ebase + 0x400);
2339 
2340 	sort_extable(__start___dbe_table, __stop___dbe_table);
2341 
2342 	cu2_notifier(default_cu2_call, 0x80000000);	/* Run last  */
2343 }
2344 
2345 static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
2346 			    void *v)
2347 {
2348 	switch (cmd) {
2349 	case CPU_PM_ENTER_FAILED:
2350 	case CPU_PM_EXIT:
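		/*
		 * CP0 state may have been lost across the power-down, so
		 * rebuild Status, HWREna and the exception vector setup.
		 */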
2351 		configure_status();
2352 		configure_hwrena();
2353 		configure_exception_vector();
2354 
2355 		/* Restore register with CPU number for TLB handlers */
2356 		TLBMISS_HANDLER_RESTORE();
2357 
2358 		break;
2359 	}
2360 
2361 	return NOTIFY_OK;
2362 }
2363 
2364 static struct notifier_block trap_pm_notifier_block = {
2365 	.notifier_call = trap_pm_notifier,
2366 };
2367 
2368 static int __init trap_pm_init(void)
2369 {
2370 	return cpu_pm_register_notifier(&trap_pm_notifier_block);
2371 }
2372 arch_initcall(trap_pm_init);
2373