xref: /openbmc/linux/arch/mips/kernel/traps.c (revision 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 01 Ralf Baechle
 * Copyright (C) 1995, 1996 Paul M. Antoine
 * Copyright (C) 1998 Ulf Carlsson
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000, 01 MIPS Technologies, Inc.
 * Copyright (C) 2002, 2003, 2004  Maciej W. Rozycki
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>

#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cpu.h>
#include <asm/fpu.h>
#include <asm/module.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/watch.h>
#include <asm/types.h>

extern asmlinkage void handle_tlbm(void);
extern asmlinkage void handle_tlbl(void);
extern asmlinkage void handle_tlbs(void);
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);

extern int fpu_emulator_cop1Handler(int xcptno, struct pt_regs *xcp,
	struct mips_fpu_soft_struct *ctx);

void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);

/*
 * This constant is for searching for possible module text segments.
 * MODULE_RANGE is a guess of how much space is likely to be vmalloced.
 */
#define MODULE_RANGE (8*1024*1024)

/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */
void show_stack(struct task_struct *task, unsigned long *sp)
{
	const int field = 2 * sizeof(unsigned long);
	long stackdata;
	int i;

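	/*
	 * No stack pointer given: fall back to the saved $29 of a
	 * sleeping task, or to the address of a local variable when
	 * dumping the stack of the task we are running on.
	 */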
	if (!sp) {
		if (task && task != current)
			sp = (unsigned long *) task->thread.reg29;
		else
			sp = (unsigned long *) &sp;
	}

	printk("Stack :");
	i = 0;
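	/* Walk upward and stop at the next page boundary rather than
	   risk dumping past the end of the stack. */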
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0))
			printk("\n       ");
		if (i > 39) {
			printk(" ...");
			break;
		}

		if (__get_user(stackdata, sp++)) {
			printk(" (Bad stack address)");
			break;
		}

		printk(" %0*lx", field, stackdata);
		i++;
	}
	printk("\n");
}

void show_trace(struct task_struct *task, unsigned long *stack)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long addr;

	if (!stack) {
		if (task && task != current)
			stack = (unsigned long *) task->thread.reg29;
		else
			stack = (unsigned long *) &stack;
	}

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif
	while (!kstack_end(stack)) {
		addr = *stack++;
		if (__kernel_text_address(addr)) {
			printk(" [<%0*lx>] ", field, addr);
			print_symbol("%s\n", addr);
		}
	}
	printk("\n");
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	unsigned long stack;

	show_trace(current, &stack);
}

EXPORT_SYMBOL(dump_stack);

void show_code(unsigned int *pc)
{
	long i;

	printk("\nCode:");

	for (i = -3; i < 6; i++) {
		unsigned int insn;
		if (__get_user(insn, pc + i)) {
			printk(" (Bad address in epc)\n");
			break;
		}
		printk("%c%08x%c", (i ? ' ' : '<'), insn, (i ? ' ' : '>'));
	}
}

void show_regs(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int cause = regs->cp0_cause;
	int i;

	printk("Cpu %d\n", smp_processor_id());

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d   :", i);
		if (i == 0)
			printk(" %0*lx", field, 0UL);
		else if (i == 26 || i == 27)
			printk(" %*s", field, "");
		else
			printk(" %0*lx", field, regs->regs[i]);

		i++;
		if ((i % 4) == 0)
			printk("\n");
	}

	printk("Hi    : %0*lx\n", field, regs->hi);
	printk("Lo    : %0*lx\n", field, regs->lo);

	/*
	 * Saved cp0 registers
	 */
	printk("epc   : %0*lx ", field, regs->cp0_epc);
	print_symbol("%s ", regs->cp0_epc);
	printk("    %s\n", print_tainted());
	printk("ra    : %0*lx ", field, regs->regs[31]);
	print_symbol("%s\n", regs->regs[31]);

	printk("Status: %08x    ", (uint32_t) regs->cp0_status);

	if (regs->cp0_status & ST0_KX)
		printk("KX ");
	if (regs->cp0_status & ST0_SX)
		printk("SX ");
	if (regs->cp0_status & ST0_UX)
		printk("UX ");
	switch (regs->cp0_status & ST0_KSU) {
	case KSU_USER:
		printk("USER ");
		break;
	case KSU_SUPERVISOR:
		printk("SUPERVISOR ");
		break;
	case KSU_KERNEL:
		printk("KERNEL ");
		break;
	default:
		printk("BAD_MODE ");
		break;
	}
	if (regs->cp0_status & ST0_ERL)
		printk("ERL ");
	if (regs->cp0_status & ST0_EXL)
		printk("EXL ");
	if (regs->cp0_status & ST0_IE)
		printk("IE ");
	printk("\n");

	printk("Cause : %08x\n", cause);

	cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	if (1 <= cause && cause <= 5)
		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

	printk("PrId  : %08x\n", read_c0_prid());
}

void show_registers(struct pt_regs *regs)
{
	show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
	        current->comm, current->pid, current_thread_info(), current);
	show_stack(current, (unsigned long *) regs->regs[29]);
	show_trace(current, (unsigned long *) regs->regs[29]);
	show_code((unsigned int *) regs->cp0_epc);
	printk("\n");
}

static DEFINE_SPINLOCK(die_lock);

NORET_TYPE void __die(const char * str, struct pt_regs * regs,
	const char * file, const char * func, unsigned long line)
{
	static int die_counter;

	console_verbose();
	spin_lock_irq(&die_lock);
	printk("%s", str);
	if (file && func)
		printk(" in %s:%s, line %ld", file, func, line);
	printk("[#%d]:\n", ++die_counter);
	show_registers(regs);
	spin_unlock_irq(&die_lock);
	do_exit(SIGSEGV);
}

void __die_if_kernel(const char * str, struct pt_regs * regs,
		     const char * file, const char * func, unsigned long line)
{
	if (!user_mode(regs))
		__die(str, regs, file, func, line);
}

extern const struct exception_table_entry __start___dbe_table[];
extern const struct exception_table_entry __stop___dbe_table[];

void __declare_dbe_table(void)
{
	__asm__ __volatile__(
	".section\t__dbe_table,\"a\"\n\t"
	".previous"
	);
}

/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
	if (!e)
		e = search_module_dbetables(addr);
	return e;
}

asmlinkage void do_be(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	const struct exception_table_entry *fixup = NULL;
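	/* Cause bit 2 is the low bit of ExcCode here: clear for an
	   instruction bus error (6), set for a data bus error (7). */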
	int data = regs->cp0_cause & 4;
	int action = MIPS_BE_FATAL;

	/* XXX For now.  Fixme, this searches the wrong table ...  */
	if (data && !user_mode(regs))
		fixup = search_dbe_tables(exception_epc(regs));

	if (fixup)
		action = MIPS_BE_FIXUP;

	if (board_be_handler)
		action = board_be_handler(regs, fixup != 0);

	switch (action) {
	case MIPS_BE_DISCARD:
		return;
	case MIPS_BE_FIXUP:
		if (fixup) {
			regs->cp0_epc = fixup->nextinsn;
			return;
		}
		break;
	default:
		break;
	}

	/*
	 * Assume it would be too dangerous to continue ...
	 */
	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
	       data ? "Data" : "Instruction",
	       field, regs->cp0_epc, field, regs->regs[31]);
	die_if_kernel("Oops", regs);
	force_sig(SIGBUS, current);
}

static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode)
{
	unsigned int *epc;

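	/* If the exception hit a branch delay slot (CAUSEF_BD set), the
	   faulting instruction is the one after EPC. */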
	epc = (unsigned int *) regs->cp0_epc +
	      ((regs->cp0_cause & CAUSEF_BD) != 0);
	if (!get_user(*opcode, epc))
		return 0;

	force_sig(SIGSEGV, current);
	return 1;
}

/*
 * ll/sc emulation
 */

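/*
 * ll and sc are I-type instructions: bits 31..26 hold the opcode,
 * bits 25..21 the base register, bits 20..16 rt and bits 15..0 a
 * signed 16-bit offset.  The masks below extract those fields.
 */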
#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000

/*
 * The ll_bit is cleared by r*_switch.S
 */

unsigned long ll_bit;

static struct task_struct *ll_task = NULL;

static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long value, *vaddr;
	long offset;
	int signal = 0;

	/*
	 * Analyse the ll instruction that just caused an RI exception
	 * and compute the referenced address in vaddr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long *)((long)(regs->regs[(opcode & BASE) >> 21]) + offset);

	if ((unsigned long)vaddr & 3) {
		signal = SIGBUS;
		goto sig;
	}
	if (get_user(value, vaddr)) {
		signal = SIGSEGV;
		goto sig;
	}

	preempt_disable();

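	/*
	 * Take the software LL bit: the matching sc will only succeed
	 * if no other task has done an ll in between.  The context
	 * switch code clears ll_bit as well.
	 */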
	if (ll_task == NULL || ll_task == current) {
		ll_bit = 1;
	} else {
		ll_bit = 0;
	}
	ll_task = current;

	preempt_enable();

	regs->regs[(opcode & RT) >> 16] = value;

	compute_return_epc(regs);
	return;

sig:
	force_sig(signal, current);
}

static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long *vaddr, reg;
	long offset;
	int signal = 0;

	/*
	 * Analyse the sc instruction that just caused an RI exception
	 * and compute the referenced address in vaddr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long *)((long)(regs->regs[(opcode & BASE) >> 21]) + offset);
	reg = (opcode & RT) >> 16;

	if ((unsigned long)vaddr & 3) {
		signal = SIGBUS;
		goto sig;
	}

	preempt_disable();

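	/*
	 * The sc fails (rt is set to 0) unless this task still owns an
	 * unbroken ll: the LL bit must be set and no other task may
	 * have done an ll since ours.
	 */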
	if (ll_bit == 0 || ll_task != current) {
		regs->regs[reg] = 0;
		preempt_enable();
		compute_return_epc(regs);
		return;
	}

	preempt_enable();

	if (put_user(regs->regs[reg], vaddr)) {
		signal = SIGSEGV;
		goto sig;
	}

	regs->regs[reg] = 1;

	compute_return_epc(regs);
	return;

sig:
	force_sig(signal, current);
}

/*
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is, both
 * opcodes are supposed to result in coprocessor unusable exceptions if
 * executed on ll/sc-less processors.  That's the theory.  In practice a
 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 * instead, so we're doing the emulation thing in both exception handlers.
 */
static inline int simulate_llsc(struct pt_regs *regs)
{
	unsigned int opcode;

	if (unlikely(get_insn_opcode(regs, &opcode)))
		return -EFAULT;

	if ((opcode & OPCODE) == LL) {
		simulate_ll(regs, opcode);
		return 0;
	}
	if ((opcode & OPCODE) == SC) {
		simulate_sc(regs, opcode);
		return 0;
	}

	return -EFAULT;			/* Strange things going on ... */
}

asmlinkage void do_ov(struct pt_regs *regs)
{
	siginfo_t info;

	info.si_code = FPE_INTOVF;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void *)regs->cp0_epc;
	force_sig_info(SIGFPE, &info, current);
}

/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
	if (fcr31 & FPU_CSR_UNI_X) {
		int sig;

		preempt_disable();

		/*
		 * Unimplemented operation exception.  If we've got the full
		 * software emulator on-board, let's use it...
		 *
		 * Force FPU to dump state into task/thread context.  We're
		 * moving a lot of data here for what is probably a single
		 * instruction, but the alternative is to pre-decode the FP
		 * register operands before invoking the emulator, which seems
		 * a bit extreme for what should be an infrequent event.
		 */
		save_fp(current);

		/* Run the emulator */
		sig = fpu_emulator_cop1Handler(0, regs,
			&current->thread.fpu.soft);

		/*
		 * We can't allow the emulated instruction to leave any of
		 * the cause bits set in $fcr31.
		 */
		current->thread.fpu.soft.fcr31 &= ~FPU_CSR_ALL_X;

		/* Restore the hardware register state */
		restore_fp(current);

		preempt_enable();

		/* If something went wrong, signal */
		if (sig)
			force_sig(sig, current);

		return;
	}

	force_sig(SIGFPE, current);
}

asmlinkage void do_bp(struct pt_regs *regs)
{
	unsigned int opcode, bcode;
	siginfo_t info;

	die_if_kernel("Break instruction in kernel code", regs);

	if (get_insn_opcode(regs, &opcode))
		return;

	/*
	 * There is an ancient bug in MIPS assemblers that causes the
	 * break code to start at bit 16 instead of bit 6 in the opcode.
	 * Gas is bug-compatible, but not always, grrr...
	 * We handle both cases with a simple heuristic.  --macro
	 */
	bcode = ((opcode >> 6) & ((1 << 20) - 1));
	if (bcode < (1 << 10))
		bcode <<= 10;

	/*
	 * (A short test says that IRIX 5.3 sends SIGTRAP for all break
	 * insns, even for break codes that indicate arithmetic failures.
	 * Weird ...)
	 * But should we continue the brokenness???  --macro
	 */
	switch (bcode) {
	case BRK_OVERFLOW << 10:
	case BRK_DIVZERO << 10:
		if (bcode == (BRK_DIVZERO << 10))
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void *)regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		break;
	default:
		force_sig(SIGTRAP, current);
	}
}

asmlinkage void do_tr(struct pt_regs *regs)
{
	unsigned int opcode, tcode = 0;
	siginfo_t info;

	die_if_kernel("Trap instruction in kernel code", regs);

	if (get_insn_opcode(regs, &opcode))
		return;

	/* Immediate versions don't provide a code.  */
	if (!(opcode & OPCODE))
		tcode = ((opcode >> 6) & ((1 << 10) - 1));

	/*
	 * (A short test says that IRIX 5.3 sends SIGTRAP for all trap
	 * insns, even for trap codes that indicate arithmetic failures.
	 * Weird ...)
	 * But should we continue the brokenness???  --macro
	 */
	switch (tcode) {
	case BRK_OVERFLOW:
	case BRK_DIVZERO:
		if (tcode == BRK_DIVZERO)
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void *)regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		break;
	default:
		force_sig(SIGTRAP, current);
	}
}

asmlinkage void do_ri(struct pt_regs *regs)
{
	die_if_kernel("Reserved instruction in kernel code", regs);

	if (!cpu_has_llsc)
		if (!simulate_llsc(regs))
			return;

	force_sig(SIGILL, current);
}

asmlinkage void do_cpu(struct pt_regs *regs)
{
	unsigned int cpid;

	die_if_kernel("do_cpu invoked from kernel context!", regs);

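	/* The CE field of c0_cause tells which coprocessor (0-3) raised
	   the unusable exception. */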
	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

	switch (cpid) {
	case 0:
		if (cpu_has_llsc)
			break;

		if (!simulate_llsc(regs))
			return;
		break;

	case 1:
		preempt_disable();

		own_fpu();
		if (used_math()) {	/* Using the FPU again.  */
			restore_fp(current);
		} else {		/* First time FPU user.  */
			init_fpu();
			set_used_math();
		}

		if (!cpu_has_fpu) {
			int sig = fpu_emulator_cop1Handler(0, regs,
						&current->thread.fpu.soft);
			if (sig)
				force_sig(sig, current);
		}

		preempt_enable();

		return;

	case 2:
	case 3:
		break;
	}

	force_sig(SIGILL, current);
}

asmlinkage void do_mdmx(struct pt_regs *regs)
{
	force_sig(SIGILL, current);
}

asmlinkage void do_watch(struct pt_regs *regs)
{
	/*
	 * We use the watch exception where available to detect stack
	 * overflows.
	 */
	dump_tlb_all();
	show_regs(regs);
	panic("Caught WATCH exception - probably caused by stack overflow.");
}

asmlinkage void do_mcheck(struct pt_regs *regs)
{
	show_regs(regs);
	dump_tlb_all();
	/*
	 * Some chips may have other causes of machine check (e.g. SB1
	 * graduation timer)
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (regs->cp0_status & ST0_TS) ? "" : "not ");
}

asmlinkage void do_reserved(struct pt_regs *regs)
{
	/*
	 * Game over - no way to handle this if it ever occurs.  Most probably
	 * caused by a new unknown cpu type or after another deadly
	 * hard/software error.
	 */
	show_regs(regs);
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
}

/*
 * Some MIPS CPUs can enable/disable cache parity detection, but they
 * do it in different ways.
 */
static inline void parity_protection_init(void)
{
	switch (current_cpu_data.cputype) {
	case CPU_24K:
		/* 24K cache parity not currently implemented in FPGA */
		printk(KERN_INFO "Disable cache parity protection for "
		       "MIPS 24K CPU.\n");
		write_c0_ecc(read_c0_ecc() & ~0x80000000);
		break;
	case CPU_5KC:
		/* Set the PE bit (bit 31) in the c0_ecc register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 5KC CPUs.\n");
		write_c0_ecc(read_c0_ecc() | 0x80000000);
		break;
	case CPU_20KC:
	case CPU_25KF:
		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
		break;
	default:
		break;
	}
}

asmlinkage void cache_parity_error(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	printk("Error bits: %s%s%s%s%s%s%s\n",
	       reg_val & (1<<29) ? "ED " : "",
	       reg_val & (1<<28) ? "ET " : "",
	       reg_val & (1<<26) ? "EE " : "",
	       reg_val & (1<<25) ? "EB " : "",
	       reg_val & (1<<24) ? "EI " : "",
	       reg_val & (1<<23) ? "E1 " : "",
	       reg_val & (1<<22) ? "E0 " : "");
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

	panic("Can't handle the cache error!");
}

/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long depc, old_epc;
	unsigned int debug;

	printk("SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
	depc = read_c0_depc();
	debug = read_c0_debug();
	printk("c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
	if (debug & 0x80000000) {
		/*
		 * In branch delay slot.
		 * We cheat a little bit here and use EPC to calculate the
		 * debug return address (DEPC). EPC is restored after the
		 * calculation.
		 */
		old_epc = regs->cp0_epc;
		regs->cp0_epc = depc;
		__compute_return_epc(regs);
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;
	} else
		depc += 4;
	write_c0_depc(depc);

#if 0
	printk("\n\n----- Enable EJTAG single stepping ----\n\n");
	write_c0_debug(debug | 0x100);
#endif
}

/*
 * NMI exception handler.
 */
void nmi_exception_handler(struct pt_regs *regs)
{
	printk("NMI taken!!!!\n");
	die("NMI", regs);
	while (1);
}

unsigned long exception_handlers[32];

/*
 * As a side effect of the way this is implemented we're limited
 * to interrupt handlers in the address range from
 * KSEG0 <= x < KSEG0 + 256mb on the Nevada.  Oh well ...
 */
void *set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler = exception_handlers[n];

	exception_handlers[n] = handler;
	if (n == 0 && cpu_has_divec) {
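		/*
		 * Build a MIPS "j handler" instruction at the dedicated
		 * interrupt vector: 0x08000000 is the j opcode in bits
		 * 31..26, and the low 26 bits hold bits 27..2 of the
		 * target address - hence the 256MB limit noted above.
		 */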
		*(volatile u32 *)(CAC_BASE + 0x200) = 0x08000000 |
		                                 (0x03ffffff & (handler >> 2));
		flush_icache_range(CAC_BASE + 0x200, CAC_BASE + 0x204);
	}
	return (void *)old_handler;
}

/*
 * This is used by native signal handling
 */
asmlinkage int (*save_fp_context)(struct sigcontext *sc);
asmlinkage int (*restore_fp_context)(struct sigcontext *sc);

extern asmlinkage int _save_fp_context(struct sigcontext *sc);
extern asmlinkage int _restore_fp_context(struct sigcontext *sc);

extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc);
extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc);

static inline void signal_init(void)
{
	if (cpu_has_fpu) {
		save_fp_context = _save_fp_context;
		restore_fp_context = _restore_fp_context;
	} else {
		save_fp_context = fpu_emulator_save_context;
		restore_fp_context = fpu_emulator_restore_context;
	}
}

#ifdef CONFIG_MIPS32_COMPAT

/*
 * This is used by 32-bit signal stuff on the 64-bit kernel
 */
asmlinkage int (*save_fp_context32)(struct sigcontext32 *sc);
asmlinkage int (*restore_fp_context32)(struct sigcontext32 *sc);

extern asmlinkage int _save_fp_context32(struct sigcontext32 *sc);
extern asmlinkage int _restore_fp_context32(struct sigcontext32 *sc);

extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 *sc);
extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 *sc);

static inline void signal32_init(void)
{
	if (cpu_has_fpu) {
		save_fp_context32 = _save_fp_context32;
		restore_fp_context32 = _restore_fp_context32;
	} else {
		save_fp_context32 = fpu_emulator_save_context32;
		restore_fp_context32 = fpu_emulator_restore_context32;
	}
}
#endif

extern void cpu_cache_init(void);
extern void tlb_init(void);

void __init per_cpu_trap_init(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int status_set = ST0_CU0;

	/*
	 * Disable coprocessors and select 32-bit or 64-bit addressing
	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
	 * flag that some firmware may have left set and the TS bit (for
	 * IP27).  Set XX for ISA IV code to work.
	 */
#ifdef CONFIG_MIPS64
	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
	if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
		status_set |= ST0_XX;
	change_c0_status(ST0_CU|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
			 status_set);

	/*
	 * Some MIPS CPUs have a dedicated interrupt vector which reduces the
	 * interrupt processing overhead.  Use it where available.
	 */
	if (cpu_has_divec)
		set_c0_cause(CAUSEF_IV);

	cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
	TLBMISS_HANDLER_SETUP();

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	cpu_cache_init();
	tlb_init();
}

void __init trap_init(void)
{
	extern char except_vec3_generic, except_vec3_r4000;
	extern char except_vec_ejtag_debug;
	extern char except_vec4;
	unsigned long i;

	per_cpu_trap_init();

	/*
	 * Copy the generic exception handlers to their final destination.
	 * This will be overridden later as suitable for a particular
	 * configuration.
	 */
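	/* With BEV cleared the general exception vector sits at offset
	 * 0x180 from the exception base (CAC_BASE here); the dedicated
	 * interrupt vector used below is at 0x200 and the EJTAG debug
	 * stub is copied to 0x300.
	 */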
	memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80);

	/*
	 * Setup default vectors
	 */
	for (i = 0; i <= 31; i++)
		set_except_vector(i, handle_reserved);

	/*
	 * Copy the EJTAG debug exception vector handler code to its final
	 * destination.
	 */
	if (cpu_has_ejtag)
		memcpy((void *)(CAC_BASE + 0x300), &except_vec_ejtag_debug, 0x80);

	/*
	 * Only some CPUs have the watch exceptions.
	 */
	if (cpu_has_watch)
		set_except_vector(23, handle_watch);

	/*
	 * Some MIPS CPUs have a dedicated interrupt vector which reduces the
	 * interrupt processing overhead.  Use it where available.
	 */
	if (cpu_has_divec)
		memcpy((void *)(CAC_BASE + 0x200), &except_vec4, 0x8);

	/*
	 * Some CPUs can enable/disable cache parity detection, but they
	 * do it in different ways.
	 */
	parity_protection_init();

	/*
	 * The Data Bus Errors / Instruction Bus Errors are signaled
	 * by external hardware.  Therefore these two exceptions
	 * may have board specific handlers.
	 */
	if (board_be_init)
		board_be_init();

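	/*
	 * Wire up the architectural ExcCode slots: 1-3 TLB modified/
	 * load/store, 4-5 address error on load/store, 6-7 bus error,
	 * 8 syscall, 9 breakpoint, 10 reserved instruction, 11
	 * coprocessor unusable, 12 overflow, 13 trap and 22 MDMX.
	 */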
	set_except_vector(1, handle_tlbm);
	set_except_vector(2, handle_tlbl);
	set_except_vector(3, handle_tlbs);

	set_except_vector(4, handle_adel);
	set_except_vector(5, handle_ades);

	set_except_vector(6, handle_ibe);
	set_except_vector(7, handle_dbe);

	set_except_vector(8, handle_sys);
	set_except_vector(9, handle_bp);
	set_except_vector(10, handle_ri);
	set_except_vector(11, handle_cpu);
	set_except_vector(12, handle_ov);
	set_except_vector(13, handle_tr);
	set_except_vector(22, handle_mdmx);

	if (cpu_has_fpu && !cpu_has_nofpuex)
		set_except_vector(15, handle_fpe);

	if (cpu_has_mcheck)
		set_except_vector(24, handle_mcheck);

	if (cpu_has_vce)
		/* Special exception: R4[04]00 also uses the divec space. */
		memcpy((void *)(CAC_BASE + 0x180), &except_vec3_r4000, 0x100);
	else if (cpu_has_4kex)
		memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80);
	else
		memcpy((void *)(CAC_BASE + 0x080), &except_vec3_generic, 0x80);

	if (current_cpu_data.cputype == CPU_R6000 ||
	    current_cpu_data.cputype == CPU_R6000A) {
		/*
		 * The R6000 is the only R-series CPU that features a machine
		 * check exception (similar to the R4000 cache error) and
		 * unaligned ldc1/sdc1 exception.  The handlers have not been
		 * written yet.  Well, anyway there is no R6000 machine on the
		 * current list of targets for Linux/MIPS.
		 * (Duh, crap, there is someone with a triple R6k machine)
		 */
		//set_except_vector(14, handle_mc);
		//set_except_vector(15, handle_ndc);
	}

	signal_init();
#ifdef CONFIG_MIPS32_COMPAT
	signal32_init();
#endif

	flush_icache_range(CAC_BASE, CAC_BASE + 0x400);
}