xref: /openbmc/linux/arch/powerpc/kernel/traps.c (revision a09d2831)
1 /*
2  *  Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
3  *
4  *  This program is free software; you can redistribute it and/or
5  *  modify it under the terms of the GNU General Public License
6  *  as published by the Free Software Foundation; either version
7  *  2 of the License, or (at your option) any later version.
8  *
9  *  Modified by Cort Dougan (cort@cs.nmt.edu)
10  *  and Paul Mackerras (paulus@samba.org)
11  */
12 
13 /*
14  * This file handles the architecture-dependent parts of hardware exceptions
15  */
16 
17 #include <linux/errno.h>
18 #include <linux/sched.h>
19 #include <linux/kernel.h>
20 #include <linux/mm.h>
21 #include <linux/stddef.h>
22 #include <linux/unistd.h>
23 #include <linux/ptrace.h>
24 #include <linux/slab.h>
25 #include <linux/user.h>
26 #include <linux/interrupt.h>
27 #include <linux/init.h>
28 #include <linux/module.h>
29 #include <linux/prctl.h>
30 #include <linux/delay.h>
31 #include <linux/kprobes.h>
32 #include <linux/kexec.h>
33 #include <linux/backlight.h>
34 #include <linux/bug.h>
35 #include <linux/kdebug.h>
36 #include <linux/debugfs.h>
37 
38 #include <asm/emulated_ops.h>
39 #include <asm/pgtable.h>
40 #include <asm/uaccess.h>
41 #include <asm/system.h>
42 #include <asm/io.h>
43 #include <asm/machdep.h>
44 #include <asm/rtas.h>
45 #include <asm/pmc.h>
46 #ifdef CONFIG_PPC32
47 #include <asm/reg.h>
48 #endif
49 #ifdef CONFIG_PMAC_BACKLIGHT
50 #include <asm/backlight.h>
51 #endif
52 #ifdef CONFIG_PPC64
53 #include <asm/firmware.h>
54 #include <asm/processor.h>
55 #endif
56 #include <asm/kexec.h>
57 #include <asm/ppc-opcode.h>
58 #ifdef CONFIG_FSL_BOOKE
59 #include <asm/dbell.h>
60 #endif
61 
62 #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
63 int (*__debugger)(struct pt_regs *regs);
64 int (*__debugger_ipi)(struct pt_regs *regs);
65 int (*__debugger_bpt)(struct pt_regs *regs);
66 int (*__debugger_sstep)(struct pt_regs *regs);
67 int (*__debugger_iabr_match)(struct pt_regs *regs);
68 int (*__debugger_dabr_match)(struct pt_regs *regs);
69 int (*__debugger_fault_handler)(struct pt_regs *regs);
70 
71 EXPORT_SYMBOL(__debugger);
72 EXPORT_SYMBOL(__debugger_ipi);
73 EXPORT_SYMBOL(__debugger_bpt);
74 EXPORT_SYMBOL(__debugger_sstep);
75 EXPORT_SYMBOL(__debugger_iabr_match);
76 EXPORT_SYMBOL(__debugger_dabr_match);
77 EXPORT_SYMBOL(__debugger_fault_handler);
78 #endif
79 
80 /*
81  * Trap & Exception support
82  */
83 
84 #ifdef CONFIG_PMAC_BACKLIGHT
85 static void pmac_backlight_unblank(void)
86 {
87 	mutex_lock(&pmac_backlight_mutex);
88 	if (pmac_backlight) {
89 		struct backlight_properties *props;
90 
91 		props = &pmac_backlight->props;
92 		props->brightness = props->max_brightness;
93 		props->power = FB_BLANK_UNBLANK;
94 		backlight_update_status(pmac_backlight);
95 	}
96 	mutex_unlock(&pmac_backlight_mutex);
97 }
98 #else
99 static inline void pmac_backlight_unblank(void) { }
100 #endif
101 
102 int die(const char *str, struct pt_regs *regs, long err)
103 {
104 	static struct {
105 		spinlock_t lock;
106 		u32 lock_owner;
107 		int lock_owner_depth;
108 	} die = {
109 		.lock =			__SPIN_LOCK_UNLOCKED(die.lock),
110 		.lock_owner =		-1,
111 		.lock_owner_depth =	0
112 	};
113 	static int die_counter;
114 	unsigned long flags;
115 
116 	if (debugger(regs))
117 		return 1;
118 
119 	oops_enter();
120 
121 	if (die.lock_owner != raw_smp_processor_id()) {
122 		console_verbose();
123 		spin_lock_irqsave(&die.lock, flags);
124 		die.lock_owner = smp_processor_id();
125 		die.lock_owner_depth = 0;
126 		bust_spinlocks(1);
127 		if (machine_is(powermac))
128 			pmac_backlight_unblank();
129 	} else {
130 		local_save_flags(flags);
131 	}
132 
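	/* Print the full oops only for the first couple of nested die() calls on this CPU. */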
133 	if (++die.lock_owner_depth < 3) {
134 		printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
135 #ifdef CONFIG_PREEMPT
136 		printk("PREEMPT ");
137 #endif
138 #ifdef CONFIG_SMP
139 		printk("SMP NR_CPUS=%d ", NR_CPUS);
140 #endif
141 #ifdef CONFIG_DEBUG_PAGEALLOC
142 		printk("DEBUG_PAGEALLOC ");
143 #endif
144 #ifdef CONFIG_NUMA
145 		printk("NUMA ");
146 #endif
147 		printk("%s\n", ppc_md.name ? ppc_md.name : "");
148 
149 		print_modules();
150 		show_regs(regs);
151 	} else {
152 		printk("Recursive die() failure, output suppressed\n");
153 	}
154 
155 	bust_spinlocks(0);
156 	die.lock_owner = -1;
157 	add_taint(TAINT_DIE);
158 	spin_unlock_irqrestore(&die.lock, flags);
159 
160 	if (kexec_should_crash(current) ||
161 		kexec_sr_activated(smp_processor_id()))
162 		crash_kexec(regs);
163 	crash_kexec_secondary(regs);
164 
165 	if (in_interrupt())
166 		panic("Fatal exception in interrupt");
167 
168 	if (panic_on_oops)
169 		panic("Fatal exception");
170 
171 	oops_exit();
172 	do_exit(err);
173 
174 	return 0;
175 }
176 
177 void user_single_step_siginfo(struct task_struct *tsk,
178 				struct pt_regs *regs, siginfo_t *info)
179 {
180 	memset(info, 0, sizeof(*info));
181 	info->si_signo = SIGTRAP;
182 	info->si_code = TRAP_TRACE;
183 	info->si_addr = (void __user *)regs->nip;
184 }
185 
186 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
187 {
188 	siginfo_t info;
189 	const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
190 			"at %08lx nip %08lx lr %08lx code %x\n";
191 	const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
192 			"at %016lx nip %016lx lr %016lx code %x\n";
193 
194 	if (!user_mode(regs)) {
195 		if (die("Exception in kernel mode", regs, signr))
196 			return;
197 	} else if (show_unhandled_signals &&
198 		    unhandled_signal(current, signr) &&
199 		    printk_ratelimit()) {
200 			printk(regs->msr & MSR_SF ? fmt64 : fmt32,
201 				current->comm, current->pid, signr,
202 				addr, regs->nip, regs->link, code);
203 		}
204 
205 	memset(&info, 0, sizeof(info));
206 	info.si_signo = signr;
207 	info.si_code = code;
208 	info.si_addr = (void __user *) addr;
209 	force_sig_info(signr, &info, current);
210 }
211 
212 #ifdef CONFIG_PPC64
213 void system_reset_exception(struct pt_regs *regs)
214 {
215 	/* See if any machine-dependent code wants to handle this */
216 	if (ppc_md.system_reset_exception) {
217 		if (ppc_md.system_reset_exception(regs))
218 			return;
219 	}
220 
221 #ifdef CONFIG_KEXEC
222 	cpu_set(smp_processor_id(), cpus_in_sr);
223 #endif
224 
225 	die("System Reset", regs, SIGABRT);
226 
227 	/*
228 	 * Some CPUs when released from the debugger will execute this path.
229 	 * These CPUs entered the debugger via a soft-reset. If the CPU was
230 	 * hung before entering the debugger it will return to the hung
231 	 * state when exiting this function.  This causes a problem in
232 	 * kdump since the hung CPU(s) will not respond to the IPI sent
233 	 * from kdump. To prevent the problem we call crash_kexec_secondary()
234 	 * here. If a kdump had not been initiated or we exit the debugger
235 	 * with the "exit and recover" command (x), crash_kexec_secondary()
236 	 * will return after 5ms and the CPU returns to its previous state.
237 	 */
238 	crash_kexec_secondary(regs);
239 
240 	/* Must die if the interrupt is not recoverable */
241 	if (!(regs->msr & MSR_RI))
242 		panic("Unrecoverable System Reset");
243 
244 	/* What should we do here? We could issue a shutdown or hard reset. */
245 }
246 #endif
247 
248 /*
249  * I/O accesses can cause machine checks on powermacs.
250  * Check if the NIP corresponds to the address of a sync
251  * instruction for which there is an entry in the exception
252  * table.
253  * Note that the 601 only takes a machine check on TEA
254  * (transfer error ack) signal assertion, and does not
255  * set any of the top 16 bits of SRR1.
256  *  -- paulus.
257  */
258 static inline int check_io_access(struct pt_regs *regs)
259 {
260 #ifdef CONFIG_PPC32
261 	unsigned long msr = regs->msr;
262 	const struct exception_table_entry *entry;
263 	unsigned int *nip = (unsigned int *)regs->nip;
264 
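	/*
	 * A 601 leaves the upper SRR1 bits clear on a machine check; other
	 * 6xx parts report it via the MCP (0x80000) or TEA (0x40000) status
	 * bits decoded in machine_check_generic() below.
	 */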
265 	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
266 	    && (entry = search_exception_tables(regs->nip)) != NULL) {
267 		/*
268 		 * Check that it's a sync instruction, or somewhere
269 		 * in the twi; isync; nop sequence that inb/inw/inl uses.
270 		 * As the address is in the exception table
271 		 * we should be able to read the instr there.
272 		 * For the debug message, we look at the preceding
273 		 * load or store.
274 		 */
275 		if (*nip == 0x60000000)		/* nop */
276 			nip -= 2;
277 		else if (*nip == 0x4c00012c)	/* isync */
278 			--nip;
279 		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
280 			/* sync or twi */
281 			unsigned int rb;
282 
283 			--nip;
284 			rb = (*nip >> 11) & 0x1f;
285 			printk(KERN_DEBUG "%s bad port %lx at %p\n",
286 			       (*nip & 0x100)? "OUT to": "IN from",
287 			       regs->gpr[rb] - _IO_BASE, nip);
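			/* Resume at the exception-table fixup so the faulting I/O access is skipped. */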
288 			regs->msr |= MSR_RI;
289 			regs->nip = entry->fixup;
290 			return 1;
291 		}
292 	}
293 #endif /* CONFIG_PPC32 */
294 	return 0;
295 }
296 
297 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
298 /* On 4xx, the reason for the machine check or program exception
299    is in the ESR. */
300 #define get_reason(regs)	((regs)->dsisr)
301 #ifndef CONFIG_FSL_BOOKE
302 #define get_mc_reason(regs)	((regs)->dsisr)
303 #else
304 #define get_mc_reason(regs)	(mfspr(SPRN_MCSR) & MCSR_MASK)
305 #endif
306 #define REASON_FP		ESR_FP
307 #define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
308 #define REASON_PRIVILEGED	ESR_PPR
309 #define REASON_TRAP		ESR_PTR
310 
311 /* single-step stuff */
312 #define single_stepping(regs)	(current->thread.dbcr0 & DBCR0_IC)
313 #define clear_single_step(regs)	(current->thread.dbcr0 &= ~DBCR0_IC)
314 
315 #else
316 /* On non-4xx, the reason for the machine check or program
317    exception is in the MSR. */
318 #define get_reason(regs)	((regs)->msr)
319 #define get_mc_reason(regs)	((regs)->msr)
320 #define REASON_FP		0x100000
321 #define REASON_ILLEGAL		0x80000
322 #define REASON_PRIVILEGED	0x40000
323 #define REASON_TRAP		0x20000
324 
325 #define single_stepping(regs)	((regs)->msr & MSR_SE)
326 #define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
327 #endif
328 
329 #if defined(CONFIG_4xx)
330 int machine_check_4xx(struct pt_regs *regs)
331 {
332 	unsigned long reason = get_mc_reason(regs);
333 
334 	if (reason & ESR_IMCP) {
335 		printk("Instruction");
336 		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
337 	} else
338 		printk("Data");
339 	printk(" machine check in kernel mode.\n");
340 
341 	return 0;
342 }
343 
344 int machine_check_440A(struct pt_regs *regs)
345 {
346 	unsigned long reason = get_mc_reason(regs);
347 
348 	printk("Machine check in kernel mode.\n");
349 	if (reason & ESR_IMCP){
350 		printk("Instruction Synchronous Machine Check exception\n");
351 		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
352 	}
353 	else {
354 		u32 mcsr = mfspr(SPRN_MCSR);
355 		if (mcsr & MCSR_IB)
356 			printk("Instruction Read PLB Error\n");
357 		if (mcsr & MCSR_DRB)
358 			printk("Data Read PLB Error\n");
359 		if (mcsr & MCSR_DWB)
360 			printk("Data Write PLB Error\n");
361 		if (mcsr & MCSR_TLBP)
362 			printk("TLB Parity Error\n");
363 		if (mcsr & MCSR_ICP){
364 			flush_instruction_cache();
365 			printk("I-Cache Parity Error\n");
366 		}
367 		if (mcsr & MCSR_DCSP)
368 			printk("D-Cache Search Parity Error\n");
369 		if (mcsr & MCSR_DCFP)
370 			printk("D-Cache Flush Parity Error\n");
371 		if (mcsr & MCSR_IMPE)
372 			printk("Machine Check exception is imprecise\n");
373 
374 		/* Clear MCSR */
375 		mtspr(SPRN_MCSR, mcsr);
376 	}
377 	return 0;
378 }
379 #elif defined(CONFIG_E500)
380 int machine_check_e500(struct pt_regs *regs)
381 {
382 	unsigned long reason = get_mc_reason(regs);
383 
384 	printk("Machine check in kernel mode.\n");
385 	printk("Caused by (from MCSR=%lx): ", reason);
386 
387 	if (reason & MCSR_MCP)
388 		printk("Machine Check Signal\n");
389 	if (reason & MCSR_ICPERR)
390 		printk("Instruction Cache Parity Error\n");
391 	if (reason & MCSR_DCP_PERR)
392 		printk("Data Cache Push Parity Error\n");
393 	if (reason & MCSR_DCPERR)
394 		printk("Data Cache Parity Error\n");
395 	if (reason & MCSR_BUS_IAERR)
396 		printk("Bus - Instruction Address Error\n");
397 	if (reason & MCSR_BUS_RAERR)
398 		printk("Bus - Read Address Error\n");
399 	if (reason & MCSR_BUS_WAERR)
400 		printk("Bus - Write Address Error\n");
401 	if (reason & MCSR_BUS_IBERR)
402 		printk("Bus - Instruction Data Error\n");
403 	if (reason & MCSR_BUS_RBERR)
404 		printk("Bus - Read Data Bus Error\n");
405 	if (reason & MCSR_BUS_WBERR)
406 		printk("Bus - Write Data Bus Error\n");
407 	if (reason & MCSR_BUS_IPERR)
408 		printk("Bus - Instruction Parity Error\n");
409 	if (reason & MCSR_BUS_RPERR)
410 		printk("Bus - Read Parity Error\n");
411 
412 	return 0;
413 }
414 #elif defined(CONFIG_E200)
415 int machine_check_e200(struct pt_regs *regs)
416 {
417 	unsigned long reason = get_mc_reason(regs);
418 
419 	printk("Machine check in kernel mode.\n");
420 	printk("Caused by (from MCSR=%lx): ", reason);
421 
422 	if (reason & MCSR_MCP)
423 		printk("Machine Check Signal\n");
424 	if (reason & MCSR_CP_PERR)
425 		printk("Cache Push Parity Error\n");
426 	if (reason & MCSR_CPERR)
427 		printk("Cache Parity Error\n");
428 	if (reason & MCSR_EXCP_ERR)
429 		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
430 	if (reason & MCSR_BUS_IRERR)
431 		printk("Bus - Read Bus Error on instruction fetch\n");
432 	if (reason & MCSR_BUS_DRERR)
433 		printk("Bus - Read Bus Error on data load\n");
434 	if (reason & MCSR_BUS_WRERR)
435 		printk("Bus - Write Bus Error on buffered store or cache line push\n");
436 
437 	return 0;
438 }
439 #else
440 int machine_check_generic(struct pt_regs *regs)
441 {
442 	unsigned long reason = get_mc_reason(regs);
443 
444 	printk("Machine check in kernel mode.\n");
445 	printk("Caused by (from SRR1=%lx): ", reason);
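	/* Decode the SRR1 machine-check status bits (mask 0x601F0000). */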
446 	switch (reason & 0x601F0000) {
447 	case 0x80000:
448 		printk("Machine check signal\n");
449 		break;
450 	case 0:		/* for 601 */
451 	case 0x40000:
452 	case 0x140000:	/* 7450 MSS error and TEA */
453 		printk("Transfer error ack signal\n");
454 		break;
455 	case 0x20000:
456 		printk("Data parity error signal\n");
457 		break;
458 	case 0x10000:
459 		printk("Address parity error signal\n");
460 		break;
461 	case 0x20000000:
462 		printk("L1 Data Cache error\n");
463 		break;
464 	case 0x40000000:
465 		printk("L1 Instruction Cache error\n");
466 		break;
467 	case 0x00100000:
468 		printk("L2 data cache parity error\n");
469 		break;
470 	default:
471 		printk("Unknown values in msr\n");
472 	}
473 	return 0;
474 }
475 #endif /* everything else */
476 
477 void machine_check_exception(struct pt_regs *regs)
478 {
479 	int recover = 0;
480 
481 	/* See if any machine-dependent code wants to handle this. In theory,
482 	 * we would want to call the CPU first, and call the ppc_md. one if the CPU
483 	 * one returns a positive number. However there is existing code
484 	 * that assumes the board gets a first chance, so let's keep it
485 	 * that way for now and fix things later. --BenH.
486 	 */
487 	if (ppc_md.machine_check_exception)
488 		recover = ppc_md.machine_check_exception(regs);
489 	else if (cur_cpu_spec->machine_check)
490 		recover = cur_cpu_spec->machine_check(regs);
491 
492 	if (recover > 0)
493 		return;
494 
495 	if (user_mode(regs)) {
496 		regs->msr |= MSR_RI;
497 		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
498 		return;
499 	}
500 
501 #if defined(CONFIG_8xx) && defined(CONFIG_PCI)
502 	/* the qspan pci read routines can cause machine checks -- Cort
503 	 *
504 	 * yuck !!! that totally needs to go away ! There are better ways
505 	 * to deal with that than having a wart in the mcheck handler.
506 	 * -- BenH
507 	 */
508 	bad_page_fault(regs, regs->dar, SIGBUS);
509 	return;
510 #endif
511 
512 	if (debugger_fault_handler(regs)) {
513 		regs->msr |= MSR_RI;
514 		return;
515 	}
516 
517 	if (check_io_access(regs))
518 		return;
519 
520 	if (debugger_fault_handler(regs))
521 		return;
522 	die("Machine check", regs, SIGBUS);
523 
524 	/* Must die if the interrupt is not recoverable */
525 	if (!(regs->msr & MSR_RI))
526 		panic("Unrecoverable Machine check");
527 }
528 
529 void SMIException(struct pt_regs *regs)
530 {
531 	die("System Management Interrupt", regs, SIGABRT);
532 }
533 
534 void unknown_exception(struct pt_regs *regs)
535 {
536 	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
537 	       regs->nip, regs->msr, regs->trap);
538 
539 	_exception(SIGTRAP, regs, 0, 0);
540 }
541 
542 void instruction_breakpoint_exception(struct pt_regs *regs)
543 {
544 	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
545 					5, SIGTRAP) == NOTIFY_STOP)
546 		return;
547 	if (debugger_iabr_match(regs))
548 		return;
549 	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
550 }
551 
552 void RunModeException(struct pt_regs *regs)
553 {
554 	_exception(SIGTRAP, regs, 0, 0);
555 }
556 
557 void __kprobes single_step_exception(struct pt_regs *regs)
558 {
559 	regs->msr &= ~(MSR_SE | MSR_BE);  /* Turn off 'trace' bits */
560 
561 	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
562 					5, SIGTRAP) == NOTIFY_STOP)
563 		return;
564 	if (debugger_sstep(regs))
565 		return;
566 
567 	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
568 }
569 
570 /*
571  * After we have successfully emulated an instruction, we have to
572  * check if the instruction was being single-stepped, and if so,
573  * pretend we got a single-step exception.  This was pointed out
574  * by Kumar Gala.  -- paulus
575  */
576 static void emulate_single_step(struct pt_regs *regs)
577 {
578 	if (single_stepping(regs)) {
579 		clear_single_step(regs);
580 		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
581 	}
582 }
583 
584 static inline int __parse_fpscr(unsigned long fpscr)
585 {
586 	int ret = 0;
587 
588 	/* Invalid operation */
589 	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
590 		ret = FPE_FLTINV;
591 
592 	/* Overflow */
593 	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
594 		ret = FPE_FLTOVF;
595 
596 	/* Underflow */
597 	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
598 		ret = FPE_FLTUND;
599 
600 	/* Divide by zero */
601 	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
602 		ret = FPE_FLTDIV;
603 
604 	/* Inexact result */
605 	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
606 		ret = FPE_FLTRES;
607 
608 	return ret;
609 }
610 
611 static void parse_fpe(struct pt_regs *regs)
612 {
613 	int code = 0;
614 
615 	flush_fp_to_thread(current);
616 
617 	code = __parse_fpscr(current->thread.fpscr.val);
618 
619 	_exception(SIGFPE, regs, code, regs->nip);
620 }
621 
622 /*
623  * Illegal instruction emulation support.  Originally written to
624  * provide the PVR to user applications using the mfspr rd, PVR.
625  * Return non-zero if we can't emulate, or -EFAULT if the associated
626  * memory access caused an access fault.  Return zero on success.
627  *
628  * There are a couple of ways to do this, either "decode" the instruction
629  * or directly match lots of bits.  In this case, matching lots of
630  * bits is faster and easier.
631  *
632  */
633 static int emulate_string_inst(struct pt_regs *regs, u32 instword)
634 {
635 	u8 rT = (instword >> 21) & 0x1f;
636 	u8 rA = (instword >> 16) & 0x1f;
637 	u8 NB_RB = (instword >> 11) & 0x1f;
638 	u32 num_bytes;
639 	unsigned long EA;
640 	int pos = 0;
641 
642 	/* Early out if we are an invalid form of lswx */
643 	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
644 		if ((rT == rA) || (rT == NB_RB))
645 			return -EINVAL;
646 
647 	EA = (rA == 0) ? 0 : regs->gpr[rA];
648 
649 	switch (instword & PPC_INST_STRING_MASK) {
650 		case PPC_INST_LSWX:
651 		case PPC_INST_STSWX:
652 			EA += NB_RB;
653 			num_bytes = regs->xer & 0x7f;
654 			break;
655 		case PPC_INST_LSWI:
656 		case PPC_INST_STSWI:
657 			num_bytes = (NB_RB == 0) ? 32 : NB_RB;
658 			break;
659 		default:
660 			return -EINVAL;
661 	}
662 
663 	while (num_bytes != 0)
664 	{
665 		u8 val;
666 		u32 shift = 8 * (3 - (pos & 0x3));
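		/* String bytes fill each register starting at its most-significant byte. */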
667 
668 		switch ((instword & PPC_INST_STRING_MASK)) {
669 			case PPC_INST_LSWX:
670 			case PPC_INST_LSWI:
671 				if (get_user(val, (u8 __user *)EA))
672 					return -EFAULT;
673 				/* first time updating this reg,
674 				 * zero it out */
675 				if (pos == 0)
676 					regs->gpr[rT] = 0;
677 				regs->gpr[rT] |= val << shift;
678 				break;
679 			case PPC_INST_STSWI:
680 			case PPC_INST_STSWX:
681 				val = regs->gpr[rT] >> shift;
682 				if (put_user(val, (u8 __user *)EA))
683 					return -EFAULT;
684 				break;
685 		}
686 		/* move EA to next address */
687 		EA += 1;
688 		num_bytes--;
689 
690 		/* manage our position within the register */
691 		if (++pos == 4) {
692 			pos = 0;
693 			if (++rT == 32)
694 				rT = 0;
695 		}
696 	}
697 
698 	return 0;
699 }
700 
701 static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
702 {
703 	u32 ra,rs;
704 	unsigned long tmp;
705 
706 	ra = (instword >> 16) & 0x1f;
707 	rs = (instword >> 21) & 0x1f;
708 
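	/*
	 * popcntb counts the set bits in each byte of rS and places the
	 * per-byte counts in the corresponding bytes of rA.  The three
	 * steps below are the usual parallel bit count: add adjacent bit
	 * pairs, then 4-bit groups, then bytes.
	 */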
709 	tmp = regs->gpr[rs];
710 	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
711 	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
712 	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
713 	regs->gpr[ra] = tmp;
714 
715 	return 0;
716 }
717 
718 static int emulate_isel(struct pt_regs *regs, u32 instword)
719 {
720 	u8 rT = (instword >> 21) & 0x1f;
721 	u8 rA = (instword >> 16) & 0x1f;
722 	u8 rB = (instword >> 11) & 0x1f;
723 	u8 BC = (instword >> 6) & 0x1f;
724 	u8 bit;
725 	unsigned long tmp;
726 
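	/* isel: rT = (CR bit BC set) ? rA value (or 0 when rA is r0) : rB */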
727 	tmp = (rA == 0) ? 0 : regs->gpr[rA];
728 	bit = (regs->ccr >> (31 - BC)) & 0x1;
729 
730 	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];
731 
732 	return 0;
733 }
734 
735 static int emulate_instruction(struct pt_regs *regs)
736 {
737 	u32 instword;
738 	u32 rd;
739 
740 	if (!user_mode(regs) || (regs->msr & MSR_LE))
741 		return -EINVAL;
742 	CHECK_FULL_REGS(regs);
743 
744 	if (get_user(instword, (u32 __user *)(regs->nip)))
745 		return -EFAULT;
746 
747 	/* Emulate the mfspr rD, PVR. */
748 	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
749 		PPC_WARN_EMULATED(mfpvr, regs);
750 		rd = (instword >> 21) & 0x1f;
751 		regs->gpr[rd] = mfspr(SPRN_PVR);
752 		return 0;
753 	}
754 
755 	/* Emulating the dcba insn is just a no-op.  */
756 	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
757 		PPC_WARN_EMULATED(dcba, regs);
758 		return 0;
759 	}
760 
761 	/* Emulate the mcrxr insn.  */
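	/* It copies the four high XER bits (SO, OV, CA) into CR field
	 * crfD and then clears them in the XER. */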
762 	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
763 		int shift = (instword >> 21) & 0x1c;
764 		unsigned long msk = 0xf0000000UL >> shift;
765 
766 		PPC_WARN_EMULATED(mcrxr, regs);
767 		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
768 		regs->xer &= ~0xf0000000UL;
769 		return 0;
770 	}
771 
772 	/* Emulate load/store string insn. */
773 	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
774 		PPC_WARN_EMULATED(string, regs);
775 		return emulate_string_inst(regs, instword);
776 	}
777 
778 	/* Emulate the popcntb (Population Count Bytes) instruction. */
779 	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
780 		PPC_WARN_EMULATED(popcntb, regs);
781 		return emulate_popcntb_inst(regs, instword);
782 	}
783 
784 	/* Emulate isel (Integer Select) instruction */
785 	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
786 		PPC_WARN_EMULATED(isel, regs);
787 		return emulate_isel(regs, instword);
788 	}
789 
790 	return -EINVAL;
791 }
792 
793 int is_valid_bugaddr(unsigned long addr)
794 {
795 	return is_kernel_addr(addr);
796 }
797 
798 void __kprobes program_check_exception(struct pt_regs *regs)
799 {
800 	unsigned int reason = get_reason(regs);
801 	extern int do_mathemu(struct pt_regs *regs);
802 
803 	/* We can now get here via a FP Unavailable exception if the core
804 	 * has no FPU; in that case the reason flags will be 0 */
805 
806 	if (reason & REASON_FP) {
807 		/* IEEE FP exception */
808 		parse_fpe(regs);
809 		return;
810 	}
811 	if (reason & REASON_TRAP) {
812 		/* trap exception */
813 		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
814 				== NOTIFY_STOP)
815 			return;
816 		if (debugger_bpt(regs))
817 			return;
818 
819 		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
820 		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
821 			regs->nip += 4;
822 			return;
823 		}
824 		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
825 		return;
826 	}
827 
828 	local_irq_enable();
829 
830 #ifdef CONFIG_MATH_EMULATION
831 	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
832 	 * but there seems to be a hardware bug on the 405GP (RevD)
833 	 * that means ESR is sometimes set incorrectly - either to
834 	 * ESR_DST (!?) or 0.  In the process of chasing this with the
835 	 * hardware people - not sure if it can happen on any illegal
836 	 * instruction or only on FP instructions, whether there is a
837 	 * pattern to occurrences etc. -dgibson 31/Mar/2003 */
838 	switch (do_mathemu(regs)) {
839 	case 0:
840 		emulate_single_step(regs);
841 		return;
842 	case 1: {
843 			int code = 0;
844 			code = __parse_fpscr(current->thread.fpscr.val);
845 			_exception(SIGFPE, regs, code, regs->nip);
846 			return;
847 		}
848 	case -EFAULT:
849 		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
850 		return;
851 	}
852 	/* fall through on any other errors */
853 #endif /* CONFIG_MATH_EMULATION */
854 
855 	/* Try to emulate it if we should. */
856 	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
857 		switch (emulate_instruction(regs)) {
858 		case 0:
859 			regs->nip += 4;
860 			emulate_single_step(regs);
861 			return;
862 		case -EFAULT:
863 			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
864 			return;
865 		}
866 	}
867 
868 	if (reason & REASON_PRIVILEGED)
869 		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
870 	else
871 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
872 }
873 
874 void alignment_exception(struct pt_regs *regs)
875 {
876 	int sig, code, fixed = 0;
877 
878 	/* we don't implement logging of alignment exceptions */
879 	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
880 		fixed = fix_alignment(regs);
881 
882 	if (fixed == 1) {
883 		regs->nip += 4;	/* skip over emulated instruction */
884 		emulate_single_step(regs);
885 		return;
886 	}
887 
888 	/* Operand address was bad */
889 	if (fixed == -EFAULT) {
890 		sig = SIGSEGV;
891 		code = SEGV_ACCERR;
892 	} else {
893 		sig = SIGBUS;
894 		code = BUS_ADRALN;
895 	}
896 	if (user_mode(regs))
897 		_exception(sig, regs, code, regs->dar);
898 	else
899 		bad_page_fault(regs, regs->dar, sig);
900 }
901 
902 void StackOverflow(struct pt_regs *regs)
903 {
904 	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
905 	       current, regs->gpr[1]);
906 	debugger(regs);
907 	show_regs(regs);
908 	panic("kernel stack overflow");
909 }
910 
911 void nonrecoverable_exception(struct pt_regs *regs)
912 {
913 	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
914 	       regs->nip, regs->msr);
915 	debugger(regs);
916 	die("nonrecoverable exception", regs, SIGKILL);
917 }
918 
919 void trace_syscall(struct pt_regs *regs)
920 {
921 	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld    %s\n",
922 	       current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0],
923 	       regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
924 }
925 
926 void kernel_fp_unavailable_exception(struct pt_regs *regs)
927 {
928 	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
929 			  "%lx at %lx\n", regs->trap, regs->nip);
930 	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
931 }
932 
933 void altivec_unavailable_exception(struct pt_regs *regs)
934 {
935 	if (user_mode(regs)) {
936 		/* A user program has executed an altivec instruction,
937 		   but this kernel doesn't support altivec. */
938 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
939 		return;
940 	}
941 
942 	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
943 			"%lx at %lx\n", regs->trap, regs->nip);
944 	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
945 }
946 
947 void vsx_unavailable_exception(struct pt_regs *regs)
948 {
949 	if (user_mode(regs)) {
950 		/* A user program has executed a vsx instruction,
951 		   but this kernel doesn't support vsx. */
952 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
953 		return;
954 	}
955 
956 	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
957 			"%lx at %lx\n", regs->trap, regs->nip);
958 	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
959 }
960 
961 void performance_monitor_exception(struct pt_regs *regs)
962 {
963 	perf_irq(regs);
964 }
965 
966 #ifdef CONFIG_8xx
967 void SoftwareEmulation(struct pt_regs *regs)
968 {
969 	extern int do_mathemu(struct pt_regs *);
970 	extern int Soft_emulate_8xx(struct pt_regs *);
971 #if defined(CONFIG_MATH_EMULATION) || defined(CONFIG_8XX_MINIMAL_FPEMU)
972 	int errcode;
973 #endif
974 
975 	CHECK_FULL_REGS(regs);
976 
977 	if (!user_mode(regs)) {
978 		debugger(regs);
979 		die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
980 	}
981 
982 #ifdef CONFIG_MATH_EMULATION
983 	errcode = do_mathemu(regs);
984 	if (errcode >= 0)
985 		PPC_WARN_EMULATED(math, regs);
986 
987 	switch (errcode) {
988 	case 0:
989 		emulate_single_step(regs);
990 		return;
991 	case 1: {
992 			int code = 0;
993 			code = __parse_fpscr(current->thread.fpscr.val);
994 			_exception(SIGFPE, regs, code, regs->nip);
995 			return;
996 		}
997 	case -EFAULT:
998 		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
999 		return;
1000 	default:
1001 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1002 		return;
1003 	}
1004 
1005 #elif defined(CONFIG_8XX_MINIMAL_FPEMU)
1006 	errcode = Soft_emulate_8xx(regs);
1007 	if (errcode >= 0)
1008 		PPC_WARN_EMULATED(8xx, regs);
1009 
1010 	switch (errcode) {
1011 	case 0:
1012 		emulate_single_step(regs);
1013 		return;
1014 	case 1:
1015 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1016 		return;
1017 	case -EFAULT:
1018 		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1019 		return;
1020 	}
1021 #else
1022 	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1023 #endif
1024 }
1025 #endif /* CONFIG_8xx */
1026 
1027 #if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
1028 
1029 void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
1030 {
1031 	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
1032 	 * on server, it stops on the target of the branch. In order to simulate
1033 	 * the server behaviour, we thus restart right away with a single step
1034 	 * instead of stopping here when hitting a BT
1035 	 */
1036 	if (debug_status & DBSR_BT) {
1037 		regs->msr &= ~MSR_DE;
1038 
1039 		/* Disable BT */
1040 		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
1041 		/* Clear the BT event */
1042 		mtspr(SPRN_DBSR, DBSR_BT);
1043 
1044 		/* Do the single step trick only when coming from userspace */
1045 		if (user_mode(regs)) {
1046 			current->thread.dbcr0 &= ~DBCR0_BT;
1047 			current->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
1048 			regs->msr |= MSR_DE;
1049 			return;
1050 		}
1051 
1052 		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
1053 			       5, SIGTRAP) == NOTIFY_STOP) {
1054 			return;
1055 		}
1056 		if (debugger_sstep(regs))
1057 			return;
1058 	} else if (debug_status & DBSR_IC) { 	/* Instruction complete */
1059 		regs->msr &= ~MSR_DE;
1060 
1061 		/* Disable instruction completion */
1062 		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
1063 		/* Clear the instruction completion event */
1064 		mtspr(SPRN_DBSR, DBSR_IC);
1065 
1066 		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
1067 			       5, SIGTRAP) == NOTIFY_STOP) {
1068 			return;
1069 		}
1070 
1071 		if (debugger_sstep(regs))
1072 			return;
1073 
1074 		if (user_mode(regs))
1075 			current->thread.dbcr0 &= ~(DBCR0_IC);
1076 
1077 		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
1078 	} else if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
1079 		regs->msr &= ~MSR_DE;
1080 
1081 		if (user_mode(regs)) {
1082 			current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W |
1083 								DBCR0_IDM);
1084 		} else {
1085 			/* Disable DAC interrupts */
1086 			mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R |
1087 						DBSR_DAC1W | DBCR0_IDM));
1088 
1089 			/* Clear the DAC event */
1090 			mtspr(SPRN_DBSR, (DBSR_DAC1R | DBSR_DAC1W));
1091 		}
1092 		/* Set up and send the trap to the handler */
1093 		do_dabr(regs, mfspr(SPRN_DAC1), debug_status);
1094 	}
1095 }
1096 #endif /* CONFIG_40x || CONFIG_BOOKE */
1097 
1098 #if !defined(CONFIG_TAU_INT)
1099 void TAUException(struct pt_regs *regs)
1100 {
1101 	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
1102 	       regs->nip, regs->msr, regs->trap, print_tainted());
1103 }
1104 #endif /* CONFIG_TAU_INT */
1105 
1106 #ifdef CONFIG_ALTIVEC
1107 void altivec_assist_exception(struct pt_regs *regs)
1108 {
1109 	int err;
1110 
1111 	if (!user_mode(regs)) {
1112 		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
1113 		       " at %lx\n", regs->nip);
1114 		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
1115 	}
1116 
1117 	flush_altivec_to_thread(current);
1118 
1119 	PPC_WARN_EMULATED(altivec, regs);
1120 	err = emulate_altivec(regs);
1121 	if (err == 0) {
1122 		regs->nip += 4;		/* skip emulated instruction */
1123 		emulate_single_step(regs);
1124 		return;
1125 	}
1126 
1127 	if (err == -EFAULT) {
1128 		/* got an error reading the instruction */
1129 		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
1130 	} else {
1131 		/* didn't recognize the instruction */
1132 		/* XXX quick hack for now: set the non-Java bit in the VSCR */
1133 		if (printk_ratelimit())
1134 			printk(KERN_ERR "Unrecognized altivec instruction "
1135 			       "in %s at %lx\n", current->comm, regs->nip);
1136 		current->thread.vscr.u[3] |= 0x10000;
1137 	}
1138 }
1139 #endif /* CONFIG_ALTIVEC */
1140 
1141 #ifdef CONFIG_VSX
1142 void vsx_assist_exception(struct pt_regs *regs)
1143 {
1144 	if (!user_mode(regs)) {
1145 		printk(KERN_EMERG "VSX assist exception in kernel mode"
1146 		       " at %lx\n", regs->nip);
1147 		die("Kernel VSX assist exception", regs, SIGILL);
1148 	}
1149 
1150 	flush_vsx_to_thread(current);
1151 	printk(KERN_INFO "VSX assist not supported at %lx\n", regs->nip);
1152 	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1153 }
1154 #endif /* CONFIG_VSX */
1155 
1156 #ifdef CONFIG_FSL_BOOKE
1157 
1158 void doorbell_exception(struct pt_regs *regs)
1159 {
1160 #ifdef CONFIG_SMP
1161 	int cpu = smp_processor_id();
1162 	int msg;
1163 
1164 	if (num_online_cpus() < 2)
1165 		return;
1166 
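	/* Deliver any IPI messages posted to this CPU via the doorbell. */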
1167 	for (msg = 0; msg < 4; msg++)
1168 		if (test_and_clear_bit(msg, &dbell_smp_message[cpu]))
1169 			smp_message_recv(msg);
1170 #else
1171 	printk(KERN_WARNING "Received doorbell on non-smp system\n");
1172 #endif
1173 }
1174 
1175 void CacheLockingException(struct pt_regs *regs, unsigned long address,
1176 			   unsigned long error_code)
1177 {
1178 	/* We treat cache locking instructions from the user
1179 	 * as priv ops; in the future we could try to do
1180 	 * something smarter
1181 	 */
1182 	if (error_code & (ESR_DLK|ESR_ILK))
1183 		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
1184 	return;
1185 }
1186 #endif /* CONFIG_FSL_BOOKE */
1187 
1188 #ifdef CONFIG_SPE
1189 void SPEFloatingPointException(struct pt_regs *regs)
1190 {
1191 	extern int do_spe_mathemu(struct pt_regs *regs);
1192 	unsigned long spefscr;
1193 	int fpexc_mode;
1194 	int code = 0;
1195 	int err;
1196 
1197 	preempt_disable();
1198 	if (regs->msr & MSR_SPE)
1199 		giveup_spe(current);
1200 	preempt_enable();
1201 
1202 	spefscr = current->thread.spefscr;
1203 	fpexc_mode = current->thread.fpexc_mode;
1204 
1205 	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
1206 		code = FPE_FLTOVF;
1207 	}
1208 	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
1209 		code = FPE_FLTUND;
1210 	}
1211 	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
1212 		code = FPE_FLTDIV;
1213 	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
1214 		code = FPE_FLTINV;
1215 	}
1216 	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
1217 		code = FPE_FLTRES;
1218 
1219 	err = do_spe_mathemu(regs);
1220 	if (err == 0) {
1221 		regs->nip += 4;		/* skip emulated instruction */
1222 		emulate_single_step(regs);
1223 		return;
1224 	}
1225 
1226 	if (err == -EFAULT) {
1227 		/* got an error reading the instruction */
1228 		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
1229 	} else if (err == -EINVAL) {
1230 		/* didn't recognize the instruction */
1231 		printk(KERN_ERR "unrecognized spe instruction "
1232 		       "in %s at %lx\n", current->comm, regs->nip);
1233 	} else {
1234 		_exception(SIGFPE, regs, code, regs->nip);
1235 	}
1236 
1237 	return;
1238 }
1239 
1240 void SPEFloatingPointRoundException(struct pt_regs *regs)
1241 {
1242 	extern int speround_handler(struct pt_regs *regs);
1243 	int err;
1244 
1245 	preempt_disable();
1246 	if (regs->msr & MSR_SPE)
1247 		giveup_spe(current);
1248 	preempt_enable();
1249 
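	/* nip points past the instruction that raised the round exception;
	 * back it up so speround_handler() can decode it. */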
1250 	regs->nip -= 4;
1251 	err = speround_handler(regs);
1252 	if (err == 0) {
1253 		regs->nip += 4;		/* skip emulated instruction */
1254 		emulate_single_step(regs);
1255 		return;
1256 	}
1257 
1258 	if (err == -EFAULT) {
1259 		/* got an error reading the instruction */
1260 		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
1261 	} else if (err == -EINVAL) {
1262 		/* didn't recognize the instruction */
1263 		printk(KERN_ERR "unrecognized spe instruction "
1264 		       "in %s at %lx\n", current->comm, regs->nip);
1265 	} else {
1266 		_exception(SIGFPE, regs, 0, regs->nip);
1267 		return;
1268 	}
1269 }
1270 #endif
1271 
1272 /*
1273  * We enter here if we get an unrecoverable exception, that is, one
1274  * that happened at a point where the RI (recoverable interrupt) bit
1275  * in the MSR is 0.  This indicates that SRR0/1 are live, and that
1276  * we therefore lost state by taking this exception.
1277  */
1278 void unrecoverable_exception(struct pt_regs *regs)
1279 {
1280 	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
1281 	       regs->trap, regs->nip);
1282 	die("Unrecoverable exception", regs, SIGABRT);
1283 }
1284 
1285 #ifdef CONFIG_BOOKE_WDT
1286 /*
1287  * Default handler for a Watchdog exception,
1288  * spins until a reboot occurs
1289  */
1290 void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
1291 {
1292 	/* Generic WatchdogHandler, implement your own */
1293 	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
1294 	return;
1295 }
1296 
1297 void WatchdogException(struct pt_regs *regs)
1298 {
1299 	printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
1300 	WatchdogHandler(regs);
1301 }
1302 #endif
1303 
1304 /*
1305  * We enter here if we discover during exception entry that we are
1306  * running in supervisor mode with a userspace value in the stack pointer.
1307  */
1308 void kernel_bad_stack(struct pt_regs *regs)
1309 {
1310 	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
1311 	       regs->gpr[1], regs->nip);
1312 	die("Bad kernel stack pointer", regs, SIGABRT);
1313 }
1314 
1315 void __init trap_init(void)
1316 {
1317 }
1318 
1319 
1320 #ifdef CONFIG_PPC_EMULATED_STATS
1321 
1322 #define WARN_EMULATED_SETUP(type)	.type = { .name = #type }
1323 
1324 struct ppc_emulated ppc_emulated = {
1325 #ifdef CONFIG_ALTIVEC
1326 	WARN_EMULATED_SETUP(altivec),
1327 #endif
1328 	WARN_EMULATED_SETUP(dcba),
1329 	WARN_EMULATED_SETUP(dcbz),
1330 	WARN_EMULATED_SETUP(fp_pair),
1331 	WARN_EMULATED_SETUP(isel),
1332 	WARN_EMULATED_SETUP(mcrxr),
1333 	WARN_EMULATED_SETUP(mfpvr),
1334 	WARN_EMULATED_SETUP(multiple),
1335 	WARN_EMULATED_SETUP(popcntb),
1336 	WARN_EMULATED_SETUP(spe),
1337 	WARN_EMULATED_SETUP(string),
1338 	WARN_EMULATED_SETUP(unaligned),
1339 #ifdef CONFIG_MATH_EMULATION
1340 	WARN_EMULATED_SETUP(math),
1341 #elif defined(CONFIG_8XX_MINIMAL_FPEMU)
1342 	WARN_EMULATED_SETUP(8xx),
1343 #endif
1344 #ifdef CONFIG_VSX
1345 	WARN_EMULATED_SETUP(vsx),
1346 #endif
1347 };
1348 
1349 u32 ppc_warn_emulated;
1350 
1351 void ppc_warn_emulated_print(const char *type)
1352 {
1353 	if (printk_ratelimit())
1354 		pr_warning("%s used emulated %s instruction\n", current->comm,
1355 			   type);
1356 }
1357 
1358 static int __init ppc_warn_emulated_init(void)
1359 {
1360 	struct dentry *dir, *d;
1361 	unsigned int i;
1362 	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;
1363 
1364 	if (!powerpc_debugfs_root)
1365 		return -ENODEV;
1366 
1367 	dir = debugfs_create_dir("emulated_instructions",
1368 				 powerpc_debugfs_root);
1369 	if (!dir)
1370 		return -ENOMEM;
1371 
1372 	d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
1373 			       &ppc_warn_emulated);
1374 	if (!d)
1375 		goto fail;
1376 
1377 	for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
1378 		d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
1379 				       (u32 *)&entries[i].val.counter);
1380 		if (!d)
1381 			goto fail;
1382 	}
1383 
1384 	return 0;
1385 
1386 fail:
1387 	debugfs_remove_recursive(dir);
1388 	return -ENOMEM;
1389 }
1390 
1391 device_initcall(ppc_warn_emulated_init);
1392 
1393 #endif /* CONFIG_PPC_EMULATED_STATS */
1394