/* xref: /openbmc/linux/arch/powerpc/kernel/traps.c (revision e983940270f10fe8551baf0098be76ea478294a3) */
/*
 *  Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
 *  Copyright 2007-2010 Freescale Semiconductor, Inc.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 *  Modified by Cort Dougan (cort@cs.nmt.edu)
 *  and Paul Mackerras (paulus@samba.org)
 */

/*
 * This file handles the architecture-dependent parts of hardware exceptions
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/extable.h>
#include <linux/module.h>	/* print_modules */
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/debugfs.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>

#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#include <asm/reg.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
#include <asm/tm.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/rio.h>
#include <asm/fadump.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#include <asm/asm-prototypes.h>
#include <asm/hmi.h>
#include <sysdev/fsl_pci.h>

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_break_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

/* Transactional Memory trap debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while (0)
#endif
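
/*
 * Usage sketch (not from the original source): with TM_DEBUG_SW defined,
 * a call such as TM_DEBUG("tm: emulation aborted txn, cause %d\n", cause)
 * expands to printk(KERN_INFO ...); without it the call compiles away.
 */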

/*
 * Trap & Exception support
 */

#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
	mutex_lock(&pmac_backlight_mutex);
	if (pmac_backlight) {
		struct backlight_properties *props;

		props = &pmac_backlight->props;
		props->brightness = props->max_brightness;
		props->power = FB_BLANK_UNBLANK;
		backlight_update_status(pmac_backlight);
	}
	mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;

static unsigned long oops_begin(struct pt_regs *regs)
{
	int cpu;
	unsigned long flags;

	if (debugger(regs))
		return 1;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	if (machine_is(powermac))
		pmac_backlight_unblank();
	return flags;
}
NOKPROBE_SYMBOL(oops_begin);

static void oops_end(unsigned long flags, struct pt_regs *regs,
			       int signr)
{
	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	oops_exit();
	printk("\n");
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);

	crash_fadump(regs, "die oops");

	/*
	 * A system reset (0x100) is a request to dump, so we always send
	 * it through the crashdump code.
	 */
	if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) {
		crash_kexec(regs);

		/*
		 * We aren't the primary crash CPU. We need to send it
		 * to a holding pattern to avoid it ending up in the panic
		 * code.
		 */
		crash_kexec_secondary(regs);
	}

	if (!signr)
		return;

	/*
	 * While our oops output is serialised by a spinlock, output
	 * from panic() called below can race and corrupt it. If we
	 * know we are going to panic, delay for 1 second so we have a
	 * chance to get clean backtraces from all CPUs that are oopsing.
	 */
	if (in_interrupt() || panic_on_oops || !current->pid ||
	    is_global_init(current)) {
		mdelay(MSEC_PER_SEC);
	}

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);

static int __die(const char *str, struct pt_regs *regs, long err)
{
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP NR_CPUS=%d ", NR_CPUS);
#endif
	if (debug_pagealloc_enabled())
		printk("DEBUG_PAGEALLOC ");
#ifdef CONFIG_NUMA
	printk("NUMA ");
#endif
	printk("%s\n", ppc_md.name ? ppc_md.name : "");

	if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
		return 1;

	print_modules();
	show_regs(regs);

	return 0;
}
NOKPROBE_SYMBOL(__die);

void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin(regs);

	if (__die(str, regs, err))
		err = 0;
	oops_end(flags, regs, err);
}

void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs, siginfo_t *info)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGTRAP;
	info->si_code = TRAP_TRACE;
	info->si_addr = (void __user *)regs->nip;
}

void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	siginfo_t info;
	const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
			"at %08lx nip %08lx lr %08lx code %x\n";
	const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
			"at %016lx nip %016lx lr %016lx code %x\n";

	if (!user_mode(regs)) {
		die("Exception in kernel mode", regs, signr);
		return;
	}

	if (show_unhandled_signals && unhandled_signal(current, signr)) {
		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
				   current->comm, current->pid, signr,
				   addr, regs->nip, regs->link, code);
	}

	if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
		local_irq_enable();

	current->thread.trap_nr = code;
	memset(&info, 0, sizeof(info));
	info.si_signo = signr;
	info.si_code = code;
	info.si_addr = (void __user *) addr;
	force_sig_info(signr, &info, current);
}

void system_reset_exception(struct pt_regs *regs)
{
	/* See if any machine-dependent code wants to handle this */
	if (ppc_md.system_reset_exception) {
		if (ppc_md.system_reset_exception(regs))
			return;
	}

	die("System Reset", regs, SIGABRT);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable System Reset");

	/* What should we do here? We could issue a shutdown or hard reset. */
}

#ifdef CONFIG_PPC64
/*
 * This function is called in real mode. Strictly no printk's please.
 *
 * regs->nip and regs->msr contain SRR0 and SRR1.
 */
long machine_check_early(struct pt_regs *regs)
{
	long handled = 0;

	__this_cpu_inc(irq_stat.mce_exceptions);

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
		handled = cur_cpu_spec->machine_check_early(regs);
	return handled;
}

long hmi_exception_realmode(struct pt_regs *regs)
{
	__this_cpu_inc(irq_stat.hmi_exceptions);

	wait_for_subcore_guest_exit();

	if (ppc_md.hmi_exception_early)
		ppc_md.hmi_exception_early(regs);

	wait_for_tb_resync();

	return 0;
}

#endif

/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == PPC_INST_NOP)
			nip -= 2;
		else if (*nip == PPC_INST_ISYNC)
			--nip;
		if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) {
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100) ? "OUT to" : "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			regs->msr |= MSR_RI;
			regs->nip = entry->fixup;
			return 1;
		}
	}
#endif /* CONFIG_PPC32 */
	return 0;
}
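
/*
 * Illustration (an assumption based on the comment above, not code from
 * this file): a PPC32 inb() fixup site looks roughly like
 *
 *	lbzx	rT,0,rB		<- the load that takes the machine check
 *	twi	0,rT,0
 *	isync
 *	nop
 *
 * so the walk above steps back from the faulting NIP past the nop and
 * isync (or matches a sync/twi directly), then backs up once more to the
 * load or store to recover rB and report the offending port address.
 */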

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)	((regs)->dsisr)
#ifndef CONFIG_FSL_BOOKE
#define get_mc_reason(regs)	((regs)->dsisr)
#else
#define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
#endif
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR

/* single-step stuff */
#define single_stepping(regs)	(current->thread.debug.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.debug.dbcr0 &= ~DBCR0_IC)

#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define get_mc_reason(regs)	((regs)->msr)
#define REASON_TM		0x200000
#define REASON_FP		0x100000
#define REASON_ILLEGAL		0x80000
#define REASON_PRIVILEGED	0x40000
#define REASON_TRAP		0x20000

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#endif
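
/*
 * On non-4xx parts the REASON_* values above are the program interrupt
 * status bits the hardware deposits in SRR1 (and hence in regs->msr):
 * for example a trap instruction arrives with 0x20000 set, a privileged
 * instruction executed in user mode with 0x40000.
 */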

#if defined(CONFIG_4xx)
int machine_check_4xx(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	if (reason & ESR_IMCP) {
		printk("Instruction");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	} else
		printk("Data");
	printk(" machine check in kernel mode.\n");

	return 0;
}

int machine_check_440A(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	if (reason & ESR_IMCP) {
		printk("Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	} else {
		u32 mcsr = mfspr(SPRN_MCSR);
		if (mcsr & MCSR_IB)
			printk("Instruction Read PLB Error\n");
		if (mcsr & MCSR_DRB)
			printk("Data Read PLB Error\n");
		if (mcsr & MCSR_DWB)
			printk("Data Write PLB Error\n");
		if (mcsr & MCSR_TLBP)
			printk("TLB Parity Error\n");
		if (mcsr & MCSR_ICP) {
			flush_instruction_cache();
			printk("I-Cache Parity Error\n");
		}
		if (mcsr & MCSR_DCSP)
			printk("D-Cache Search Parity Error\n");
		if (mcsr & MCSR_DCFP)
			printk("D-Cache Flush Parity Error\n");
		if (mcsr & MCSR_IMPE)
			printk("Machine Check exception is imprecise\n");

		/* Clear MCSR */
		mtspr(SPRN_MCSR, mcsr);
	}
	return 0;
}

int machine_check_47x(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);
	u32 mcsr;

	printk(KERN_ERR "Machine check in kernel mode.\n");
	if (reason & ESR_IMCP) {
		printk(KERN_ERR
		       "Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
		return 0;
	}
	mcsr = mfspr(SPRN_MCSR);
	if (mcsr & MCSR_IB)
		printk(KERN_ERR "Instruction Read PLB Error\n");
	if (mcsr & MCSR_DRB)
		printk(KERN_ERR "Data Read PLB Error\n");
	if (mcsr & MCSR_DWB)
		printk(KERN_ERR "Data Write PLB Error\n");
	if (mcsr & MCSR_TLBP)
		printk(KERN_ERR "TLB Parity Error\n");
	if (mcsr & MCSR_ICP) {
		flush_instruction_cache();
		printk(KERN_ERR "I-Cache Parity Error\n");
	}
	if (mcsr & MCSR_DCSP)
		printk(KERN_ERR "D-Cache Search Parity Error\n");
	if (mcsr & PPC47x_MCSR_GPR)
		printk(KERN_ERR "GPR Parity Error\n");
	if (mcsr & PPC47x_MCSR_FPR)
		printk(KERN_ERR "FPR Parity Error\n");
	if (mcsr & PPC47x_MCSR_IPR)
		printk(KERN_ERR "Machine Check exception is imprecise\n");

	/* Clear MCSR */
	mtspr(SPRN_MCSR, mcsr);

	return 0;
}
#elif defined(CONFIG_E500)
int machine_check_e500mc(struct pt_regs *regs)
{
	unsigned long mcsr = mfspr(SPRN_MCSR);
	unsigned long reason = mcsr;
	int recoverable = 1;

	if (reason & MCSR_LD) {
		recoverable = fsl_rio_mcheck_exception(regs);
		if (recoverable == 1)
			goto silent_out;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");

	if (reason & MCSR_ICPERR) {
		printk("Instruction Cache Parity Error\n");

		/*
		 * This is recoverable by invalidating the i-cache.
		 */
		mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
		while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
			;

		/*
		 * This will generally be accompanied by an instruction
		 * fetch error report -- only treat MCSR_IF as fatal
		 * if it wasn't due to an L1 parity error.
		 */
		reason &= ~MCSR_IF;
	}

	if (reason & MCSR_DCPERR_MC) {
		printk("Data Cache Parity Error\n");

		/*
		 * In write shadow mode we auto-recover from the error, but it
		 * may still get logged and cause a machine check.  We should
		 * only treat the non-write shadow case as non-recoverable.
		 */
		if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
			recoverable = 0;
	}

	if (reason & MCSR_L2MMU_MHIT) {
		printk("Hit on multiple TLB entries\n");
		recoverable = 0;
	}

	if (reason & MCSR_NMI)
		printk("Non-maskable interrupt\n");

	if (reason & MCSR_IF) {
		printk("Instruction Fetch Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LD) {
		printk("Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_ST) {
		printk("Store Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LDG) {
		printk("Guarded Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_TLBSYNC)
		printk("Simultaneous tlbsync operations\n");

	if (reason & MCSR_BSL2_ERR) {
		printk("Level 2 Cache Error\n");
		recoverable = 0;
	}

	if (reason & MCSR_MAV) {
		u64 addr;

		addr = mfspr(SPRN_MCAR);
		addr |= (u64)mfspr(SPRN_MCARU) << 32;

		printk("Machine Check %s Address: %#llx\n",
		       reason & MCSR_MEA ? "Effective" : "Physical", addr);
	}

silent_out:
	mtspr(SPRN_MCSR, mcsr);
	return mfspr(SPRN_MCSR) == 0 && recoverable;
}

int machine_check_e500(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	if (reason & MCSR_BUS_RBERR) {
		if (fsl_rio_mcheck_exception(regs))
			return 1;
		if (fsl_pci_mcheck_exception(regs))
			return 1;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_ICPERR)
		printk("Instruction Cache Parity Error\n");
	if (reason & MCSR_DCP_PERR)
		printk("Data Cache Push Parity Error\n");
	if (reason & MCSR_DCPERR)
		printk("Data Cache Parity Error\n");
	if (reason & MCSR_BUS_IAERR)
		printk("Bus - Instruction Address Error\n");
	if (reason & MCSR_BUS_RAERR)
		printk("Bus - Read Address Error\n");
	if (reason & MCSR_BUS_WAERR)
		printk("Bus - Write Address Error\n");
	if (reason & MCSR_BUS_IBERR)
		printk("Bus - Instruction Data Error\n");
	if (reason & MCSR_BUS_RBERR)
		printk("Bus - Read Data Bus Error\n");
	if (reason & MCSR_BUS_WBERR)
		printk("Bus - Write Data Bus Error\n");
	if (reason & MCSR_BUS_IPERR)
		printk("Bus - Instruction Parity Error\n");
	if (reason & MCSR_BUS_RPERR)
		printk("Bus - Read Parity Error\n");

	return 0;
}

int machine_check_generic(struct pt_regs *regs)
{
	return 0;
}
#elif defined(CONFIG_E200)
int machine_check_e200(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_CP_PERR)
		printk("Cache Push Parity Error\n");
	if (reason & MCSR_CPERR)
		printk("Cache Parity Error\n");
	if (reason & MCSR_EXCP_ERR)
		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
	if (reason & MCSR_BUS_IRERR)
		printk("Bus - Read Bus Error on instruction fetch\n");
	if (reason & MCSR_BUS_DRERR)
		printk("Bus - Read Bus Error on data load\n");
	if (reason & MCSR_BUS_WRERR)
		printk("Bus - Write Bus Error on buffered store or cache line push\n");

	return 0;
}
#elif defined(CONFIG_PPC_8xx)
int machine_check_8xx(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	pr_err("Machine check in kernel mode.\n");
	pr_err("Caused by (from SRR1=%lx): ", reason);
	if (reason & 0x40000000)
		pr_err("Fetch error at address %lx\n", regs->nip);
	else
		pr_err("Data access error at address %lx\n", regs->dar);

#ifdef CONFIG_PCI
	/* the qspan pci read routines can cause machine checks -- Cort
	 *
	 * yuck !!! that totally needs to go away ! There are better ways
	 * to deal with that than having a wart in the mcheck handler.
	 * -- BenH
	 */
	bad_page_fault(regs, regs->dar, SIGBUS);
	return 1;
#else
	return 0;
#endif
}
#else
int machine_check_generic(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from SRR1=%lx): ", reason);
	switch (reason & 0x601F0000) {
	case 0x80000:
		printk("Machine check signal\n");
		break;
	case 0:		/* for 601 */
	case 0x40000:
	case 0x140000:	/* 7450 MSS error and TEA */
		printk("Transfer error ack signal\n");
		break;
	case 0x20000:
		printk("Data parity error signal\n");
		break;
	case 0x10000:
		printk("Address parity error signal\n");
		break;
	case 0x20000000:
		printk("L1 Data Cache error\n");
		break;
	case 0x40000000:
		printk("L1 Instruction Cache error\n");
		break;
	case 0x00100000:
		printk("L2 data cache parity error\n");
		break;
	default:
		printk("Unknown values in msr\n");
	}
	return 0;
}
#endif /* everything else */

void machine_check_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	int recover = 0;

	__this_cpu_inc(irq_stat.mce_exceptions);

	/* See if any machine-dependent code wants to handle this. In theory,
	 * we would want to call the CPU first, and call the ppc_md. one if
	 * the CPU one returns a positive number. However there is existing
	 * code that assumes the board gets a first chance, so let's keep it
	 * that way for now and fix things later. --BenH.
	 */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);
	else if (cur_cpu_spec->machine_check)
		recover = cur_cpu_spec->machine_check(regs);

	if (recover > 0)
		goto bail;

	if (debugger_fault_handler(regs))
		goto bail;

	if (check_io_access(regs))
		goto bail;

	die("Machine check", regs, SIGBUS);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable Machine check");

bail:
	exception_exit(prev_state);
}

void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}

void handle_hmi_exception(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	irq_enter();

	if (ppc_md.handle_hmi_exception)
		ppc_md.handle_hmi_exception(regs);

	irq_exit();
	set_irq_regs(old_regs);
}

void unknown_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, 0, 0);

	exception_exit(prev_state);
}

void instruction_breakpoint_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_iabr_match(regs))
		goto bail;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);

bail:
	exception_exit(prev_state);
}

void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, 0, 0);
}

void single_step_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	clear_single_step(regs);

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_sstep(regs))
		goto bail;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);

bail:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(single_step_exception);

/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception.  This was pointed out
 * by Kumar Gala.  -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs))
		single_step_exception(regs);
}
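
/*
 * Example: when emulate_instruction() below emulates "mfspr rD, PVR",
 * its caller advances regs->nip past the instruction and then calls
 * emulate_single_step(), so a task being single-stepped (MSR_SE, or
 * DBCR0_IC on BookE) still receives the SIGTRAP it would have got had
 * the hardware executed the instruction itself.
 */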

static inline int __parse_fpscr(unsigned long fpscr)
{
	int ret = 0;

	/* Invalid operation */
	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
		ret = FPE_FLTINV;

	/* Overflow */
	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
		ret = FPE_FLTOVF;

	/* Underflow */
	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
		ret = FPE_FLTUND;

	/* Divide by zero */
	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
		ret = FPE_FLTDIV;

	/* Inexact result */
	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
		ret = FPE_FLTRES;

	return ret;
}
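
/*
 * Worked example: a division by zero taken with zero-divide trapping
 * enabled arrives with both FPSCR_ZE (enable) and FPSCR_ZX (sticky
 * status) set, so the chain above returns FPE_FLTDIV for the siginfo
 * si_code; with no matching enable bit set it falls through and
 * returns 0.
 */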

static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;

	flush_fp_to_thread(current);

	code = __parse_fpscr(current->thread.fp_state.fpscr);

	_exception(SIGFPE, regs, code, regs->nip);
}

/*
 * Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 */
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & PPC_INST_STRING_MASK) {
		case PPC_INST_LSWX:
		case PPC_INST_STSWX:
			EA += NB_RB;
			num_bytes = regs->xer & 0x7f;
			break;
		case PPC_INST_LSWI:
		case PPC_INST_STSWI:
			num_bytes = (NB_RB == 0) ? 32 : NB_RB;
			break;
		default:
			return -EINVAL;
	}

	while (num_bytes != 0) {
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));

		/* if process is 32-bit, clear upper 32 bits of EA */
		if ((regs->msr & MSR_64BIT) == 0)
			EA &= 0xFFFFFFFF;

		switch ((instword & PPC_INST_STRING_MASK)) {
			case PPC_INST_LSWX:
			case PPC_INST_LSWI:
				if (get_user(val, (u8 __user *)EA))
					return -EFAULT;
				/* first time updating this reg,
				 * zero it out */
				if (pos == 0)
					regs->gpr[rT] = 0;
				regs->gpr[rT] |= val << shift;
				break;
			case PPC_INST_STSWI:
			case PPC_INST_STSWX:
				val = regs->gpr[rT] >> shift;
				if (put_user(val, (u8 __user *)EA))
					return -EFAULT;
				break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}
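
/*
 * Worked example (illustrative): for "lswi r5, r4, 6" we get rT=5, rA=4,
 * NB=6, so EA = r4 and six bytes are loaded, most significant byte
 * first: four into r5 and the remaining two into the top half of r6,
 * the loop zeroing each destination register the first time it touches
 * it so the unfilled low bytes read as zero.
 */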

static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
	u32 ra, rs;
	unsigned long tmp;

	ra = (instword >> 16) & 0x1f;
	rs = (instword >> 21) & 0x1f;

	tmp = regs->gpr[rs];
	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
	regs->gpr[ra] = tmp;

	return 0;
}
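
/*
 * The three masked steps above are the classic SWAR popcount performed
 * independently within each byte, which matches popcntb's definition.
 * For a single byte 0xf1 (five bits set):
 *	after step 1 (2-bit sums):  0xa1
 *	after step 2 (4-bit sums):  0x41  (nibble counts 4 and 1)
 *	after step 3 (byte sum):    0x05
 */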

static int emulate_isel(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 rB = (instword >> 11) & 0x1f;
	u8 BC = (instword >> 6) & 0x1f;
	u8 bit;
	unsigned long tmp;

	tmp = (rA == 0) ? 0 : regs->gpr[rA];
	bit = (regs->ccr >> (31 - BC)) & 0x1;

	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];

	return 0;
}
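
/*
 * isel semantics as implemented above: rT = CR[BC] ? rA : rB, where
 * rA == 0 means the literal value 0 rather than GPR0. For example
 * "isel r3, r4, r5, 0" tests CR0[LT] (CR bit 0) and selects r4 when the
 * last comparison was "less than", r5 otherwise.
 */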

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_abort_check(struct pt_regs *regs, int cause)
{
	/* If we're emulating a load/store in an active transaction, we cannot
	 * emulate it as the kernel operates in transaction suspended context.
	 * We need to abort the transaction.  This creates a persistent TM
	 * abort so tell the user what caused it with a new code.
	 */
	if (MSR_TM_TRANSACTIONAL(regs->msr)) {
		tm_enable();
		tm_abort(cause);
		return true;
	}
	return false;
}
#else
static inline bool tm_abort_check(struct pt_regs *regs, int reason)
{
	return false;
}
#endif

static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	if (!user_mode(regs))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
		PPC_WARN_EMULATED(mfpvr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op.  */
	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
		PPC_WARN_EMULATED(dcba, regs);
		return 0;
	}

	/* Emulate the mcrxr insn.  */
	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

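		/*
		 * shift is 4 * BF (the destination CR field) and msk selects
		 * that 4-bit field: the XER top nibble (SO/OV/CA) is copied
		 * into CR field BF and then cleared in XER, which is what
		 * mcrxr does architecturally.
		 */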
		PPC_WARN_EMULATED(mcrxr, regs);
		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
		if (tm_abort_check(regs,
				   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
			return -EINVAL;
		PPC_WARN_EMULATED(string, regs);
		return emulate_string_inst(regs, instword);
	}

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
		PPC_WARN_EMULATED(popcntb, regs);
		return emulate_popcntb_inst(regs, instword);
	}

	/* Emulate isel (Integer Select) instruction */
	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
		PPC_WARN_EMULATED(isel, regs);
		return emulate_isel(regs, instword);
	}

	/* Emulate sync instruction variants */
	if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
		PPC_WARN_EMULATED(sync, regs);
		asm volatile("sync");
		return 0;
	}

#ifdef CONFIG_PPC64
	/* Emulate the mfspr rD, DSCR. */
	if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
		PPC_INST_MFSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
		PPC_INST_MFSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mfdscr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_DSCR);
		return 0;
	}
	/* Emulate the mtspr DSCR, rD. */
	if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
		PPC_INST_MTSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
		PPC_INST_MTSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mtdscr, regs);
		rd = (instword >> 21) & 0x1f;
		current->thread.dscr = regs->gpr[rd];
		current->thread.dscr_inherit = 1;
		mtspr(SPRN_DSCR, current->thread.dscr);
		return 0;
	}
#endif

	return -EINVAL;
}

int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}

#ifdef CONFIG_MATH_EMULATION
static int emulate_math(struct pt_regs *regs)
{
	int ret;
	extern int do_mathemu(struct pt_regs *regs);

	ret = do_mathemu(regs);
	if (ret >= 0)
		PPC_WARN_EMULATED(math, regs);

	switch (ret) {
	case 0:
		emulate_single_step(regs);
		return 0;
	case 1: {
			int code = 0;
			code = __parse_fpscr(current->thread.fp_state.fpscr);
			_exception(SIGFPE, regs, code, regs->nip);
			return 0;
		}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return 0;
	}

	return -1;
}
#else
static inline int emulate_math(struct pt_regs *regs) { return -1; }
#endif

void program_check_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	unsigned int reason = get_reason(regs);

	/* We can now get here via an FP Unavailable exception if the core
	 * has no FPU; in that case the reason flags will be 0 */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		goto bail;
	}
	if (reason & REASON_TRAP) {
		unsigned long bugaddr;
		/* Debugger is first in line to stop recursive faults in
		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
		if (debugger_bpt(regs))
			goto bail;

		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
				== NOTIFY_STOP)
			goto bail;

		bugaddr = regs->nip;
		/*
		 * Fixup bugaddr for BUG_ON() in real mode
		 */
		if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR))
			bugaddr += PAGE_OFFSET;

		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
		    report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		goto bail;
	}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (reason & REASON_TM) {
		/* This is a TM "Bad Thing Exception" program check.
		 * This occurs when:
		 * -  An rfid/hrfid/mtmsrd attempts to cause an illegal
		 *    transition in TM states.
		 * -  A trechkpt is attempted when transactional.
		 * -  A treclaim is attempted when non-transactional.
		 * -  A tend is illegally attempted.
		 * -  writing a TM SPR when transactional.
		 */
		if (!user_mode(regs) &&
		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		/* If usermode caused this, it's done something illegal and
		 * gets a SIGILL slap on the wrist.  We call it an illegal
		 * operand to distinguish from the instruction just being bad
		 * (e.g. executing a 'tend' on a CPU without TM!); it's an
		 * illegal /placement/ of a valid instruction.
		 */
		if (user_mode(regs)) {
			_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
			goto bail;
		} else {
			printk(KERN_EMERG "Unexpected TM Bad Thing exception "
			       "at %lx (msr 0x%x)\n", regs->nip, reason);
			die("Unrecoverable exception", regs, SIGABRT);
		}
	}
#endif

	/*
	 * If we took the program check in the kernel skip down to sending a
	 * SIGILL. The subsequent cases all relate to emulating instructions
	 * which we should only do for userspace. We also do not want to enable
	 * interrupts for kernel faults because that might lead to further
	 * faults, and lose the context of the original exception.
	 */
	if (!user_mode(regs))
		goto sigill;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0.  In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurrences etc. -dgibson 31/Mar/2003
	 */
	if (!emulate_math(regs))
		goto bail;

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs->nip += 4;
			emulate_single_step(regs);
			goto bail;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			goto bail;
		}
	}

sigill:
	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);

bail:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(program_check_exception);

/*
 * This occurs when running in hypervisor mode on POWER6 or later
 * and an illegal instruction is encountered.
 */
void emulation_assist_interrupt(struct pt_regs *regs)
{
	regs->msr |= REASON_ILLEGAL;
	program_check_exception(regs);
}
NOKPROBE_SYMBOL(emulation_assist_interrupt);
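
/*
 * Note: the emulation assist interrupt does not itself set any program
 * check reason bits, so the handler above synthesizes REASON_ILLEGAL in
 * regs->msr and reuses the ordinary program check path, which will try
 * emulate_instruction() and fall back to SIGILL.
 */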

void alignment_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	int sig, code, fixed = 0;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
		goto bail;

	/* we don't implement logging of alignment exceptions */
	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		fixed = fix_alignment(regs);

	if (fixed == 1) {
		regs->nip += 4;	/* skip over emulated instruction */
		emulate_single_step(regs);
		goto bail;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		sig = SIGSEGV;
		code = SEGV_ACCERR;
	} else {
		sig = SIGBUS;
		code = BUS_ADRALN;
	}
	if (user_mode(regs))
		_exception(sig, regs, code, regs->dar);
	else
		bad_page_fault(regs, regs->dar, sig);

bail:
	exception_exit(prev_state);
}

void slb_miss_bad_addr(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (user_mode(regs))
		_exception(SIGSEGV, regs, SEGV_BNDERR, regs->dar);
	else
		bad_page_fault(regs, regs->dar, SIGSEGV);

	exception_exit(prev_state);
}

void StackOverflow(struct pt_regs *regs)
{
	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
	       current, regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}

void nonrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
	       regs->nip, regs->msr);
	debugger(regs);
	die("nonrecoverable exception", regs, SIGKILL);
}

void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);

	exception_exit(prev_state);
}

void altivec_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (user_mode(regs)) {
		/* A user program has executed an altivec instruction,
		   but this kernel doesn't support altivec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		goto bail;
	}

	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);

bail:
	exception_exit(prev_state);
}

void vsx_unavailable_exception(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/* A user program has executed a vsx instruction,
		   but this kernel doesn't support vsx. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}

#ifdef CONFIG_PPC64
static void tm_unavailable(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (user_mode(regs)) {
		current->thread.load_tm++;
		regs->msr |= MSR_TM;
		tm_enable();
		tm_restore_sprs(&current->thread);
		return;
	}
#endif
	pr_emerg("Unrecoverable TM Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
}

void facility_unavailable_exception(struct pt_regs *regs)
{
	static char *facility_strings[] = {
		[FSCR_FP_LG] = "FPU",
		[FSCR_VECVSX_LG] = "VMX/VSX",
		[FSCR_DSCR_LG] = "DSCR",
		[FSCR_PM_LG] = "PMU SPRs",
		[FSCR_BHRB_LG] = "BHRB",
		[FSCR_TM_LG] = "TM",
		[FSCR_EBB_LG] = "EBB",
		[FSCR_TAR_LG] = "TAR",
		[FSCR_LM_LG] = "LM",
	};
	char *facility = "unknown";
	u64 value;
	u32 instword, rd;
	u8 status;
	bool hv;

	hv = (regs->trap == 0xf80);
	if (hv)
		value = mfspr(SPRN_HFSCR);
	else
		value = mfspr(SPRN_FSCR);

	status = value >> 56;
	if (status == FSCR_DSCR_LG) {
		/*
		 * User is accessing the DSCR register using the problem
		 * state only SPR number (0x03) either through a mfspr or
		 * a mtspr instruction. If it is a write attempt through
		 * a mtspr, then we set the inherit bit. This also allows
		 * the user to write or read the register directly in the
		 * future by setting via the FSCR DSCR bit. But in case it
		 * is a read DSCR attempt through a mfspr instruction, we
		 * just emulate the instruction instead. This code path will
		 * always emulate all the mfspr instructions till the user
		 * has attempted at least one mtspr instruction. This way it
		 * preserves the same behaviour when the user is accessing
		 * the DSCR through privilege level only SPR number (0x11)
		 * which is emulated through illegal instruction exception.
		 * We always leave HFSCR DSCR set.
		 */
		if (get_user(instword, (u32 __user *)(regs->nip))) {
			pr_err("Failed to fetch the user instruction\n");
			return;
		}

		/* Write into DSCR (mtspr 0x03, RS) */
		if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
				== PPC_INST_MTSPR_DSCR_USER) {
			rd = (instword >> 21) & 0x1f;
			current->thread.dscr = regs->gpr[rd];
			current->thread.dscr_inherit = 1;
			current->thread.fscr |= FSCR_DSCR;
			mtspr(SPRN_FSCR, current->thread.fscr);
		}

		/* Read from DSCR (mfspr RT, 0x03) */
		if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
				== PPC_INST_MFSPR_DSCR_USER) {
			if (emulate_instruction(regs)) {
				pr_err("DSCR based mfspr emulation failed\n");
				return;
			}
			regs->nip += 4;
			emulate_single_step(regs);
		}
		return;
	} else if ((status == FSCR_LM_LG) && cpu_has_feature(CPU_FTR_ARCH_300)) {
		/*
		 * This process has touched LM, so turn it on forever
		 * for this process
		 */
		current->thread.fscr |= FSCR_LM;
		mtspr(SPRN_FSCR, current->thread.fscr);
		return;
	}

	if (status == FSCR_TM_LG) {
		/*
		 * If we're here then the hardware is TM aware because it
		 * generated an exception with FSCR_TM set.
		 *
		 * If cpu_has_feature(CPU_FTR_TM) is false, then either firmware
		 * told us not to do TM, or the kernel is not built with TM
		 * support.
		 *
		 * If both of those things are true, then userspace can spam the
		 * console by triggering the printk() below just by continually
		 * doing tbegin (or any TM instruction). So in that case just
		 * send the process a SIGILL immediately.
		 */
		if (!cpu_has_feature(CPU_FTR_TM))
			goto out;

		tm_unavailable(regs);
		return;
	}

	if ((status < ARRAY_SIZE(facility_strings)) &&
	    facility_strings[status])
		facility = facility_strings[status];

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	pr_err_ratelimited(
		"%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
		hv ? "Hypervisor " : "", facility, regs->nip, regs->msr);

out:
	if (user_mode(regs)) {
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	die("Unexpected facility unavailable exception", regs, SIGABRT);
}
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM

void fp_unavailable_tm(struct pt_regs *regs)
{
	/* Note:  This does not handle any kind of FP laziness. */

	TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
		 regs->nip, regs->msr);

	/* We can only have got here if the task started using FP after
	 * beginning the transaction.  So, the transactional regs are just a
	 * copy of the checkpointed ones.  But, we still need to recheckpoint
	 * as we're enabling FP for the process; it will return, abort the
	 * transaction, and probably retry but now with FP enabled.  So the
	 * checkpointed FP registers need to be loaded.
	 */
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
	/* Reclaim didn't save out any FPRs to transact_fprs. */

	/* Enable FP for the task: */
	regs->msr |= (MSR_FP | current->thread.fpexc_mode);

	/* This loads and recheckpoints the FP registers from
	 * thread.fpr[].  They will remain in registers after the
	 * checkpoint so we don't need to reload them after.
	 * If VMX is in use, the VRs now hold checkpointed values,
	 * so we don't want to load the VRs from the thread_struct.
	 */
	tm_recheckpoint(&current->thread, MSR_FP);

	/* If VMX is in use, get the transactional values back */
	if (regs->msr & MSR_VEC) {
		msr_check_and_set(MSR_VEC);
		load_vr_state(&current->thread.vr_state);
		/* At this point all the VSX state is loaded, so enable it */
		regs->msr |= MSR_VSX;
	}
}

void altivec_unavailable_tm(struct pt_regs *regs)
{
	/* See the comments in fp_unavailable_tm().  This function operates
	 * the same way.
	 */

	TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx, "
		 "MSR=%lx\n",
		 regs->nip, regs->msr);
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
	regs->msr |= MSR_VEC;
	tm_recheckpoint(&current->thread, MSR_VEC);
	current->thread.used_vr = 1;

	if (regs->msr & MSR_FP) {
		msr_check_and_set(MSR_FP);
		load_fp_state(&current->thread.fp_state);
		regs->msr |= MSR_VSX;
	}
}

void vsx_unavailable_tm(struct pt_regs *regs)
{
	unsigned long orig_msr = regs->msr;

	/* See the comments in fp_unavailable_tm().  This works similarly,
	 * though we're loading both FP and VEC registers in here.
	 *
	 * If FP isn't in use, load FP regs.  If VEC isn't in use, load VEC
	 * regs.  Either way, set MSR_VSX.
	 */

	TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx, "
		 "MSR=%lx\n",
		 regs->nip, regs->msr);

	current->thread.used_vsr = 1;

	/* If FP and VMX are already loaded, we have all the state we need */
	if ((orig_msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) {
		regs->msr |= MSR_VSX;
		return;
	}

	/* This reclaims FP and/or VR regs if they're already enabled */
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);

	regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode |
		MSR_VSX;

	/* This loads & recheckpoints FP and VRs; but we have
	 * to be sure not to overwrite previously-valid state.
	 */
	tm_recheckpoint(&current->thread, regs->msr & ~orig_msr);

	msr_check_and_set(orig_msr & (MSR_FP | MSR_VEC));

	if (orig_msr & MSR_FP)
		load_fp_state(&current->thread.fp_state);
	if (orig_msr & MSR_VEC)
		load_vr_state(&current->thread.vr_state);
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

void performance_monitor_exception(struct pt_regs *regs)
{
	__this_cpu_inc(irq_stat.pmu_irqs);

	perf_irq(regs);
}

#ifdef CONFIG_8xx
void SoftwareEmulation(struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);

	if (!user_mode(regs)) {
		debugger(regs);
		die("Kernel Mode Unimplemented Instruction or SW FPU Emulation",
			regs, SIGFPE);
	}

	if (!emulate_math(regs))
		return;

	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
#endif /* CONFIG_8xx */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
{
	int changed = 0;
	/*
	 * Determine the cause of the debug event, clear the
	 * event flags and send a trap to the handler. Torez
	 */
	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
			     5);
		changed |= 0x01;
	} else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
		dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
		do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT,
			     6);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC1) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
			     1);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC2) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
			     2);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC3) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
			     3);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC4) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
			     4);
		changed |= 0x01;
	}
	/*
	 * At the point this routine was called, the MSR(DE) was turned off.
	 * Check all other debug flags and see if that bit needs to be turned
	 * back on or not.
	 */
	if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
			       current->thread.debug.dbcr1))
		regs->msr |= MSR_DE;
	else
		/* Make sure the IDM flag is off */
		current->thread.debug.dbcr0 &= ~DBCR0_IDM;

	if (changed & 0x01)
		mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
}

void DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	current->thread.debug.dbsr = debug_status;

	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
	 * on server, it stops on the target of the branch. In order to simulate
	 * the server behaviour, we thus restart right away with a single step
	 * instead of stopping here when hitting a BT
	 */
	if (debug_status & DBSR_BT) {
		regs->msr &= ~MSR_DE;

		/* Disable BT */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
		/* Clear the BT event */
		mtspr(SPRN_DBSR, DBSR_BT);

		/* Do the single step trick only when coming from userspace */
		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_BT;
			current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
			regs->msr |= MSR_DE;
			return;
		}

		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}
		if (debugger_sstep(regs))
			return;
	} else if (debug_status & DBSR_IC) {	/* Instruction complete */
		regs->msr &= ~MSR_DE;

		/* Disable instruction completion */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
		/* Clear the instruction completion event */
		mtspr(SPRN_DBSR, DBSR_IC);

		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}

		if (debugger_sstep(regs))
			return;

		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_IC;
			if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
					       current->thread.debug.dbcr1))
				regs->msr |= MSR_DE;
			else
				/* Make sure the IDM bit is off */
				current->thread.debug.dbcr0 &= ~DBCR0_IDM;
		}

		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
	} else
		handle_debug(regs, debug_status);
}
NOKPROBE_SYMBOL(DebugException);
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */

#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	flush_altivec_to_thread(current);

	PPC_WARN_EMULATED(altivec, regs);
	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
				   "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vr_state.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	/* We treat cache locking instructions from the user
	 * as privileged ops; in the future we could try to do
	 * something smarter
	 */
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	return;
}
#endif /* CONFIG_FSL_BOOKE */

#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
	extern int do_spe_mathemu(struct pt_regs *regs);
	unsigned long spefscr;
	int fpexc_mode;
	int code = 0;
	int err;

	flush_spe_to_thread(current);

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
		code = FPE_FLTOVF;
	} else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
		code = FPE_FLTUND;
	} else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV)) {
		code = FPE_FLTDIV;
	} else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
		code = FPE_FLTINV;
	} else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES)) {
		code = FPE_FLTRES;
	}

	err = do_spe_mathemu(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, code, regs->nip);
	}

	return;
}

void SPEFloatingPointRoundException(struct pt_regs *regs)
{
	extern int speround_handler(struct pt_regs *regs);
	int err;

	preempt_disable();
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	preempt_enable();

	regs->nip -= 4;
	err = speround_handler(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, 0, regs->nip);
		return;
	}
}
#endif

/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
	       regs->trap, regs->nip);
	die("Unrecoverable exception", regs, SIGABRT);
}

#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR) & (~TCR_WIE));
	return;
}

void WatchdogException(struct pt_regs *regs)
{
	printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif

/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
	       regs->gpr[1], regs->nip);
	die("Bad kernel stack pointer", regs, SIGABRT);
}

void __init trap_init(void)
{
}


#ifdef CONFIG_PPC_EMULATED_STATS

#define WARN_EMULATED_SETUP(type)	.type = { .name = #type }

struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
	WARN_EMULATED_SETUP(altivec),
#endif
	WARN_EMULATED_SETUP(dcba),
	WARN_EMULATED_SETUP(dcbz),
	WARN_EMULATED_SETUP(fp_pair),
	WARN_EMULATED_SETUP(isel),
	WARN_EMULATED_SETUP(mcrxr),
	WARN_EMULATED_SETUP(mfpvr),
	WARN_EMULATED_SETUP(multiple),
	WARN_EMULATED_SETUP(popcntb),
	WARN_EMULATED_SETUP(spe),
	WARN_EMULATED_SETUP(string),
	WARN_EMULATED_SETUP(sync),
	WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
	WARN_EMULATED_SETUP(math),
#endif
#ifdef CONFIG_VSX
	WARN_EMULATED_SETUP(vsx),
#endif
#ifdef CONFIG_PPC64
	WARN_EMULATED_SETUP(mfdscr),
	WARN_EMULATED_SETUP(mtdscr),
	WARN_EMULATED_SETUP(lq_stq),
#endif
};

u32 ppc_warn_emulated;

void ppc_warn_emulated_print(const char *type)
{
	pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
			    type);
}

static int __init ppc_warn_emulated_init(void)
{
	struct dentry *dir, *d;
	unsigned int i;
	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;

	if (!powerpc_debugfs_root)
		return -ENODEV;

	dir = debugfs_create_dir("emulated_instructions",
				 powerpc_debugfs_root);
	if (!dir)
		return -ENOMEM;

	d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
			       &ppc_warn_emulated);
	if (!d)
		goto fail;

	for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
		d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
				       (u32 *)&entries[i].val.counter);
		if (!d)
			goto fail;
	}

	return 0;

fail:
	debugfs_remove_recursive(dir);
	return -ENOMEM;
}

device_initcall(ppc_warn_emulated_init);

#endif /* CONFIG_PPC_EMULATED_STATS */
2058