xref: /openbmc/linux/arch/powerpc/kernel/traps.c (revision b627b4ed)
1 /*
2  *  Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
3  *
4  *  This program is free software; you can redistribute it and/or
5  *  modify it under the terms of the GNU General Public License
6  *  as published by the Free Software Foundation; either version
7  *  2 of the License, or (at your option) any later version.
8  *
9  *  Modified by Cort Dougan (cort@cs.nmt.edu)
10  *  and Paul Mackerras (paulus@samba.org)
11  */
12 
13 /*
14  * This file handles the architecture-dependent parts of hardware exceptions
15  */
16 
17 #include <linux/errno.h>
18 #include <linux/sched.h>
19 #include <linux/kernel.h>
20 #include <linux/mm.h>
21 #include <linux/stddef.h>
22 #include <linux/unistd.h>
23 #include <linux/ptrace.h>
24 #include <linux/slab.h>
25 #include <linux/user.h>
26 #include <linux/interrupt.h>
27 #include <linux/init.h>
28 #include <linux/module.h>
29 #include <linux/prctl.h>
30 #include <linux/delay.h>
31 #include <linux/kprobes.h>
32 #include <linux/kexec.h>
33 #include <linux/backlight.h>
34 #include <linux/bug.h>
35 #include <linux/kdebug.h>
36 
37 #include <asm/pgtable.h>
38 #include <asm/uaccess.h>
39 #include <asm/system.h>
40 #include <asm/io.h>
41 #include <asm/machdep.h>
42 #include <asm/rtas.h>
43 #include <asm/pmc.h>
44 #ifdef CONFIG_PPC32
45 #include <asm/reg.h>
46 #endif
47 #ifdef CONFIG_PMAC_BACKLIGHT
48 #include <asm/backlight.h>
49 #endif
50 #ifdef CONFIG_PPC64
51 #include <asm/firmware.h>
52 #include <asm/processor.h>
53 #endif
54 #include <asm/kexec.h>
55 #include <asm/ppc-opcode.h>
56 #ifdef CONFIG_FSL_BOOKE
57 #include <asm/dbell.h>
58 #endif
59 
60 #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
61 int (*__debugger)(struct pt_regs *regs);
62 int (*__debugger_ipi)(struct pt_regs *regs);
63 int (*__debugger_bpt)(struct pt_regs *regs);
64 int (*__debugger_sstep)(struct pt_regs *regs);
65 int (*__debugger_iabr_match)(struct pt_regs *regs);
66 int (*__debugger_dabr_match)(struct pt_regs *regs);
67 int (*__debugger_fault_handler)(struct pt_regs *regs);
68 
69 EXPORT_SYMBOL(__debugger);
70 EXPORT_SYMBOL(__debugger_ipi);
71 EXPORT_SYMBOL(__debugger_bpt);
72 EXPORT_SYMBOL(__debugger_sstep);
73 EXPORT_SYMBOL(__debugger_iabr_match);
74 EXPORT_SYMBOL(__debugger_dabr_match);
75 EXPORT_SYMBOL(__debugger_fault_handler);
76 #endif
77 
78 /*
79  * Trap & Exception support
80  */
81 
82 #ifdef CONFIG_PMAC_BACKLIGHT
83 static void pmac_backlight_unblank(void)
84 {
85 	mutex_lock(&pmac_backlight_mutex);
86 	if (pmac_backlight) {
87 		struct backlight_properties *props;
88 
89 		props = &pmac_backlight->props;
90 		props->brightness = props->max_brightness;
91 		props->power = FB_BLANK_UNBLANK;
92 		backlight_update_status(pmac_backlight);
93 	}
94 	mutex_unlock(&pmac_backlight_mutex);
95 }
96 #else
97 static inline void pmac_backlight_unblank(void) { }
98 #endif
99 
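/*
 * A note on the locking in die() below: die.lock serializes oops output
 * across CPUs, while lock_owner/lock_owner_depth let the owning CPU
 * re-enter die() (for instance if the oops path itself faults) without
 * deadlocking; output is suppressed once the recursion depth reaches three.
 */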
100 int die(const char *str, struct pt_regs *regs, long err)
101 {
102 	static struct {
103 		spinlock_t lock;
104 		u32 lock_owner;
105 		int lock_owner_depth;
106 	} die = {
107 		.lock =			__SPIN_LOCK_UNLOCKED(die.lock),
108 		.lock_owner =		-1,
109 		.lock_owner_depth =	0
110 	};
111 	static int die_counter;
112 	unsigned long flags;
113 
114 	if (debugger(regs))
115 		return 1;
116 
117 	oops_enter();
118 
119 	if (die.lock_owner != raw_smp_processor_id()) {
120 		console_verbose();
121 		spin_lock_irqsave(&die.lock, flags);
122 		die.lock_owner = smp_processor_id();
123 		die.lock_owner_depth = 0;
124 		bust_spinlocks(1);
125 		if (machine_is(powermac))
126 			pmac_backlight_unblank();
127 	} else {
128 		local_save_flags(flags);
129 	}
130 
131 	if (++die.lock_owner_depth < 3) {
132 		printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
133 #ifdef CONFIG_PREEMPT
134 		printk("PREEMPT ");
135 #endif
136 #ifdef CONFIG_SMP
137 		printk("SMP NR_CPUS=%d ", NR_CPUS);
138 #endif
139 #ifdef CONFIG_DEBUG_PAGEALLOC
140 		printk("DEBUG_PAGEALLOC ");
141 #endif
142 #ifdef CONFIG_NUMA
143 		printk("NUMA ");
144 #endif
145 		printk("%s\n", ppc_md.name ? ppc_md.name : "");
146 
147 		print_modules();
148 		show_regs(regs);
149 	} else {
150 		printk("Recursive die() failure, output suppressed\n");
151 	}
152 
153 	bust_spinlocks(0);
154 	die.lock_owner = -1;
155 	add_taint(TAINT_DIE);
156 	spin_unlock_irqrestore(&die.lock, flags);
157 
158 	if (kexec_should_crash(current) ||
159 		kexec_sr_activated(smp_processor_id()))
160 		crash_kexec(regs);
161 	crash_kexec_secondary(regs);
162 
163 	if (in_interrupt())
164 		panic("Fatal exception in interrupt");
165 
166 	if (panic_on_oops)
167 		panic("Fatal exception");
168 
169 	oops_exit();
170 	do_exit(err);
171 
172 	return 0;
173 }
174 
175 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
176 {
177 	siginfo_t info;
178 	const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
179 			"at %08lx nip %08lx lr %08lx code %x\n";
180 	const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
181 			"at %016lx nip %016lx lr %016lx code %x\n";
182 
183 	if (!user_mode(regs)) {
184 		if (die("Exception in kernel mode", regs, signr))
185 			return;
186 	} else if (show_unhandled_signals &&
187 		    unhandled_signal(current, signr) &&
188 		    printk_ratelimit()) {
189 			printk(regs->msr & MSR_SF ? fmt64 : fmt32,
190 				current->comm, current->pid, signr,
191 				addr, regs->nip, regs->link, code);
192 		}
193 
194 	memset(&info, 0, sizeof(info));
195 	info.si_signo = signr;
196 	info.si_code = code;
197 	info.si_addr = (void __user *) addr;
198 	force_sig_info(signr, &info, current);
199 
200 	/*
201 	 * Init gets no signals that it doesn't have a handler for.
202 	 * That's all very well, but if it has caused a synchronous
203 	 * exception and we ignore the resulting signal, it will just
204 	 * generate the same exception over and over again and we get
205 	 * nowhere.  Better to kill it and let the kernel panic.
206 	 */
207 	if (is_global_init(current)) {
208 		__sighandler_t handler;
209 
210 		spin_lock_irq(&current->sighand->siglock);
211 		handler = current->sighand->action[signr-1].sa.sa_handler;
212 		spin_unlock_irq(&current->sighand->siglock);
213 		if (handler == SIG_DFL) {
214 			/* init has generated a synchronous exception
215 			   and it doesn't have a handler for the signal */
216 			printk(KERN_CRIT "init has generated signal %d "
217 			       "but has no handler for it\n", signr);
218 			do_exit(signr);
219 		}
220 	}
221 }
222 
223 #ifdef CONFIG_PPC64
224 void system_reset_exception(struct pt_regs *regs)
225 {
226 	/* See if there are any machine dependent calls */
227 	if (ppc_md.system_reset_exception) {
228 		if (ppc_md.system_reset_exception(regs))
229 			return;
230 	}
231 
232 #ifdef CONFIG_KEXEC
233 	cpu_set(smp_processor_id(), cpus_in_sr);
234 #endif
235 
236 	die("System Reset", regs, SIGABRT);
237 
238 	/*
239 	 * Some CPUs when released from the debugger will execute this path.
240 	 * These CPUs entered the debugger via a soft-reset. If the CPU was
241 	 * hung before entering the debugger it will return to the hung
242 	 * state when exiting this function.  This causes a problem in
243 	 * kdump since the hung CPU(s) will not respond to the IPI sent
244 	 * from kdump. To prevent the problem we call crash_kexec_secondary()
245 	 * here. If a kdump had not been initiated or we exit the debugger
246 	 * with the "exit and recover" command (x) crash_kexec_secondary()
247 	 * will return after 5ms and the CPU returns to its previous state.
248 	 */
249 	crash_kexec_secondary(regs);
250 
251 	/* Must die if the interrupt is not recoverable */
252 	if (!(regs->msr & MSR_RI))
253 		panic("Unrecoverable System Reset");
254 
255 	/* What should we do here? We could issue a shutdown or hard reset. */
256 }
257 #endif
258 
259 /*
260  * I/O accesses can cause machine checks on powermacs.
261  * Check if the NIP corresponds to the address of a sync
262  * instruction for which there is an entry in the exception
263  * table.
264  * Note that the 601 only takes a machine check on TEA
265  * (transfer error ack) signal assertion, and does not
266  * set any of the top 16 bits of SRR1.
267  *  -- paulus.
268  */
269 static inline int check_io_access(struct pt_regs *regs)
270 {
271 #ifdef CONFIG_PPC32
272 	unsigned long msr = regs->msr;
273 	const struct exception_table_entry *entry;
274 	unsigned int *nip = (unsigned int *)regs->nip;
275 
276 	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
277 	    && (entry = search_exception_tables(regs->nip)) != NULL) {
278 		/*
279 		 * Check that it's a sync instruction, or somewhere
280 		 * in the twi; isync; nop sequence that inb/inw/inl uses.
281 		 * As the address is in the exception table
282 		 * we should be able to read the instr there.
283 		 * For the debug message, we look at the preceding
284 		 * load or store.
285 		 */
286 		if (*nip == 0x60000000)		/* nop */
287 			nip -= 2;
288 		else if (*nip == 0x4c00012c)	/* isync */
289 			--nip;
290 		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
291 			/* sync or twi */
292 			unsigned int rb;
293 
294 			--nip;
295 			rb = (*nip >> 11) & 0x1f;
296 			printk(KERN_DEBUG "%s bad port %lx at %p\n",
297 			       (*nip & 0x100) ? "OUT to" : "IN from",
298 			       regs->gpr[rb] - _IO_BASE, nip);
299 			regs->msr |= MSR_RI;
300 			regs->nip = entry->fixup;
301 			return 1;
302 		}
303 	}
304 #endif /* CONFIG_PPC32 */
305 	return 0;
306 }
307 
308 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
309 /* On 4xx, the reason for the machine check or program exception
310    is in the ESR. */
311 #define get_reason(regs)	((regs)->dsisr)
312 #ifndef CONFIG_FSL_BOOKE
313 #define get_mc_reason(regs)	((regs)->dsisr)
314 #else
315 #define get_mc_reason(regs)	(mfspr(SPRN_MCSR) & MCSR_MASK)
316 #endif
317 #define REASON_FP		ESR_FP
318 #define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
319 #define REASON_PRIVILEGED	ESR_PPR
320 #define REASON_TRAP		ESR_PTR
321 
322 /* single-step stuff */
323 #define single_stepping(regs)	(current->thread.dbcr0 & DBCR0_IC)
324 #define clear_single_step(regs)	(current->thread.dbcr0 &= ~DBCR0_IC)
325 
326 #else
327 /* On non-4xx, the reason for the machine check or program
328    exception is in the MSR. */
329 #define get_reason(regs)	((regs)->msr)
330 #define get_mc_reason(regs)	((regs)->msr)
331 #define REASON_FP		0x100000
332 #define REASON_ILLEGAL		0x80000
333 #define REASON_PRIVILEGED	0x40000
334 #define REASON_TRAP		0x20000
335 
336 #define single_stepping(regs)	((regs)->msr & MSR_SE)
337 #define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
338 #endif
339 
340 #if defined(CONFIG_4xx)
341 int machine_check_4xx(struct pt_regs *regs)
342 {
343 	unsigned long reason = get_mc_reason(regs);
344 
345 	if (reason & ESR_IMCP) {
346 		printk("Instruction");
347 		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
348 	} else
349 		printk("Data");
350 	printk(" machine check in kernel mode.\n");
351 
352 	return 0;
353 }
354 
355 int machine_check_440A(struct pt_regs *regs)
356 {
357 	unsigned long reason = get_mc_reason(regs);
358 
359 	printk("Machine check in kernel mode.\n");
360 	if (reason & ESR_IMCP) {
361 		printk("Instruction Synchronous Machine Check exception\n");
362 		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
363 	}
364 	else {
365 		u32 mcsr = mfspr(SPRN_MCSR);
366 		if (mcsr & MCSR_IB)
367 			printk("Instruction Read PLB Error\n");
368 		if (mcsr & MCSR_DRB)
369 			printk("Data Read PLB Error\n");
370 		if (mcsr & MCSR_DWB)
371 			printk("Data Write PLB Error\n");
372 		if (mcsr & MCSR_TLBP)
373 			printk("TLB Parity Error\n");
374 		if (mcsr & MCSR_ICP) {
375 			flush_instruction_cache();
376 			printk("I-Cache Parity Error\n");
377 		}
378 		if (mcsr & MCSR_DCSP)
379 			printk("D-Cache Search Parity Error\n");
380 		if (mcsr & MCSR_DCFP)
381 			printk("D-Cache Flush Parity Error\n");
382 		if (mcsr & MCSR_IMPE)
383 			printk("Machine Check exception is imprecise\n");
384 
385 		/* Clear MCSR */
386 		mtspr(SPRN_MCSR, mcsr);
387 	}
388 	return 0;
389 }
390 #elif defined(CONFIG_E500)
391 int machine_check_e500(struct pt_regs *regs)
392 {
393 	unsigned long reason = get_mc_reason(regs);
394 
395 	printk("Machine check in kernel mode.\n");
396 	printk("Caused by (from MCSR=%lx): ", reason);
397 
398 	if (reason & MCSR_MCP)
399 		printk("Machine Check Signal\n");
400 	if (reason & MCSR_ICPERR)
401 		printk("Instruction Cache Parity Error\n");
402 	if (reason & MCSR_DCP_PERR)
403 		printk("Data Cache Push Parity Error\n");
404 	if (reason & MCSR_DCPERR)
405 		printk("Data Cache Parity Error\n");
406 	if (reason & MCSR_BUS_IAERR)
407 		printk("Bus - Instruction Address Error\n");
408 	if (reason & MCSR_BUS_RAERR)
409 		printk("Bus - Read Address Error\n");
410 	if (reason & MCSR_BUS_WAERR)
411 		printk("Bus - Write Address Error\n");
412 	if (reason & MCSR_BUS_IBERR)
413 		printk("Bus - Instruction Data Error\n");
414 	if (reason & MCSR_BUS_RBERR)
415 		printk("Bus - Read Data Bus Error\n");
416 	if (reason & MCSR_BUS_WBERR)
417 		printk("Bus - Write Data Bus Error\n");
418 	if (reason & MCSR_BUS_IPERR)
419 		printk("Bus - Instruction Parity Error\n");
420 	if (reason & MCSR_BUS_RPERR)
421 		printk("Bus - Read Parity Error\n");
422 
423 	return 0;
424 }
425 #elif defined(CONFIG_E200)
426 int machine_check_e200(struct pt_regs *regs)
427 {
428 	unsigned long reason = get_mc_reason(regs);
429 
430 	printk("Machine check in kernel mode.\n");
431 	printk("Caused by (from MCSR=%lx): ", reason);
432 
433 	if (reason & MCSR_MCP)
434 		printk("Machine Check Signal\n");
435 	if (reason & MCSR_CP_PERR)
436 		printk("Cache Push Parity Error\n");
437 	if (reason & MCSR_CPERR)
438 		printk("Cache Parity Error\n");
439 	if (reason & MCSR_EXCP_ERR)
440 		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
441 	if (reason & MCSR_BUS_IRERR)
442 		printk("Bus - Read Bus Error on instruction fetch\n");
443 	if (reason & MCSR_BUS_DRERR)
444 		printk("Bus - Read Bus Error on data load\n");
445 	if (reason & MCSR_BUS_WRERR)
446 		printk("Bus - Write Bus Error on buffered store or cache line push\n");
447 
448 	return 0;
449 }
450 #else
451 int machine_check_generic(struct pt_regs *regs)
452 {
453 	unsigned long reason = get_mc_reason(regs);
454 
455 	printk("Machine check in kernel mode.\n");
456 	printk("Caused by (from SRR1=%lx): ", reason);
457 	switch (reason & 0x601F0000) {
458 	case 0x80000:
459 		printk("Machine check signal\n");
460 		break;
461 	case 0:		/* for 601 */
462 	case 0x40000:
463 	case 0x140000:	/* 7450 MSS error and TEA */
464 		printk("Transfer error ack signal\n");
465 		break;
466 	case 0x20000:
467 		printk("Data parity error signal\n");
468 		break;
469 	case 0x10000:
470 		printk("Address parity error signal\n");
471 		break;
472 	case 0x20000000:
473 		printk("L1 Data Cache error\n");
474 		break;
475 	case 0x40000000:
476 		printk("L1 Instruction Cache error\n");
477 		break;
478 	case 0x00100000:
479 		printk("L2 data cache parity error\n");
480 		break;
481 	default:
482 		printk("Unknown values in msr\n");
483 	}
484 	return 0;
485 }
486 #endif /* everything else */
487 
488 void machine_check_exception(struct pt_regs *regs)
489 {
490 	int recover = 0;
491 
492 	/* See if there are any machine dependent calls. In theory, we would want
493 	 * to call the CPU first, and call the ppc_md. one if the CPU
494 	 * one returns a positive number. However there is existing code
495 	 * that assumes the board gets a first chance, so let's keep it
496 	 * that way for now and fix things later. --BenH.
497 	 */
498 	if (ppc_md.machine_check_exception)
499 		recover = ppc_md.machine_check_exception(regs);
500 	else if (cur_cpu_spec->machine_check)
501 		recover = cur_cpu_spec->machine_check(regs);
502 
503 	if (recover > 0)
504 		return;
505 
506 	if (user_mode(regs)) {
507 		regs->msr |= MSR_RI;
508 		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
509 		return;
510 	}
511 
512 #if defined(CONFIG_8xx) && defined(CONFIG_PCI)
513 	/* the qspan pci read routines can cause machine checks -- Cort
514 	 *
515 	 * yuck !!! that totally needs to go away ! There are better ways
516 	 * to deal with that than having a wart in the mcheck handler.
517 	 * -- BenH
518 	 */
519 	bad_page_fault(regs, regs->dar, SIGBUS);
520 	return;
521 #endif
522 
523 	if (debugger_fault_handler(regs)) {
524 		regs->msr |= MSR_RI;
525 		return;
526 	}
527 
528 	if (check_io_access(regs))
529 		return;
530 
531 	if (debugger_fault_handler(regs))
532 		return;
533 	die("Machine check", regs, SIGBUS);
534 
535 	/* Must die if the interrupt is not recoverable */
536 	if (!(regs->msr & MSR_RI))
537 		panic("Unrecoverable Machine check");
538 }
539 
540 void SMIException(struct pt_regs *regs)
541 {
542 	die("System Management Interrupt", regs, SIGABRT);
543 }
544 
545 void unknown_exception(struct pt_regs *regs)
546 {
547 	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
548 	       regs->nip, regs->msr, regs->trap);
549 
550 	_exception(SIGTRAP, regs, 0, 0);
551 }
552 
553 void instruction_breakpoint_exception(struct pt_regs *regs)
554 {
555 	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
556 					5, SIGTRAP) == NOTIFY_STOP)
557 		return;
558 	if (debugger_iabr_match(regs))
559 		return;
560 	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
561 }
562 
563 void RunModeException(struct pt_regs *regs)
564 {
565 	_exception(SIGTRAP, regs, 0, 0);
566 }
567 
568 void __kprobes single_step_exception(struct pt_regs *regs)
569 {
570 	regs->msr &= ~(MSR_SE | MSR_BE);  /* Turn off 'trace' bits */
571 
572 	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
573 					5, SIGTRAP) == NOTIFY_STOP)
574 		return;
575 	if (debugger_sstep(regs))
576 		return;
577 
578 	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
579 }
580 
581 /*
582  * After we have successfully emulated an instruction, we have to
583  * check if the instruction was being single-stepped, and if so,
584  * pretend we got a single-step exception.  This was pointed out
585  * by Kumar Gala.  -- paulus
586  */
587 static void emulate_single_step(struct pt_regs *regs)
588 {
589 	if (single_stepping(regs)) {
590 		clear_single_step(regs);
591 		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
592 	}
593 }
594 
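/*
 * Translate the FPSCR exception status into an FPE_* si_code.  A condition
 * is reported only when both its enable bit (e.g. FPSCR_VE) and its status
 * bit (e.g. FPSCR_VX) are set, and the checks below run in priority order,
 * so invalid operation wins over overflow, and so on.
 */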
595 static inline int __parse_fpscr(unsigned long fpscr)
596 {
597 	int ret = 0;
598 
599 	/* Invalid operation */
600 	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
601 		ret = FPE_FLTINV;
602 
603 	/* Overflow */
604 	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
605 		ret = FPE_FLTOVF;
606 
607 	/* Underflow */
608 	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
609 		ret = FPE_FLTUND;
610 
611 	/* Divide by zero */
612 	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
613 		ret = FPE_FLTDIV;
614 
615 	/* Inexact result */
616 	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
617 		ret = FPE_FLTRES;
618 
619 	return ret;
620 }
621 
622 static void parse_fpe(struct pt_regs *regs)
623 {
624 	int code = 0;
625 
626 	flush_fp_to_thread(current);
627 
628 	code = __parse_fpscr(current->thread.fpscr.val);
629 
630 	_exception(SIGFPE, regs, code, regs->nip);
631 }
632 
633 /*
634  * Illegal instruction emulation support.  Originally written to
635  * provide the PVR to user applications using the mfspr rd, PVR.
636  * Return non-zero if we can't emulate, or -EFAULT if the associated
637  * memory access caused an access fault.  Return zero on success.
638  *
639  * There are a couple of ways to do this, either "decode" the instruction
640  * or directly match lots of bits.  In this case, matching lots of
641  * bits is faster and easier.
642  *
643  */
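/*
 * Emulate lswi/lswx/stswi/stswx: move num_bytes bytes, one at a time,
 * between memory at EA and successive GPRs starting at rT, packing four
 * bytes per register word, most significant byte first, and wrapping from
 * r31 back to r0.  The byte count comes from XER for the X-forms, or from
 * the NB field (0 meaning 32) for the immediate forms.
 */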
644 static int emulate_string_inst(struct pt_regs *regs, u32 instword)
645 {
646 	u8 rT = (instword >> 21) & 0x1f;
647 	u8 rA = (instword >> 16) & 0x1f;
648 	u8 NB_RB = (instword >> 11) & 0x1f;
649 	u32 num_bytes;
650 	unsigned long EA;
651 	int pos = 0;
652 
653 	/* Early out if we are an invalid form of lswx */
654 	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
655 		if ((rT == rA) || (rT == NB_RB))
656 			return -EINVAL;
657 
658 	EA = (rA == 0) ? 0 : regs->gpr[rA];
659 
660 	switch (instword & PPC_INST_STRING_MASK) {
661 		case PPC_INST_LSWX:
662 		case PPC_INST_STSWX:
663 			EA += NB_RB;
664 			num_bytes = regs->xer & 0x7f;
665 			break;
666 		case PPC_INST_LSWI:
667 		case PPC_INST_STSWI:
668 			num_bytes = (NB_RB == 0) ? 32 : NB_RB;
669 			break;
670 		default:
671 			return -EINVAL;
672 	}
673 
674 	while (num_bytes != 0)
675 	{
676 		u8 val;
677 		u32 shift = 8 * (3 - (pos & 0x3));
678 
679 		switch ((instword & PPC_INST_STRING_MASK)) {
680 			case PPC_INST_LSWX:
681 			case PPC_INST_LSWI:
682 				if (get_user(val, (u8 __user *)EA))
683 					return -EFAULT;
684 				/* first time updating this reg,
685 				 * zero it out */
686 				if (pos == 0)
687 					regs->gpr[rT] = 0;
688 				regs->gpr[rT] |= val << shift;
689 				break;
690 			case PPC_INST_STSWI:
691 			case PPC_INST_STSWX:
692 				val = regs->gpr[rT] >> shift;
693 				if (put_user(val, (u8 __user *)EA))
694 					return -EFAULT;
695 				break;
696 		}
697 		/* move EA to next address */
698 		EA += 1;
699 		num_bytes--;
700 
701 		/* manage our position within the register */
702 		if (++pos == 4) {
703 			pos = 0;
704 			if (++rT == 32)
705 				rT = 0;
706 		}
707 	}
708 
709 	return 0;
710 }
711 
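/*
 * popcntb counts the 1-bits in each byte of rS and stores each count in
 * the corresponding byte of rA.  The emulation below is the usual
 * bit-parallel reduction (pairs, then nibbles, then bytes): for example,
 * a source byte of 0xf0 produces a result byte of 0x04.
 */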
712 static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
713 {
714 	u32 ra, rs;
715 	unsigned long tmp;
716 
717 	ra = (instword >> 16) & 0x1f;
718 	rs = (instword >> 21) & 0x1f;
719 
720 	tmp = regs->gpr[rs];
721 	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
722 	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
723 	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
724 	regs->gpr[ra] = tmp;
725 
726 	return 0;
727 }
728 
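/*
 * isel rT,rA,rB,BC: if bit BC of the CR is set, rT gets rA (or 0 when rA
 * is r0), otherwise rT gets rB.  The emulation reads the bit straight out
 * of the saved CR image in regs->ccr.
 */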
729 static int emulate_isel(struct pt_regs *regs, u32 instword)
730 {
731 	u8 rT = (instword >> 21) & 0x1f;
732 	u8 rA = (instword >> 16) & 0x1f;
733 	u8 rB = (instword >> 11) & 0x1f;
734 	u8 BC = (instword >> 6) & 0x1f;
735 	u8 bit;
736 	unsigned long tmp;
737 
738 	tmp = (rA == 0) ? 0 : regs->gpr[rA];
739 	bit = (regs->ccr >> (31 - BC)) & 0x1;
740 
741 	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];
742 
743 	return 0;
744 }
745 
746 static int emulate_instruction(struct pt_regs *regs)
747 {
748 	u32 instword;
749 	u32 rd;
750 
751 	if (!user_mode(regs) || (regs->msr & MSR_LE))
752 		return -EINVAL;
753 	CHECK_FULL_REGS(regs);
754 
755 	if (get_user(instword, (u32 __user *)(regs->nip)))
756 		return -EFAULT;
757 
758 	/* Emulate the mfspr rD, PVR. */
759 	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
760 		rd = (instword >> 21) & 0x1f;
761 		regs->gpr[rd] = mfspr(SPRN_PVR);
762 		return 0;
763 	}
764 
765 	/* Emulating the dcba insn is just a no-op.  */
766 	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA)
767 		return 0;
768 
769 	/* Emulate the mcrxr insn.  */
770 	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
771 		int shift = (instword >> 21) & 0x1c;
772 		unsigned long msk = 0xf0000000UL >> shift;
773 
774 		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
775 		regs->xer &= ~0xf0000000UL;
776 		return 0;
777 	}
778 
779 	/* Emulate load/store string insn. */
780 	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING)
781 		return emulate_string_inst(regs, instword);
782 
783 	/* Emulate the popcntb (Population Count Bytes) instruction. */
784 	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
785 		return emulate_popcntb_inst(regs, instword);
786 	}
787 
788 	/* Emulate isel (Integer Select) instruction */
789 	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
790 		return emulate_isel(regs, instword);
791 	}
792 
793 	return -EINVAL;
794 }
795 
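/*
 * Used by report_bug() to decide whether a trapping address can be a
 * BUG()/WARN_ON() site; on powerpc any kernel address qualifies.
 */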
796 int is_valid_bugaddr(unsigned long addr)
797 {
798 	return is_kernel_addr(addr);
799 }
800 
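/*
 * Program check: the cause (IEEE FP exception, trap instruction, illegal
 * or privileged instruction) is read via get_reason() above, i.e. from the
 * ESR on 4xx/BookE and from the SRR1 bits saved in regs->msr elsewhere.
 */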
801 void __kprobes program_check_exception(struct pt_regs *regs)
802 {
803 	unsigned int reason = get_reason(regs);
804 	extern int do_mathemu(struct pt_regs *regs);
805 
806 	/* We can now get here via a FP Unavailable exception if the core
807 	 * has no FPU, in that case the reason flags will be 0 */
808 
809 	if (reason & REASON_FP) {
810 		/* IEEE FP exception */
811 		parse_fpe(regs);
812 		return;
813 	}
814 	if (reason & REASON_TRAP) {
815 		/* trap exception */
816 		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
817 				== NOTIFY_STOP)
818 			return;
819 		if (debugger_bpt(regs))
820 			return;
821 
822 		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
823 		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
824 			regs->nip += 4;
825 			return;
826 		}
827 		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
828 		return;
829 	}
830 
831 	local_irq_enable();
832 
833 #ifdef CONFIG_MATH_EMULATION
834 	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
835 	 * but there seems to be a hardware bug on the 405GP (RevD)
836 	 * that means ESR is sometimes set incorrectly - either to
837 	 * ESR_DST (!?) or 0.  In the process of chasing this with the
838 	 * hardware people - not sure if it can happen on any illegal
839 	 * instruction or only on FP instructions, whether there is a
840 	 * pattern to occurrences etc. -dgibson 31/Mar/2003 */
841 	switch (do_mathemu(regs)) {
842 	case 0:
843 		emulate_single_step(regs);
844 		return;
845 	case 1: {
846 			int code = 0;
847 			code = __parse_fpscr(current->thread.fpscr.val);
848 			_exception(SIGFPE, regs, code, regs->nip);
849 			return;
850 		}
851 	case -EFAULT:
852 		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
853 		return;
854 	}
855 	/* fall through on any other errors */
856 #endif /* CONFIG_MATH_EMULATION */
857 
858 	/* Try to emulate it if we should. */
859 	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
860 		switch (emulate_instruction(regs)) {
861 		case 0:
862 			regs->nip += 4;
863 			emulate_single_step(regs);
864 			return;
865 		case -EFAULT:
866 			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
867 			return;
868 		}
869 	}
870 
871 	if (reason & REASON_PRIVILEGED)
872 		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
873 	else
874 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
875 }
876 
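/*
 * Alignment interrupt: unless the task asked for SIGBUS on unaligned
 * accesses (via prctl(PR_SET_UNALIGN)), try to fix the access up with
 * fix_alignment() and skip the instruction; otherwise deliver
 * SIGBUS/SIGSEGV, or treat it as a bad page fault in kernel mode.
 */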
877 void alignment_exception(struct pt_regs *regs)
878 {
879 	int sig, code, fixed = 0;
880 
881 	/* we don't implement logging of alignment exceptions */
882 	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
883 		fixed = fix_alignment(regs);
884 
885 	if (fixed == 1) {
886 		regs->nip += 4;	/* skip over emulated instruction */
887 		emulate_single_step(regs);
888 		return;
889 	}
890 
891 	/* Operand address was bad */
892 	if (fixed == -EFAULT) {
893 		sig = SIGSEGV;
894 		code = SEGV_ACCERR;
895 	} else {
896 		sig = SIGBUS;
897 		code = BUS_ADRALN;
898 	}
899 	if (user_mode(regs))
900 		_exception(sig, regs, code, regs->dar);
901 	else
902 		bad_page_fault(regs, regs->dar, sig);
903 }
904 
905 void StackOverflow(struct pt_regs *regs)
906 {
907 	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
908 	       current, regs->gpr[1]);
909 	debugger(regs);
910 	show_regs(regs);
911 	panic("kernel stack overflow");
912 }
913 
914 void nonrecoverable_exception(struct pt_regs *regs)
915 {
916 	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
917 	       regs->nip, regs->msr);
918 	debugger(regs);
919 	die("nonrecoverable exception", regs, SIGKILL);
920 }
921 
922 void trace_syscall(struct pt_regs *regs)
923 {
924 	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld    %s\n",
925 	       current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0],
926 	       regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
927 }
928 
929 void kernel_fp_unavailable_exception(struct pt_regs *regs)
930 {
931 	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
932 			  "%lx at %lx\n", regs->trap, regs->nip);
933 	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
934 }
935 
936 void altivec_unavailable_exception(struct pt_regs *regs)
937 {
938 	if (user_mode(regs)) {
939 		/* A user program has executed an altivec instruction,
940 		   but this kernel doesn't support altivec. */
941 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
942 		return;
943 	}
944 
945 	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
946 			"%lx at %lx\n", regs->trap, regs->nip);
947 	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
948 }
949 
950 void vsx_unavailable_exception(struct pt_regs *regs)
951 {
952 	if (user_mode(regs)) {
953 		/* A user program has executed a vsx instruction,
954 		   but this kernel doesn't support vsx. */
955 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
956 		return;
957 	}
958 
959 	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
960 			"%lx at %lx\n", regs->trap, regs->nip);
961 	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
962 }
963 
964 void performance_monitor_exception(struct pt_regs *regs)
965 {
966 	perf_irq(regs);
967 }
968 
969 #ifdef CONFIG_8xx
970 void SoftwareEmulation(struct pt_regs *regs)
971 {
972 	extern int do_mathemu(struct pt_regs *);
973 	extern int Soft_emulate_8xx(struct pt_regs *);
974 #if defined(CONFIG_MATH_EMULATION) || defined(CONFIG_8XX_MINIMAL_FPEMU)
975 	int errcode;
976 #endif
977 
978 	CHECK_FULL_REGS(regs);
979 
980 	if (!user_mode(regs)) {
981 		debugger(regs);
982 		die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
983 	}
984 
985 #ifdef CONFIG_MATH_EMULATION
986 	errcode = do_mathemu(regs);
987 
988 	switch (errcode) {
989 	case 0:
990 		emulate_single_step(regs);
991 		return;
992 	case 1: {
993 			int code = 0;
994 			code = __parse_fpscr(current->thread.fpscr.val);
995 			_exception(SIGFPE, regs, code, regs->nip);
996 			return;
997 		}
998 	case -EFAULT:
999 		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1000 		return;
1001 	default:
1002 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1003 		return;
1004 	}
1005 
1006 #elif defined(CONFIG_8XX_MINIMAL_FPEMU)
1007 	errcode = Soft_emulate_8xx(regs);
1008 	switch (errcode) {
1009 	case 0:
1010 		emulate_single_step(regs);
1011 		return;
1012 	case 1:
1013 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1014 		return;
1015 	case -EFAULT:
1016 		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1017 		return;
1018 	}
1019 #else
1020 	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1021 #endif
1022 }
1023 #endif /* CONFIG_8xx */
1024 
1025 #if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
1026 
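/*
 * 4xx/BookE debug interrupt: the cause is reported in the DBSR.  The
 * handler below covers instruction completion (hardware single step),
 * which becomes a SIGTRAP, and data address compare (DAC) hits, which are
 * passed on to do_dabr().
 */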
1027 void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
1028 {
1029 	if (debug_status & DBSR_IC) {	/* instruction completion */
1030 		regs->msr &= ~MSR_DE;
1031 
1032 		/* Disable instruction completion */
1033 		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
1034 		/* Clear the instruction completion event */
1035 		mtspr(SPRN_DBSR, DBSR_IC);
1036 
1037 		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
1038 			       5, SIGTRAP) == NOTIFY_STOP) {
1039 			return;
1040 		}
1041 
1042 		if (debugger_sstep(regs))
1043 			return;
1044 
1045 		if (user_mode(regs)) {
1046 			current->thread.dbcr0 &= ~DBCR0_IC;
1047 		}
1048 
1049 		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
1050 	} else if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
1051 		regs->msr &= ~MSR_DE;
1052 
1053 		if (user_mode(regs)) {
1054 			current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W |
1055 								DBCR0_IDM);
1056 		} else {
1057 			/* Disable DAC interrupts */
1058 			mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R |
1059 						DBSR_DAC1W | DBCR0_IDM));
1060 
1061 			/* Clear the DAC event */
1062 			mtspr(SPRN_DBSR, (DBSR_DAC1R | DBSR_DAC1W));
1063 		}
1064 		/* Setup and send the trap to the handler */
1065 		do_dabr(regs, mfspr(SPRN_DAC1), debug_status);
1066 	}
1067 }
1068 #endif /* CONFIG_40x || CONFIG_BOOKE */
1069 
1070 #if !defined(CONFIG_TAU_INT)
1071 void TAUException(struct pt_regs *regs)
1072 {
1073 	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
1074 	       regs->nip, regs->msr, regs->trap, print_tainted());
1075 }
1076 #endif /* !CONFIG_TAU_INT */
1077 
1078 #ifdef CONFIG_ALTIVEC
1079 void altivec_assist_exception(struct pt_regs *regs)
1080 {
1081 	int err;
1082 
1083 	if (!user_mode(regs)) {
1084 		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
1085 		       " at %lx\n", regs->nip);
1086 		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
1087 	}
1088 
1089 	flush_altivec_to_thread(current);
1090 
1091 	err = emulate_altivec(regs);
1092 	if (err == 0) {
1093 		regs->nip += 4;		/* skip emulated instruction */
1094 		emulate_single_step(regs);
1095 		return;
1096 	}
1097 
1098 	if (err == -EFAULT) {
1099 		/* got an error reading the instruction */
1100 		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
1101 	} else {
1102 		/* didn't recognize the instruction */
1103 		/* XXX quick hack for now: set the non-Java bit in the VSCR */
1104 		if (printk_ratelimit())
1105 			printk(KERN_ERR "Unrecognized altivec instruction "
1106 			       "in %s at %lx\n", current->comm, regs->nip);
1107 		current->thread.vscr.u[3] |= 0x10000;
1108 	}
1109 }
1110 #endif /* CONFIG_ALTIVEC */
1111 
1112 #ifdef CONFIG_VSX
1113 void vsx_assist_exception(struct pt_regs *regs)
1114 {
1115 	if (!user_mode(regs)) {
1116 		printk(KERN_EMERG "VSX assist exception in kernel mode"
1117 		       " at %lx\n", regs->nip);
1118 		die("Kernel VSX assist exception", regs, SIGILL);
1119 	}
1120 
1121 	flush_vsx_to_thread(current);
1122 	printk(KERN_INFO "VSX assist not supported at %lx\n", regs->nip);
1123 	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1124 }
1125 #endif /* CONFIG_VSX */
1126 
1127 #ifdef CONFIG_FSL_BOOKE
1128 
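/*
 * Freescale BookE doorbell interrupt: used here as the SMP IPI, so drain
 * any message bits pending for this CPU and hand them to
 * smp_message_recv().
 */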
1129 void doorbell_exception(struct pt_regs *regs)
1130 {
1131 #ifdef CONFIG_SMP
1132 	int cpu = smp_processor_id();
1133 	int msg;
1134 
1135 	if (num_online_cpus() < 2)
1136 		return;
1137 
1138 	for (msg = 0; msg < 4; msg++)
1139 		if (test_and_clear_bit(msg, &dbell_smp_message[cpu]))
1140 			smp_message_recv(msg);
1141 #else
1142 	printk(KERN_WARNING "Received doorbell on non-smp system\n");
1143 #endif
1144 }
1145 
1146 void CacheLockingException(struct pt_regs *regs, unsigned long address,
1147 			   unsigned long error_code)
1148 {
1149 	/* We treat cache locking instructions from the user
1150 	 * as privileged ops; in the future we could try to do
1151 	 * something smarter.
1152 	 */
1153 	if (error_code & (ESR_DLK|ESR_ILK))
1154 		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
1155 	return;
1156 }
1157 #endif /* CONFIG_FSL_BOOKE */
1158 
1159 #ifdef CONFIG_SPE
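/*
 * SPE floating-point exception: flush the SPE state to the thread_struct,
 * map the SPEFSCR status bits to an FPE_* code (honouring the exception
 * modes the task selected with prctl(PR_SET_FPEXC)), then let
 * do_spe_mathemu() try to emulate the instruction before falling back to
 * SIGFPE.
 */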
1160 void SPEFloatingPointException(struct pt_regs *regs)
1161 {
1162 	extern int do_spe_mathemu(struct pt_regs *regs);
1163 	unsigned long spefscr;
1164 	int fpexc_mode;
1165 	int code = 0;
1166 	int err;
1167 
1168 	preempt_disable();
1169 	if (regs->msr & MSR_SPE)
1170 		giveup_spe(current);
1171 	preempt_enable();
1172 
1173 	spefscr = current->thread.spefscr;
1174 	fpexc_mode = current->thread.fpexc_mode;
1175 
1176 	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
1177 		code = FPE_FLTOVF;
1178 	}
1179 	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
1180 		code = FPE_FLTUND;
1181 	}
1182 	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
1183 		code = FPE_FLTDIV;
1184 	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
1185 		code = FPE_FLTINV;
1186 	}
1187 	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
1188 		code = FPE_FLTRES;
1189 
1190 	err = do_spe_mathemu(regs);
1191 	if (err == 0) {
1192 		regs->nip += 4;		/* skip emulated instruction */
1193 		emulate_single_step(regs);
1194 		return;
1195 	}
1196 
1197 	if (err == -EFAULT) {
1198 		/* got an error reading the instruction */
1199 		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
1200 	} else if (err == -EINVAL) {
1201 		/* didn't recognize the instruction */
1202 		printk(KERN_ERR "unrecognized spe instruction "
1203 		       "in %s at %lx\n", current->comm, regs->nip);
1204 	} else {
1205 		_exception(SIGFPE, regs, code, regs->nip);
1206 	}
1207 
1208 	return;
1209 }
1210 
1211 void SPEFloatingPointRoundException(struct pt_regs *regs)
1212 {
1213 	extern int speround_handler(struct pt_regs *regs);
1214 	int err;
1215 
1216 	preempt_disable();
1217 	if (regs->msr & MSR_SPE)
1218 		giveup_spe(current);
1219 	preempt_enable();
1220 
1221 	regs->nip -= 4;
1222 	err = speround_handler(regs);
1223 	if (err == 0) {
1224 		regs->nip += 4;		/* skip emulated instruction */
1225 		emulate_single_step(regs);
1226 		return;
1227 	}
1228 
1229 	if (err == -EFAULT) {
1230 		/* got an error reading the instruction */
1231 		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
1232 	} else if (err == -EINVAL) {
1233 		/* didn't recognize the instruction */
1234 		printk(KERN_ERR "unrecognized spe instruction "
1235 		       "in %s at %lx\n", current->comm, regs->nip);
1236 	} else {
1237 		_exception(SIGFPE, regs, 0, regs->nip);
1238 		return;
1239 	}
1240 }
1241 #endif
1242 
1243 /*
1244  * We enter here if we get an unrecoverable exception, that is, one
1245  * that happened at a point where the RI (recoverable interrupt) bit
1246  * in the MSR is 0.  This indicates that SRR0/1 are live, and that
1247  * we therefore lost state by taking this exception.
1248  */
1249 void unrecoverable_exception(struct pt_regs *regs)
1250 {
1251 	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
1252 	       regs->trap, regs->nip);
1253 	die("Unrecoverable exception", regs, SIGABRT);
1254 }
1255 
1256 #ifdef CONFIG_BOOKE_WDT
1257 /*
1258  * Default handler for a Watchdog exception; it just masks the
1259  * watchdog interrupt (clears TCR[WIE]).  Override it as needed.
1260  */
1261 void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
1262 {
1263 	/* Generic WatchdogHandler, implement your own */
1264 	mtspr(SPRN_TCR, mfspr(SPRN_TCR) & ~TCR_WIE);
1265 	return;
1266 }
1267 
1268 void WatchdogException(struct pt_regs *regs)
1269 {
1270 	printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
1271 	WatchdogHandler(regs);
1272 }
1273 #endif
1274 
1275 /*
1276  * We enter here if we discover during exception entry that we are
1277  * running in supervisor mode with a userspace value in the stack pointer.
1278  */
1279 void kernel_bad_stack(struct pt_regs *regs)
1280 {
1281 	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
1282 	       regs->gpr[1], regs->nip);
1283 	die("Bad kernel stack pointer", regs, SIGABRT);
1284 }
1285 
1286 void __init trap_init(void)
1287 {
1288 }
1289