xref: /openbmc/linux/arch/powerpc/kernel/traps.c (revision 96de0e252cedffad61b3cb5e05662c591898e69a)
1 /*
2  *  Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
3  *
4  *  This program is free software; you can redistribute it and/or
5  *  modify it under the terms of the GNU General Public License
6  *  as published by the Free Software Foundation; either version
7  *  2 of the License, or (at your option) any later version.
8  *
9  *  Modified by Cort Dougan (cort@cs.nmt.edu)
10  *  and Paul Mackerras (paulus@samba.org)
11  */
12 
13 /*
14  * This file handles the architecture-dependent parts of hardware exceptions
15  */
16 
17 #include <linux/errno.h>
18 #include <linux/sched.h>
19 #include <linux/kernel.h>
20 #include <linux/mm.h>
21 #include <linux/stddef.h>
22 #include <linux/unistd.h>
23 #include <linux/ptrace.h>
24 #include <linux/slab.h>
25 #include <linux/user.h>
26 #include <linux/a.out.h>
27 #include <linux/interrupt.h>
28 #include <linux/init.h>
29 #include <linux/module.h>
30 #include <linux/prctl.h>
31 #include <linux/delay.h>
32 #include <linux/kprobes.h>
33 #include <linux/kexec.h>
34 #include <linux/backlight.h>
35 #include <linux/bug.h>
36 #include <linux/kdebug.h>
37 
38 #include <asm/pgtable.h>
39 #include <asm/uaccess.h>
40 #include <asm/system.h>
41 #include <asm/io.h>
42 #include <asm/machdep.h>
43 #include <asm/rtas.h>
44 #include <asm/pmc.h>
45 #ifdef CONFIG_PPC32
46 #include <asm/reg.h>
47 #endif
48 #ifdef CONFIG_PMAC_BACKLIGHT
49 #include <asm/backlight.h>
50 #endif
51 #ifdef CONFIG_PPC64
52 #include <asm/firmware.h>
53 #include <asm/processor.h>
54 #endif
55 #include <asm/kexec.h>
56 
57 #ifdef CONFIG_DEBUGGER
58 int (*__debugger)(struct pt_regs *regs);
59 int (*__debugger_ipi)(struct pt_regs *regs);
60 int (*__debugger_bpt)(struct pt_regs *regs);
61 int (*__debugger_sstep)(struct pt_regs *regs);
62 int (*__debugger_iabr_match)(struct pt_regs *regs);
63 int (*__debugger_dabr_match)(struct pt_regs *regs);
64 int (*__debugger_fault_handler)(struct pt_regs *regs);
65 
66 EXPORT_SYMBOL(__debugger);
67 EXPORT_SYMBOL(__debugger_ipi);
68 EXPORT_SYMBOL(__debugger_bpt);
69 EXPORT_SYMBOL(__debugger_sstep);
70 EXPORT_SYMBOL(__debugger_iabr_match);
71 EXPORT_SYMBOL(__debugger_dabr_match);
72 EXPORT_SYMBOL(__debugger_fault_handler);
73 #endif
74 
75 /*
76  * Trap & Exception support
77  */
78 
79 #ifdef CONFIG_PMAC_BACKLIGHT
80 static void pmac_backlight_unblank(void)
81 {
82 	mutex_lock(&pmac_backlight_mutex);
83 	if (pmac_backlight) {
84 		struct backlight_properties *props;
85 
86 		props = &pmac_backlight->props;
87 		props->brightness = props->max_brightness;
88 		props->power = FB_BLANK_UNBLANK;
89 		backlight_update_status(pmac_backlight);
90 	}
91 	mutex_unlock(&pmac_backlight_mutex);
92 }
93 #else
94 static inline void pmac_backlight_unblank(void) { }
95 #endif
96 
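/*
 * Print an oops report and take the offending context down.  A small
 * owner-tracking spinlock serializes concurrent oopses (nested die()
 * calls are tolerated, but output is suppressed after two levels), the
 * config/platform banner, modules and register state are printed, and
 * kdump or panic is triggered where configured.  Returns 1 if an
 * attached debugger handled the event instead.
 */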
97 int die(const char *str, struct pt_regs *regs, long err)
98 {
99 	static struct {
100 		spinlock_t lock;
101 		u32 lock_owner;
102 		int lock_owner_depth;
103 	} die = {
104 		.lock =			__SPIN_LOCK_UNLOCKED(die.lock),
105 		.lock_owner =		-1,
106 		.lock_owner_depth =	0
107 	};
108 	static int die_counter;
109 	unsigned long flags;
110 
111 	if (debugger(regs))
112 		return 1;
113 
114 	oops_enter();
115 
116 	if (die.lock_owner != raw_smp_processor_id()) {
117 		console_verbose();
118 		spin_lock_irqsave(&die.lock, flags);
119 		die.lock_owner = smp_processor_id();
120 		die.lock_owner_depth = 0;
121 		bust_spinlocks(1);
122 		if (machine_is(powermac))
123 			pmac_backlight_unblank();
124 	} else {
125 		local_save_flags(flags);
126 	}
127 
128 	if (++die.lock_owner_depth < 3) {
129 		printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
130 #ifdef CONFIG_PREEMPT
131 		printk("PREEMPT ");
132 #endif
133 #ifdef CONFIG_SMP
134 		printk("SMP NR_CPUS=%d ", NR_CPUS);
135 #endif
136 #ifdef CONFIG_DEBUG_PAGEALLOC
137 		printk("DEBUG_PAGEALLOC ");
138 #endif
139 #ifdef CONFIG_NUMA
140 		printk("NUMA ");
141 #endif
142 		printk("%s\n", ppc_md.name ? ppc_md.name : "");
143 
144 		print_modules();
145 		show_regs(regs);
146 	} else {
147 		printk("Recursive die() failure, output suppressed\n");
148 	}
149 
150 	bust_spinlocks(0);
151 	die.lock_owner = -1;
152 	add_taint(TAINT_DIE);
153 	spin_unlock_irqrestore(&die.lock, flags);
154 
155 	if (kexec_should_crash(current) ||
156 		kexec_sr_activated(smp_processor_id()))
157 		crash_kexec(regs);
158 	crash_kexec_secondary(regs);
159 
160 	if (in_interrupt())
161 		panic("Fatal exception in interrupt");
162 
163 	if (panic_on_oops)
164 		panic("Fatal exception");
165 
166 	oops_exit();
167 	do_exit(err);
168 
169 	return 0;
170 }
171 
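/*
 * Common signal-delivery path for synchronous exceptions.  Faults taken
 * in kernel mode go to die(); otherwise the given signal is forced on
 * the current task with the supplied si_code and fault address.  If the
 * signal is aimed at init and init has no handler installed for it,
 * init is killed outright so it cannot retake the same exception
 * forever.
 */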
172 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
173 {
174 	siginfo_t info;
175 	const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
176 			"at %08lx nip %08lx lr %08lx code %x\n";
177 	const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
178 			"at %016lx nip %016lx lr %016lx code %x\n";
179 
180 	if (!user_mode(regs)) {
181 		if (die("Exception in kernel mode", regs, signr))
182 			return;
183 	} else if (show_unhandled_signals &&
184 		    unhandled_signal(current, signr) &&
185 		    printk_ratelimit()) {
186 			printk(regs->msr & MSR_SF ? fmt64 : fmt32,
187 				current->comm, current->pid, signr,
188 				addr, regs->nip, regs->link, code);
189 		}
190 
191 	memset(&info, 0, sizeof(info));
192 	info.si_signo = signr;
193 	info.si_code = code;
194 	info.si_addr = (void __user *) addr;
195 	force_sig_info(signr, &info, current);
196 
197 	/*
198 	 * Init gets no signals that it doesn't have a handler for.
199 	 * That's all very well, but if it has caused a synchronous
200 	 * exception and we ignore the resulting signal, it will just
201 	 * generate the same exception over and over again and we get
202 	 * nowhere.  Better to kill it and let the kernel panic.
203 	 */
204 	if (is_global_init(current)) {
205 		__sighandler_t handler;
206 
207 		spin_lock_irq(&current->sighand->siglock);
208 		handler = current->sighand->action[signr-1].sa.sa_handler;
209 		spin_unlock_irq(&current->sighand->siglock);
210 		if (handler == SIG_DFL) {
211 			/* init has generated a synchronous exception
212 			   and it doesn't have a handler for the signal */
213 			printk(KERN_CRIT "init has generated signal %d "
214 			       "but has no handler for it\n", signr);
215 			do_exit(signr);
216 		}
217 	}
218 }
219 
220 #ifdef CONFIG_PPC64
221 void system_reset_exception(struct pt_regs *regs)
222 {
223 	/* See if any machine dependent calls */
224 	if (ppc_md.system_reset_exception) {
225 		if (ppc_md.system_reset_exception(regs))
226 			return;
227 	}
228 
229 #ifdef CONFIG_KEXEC
230 	cpu_set(smp_processor_id(), cpus_in_sr);
231 #endif
232 
233 	die("System Reset", regs, SIGABRT);
234 
235 	/*
236 	 * Some CPUs, when released from the debugger, will execute this path.
237 	 * These CPUs entered the debugger via a soft-reset. If the CPU was
238 	 * hung before entering the debugger it will return to the hung
239 	 * state when exiting this function.  This causes a problem in
240 	 * kdump since the hung CPU(s) will not respond to the IPI sent
241 	 * from kdump. To prevent the problem we call crash_kexec_secondary()
242 	 * here. If a kdump had not been initiated or we exit the debugger
243 	 * with the "exit and recover" command (x), crash_kexec_secondary()
244 	 * will return after 5ms and the CPU returns to its previous state.
245 	 */
246 	crash_kexec_secondary(regs);
247 
248 	/* Must die if the interrupt is not recoverable */
249 	if (!(regs->msr & MSR_RI))
250 		panic("Unrecoverable System Reset");
251 
252 	/* What should we do here? We could issue a shutdown or hard reset. */
253 }
254 #endif
255 
256 /*
257  * I/O accesses can cause machine checks on powermacs.
258  * Check if the NIP corresponds to the address of a sync
259  * instruction for which there is an entry in the exception
260  * table.
261  * Note that the 601 only takes a machine check on TEA
262  * (transfer error ack) signal assertion, and does not
263  * set any of the top 16 bits of SRR1.
264  *  -- paulus.
265  */
266 static inline int check_io_access(struct pt_regs *regs)
267 {
268 #ifdef CONFIG_PPC32
269 	unsigned long msr = regs->msr;
270 	const struct exception_table_entry *entry;
271 	unsigned int *nip = (unsigned int *)regs->nip;
272 
273 	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
274 	    && (entry = search_exception_tables(regs->nip)) != NULL) {
275 		/*
276 		 * Check that it's a sync instruction, or somewhere
277 		 * in the twi; isync; nop sequence that inb/inw/inl uses.
278 		 * As the address is in the exception table
279 		 * we should be able to read the instr there.
280 		 * For the debug message, we look at the preceding
281 		 * load or store.
282 		 */
283 		if (*nip == 0x60000000)		/* nop */
284 			nip -= 2;
285 		else if (*nip == 0x4c00012c)	/* isync */
286 			--nip;
287 		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
288 			/* sync or twi */
289 			unsigned int rb;
290 
291 			--nip;
292 			rb = (*nip >> 11) & 0x1f;
293 			printk(KERN_DEBUG "%s bad port %lx at %p\n",
294 			       (*nip & 0x100)? "OUT to": "IN from",
295 			       regs->gpr[rb] - _IO_BASE, nip);
296 			regs->msr |= MSR_RI;
297 			regs->nip = entry->fixup;
298 			return 1;
299 		}
300 	}
301 #endif /* CONFIG_PPC32 */
302 	return 0;
303 }
304 
305 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
306 /* On 4xx and BookE, the reason for the machine check or program
307    exception is in the ESR. */
308 #define get_reason(regs)	((regs)->dsisr)
309 #ifndef CONFIG_FSL_BOOKE
310 #define get_mc_reason(regs)	((regs)->dsisr)
311 #else
312 #define get_mc_reason(regs)	(mfspr(SPRN_MCSR) & MCSR_MASK)
313 #endif
314 #define REASON_FP		ESR_FP
315 #define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
316 #define REASON_PRIVILEGED	ESR_PPR
317 #define REASON_TRAP		ESR_PTR
318 
319 /* single-step stuff */
320 #define single_stepping(regs)	(current->thread.dbcr0 & DBCR0_IC)
321 #define clear_single_step(regs)	(current->thread.dbcr0 &= ~DBCR0_IC)
322 
323 #else
324 /* On other processors, the reason for the machine check or program
325    exception is in the MSR. */
326 #define get_reason(regs)	((regs)->msr)
327 #define get_mc_reason(regs)	((regs)->msr)
328 #define REASON_FP		0x100000
329 #define REASON_ILLEGAL		0x80000
330 #define REASON_PRIVILEGED	0x40000
331 #define REASON_TRAP		0x20000
332 
333 #define single_stepping(regs)	((regs)->msr & MSR_SE)
334 #define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
335 #endif
336 
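/*
 * Decode and log the cause of a machine check from the platform's
 * status register: the ESR/MCSR on 4xx and BookE parts, or the SRR1
 * bits saved in regs->msr on classic PowerPC processors.
 */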
337 static int generic_machine_check_exception(struct pt_regs *regs)
338 {
339 	unsigned long reason = get_mc_reason(regs);
340 
341 #if defined(CONFIG_4xx) && !defined(CONFIG_440A)
342 	if (reason & ESR_IMCP) {
343 		printk("Instruction");
344 		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
345 	} else
346 		printk("Data");
347 	printk(" machine check in kernel mode.\n");
348 #elif defined(CONFIG_440A)
349 	printk("Machine check in kernel mode.\n");
350 	if (reason & ESR_IMCP){
351 		printk("Instruction Synchronous Machine Check exception\n");
352 		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
353 	}
354 	else {
355 		u32 mcsr = mfspr(SPRN_MCSR);
356 		if (mcsr & MCSR_IB)
357 			printk("Instruction Read PLB Error\n");
358 		if (mcsr & MCSR_DRB)
359 			printk("Data Read PLB Error\n");
360 		if (mcsr & MCSR_DWB)
361 			printk("Data Write PLB Error\n");
362 		if (mcsr & MCSR_TLBP)
363 			printk("TLB Parity Error\n");
364 		if (mcsr & MCSR_ICP){
365 			flush_instruction_cache();
366 			printk("I-Cache Parity Error\n");
367 		}
368 		if (mcsr & MCSR_DCSP)
369 			printk("D-Cache Search Parity Error\n");
370 		if (mcsr & MCSR_DCFP)
371 			printk("D-Cache Flush Parity Error\n");
372 		if (mcsr & MCSR_IMPE)
373 			printk("Machine Check exception is imprecise\n");
374 
375 		/* Clear MCSR */
376 		mtspr(SPRN_MCSR, mcsr);
377 	}
378 #elif defined (CONFIG_E500)
379 	printk("Machine check in kernel mode.\n");
380 	printk("Caused by (from MCSR=%lx): ", reason);
381 
382 	if (reason & MCSR_MCP)
383 		printk("Machine Check Signal\n");
384 	if (reason & MCSR_ICPERR)
385 		printk("Instruction Cache Parity Error\n");
386 	if (reason & MCSR_DCP_PERR)
387 		printk("Data Cache Push Parity Error\n");
388 	if (reason & MCSR_DCPERR)
389 		printk("Data Cache Parity Error\n");
390 	if (reason & MCSR_BUS_IAERR)
391 		printk("Bus - Instruction Address Error\n");
392 	if (reason & MCSR_BUS_RAERR)
393 		printk("Bus - Read Address Error\n");
394 	if (reason & MCSR_BUS_WAERR)
395 		printk("Bus - Write Address Error\n");
396 	if (reason & MCSR_BUS_IBERR)
397 		printk("Bus - Instruction Data Error\n");
398 	if (reason & MCSR_BUS_RBERR)
399 		printk("Bus - Read Data Bus Error\n");
400 	if (reason & MCSR_BUS_WBERR)
401 		printk("Bus - Write Data Bus Error\n");
402 	if (reason & MCSR_BUS_IPERR)
403 		printk("Bus - Instruction Parity Error\n");
404 	if (reason & MCSR_BUS_RPERR)
405 		printk("Bus - Read Parity Error\n");
406 #elif defined (CONFIG_E200)
407 	printk("Machine check in kernel mode.\n");
408 	printk("Caused by (from MCSR=%lx): ", reason);
409 
410 	if (reason & MCSR_MCP)
411 		printk("Machine Check Signal\n");
412 	if (reason & MCSR_CP_PERR)
413 		printk("Cache Push Parity Error\n");
414 	if (reason & MCSR_CPERR)
415 		printk("Cache Parity Error\n");
416 	if (reason & MCSR_EXCP_ERR)
417 		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
418 	if (reason & MCSR_BUS_IRERR)
419 		printk("Bus - Read Bus Error on instruction fetch\n");
420 	if (reason & MCSR_BUS_DRERR)
421 		printk("Bus - Read Bus Error on data load\n");
422 	if (reason & MCSR_BUS_WRERR)
423 		printk("Bus - Write Bus Error on buffered store or cache line push\n");
424 #else /* !CONFIG_4xx && !CONFIG_E500 && !CONFIG_E200 */
425 	printk("Machine check in kernel mode.\n");
426 	printk("Caused by (from SRR1=%lx): ", reason);
427 	switch (reason & 0x601F0000) {
428 	case 0x80000:
429 		printk("Machine check signal\n");
430 		break;
431 	case 0:		/* for 601 */
432 	case 0x40000:
433 	case 0x140000:	/* 7450 MSS error and TEA */
434 		printk("Transfer error ack signal\n");
435 		break;
436 	case 0x20000:
437 		printk("Data parity error signal\n");
438 		break;
439 	case 0x10000:
440 		printk("Address parity error signal\n");
441 		break;
442 	case 0x20000000:
443 		printk("L1 Data Cache error\n");
444 		break;
445 	case 0x40000000:
446 		printk("L1 Instruction Cache error\n");
447 		break;
448 	case 0x00100000:
449 		printk("L2 data cache parity error\n");
450 		break;
451 	default:
452 		printk("Unknown values in msr\n");
453 	}
454 #endif /* CONFIG_4xx */
455 
456 	return 0;
457 }
458 
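/*
 * Top-level machine check handler: let the platform (ppc_md) or the
 * generic decoder above attempt recovery, deliver SIGBUS for faults
 * taken in user mode, then try the I/O-access fixup and the debugger
 * hooks before giving up and calling die()/panic().
 */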
459 void machine_check_exception(struct pt_regs *regs)
460 {
461 	int recover = 0;
462 
463 	/* See if any machine dependent calls */
464 	if (ppc_md.machine_check_exception)
465 		recover = ppc_md.machine_check_exception(regs);
466 	else
467 		recover = generic_machine_check_exception(regs);
468 
469 	if (recover)
470 		return;
471 
472 	if (user_mode(regs)) {
473 		regs->msr |= MSR_RI;
474 		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
475 		return;
476 	}
477 
478 #if defined(CONFIG_8xx) && defined(CONFIG_PCI)
479 	/* the qspan pci read routines can cause machine checks -- Cort */
480 	bad_page_fault(regs, regs->dar, SIGBUS);
481 	return;
482 #endif
483 
484 	if (debugger_fault_handler(regs)) {
485 		regs->msr |= MSR_RI;
486 		return;
487 	}
488 
489 	if (check_io_access(regs))
490 		return;
491 
492 	if (debugger_fault_handler(regs))
493 		return;
494 	die("Machine check", regs, SIGBUS);
495 
496 	/* Must die if the interrupt is not recoverable */
497 	if (!(regs->msr & MSR_RI))
498 		panic("Unrecoverable Machine check");
499 }
500 
501 void SMIException(struct pt_regs *regs)
502 {
503 	die("System Management Interrupt", regs, SIGABRT);
504 }
505 
506 void unknown_exception(struct pt_regs *regs)
507 {
508 	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
509 	       regs->nip, regs->msr, regs->trap);
510 
511 	_exception(SIGTRAP, regs, 0, 0);
512 }
513 
514 void instruction_breakpoint_exception(struct pt_regs *regs)
515 {
516 	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
517 					5, SIGTRAP) == NOTIFY_STOP)
518 		return;
519 	if (debugger_iabr_match(regs))
520 		return;
521 	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
522 }
523 
524 void RunModeException(struct pt_regs *regs)
525 {
526 	_exception(SIGTRAP, regs, 0, 0);
527 }
528 
529 void __kprobes single_step_exception(struct pt_regs *regs)
530 {
531 	regs->msr &= ~(MSR_SE | MSR_BE);  /* Turn off 'trace' bits */
532 
533 	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
534 					5, SIGTRAP) == NOTIFY_STOP)
535 		return;
536 	if (debugger_sstep(regs))
537 		return;
538 
539 	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
540 }
541 
542 /*
543  * After we have successfully emulated an instruction, we have to
544  * check if the instruction was being single-stepped, and if so,
545  * pretend we got a single-step exception.  This was pointed out
546  * by Kumar Gala.  -- paulus
547  */
548 static void emulate_single_step(struct pt_regs *regs)
549 {
550 	if (single_stepping(regs)) {
551 		clear_single_step(regs);
552 		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
553 	}
554 }
555 
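/*
 * Map the FPSCR state to an FPE_FLT* si_code.  An exception is
 * reported only when both its enable bit and its corresponding status
 * bit are set.
 */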
556 static inline int __parse_fpscr(unsigned long fpscr)
557 {
558 	int ret = 0;
559 
560 	/* Invalid operation */
561 	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
562 		ret = FPE_FLTINV;
563 
564 	/* Overflow */
565 	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
566 		ret = FPE_FLTOVF;
567 
568 	/* Underflow */
569 	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
570 		ret = FPE_FLTUND;
571 
572 	/* Divide by zero */
573 	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
574 		ret = FPE_FLTDIV;
575 
576 	/* Inexact result */
577 	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
578 		ret = FPE_FLTRES;
579 
580 	return ret;
581 }
582 
583 static void parse_fpe(struct pt_regs *regs)
584 {
585 	int code = 0;
586 
587 	flush_fp_to_thread(current);
588 
589 	code = __parse_fpscr(current->thread.fpscr.val);
590 
591 	_exception(SIGFPE, regs, code, regs->nip);
592 }
593 
594 /*
595  * Illegal instruction emulation support.  Originally written to
596  * provide the PVR to user applications using the mfspr rd, PVR.
597  * Return non-zero if we can't emulate, or -EFAULT if the associated
598  * memory access caused an access fault.  Return zero on success.
599  *
600  * There are a couple of ways to do this, either "decode" the instruction
601  * or directly match lots of bits.  In this case, matching lots of
602  * bits is faster and easier.
603  *
604  */
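/*
 * Illustrative user-space fragment (an example for documentation, not
 * part of the original file): reading the PVR from user mode.  Where
 * mfspr from SPR 287 (the PVR) is a privileged operation, the access
 * traps to program_check_exception() and emulate_instruction() below
 * supplies the value:
 *
 *	unsigned long pvr;
 *	asm volatile("mfspr %0,287" : "=r" (pvr));
 */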
605 #define INST_MFSPR_PVR		0x7c1f42a6
606 #define INST_MFSPR_PVR_MASK	0xfc1fffff
607 
608 #define INST_DCBA		0x7c0005ec
609 #define INST_DCBA_MASK		0xfc0007fe
610 
611 #define INST_MCRXR		0x7c000400
612 #define INST_MCRXR_MASK		0xfc0007fe
613 
614 #define INST_STRING		0x7c00042a
615 #define INST_STRING_MASK	0xfc0007fe
616 #define INST_STRING_GEN_MASK	0xfc00067e
617 #define INST_LSWI		0x7c0004aa
618 #define INST_LSWX		0x7c00042a
619 #define INST_STSWI		0x7c0005aa
620 #define INST_STSWX		0x7c00052a
621 
622 #define INST_POPCNTB		0x7c0000f4
623 #define INST_POPCNTB_MASK	0xfc0007fe
624 
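/*
 * Emulate the load/store string instructions (lswi/lswx/stswi/stswx):
 * num_bytes bytes are moved one at a time between memory at EA and
 * consecutive registers starting at rT, four bytes per register with
 * the most significant byte first, wrapping from r31 back to r0.
 */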
625 static int emulate_string_inst(struct pt_regs *regs, u32 instword)
626 {
627 	u8 rT = (instword >> 21) & 0x1f;
628 	u8 rA = (instword >> 16) & 0x1f;
629 	u8 NB_RB = (instword >> 11) & 0x1f;
630 	u32 num_bytes;
631 	unsigned long EA;
632 	int pos = 0;
633 
634 	/* Early out if we are an invalid form of lswx */
635 	if ((instword & INST_STRING_MASK) == INST_LSWX)
636 		if ((rT == rA) || (rT == NB_RB))
637 			return -EINVAL;
638 
639 	EA = (rA == 0) ? 0 : regs->gpr[rA];
640 
641 	switch (instword & INST_STRING_MASK) {
642 		case INST_LSWX:
643 		case INST_STSWX:
644 			EA += NB_RB;
645 			num_bytes = regs->xer & 0x7f;
646 			break;
647 		case INST_LSWI:
648 		case INST_STSWI:
649 			num_bytes = (NB_RB == 0) ? 32 : NB_RB;
650 			break;
651 		default:
652 			return -EINVAL;
653 	}
654 
655 	while (num_bytes != 0)
656 	{
657 		u8 val;
658 		u32 shift = 8 * (3 - (pos & 0x3));
659 
660 		switch ((instword & INST_STRING_MASK)) {
661 			case INST_LSWX:
662 			case INST_LSWI:
663 				if (get_user(val, (u8 __user *)EA))
664 					return -EFAULT;
665 				/* first time updating this reg,
666 				 * zero it out */
667 				if (pos == 0)
668 					regs->gpr[rT] = 0;
669 				regs->gpr[rT] |= val << shift;
670 				break;
671 			case INST_STSWI:
672 			case INST_STSWX:
673 				val = regs->gpr[rT] >> shift;
674 				if (put_user(val, (u8 __user *)EA))
675 					return -EFAULT;
676 				break;
677 		}
678 		/* move EA to next address */
679 		EA += 1;
680 		num_bytes--;
681 
682 		/* manage our position within the register */
683 		if (++pos == 4) {
684 			pos = 0;
685 			if (++rT == 32)
686 				rT = 0;
687 		}
688 	}
689 
690 	return 0;
691 }
692 
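/*
 * Emulate popcntb rA,rS with the classic parallel (SWAR) bit count:
 * after the three mask-and-add passes below, each byte of tmp holds
 * the number of set bits in the corresponding byte of rS, which is
 * the popcntb result written to rA.
 */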
693 static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
694 {
695 	u32 ra,rs;
696 	unsigned long tmp;
697 
698 	ra = (instword >> 16) & 0x1f;
699 	rs = (instword >> 21) & 0x1f;
700 
701 	tmp = regs->gpr[rs];
702 	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
703 	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
704 	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
705 	regs->gpr[ra] = tmp;
706 
707 	return 0;
708 }
709 
710 static int emulate_instruction(struct pt_regs *regs)
711 {
712 	u32 instword;
713 	u32 rd;
714 
715 	if (!user_mode(regs) || (regs->msr & MSR_LE))
716 		return -EINVAL;
717 	CHECK_FULL_REGS(regs);
718 
719 	if (get_user(instword, (u32 __user *)(regs->nip)))
720 		return -EFAULT;
721 
722 	/* Emulate the mfspr rD, PVR. */
723 	if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
724 		rd = (instword >> 21) & 0x1f;
725 		regs->gpr[rd] = mfspr(SPRN_PVR);
726 		return 0;
727 	}
728 
729 	/* Emulating the dcba insn is just a no-op.  */
730 	if ((instword & INST_DCBA_MASK) == INST_DCBA)
731 		return 0;
732 
733 	/* Emulate the mcrxr insn.  */
734 	if ((instword & INST_MCRXR_MASK) == INST_MCRXR) {
735 		int shift = (instword >> 21) & 0x1c;
736 		unsigned long msk = 0xf0000000UL >> shift;
737 
738 		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
739 		regs->xer &= ~0xf0000000UL;
740 		return 0;
741 	}
742 
743 	/* Emulate load/store string insn. */
744 	if ((instword & INST_STRING_GEN_MASK) == INST_STRING)
745 		return emulate_string_inst(regs, instword);
746 
747 	/* Emulate the popcntb (Population Count Bytes) instruction. */
748 	if ((instword & INST_POPCNTB_MASK) == INST_POPCNTB) {
749 		return emulate_popcntb_inst(regs, instword);
750 	}
751 
752 	return -EINVAL;
753 }
754 
755 int is_valid_bugaddr(unsigned long addr)
756 {
757 	return is_kernel_addr(addr);
758 }
759 
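/*
 * Program check handler: IEEE FP exceptions go to parse_fpe(), trap
 * instructions go to the debugger/BUG() machinery or raise SIGTRAP,
 * and illegal or privileged instructions are passed to the emulation
 * helpers above, falling back to SIGILL when nothing can handle them.
 */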
760 void __kprobes program_check_exception(struct pt_regs *regs)
761 {
762 	unsigned int reason = get_reason(regs);
763 	extern int do_mathemu(struct pt_regs *regs);
764 
765 	/* We can now get here via a FP Unavailable exception if the core
766 	 * has no FPU; in that case the reason flags will be 0 */
767 
768 	if (reason & REASON_FP) {
769 		/* IEEE FP exception */
770 		parse_fpe(regs);
771 		return;
772 	}
773 	if (reason & REASON_TRAP) {
774 		/* trap exception */
775 		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
776 				== NOTIFY_STOP)
777 			return;
778 		if (debugger_bpt(regs))
779 			return;
780 
781 		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
782 		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
783 			regs->nip += 4;
784 			return;
785 		}
786 		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
787 		return;
788 	}
789 
790 	local_irq_enable();
791 
792 #ifdef CONFIG_MATH_EMULATION
793 	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
794 	 * but there seems to be a hardware bug on the 405GP (RevD)
795 	 * that means ESR is sometimes set incorrectly - either to
796 	 * ESR_DST (!?) or 0.  In the process of chasing this with the
797 	 * hardware people - not sure if it can happen on any illegal
798 	 * instruction or only on FP instructions, whether there is a
799 	 * pattern to occurrences etc. -dgibson 31/Mar/2003 */
800 	switch (do_mathemu(regs)) {
801 	case 0:
802 		emulate_single_step(regs);
803 		return;
804 	case 1: {
805 			int code = 0;
806 			code = __parse_fpscr(current->thread.fpscr.val);
807 			_exception(SIGFPE, regs, code, regs->nip);
808 			return;
809 		}
810 	case -EFAULT:
811 		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
812 		return;
813 	}
814 	/* fall through on any other errors */
815 #endif /* CONFIG_MATH_EMULATION */
816 
817 	/* Try to emulate it if we should. */
818 	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
819 		switch (emulate_instruction(regs)) {
820 		case 0:
821 			regs->nip += 4;
822 			emulate_single_step(regs);
823 			return;
824 		case -EFAULT:
825 			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
826 			return;
827 		}
828 	}
829 
830 	if (reason & REASON_PRIVILEGED)
831 		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
832 	else
833 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
834 }
835 
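/*
 * Alignment exception handler: unless the task asked for SIGBUS via
 * PR_UNALIGN_SIGBUS, try to fix up the access with fix_alignment();
 * if that fails, deliver SIGSEGV/SIGBUS to user mode or take the
 * kernel bad_page_fault() path.
 */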
836 void alignment_exception(struct pt_regs *regs)
837 {
838 	int sig, code, fixed = 0;
839 
840 	/* we don't implement logging of alignment exceptions */
841 	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
842 		fixed = fix_alignment(regs);
843 
844 	if (fixed == 1) {
845 		regs->nip += 4;	/* skip over emulated instruction */
846 		emulate_single_step(regs);
847 		return;
848 	}
849 
850 	/* Operand address was bad */
851 	if (fixed == -EFAULT) {
852 		sig = SIGSEGV;
853 		code = SEGV_ACCERR;
854 	} else {
855 		sig = SIGBUS;
856 		code = BUS_ADRALN;
857 	}
858 	if (user_mode(regs))
859 		_exception(sig, regs, code, regs->dar);
860 	else
861 		bad_page_fault(regs, regs->dar, sig);
862 }
863 
864 void StackOverflow(struct pt_regs *regs)
865 {
866 	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
867 	       current, regs->gpr[1]);
868 	debugger(regs);
869 	show_regs(regs);
870 	panic("kernel stack overflow");
871 }
872 
873 void nonrecoverable_exception(struct pt_regs *regs)
874 {
875 	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
876 	       regs->nip, regs->msr);
877 	debugger(regs);
878 	die("nonrecoverable exception", regs, SIGKILL);
879 }
880 
881 void trace_syscall(struct pt_regs *regs)
882 {
883 	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld    %s\n",
884 	       current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0],
885 	       regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
886 }
887 
888 void kernel_fp_unavailable_exception(struct pt_regs *regs)
889 {
890 	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
891 			  "%lx at %lx\n", regs->trap, regs->nip);
892 	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
893 }
894 
895 void altivec_unavailable_exception(struct pt_regs *regs)
896 {
897 	if (user_mode(regs)) {
898 		/* A user program has executed an altivec instruction,
899 		   but this kernel doesn't support altivec. */
900 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
901 		return;
902 	}
903 
904 	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
905 			"%lx at %lx\n", regs->trap, regs->nip);
906 	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
907 }
908 
909 void performance_monitor_exception(struct pt_regs *regs)
910 {
911 	perf_irq(regs);
912 }
913 
914 #ifdef CONFIG_8xx
915 void SoftwareEmulation(struct pt_regs *regs)
916 {
917 	extern int do_mathemu(struct pt_regs *);
918 	extern int Soft_emulate_8xx(struct pt_regs *);
919 #if defined(CONFIG_MATH_EMULATION) || defined(CONFIG_8XX_MINIMAL_FPEMU)
920 	int errcode;
921 #endif
922 
923 	CHECK_FULL_REGS(regs);
924 
925 	if (!user_mode(regs)) {
926 		debugger(regs);
927 		die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
928 	}
929 
930 #ifdef CONFIG_MATH_EMULATION
931 	errcode = do_mathemu(regs);
932 
933 	switch (errcode) {
934 	case 0:
935 		emulate_single_step(regs);
936 		return;
937 	case 1: {
938 			int code = 0;
939 			code = __parse_fpscr(current->thread.fpscr.val);
940 			_exception(SIGFPE, regs, code, regs->nip);
941 			return;
942 		}
943 	case -EFAULT:
944 		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
945 		return;
946 	default:
947 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
948 		return;
949 	}
950 
951 #elif defined(CONFIG_8XX_MINIMAL_FPEMU)
952 	errcode = Soft_emulate_8xx(regs);
953 	switch (errcode) {
954 	case 0:
955 		emulate_single_step(regs);
956 		return;
957 	case 1:
958 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
959 		return;
960 	case -EFAULT:
961 		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
962 		return;
963 	}
964 #else
965 	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
966 #endif
967 }
968 #endif /* CONFIG_8xx */
969 
970 #if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
971 
972 void DebugException(struct pt_regs *regs, unsigned long debug_status)
973 {
974 	if (debug_status & DBSR_IC) {	/* instruction completion */
975 		regs->msr &= ~MSR_DE;
976 		if (user_mode(regs)) {
977 			current->thread.dbcr0 &= ~DBCR0_IC;
978 		} else {
979 			/* Disable instruction completion */
980 			mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
981 			/* Clear the instruction completion event */
982 			mtspr(SPRN_DBSR, DBSR_IC);
983 			if (debugger_sstep(regs))
984 				return;
985 		}
986 		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
987 	}
988 }
989 #endif /* CONFIG_40x || CONFIG_BOOKE */
990 
991 #if !defined(CONFIG_TAU_INT)
992 void TAUException(struct pt_regs *regs)
993 {
994 	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
995 	       regs->nip, regs->msr, regs->trap, print_tainted());
996 }
997 #endif /* CONFIG_TAU_INT */
998 
999 #ifdef CONFIG_ALTIVEC
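/*
 * Altivec assist exception: flush the task's vector state and try to
 * emulate the offending instruction; if it cannot be emulated, set the
 * non-Java (NJ) bit in the VSCR as a fallback.
 */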
1000 void altivec_assist_exception(struct pt_regs *regs)
1001 {
1002 	int err;
1003 
1004 	if (!user_mode(regs)) {
1005 		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
1006 		       " at %lx\n", regs->nip);
1007 		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
1008 	}
1009 
1010 	flush_altivec_to_thread(current);
1011 
1012 	err = emulate_altivec(regs);
1013 	if (err == 0) {
1014 		regs->nip += 4;		/* skip emulated instruction */
1015 		emulate_single_step(regs);
1016 		return;
1017 	}
1018 
1019 	if (err == -EFAULT) {
1020 		/* got an error reading the instruction */
1021 		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
1022 	} else {
1023 		/* didn't recognize the instruction */
1024 		/* XXX quick hack for now: set the non-Java bit in the VSCR */
1025 		if (printk_ratelimit())
1026 			printk(KERN_ERR "Unrecognized altivec instruction "
1027 			       "in %s at %lx\n", current->comm, regs->nip);
1028 		current->thread.vscr.u[3] |= 0x10000;
1029 	}
1030 }
1031 #endif /* CONFIG_ALTIVEC */
1032 
1033 #ifdef CONFIG_FSL_BOOKE
1034 void CacheLockingException(struct pt_regs *regs, unsigned long address,
1035 			   unsigned long error_code)
1036 {
1037 	/* We treat cache locking instructions from the user
1038 	 * as priv ops; in the future we could try to do
1039 	 * something smarter
1040 	 */
1041 	if (error_code & (ESR_DLK|ESR_ILK))
1042 		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
1043 	return;
1044 }
1045 #endif /* CONFIG_FSL_BOOKE */
1046 
1047 #ifdef CONFIG_SPE
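/*
 * SPE floating-point exception: map the SPEFSCR status bits, filtered
 * by the task's fpexc_mode, to an FPE_* si_code, set the corresponding
 * sticky flag, and deliver SIGFPE.
 */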
1048 void SPEFloatingPointException(struct pt_regs *regs)
1049 {
1050 	unsigned long spefscr;
1051 	int fpexc_mode;
1052 	int code = 0;
1053 
1054 	spefscr = current->thread.spefscr;
1055 	fpexc_mode = current->thread.fpexc_mode;
1056 
1057 	/* Hardware does not necessarily set sticky
1058 	 * underflow/overflow/invalid flags */
1059 	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
1060 		code = FPE_FLTOVF;
1061 		spefscr |= SPEFSCR_FOVFS;
1062 	}
1063 	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
1064 		code = FPE_FLTUND;
1065 		spefscr |= SPEFSCR_FUNFS;
1066 	}
1067 	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
1068 		code = FPE_FLTDIV;
1069 	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
1070 		code = FPE_FLTINV;
1071 		spefscr |= SPEFSCR_FINVS;
1072 	}
1073 	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
1074 		code = FPE_FLTRES;
1075 
1076 	current->thread.spefscr = spefscr;
1077 
1078 	_exception(SIGFPE, regs, code, regs->nip);
1079 	return;
1080 }
1081 #endif
1082 
1083 /*
1084  * We enter here if we get an unrecoverable exception, that is, one
1085  * that happened at a point where the RI (recoverable interrupt) bit
1086  * in the MSR is 0.  This indicates that SRR0/1 are live, and that
1087  * we therefore lost state by taking this exception.
1088  */
1089 void unrecoverable_exception(struct pt_regs *regs)
1090 {
1091 	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
1092 	       regs->trap, regs->nip);
1093 	die("Unrecoverable exception", regs, SIGABRT);
1094 }
1095 
1096 #ifdef CONFIG_BOOKE_WDT
1097 /*
1098  * Default handler for a Watchdog exception,
1099  * spins until a reboot occurs
1100  */
1101 void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
1102 {
1103 	/* Generic WatchdogHandler, implement your own */
1104 	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
1105 	return;
1106 }
1107 
1108 void WatchdogException(struct pt_regs *regs)
1109 {
1110 	printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
1111 	WatchdogHandler(regs);
1112 }
1113 #endif
1114 
1115 /*
1116  * We enter here if we discover during exception entry that we are
1117  * running in supervisor mode with a userspace value in the stack pointer.
1118  */
1119 void kernel_bad_stack(struct pt_regs *regs)
1120 {
1121 	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
1122 	       regs->gpr[1], regs->nip);
1123 	die("Bad kernel stack pointer", regs, SIGABRT);
1124 }
1125 
1126 void __init trap_init(void)
1127 {
1128 }
1129