/*
 *  Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 *  Modified by Cort Dougan (cort@cs.nmt.edu)
 *  and Paul Mackerras (paulus@samba.org)
 */

/*
 * This file handles the architecture-dependent parts of hardware exceptions
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>

#include <asm/kdebug.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#ifdef CONFIG_PPC32
#include <asm/reg.h>
#endif
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
#endif
#include <asm/kexec.h>

#ifdef CONFIG_DEBUGGER
int (*__debugger)(struct pt_regs *regs);
int (*__debugger_ipi)(struct pt_regs *regs);
int (*__debugger_bpt)(struct pt_regs *regs);
int (*__debugger_sstep)(struct pt_regs *regs);
int (*__debugger_iabr_match)(struct pt_regs *regs);
int (*__debugger_dabr_match)(struct pt_regs *regs);
int (*__debugger_fault_handler)(struct pt_regs *regs);

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_dabr_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

ATOMIC_NOTIFIER_HEAD(powerpc_die_chain);

int register_die_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&powerpc_die_chain, nb);
}
EXPORT_SYMBOL(register_die_notifier);

int unregister_die_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&powerpc_die_chain, nb);
}
EXPORT_SYMBOL(unregister_die_notifier);

/*
 * Trap & Exception support
 */

static DEFINE_SPINLOCK(die_lock);

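/*
 * Central oops path: quiesce the console, dump the kernel configuration
 * and register state under die_lock, give kexec a chance to take a
 * crash dump, then panic or kill the offending task.  Returns non-zero
 * only if the debugger handled the event.
 */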
int die(const char *str, struct pt_regs *regs, long err)
{
	static int die_counter;

	if (debugger(regs))
		return 1;

	oops_enter();

	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
#ifdef CONFIG_PMAC_BACKLIGHT
	mutex_lock(&pmac_backlight_mutex);
	if (machine_is(powermac) && pmac_backlight) {
		struct backlight_properties *props;

		props = &pmac_backlight->props;
		props->brightness = props->max_brightness;
		props->power = FB_BLANK_UNBLANK;
		backlight_update_status(pmac_backlight);
	}
	mutex_unlock(&pmac_backlight_mutex);
#endif
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP NR_CPUS=%d ", NR_CPUS);
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC ");
#endif
#ifdef CONFIG_NUMA
	printk("NUMA ");
#endif
	printk("%s\n", ppc_md.name ? ppc_md.name : "");

	print_modules();
	show_regs(regs);
	bust_spinlocks(0);
	spin_unlock_irq(&die_lock);

	if (kexec_should_crash(current) ||
		kexec_sr_activated(smp_processor_id()))
		crash_kexec(regs);
	crash_kexec_secondary(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	oops_exit();
	do_exit(err);

	return 0;
}

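/*
 * Deliver a signal for a synchronous exception.  Faults in kernel mode
 * are routed to die() instead; for user mode the signal is forced on
 * the current task with the supplied si_code and fault address.
 */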
void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	siginfo_t info;

	if (!user_mode(regs)) {
		if (die("Exception in kernel mode", regs, signr))
			return;
	}

	memset(&info, 0, sizeof(info));
	info.si_signo = signr;
	info.si_code = code;
	info.si_addr = (void __user *) addr;
	force_sig_info(signr, &info, current);

	/*
	 * Init gets no signals that it doesn't have a handler for.
	 * That's all very well, but if it has caused a synchronous
	 * exception and we ignore the resulting signal, it will just
	 * generate the same exception over and over again and we get
	 * nowhere.  Better to kill it and let the kernel panic.
	 */
	if (is_init(current)) {
		__sighandler_t handler;

		spin_lock_irq(&current->sighand->siglock);
		handler = current->sighand->action[signr-1].sa.sa_handler;
		spin_unlock_irq(&current->sighand->siglock);
		if (handler == SIG_DFL) {
			/* init has generated a synchronous exception
			   and it doesn't have a handler for the signal */
			printk(KERN_CRIT "init has generated signal %d "
			       "but has no handler for it\n", signr);
			do_exit(signr);
		}
	}
}

#ifdef CONFIG_PPC64
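/*
 * System reset (soft-reset) handling for 64-bit.  The platform gets the
 * first chance to recover; otherwise this CPU is noted for a possible
 * kdump capture and we fall into die().
 */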
void system_reset_exception(struct pt_regs *regs)
{
	/* See if any machine dependent calls */
	if (ppc_md.system_reset_exception) {
		if (ppc_md.system_reset_exception(regs))
			return;
	}

#ifdef CONFIG_KEXEC
	cpu_set(smp_processor_id(), cpus_in_sr);
#endif

	die("System Reset", regs, SIGABRT);

	/*
	 * Some CPUs when released from the debugger will execute this path.
	 * These CPUs entered the debugger via a soft-reset. If the CPU was
	 * hung before entering the debugger it will return to the hung
	 * state when exiting this function.  This causes a problem in
	 * kdump since the hung CPU(s) will not respond to the IPI sent
	 * from kdump. To prevent the problem we call crash_kexec_secondary()
	 * here. If a kdump had not been initiated or we exit the debugger
	 * with the "exit and recover" command (x) crash_kexec_secondary()
	 * will return after 5ms and the CPU returns to its previous state.
	 */
	crash_kexec_secondary(regs);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable System Reset");

	/* What should we do here? We could issue a shutdown or hard reset. */
}
#endif

/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

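	/*
	 * SRR1 tells us why the machine check happened: either none of
	 * the top 16 bits are set (the 601 case described above), or the
	 * machine check signal (0x80000) / TEA (0x40000) bits are on,
	 * matching the cases decoded in machine_check_exception() below.
	 */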
	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == 0x60000000)		/* nop */
			nip -= 2;
		else if (*nip == 0x4c00012c)	/* isync */
			--nip;
		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
			/* sync or twi */
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100)? "OUT to": "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			regs->msr |= MSR_RI;
			regs->nip = entry->fixup;
			return 1;
		}
	}
#endif /* CONFIG_PPC32 */
	return 0;
}

#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)	((regs)->dsisr)
#ifndef CONFIG_FSL_BOOKE
#define get_mc_reason(regs)	((regs)->dsisr)
#else
#define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
#endif
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR

/* single-step stuff */
#define single_stepping(regs)	(current->thread.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.dbcr0 &= ~DBCR0_IC)

#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define get_mc_reason(regs)	((regs)->msr)
#define REASON_FP		0x100000
#define REASON_ILLEGAL		0x80000
#define REASON_PRIVILEGED	0x40000
#define REASON_TRAP		0x20000

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#endif

/*
 * This is a "fall-back" implementation for configurations
 * that don't provide platform-specific machine check info.
 */
void __attribute__ ((weak))
platform_machine_check(struct pt_regs *regs)
{
}

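/*
 * Common machine check handler.  Platform code gets the first chance to
 * recover; user-mode faults become SIGBUS; otherwise the reason bits are
 * decoded for the various core families before we die().
 */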
void machine_check_exception(struct pt_regs *regs)
{
	int recover = 0;
	unsigned long reason = get_mc_reason(regs);

	/* See if any machine dependent calls */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);

	if (recover)
		return;

	if (user_mode(regs)) {
		regs->msr |= MSR_RI;
		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
		return;
	}

#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
	/* the qspan pci read routines can cause machine checks -- Cort */
	bad_page_fault(regs, regs->dar, SIGBUS);
	return;
#endif

	if (debugger_fault_handler(regs)) {
		regs->msr |= MSR_RI;
		return;
	}

	if (check_io_access(regs))
		return;

#if defined(CONFIG_4xx) && !defined(CONFIG_440A)
	if (reason & ESR_IMCP) {
		printk("Instruction");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	} else
		printk("Data");
	printk(" machine check in kernel mode.\n");
#elif defined(CONFIG_440A)
	printk("Machine check in kernel mode.\n");
	if (reason & ESR_IMCP){
		printk("Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	}
	else {
		u32 mcsr = mfspr(SPRN_MCSR);
		if (mcsr & MCSR_IB)
			printk("Instruction Read PLB Error\n");
		if (mcsr & MCSR_DRB)
			printk("Data Read PLB Error\n");
		if (mcsr & MCSR_DWB)
			printk("Data Write PLB Error\n");
		if (mcsr & MCSR_TLBP)
			printk("TLB Parity Error\n");
		if (mcsr & MCSR_ICP){
			flush_instruction_cache();
			printk("I-Cache Parity Error\n");
		}
		if (mcsr & MCSR_DCSP)
			printk("D-Cache Search Parity Error\n");
		if (mcsr & MCSR_DCFP)
			printk("D-Cache Flush Parity Error\n");
		if (mcsr & MCSR_IMPE)
			printk("Machine Check exception is imprecise\n");

		/* Clear MCSR */
		mtspr(SPRN_MCSR, mcsr);
	}
#elif defined (CONFIG_E500)
	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_ICPERR)
		printk("Instruction Cache Parity Error\n");
	if (reason & MCSR_DCP_PERR)
		printk("Data Cache Push Parity Error\n");
	if (reason & MCSR_DCPERR)
		printk("Data Cache Parity Error\n");
	if (reason & MCSR_GL_CI)
		printk("Guarded Load or Cache-Inhibited stwcx.\n");
	if (reason & MCSR_BUS_IAERR)
		printk("Bus - Instruction Address Error\n");
	if (reason & MCSR_BUS_RAERR)
		printk("Bus - Read Address Error\n");
	if (reason & MCSR_BUS_WAERR)
		printk("Bus - Write Address Error\n");
	if (reason & MCSR_BUS_IBERR)
		printk("Bus - Instruction Data Error\n");
	if (reason & MCSR_BUS_RBERR)
		printk("Bus - Read Data Bus Error\n");
	if (reason & MCSR_BUS_WBERR)
		printk("Bus - Write Data Bus Error\n");
	if (reason & MCSR_BUS_IPERR)
		printk("Bus - Instruction Parity Error\n");
	if (reason & MCSR_BUS_RPERR)
		printk("Bus - Read Parity Error\n");
#elif defined (CONFIG_E200)
	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_CP_PERR)
		printk("Cache Push Parity Error\n");
	if (reason & MCSR_CPERR)
		printk("Cache Parity Error\n");
	if (reason & MCSR_EXCP_ERR)
		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
	if (reason & MCSR_BUS_IRERR)
		printk("Bus - Read Bus Error on instruction fetch\n");
	if (reason & MCSR_BUS_DRERR)
		printk("Bus - Read Bus Error on data load\n");
	if (reason & MCSR_BUS_WRERR)
		printk("Bus - Write Bus Error on buffered store or cache line push\n");
#else /* !CONFIG_4xx && !CONFIG_E500 && !CONFIG_E200 */
	printk("Machine check in kernel mode.\n");
	printk("Caused by (from SRR1=%lx): ", reason);
	switch (reason & 0x601F0000) {
	case 0x80000:
		printk("Machine check signal\n");
		break;
	case 0:		/* for 601 */
	case 0x40000:
	case 0x140000:	/* 7450 MSS error and TEA */
		printk("Transfer error ack signal\n");
		break;
	case 0x20000:
		printk("Data parity error signal\n");
		break;
	case 0x10000:
		printk("Address parity error signal\n");
		break;
	case 0x20000000:
		printk("L1 Data Cache error\n");
		break;
	case 0x40000000:
		printk("L1 Instruction Cache error\n");
		break;
	case 0x00100000:
		printk("L2 data cache parity error\n");
		break;
	default:
		printk("Unknown values in msr\n");
	}
#endif /* CONFIG_4xx */

	/*
	 * Optional platform-provided routine to print out
	 * additional info, e.g. bus error registers.
	 */
	platform_machine_check(regs);

	if (debugger_fault_handler(regs))
		return;
	die("Machine check", regs, SIGBUS);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable Machine check");
}

void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}

void unknown_exception(struct pt_regs *regs)
{
	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, 0, 0);
}

void instruction_breakpoint_exception(struct pt_regs *regs)
{
	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		return;
	if (debugger_iabr_match(regs))
		return;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
}

void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, 0, 0);
}

void __kprobes single_step_exception(struct pt_regs *regs)
{
	regs->msr &= ~(MSR_SE | MSR_BE);  /* Turn off 'trace' bits */

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		return;
	if (debugger_sstep(regs))
		return;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
}

/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception.  This was pointed out
 * by Kumar Gala.  -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs)) {
		clear_single_step(regs);
		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
	}
}

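/*
 * Map the FPSCR state to a siginfo FPE_* code.  A condition is reported
 * only when both its enable bit and its exception status bit are set,
 * as checked pairwise below.
 */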
static inline int __parse_fpscr(unsigned long fpscr)
{
	int ret = 0;

	/* Invalid operation */
	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
		ret = FPE_FLTINV;

	/* Overflow */
	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
		ret = FPE_FLTOVF;

	/* Underflow */
	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
		ret = FPE_FLTUND;

	/* Divide by zero */
	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
		ret = FPE_FLTDIV;

	/* Inexact result */
	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
		ret = FPE_FLTRES;

	return ret;
}

static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;

	flush_fp_to_thread(current);

	code = __parse_fpscr(current->thread.fpscr.val);

	_exception(SIGFPE, regs, code, regs->nip);
}

/*
 * Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 *
 */
#define INST_MFSPR_PVR		0x7c1f42a6
#define INST_MFSPR_PVR_MASK	0xfc1fffff

#define INST_DCBA		0x7c0005ec
#define INST_DCBA_MASK		0xfc0007fe

#define INST_MCRXR		0x7c000400
#define INST_MCRXR_MASK		0xfc0007fe

#define INST_STRING		0x7c00042a
#define INST_STRING_MASK	0xfc0007fe
#define INST_STRING_GEN_MASK	0xfc00067e
#define INST_LSWI		0x7c0004aa
#define INST_LSWX		0x7c00042a
#define INST_STSWI		0x7c0005aa
#define INST_STSWX		0x7c00052a

#define INST_POPCNTB		0x7c0000f4
#define INST_POPCNTB_MASK	0xfc0007fe

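/*
 * Emulate the load/store string instructions (lswi/lswx/stswi/stswx):
 * move num_bytes bytes one at a time between memory at EA and
 * successive GPRs starting at rT, packing four bytes per register.
 */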
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & INST_STRING_MASK) == INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & INST_STRING_MASK) {
		case INST_LSWX:
		case INST_STSWX:
			EA += NB_RB;
			num_bytes = regs->xer & 0x7f;
			break;
		case INST_LSWI:
		case INST_STSWI:
			num_bytes = (NB_RB == 0) ? 32 : NB_RB;
			break;
		default:
			return -EINVAL;
	}

	while (num_bytes != 0)
	{
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));

		switch ((instword & INST_STRING_MASK)) {
			case INST_LSWX:
			case INST_LSWI:
				if (get_user(val, (u8 __user *)EA))
					return -EFAULT;
				/* first time updating this reg,
				 * zero it out */
				if (pos == 0)
					regs->gpr[rT] = 0;
				regs->gpr[rT] |= val << shift;
				break;
			case INST_STSWI:
			case INST_STSWX:
				val = regs->gpr[rT] >> shift;
				if (put_user(val, (u8 __user *)EA))
					return -EFAULT;
				break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}

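/*
 * popcntb counts the 1-bits in each byte of rS and puts the per-byte
 * counts in the corresponding bytes of rA (e.g. rS = 0x0301 gives
 * rA = 0x0201).  The three steps below are the usual parallel (SWAR)
 * reduction: bit pairs, then nibbles, then whole bytes.
 */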
static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
	u32 ra, rs;
	unsigned long tmp;

	ra = (instword >> 16) & 0x1f;
	rs = (instword >> 21) & 0x1f;

	tmp = regs->gpr[rs];
	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
	regs->gpr[ra] = tmp;

	return 0;
}

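/*
 * Try to emulate the handful of instructions user space may use but the
 * hardware traps on: mfspr rD,PVR (e.g. "mfspr r3,287" = 0x7c7f42a6),
 * dcba, mcrxr, the string instructions and popcntb.  Returns 0 on
 * success, -EFAULT on a faulting access, -EINVAL if we can't emulate.
 */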
static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	if (!user_mode(regs) || (regs->msr & MSR_LE))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op.  */
	if ((instword & INST_DCBA_MASK) == INST_DCBA)
		return 0;

	/* Emulate the mcrxr insn.  */
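	/* mcrxr crfD copies the four high-order XER bits (SO, OV, CA) into
	 * CR field crfD and then clears them in the XER; the shift picks
	 * the destination field out of the CR. */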
	if ((instword & INST_MCRXR_MASK) == INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & INST_STRING_GEN_MASK) == INST_STRING)
		return emulate_string_inst(regs, instword);

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & INST_POPCNTB_MASK) == INST_POPCNTB) {
		return emulate_popcntb_inst(regs, instword);
	}

	return -EINVAL;
}

int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}

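/*
 * Program check: dispatch on the reason bits.  IEEE FP exceptions go to
 * parse_fpe(), trap instructions to the debugger/BUG() handling, and
 * illegal or privileged instructions are offered to the emulation code
 * before a SIGILL is delivered.
 */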
void __kprobes program_check_exception(struct pt_regs *regs)
{
	unsigned int reason = get_reason(regs);
	extern int do_mathemu(struct pt_regs *regs);

	/* We can now get here via a FP Unavailable exception if the core
	 * has no FPU, in that case the reason flags will be 0 */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		return;
	}
	if (reason & REASON_TRAP) {
		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
				== NOTIFY_STOP)
			return;
		if (debugger_bpt(regs))
			return;

		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
		    report_bug(regs->nip) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			return;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		return;
	}

	local_irq_enable();

#ifdef CONFIG_MATH_EMULATION
	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0.  In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurrences etc. -dgibson 31/Mar/2003 */
	switch (do_mathemu(regs)) {
	case 0:
		emulate_single_step(regs);
		return;
	case 1: {
			int code = 0;
			code = __parse_fpscr(current->thread.fpscr.val);
			_exception(SIGFPE, regs, code, regs->nip);
			return;
		}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return;
	}
	/* fall through on any other errors */
#endif /* CONFIG_MATH_EMULATION */

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs->nip += 4;
			emulate_single_step(regs);
			return;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			return;
		}
	}

	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}

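/*
 * Alignment exception: fix_alignment() returns 1 once it has emulated
 * the access and -EFAULT when the operand address was bad; anything
 * else ends up as SIGBUS/BUS_ADRALN (or a kernel bad_page_fault).
 */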
void alignment_exception(struct pt_regs *regs)
{
	int sig, code, fixed = 0;

	/* we don't implement logging of alignment exceptions */
	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		fixed = fix_alignment(regs);

	if (fixed == 1) {
		regs->nip += 4;	/* skip over emulated instruction */
		emulate_single_step(regs);
		return;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		sig = SIGSEGV;
		code = SEGV_ACCERR;
	} else {
		sig = SIGBUS;
		code = BUS_ADRALN;
	}
	if (user_mode(regs))
		_exception(sig, regs, code, regs->dar);
	else
		bad_page_fault(regs, regs->dar, sig);
}

void StackOverflow(struct pt_regs *regs)
{
	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
	       current, regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}

void nonrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
	       regs->nip, regs->msr);
	debugger(regs);
	die("nonrecoverable exception", regs, SIGKILL);
}

void trace_syscall(struct pt_regs *regs)
{
	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld    %s\n",
	       current, current->pid, regs->nip, regs->link, regs->gpr[0],
	       regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
}

void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
}

void altivec_unavailable_exception(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/* A user program has executed an altivec instruction,
		   but this kernel doesn't support altivec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
}

void performance_monitor_exception(struct pt_regs *regs)
{
	perf_irq(regs);
}

#ifdef CONFIG_8xx
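/*
 * The 8xx core has no FPU, so floating-point instructions land here and
 * are either run through the full math emulator or through the minimal
 * Soft_emulate_8xx() helper, depending on CONFIG_MATH_EMULATION.
 */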
void SoftwareEmulation(struct pt_regs *regs)
{
	extern int do_mathemu(struct pt_regs *);
	extern int Soft_emulate_8xx(struct pt_regs *);
	int errcode;

	CHECK_FULL_REGS(regs);

	if (!user_mode(regs)) {
		debugger(regs);
		die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
	}

#ifdef CONFIG_MATH_EMULATION
	errcode = do_mathemu(regs);

	switch (errcode) {
	case 0:
		emulate_single_step(regs);
		return;
	case 1: {
			int code = 0;
			code = __parse_fpscr(current->thread.fpscr.val);
			_exception(SIGFPE, regs, code, regs->nip);
			return;
		}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return;
	default:
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

#else
	errcode = Soft_emulate_8xx(regs);
	switch (errcode) {
	case 0:
		emulate_single_step(regs);
		return;
	case 1:
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return;
	}
#endif
}
#endif /* CONFIG_8xx */

#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)

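/*
 * BookE/4xx debug interrupt.  Only instruction-completion (single-step)
 * events are handled: the trace state is cleared and SIGTRAP raised,
 * unless the kernel debugger claims the event first.
 */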
void DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	if (debug_status & DBSR_IC) {	/* instruction completion */
		regs->msr &= ~MSR_DE;
		if (user_mode(regs)) {
			current->thread.dbcr0 &= ~DBCR0_IC;
		} else {
			/* Disable instruction completion */
			mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
			/* Clear the instruction completion event */
			mtspr(SPRN_DBSR, DBSR_IC);
			if (debugger_sstep(regs))
				return;
		}
		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
	}
}
#endif /* CONFIG_40x || CONFIG_BOOKE */

#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */

#ifdef CONFIG_ALTIVEC
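/*
 * AltiVec assist: the hardware could not complete a vector operation
 * (typically denormalized operands with the non-Java bit clear).  Let
 * emulate_altivec() finish it; if the instruction isn't recognized we
 * fall back to setting the non-Java bit in the VSCR.
 */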
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	flush_altivec_to_thread(current);

	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		if (printk_ratelimit())
			printk(KERN_ERR "Unrecognized altivec instruction "
			       "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	/* We treat cache locking instructions from the user
	 * as priv ops, in the future we could try to do
	 * something smarter
	 */
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	return;
}
#endif /* CONFIG_FSL_BOOKE */

#ifdef CONFIG_SPE
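/*
 * SPE floating-point exception: translate the SPEFSCR status bits into
 * an FPE_* si_code, honouring the per-task fpexc_mode mask, and set the
 * sticky flags that the hardware may not have set itself.
 */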
void SPEFloatingPointException(struct pt_regs *regs)
{
	unsigned long spefscr;
	int fpexc_mode;
	int code = 0;

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	/* Hardware does not necessarily set sticky
	 * underflow/overflow/invalid flags */
	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
		code = FPE_FLTOVF;
		spefscr |= SPEFSCR_FOVFS;
	}
	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
		code = FPE_FLTUND;
		spefscr |= SPEFSCR_FUNFS;
	}
	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
		code = FPE_FLTDIV;
	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
		code = FPE_FLTINV;
		spefscr |= SPEFSCR_FINVS;
	}
	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
		code = FPE_FLTRES;

	current->thread.spefscr = spefscr;

	_exception(SIGFPE, regs, code, regs->nip);
	return;
}
#endif

/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
	       regs->trap, regs->nip);
	die("Unrecoverable exception", regs, SIGABRT);
}

#ifdef CONFIG_BOOKE_WDT
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
	return;
}

void WatchdogException(struct pt_regs *regs)
{
	printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif

/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
	       regs->gpr[1], regs->nip);
	die("Bad kernel stack pointer", regs, SIGABRT);
}

void __init trap_init(void)
{
}