xref: /openbmc/linux/arch/powerpc/kernel/traps.c (revision 6031d9d9ad905b514bf45572bd1877fe6b5145ab)
1 /*
2  *  Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
3  *
4  *  This program is free software; you can redistribute it and/or
5  *  modify it under the terms of the GNU General Public License
6  *  as published by the Free Software Foundation; either version
7  *  2 of the License, or (at your option) any later version.
8  *
9  *  Modified by Cort Dougan (cort@cs.nmt.edu)
10  *  and Paul Mackerras (paulus@samba.org)
11  */
12 
13 /*
14  * This file handles the architecture-dependent parts of hardware exceptions
15  */
16 
17 #include <linux/errno.h>
18 #include <linux/sched.h>
19 #include <linux/kernel.h>
20 #include <linux/mm.h>
21 #include <linux/stddef.h>
22 #include <linux/unistd.h>
23 #include <linux/ptrace.h>
24 #include <linux/slab.h>
25 #include <linux/user.h>
26 #include <linux/a.out.h>
27 #include <linux/interrupt.h>
28 #include <linux/init.h>
29 #include <linux/module.h>
30 #include <linux/prctl.h>
31 #include <linux/delay.h>
32 #include <linux/kprobes.h>
33 #include <linux/kexec.h>
34 #include <linux/backlight.h>
35 #include <linux/bug.h>
36 
37 #include <asm/kdebug.h>
38 #include <asm/pgtable.h>
39 #include <asm/uaccess.h>
40 #include <asm/system.h>
41 #include <asm/io.h>
42 #include <asm/machdep.h>
43 #include <asm/rtas.h>
44 #include <asm/pmc.h>
45 #ifdef CONFIG_PPC32
46 #include <asm/reg.h>
47 #endif
48 #ifdef CONFIG_PMAC_BACKLIGHT
49 #include <asm/backlight.h>
50 #endif
51 #ifdef CONFIG_PPC64
52 #include <asm/firmware.h>
53 #include <asm/processor.h>
54 #endif
55 #include <asm/kexec.h>
56 
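/*
 * Hook pointers filled in at run time by an attached kernel debugger
 * (typically xmon or kgdb).  Each hook returns non-zero if it handled
 * the event, in which case normal exception processing is skipped.
 */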
57 #ifdef CONFIG_DEBUGGER
58 int (*__debugger)(struct pt_regs *regs);
59 int (*__debugger_ipi)(struct pt_regs *regs);
60 int (*__debugger_bpt)(struct pt_regs *regs);
61 int (*__debugger_sstep)(struct pt_regs *regs);
62 int (*__debugger_iabr_match)(struct pt_regs *regs);
63 int (*__debugger_dabr_match)(struct pt_regs *regs);
64 int (*__debugger_fault_handler)(struct pt_regs *regs);
65 
66 EXPORT_SYMBOL(__debugger);
67 EXPORT_SYMBOL(__debugger_ipi);
68 EXPORT_SYMBOL(__debugger_bpt);
69 EXPORT_SYMBOL(__debugger_sstep);
70 EXPORT_SYMBOL(__debugger_iabr_match);
71 EXPORT_SYMBOL(__debugger_dabr_match);
72 EXPORT_SYMBOL(__debugger_fault_handler);
73 #endif
74 
75 ATOMIC_NOTIFIER_HEAD(powerpc_die_chain);
76 
77 int register_die_notifier(struct notifier_block *nb)
78 {
79 	return atomic_notifier_chain_register(&powerpc_die_chain, nb);
80 }
81 EXPORT_SYMBOL(register_die_notifier);
82 
83 int unregister_die_notifier(struct notifier_block *nb)
84 {
85 	return atomic_notifier_chain_unregister(&powerpc_die_chain, nb);
86 }
87 EXPORT_SYMBOL(unregister_die_notifier);
88 
89 /*
90  * Trap & Exception support
91  */
92 
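/*
 * On PowerMac/PowerBook hardware, force the backlight back on at full
 * brightness so that the oops output is visible even if the panel was
 * blanked when the machine died.
 */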
93 #ifdef CONFIG_PMAC_BACKLIGHT
94 static void pmac_backlight_unblank(void)
95 {
96 	mutex_lock(&pmac_backlight_mutex);
97 	if (pmac_backlight) {
98 		struct backlight_properties *props;
99 
100 		props = &pmac_backlight->props;
101 		props->brightness = props->max_brightness;
102 		props->power = FB_BLANK_UNBLANK;
103 		backlight_update_status(pmac_backlight);
104 	}
105 	mutex_unlock(&pmac_backlight_mutex);
106 }
107 #else
108 static inline void pmac_backlight_unblank(void) { }
109 #endif
110 
111 static DEFINE_SPINLOCK(die_lock);
112 
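/*
 * Print an oops report (banner, kernel configuration, registers and
 * module list) for a fatal exception, give kexec/kdump a chance to
 * capture a crash dump, and then terminate the offending context.
 * Returns 1 if an attached debugger handled the event instead.
 */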
113 int die(const char *str, struct pt_regs *regs, long err)
114 {
115 	static int die_counter;
116 
117 	if (debugger(regs))
118 		return 1;
119 
120 	oops_enter();
121 
122 	console_verbose();
123 	spin_lock_irq(&die_lock);
124 	bust_spinlocks(1);
125 	if (machine_is(powermac))
126 		pmac_backlight_unblank();
127 
128 	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
129 #ifdef CONFIG_PREEMPT
130 	printk("PREEMPT ");
131 #endif
132 #ifdef CONFIG_SMP
133 	printk("SMP NR_CPUS=%d ", NR_CPUS);
134 #endif
135 #ifdef CONFIG_DEBUG_PAGEALLOC
136 	printk("DEBUG_PAGEALLOC ");
137 #endif
138 #ifdef CONFIG_NUMA
139 	printk("NUMA ");
140 #endif
141 	printk("%s\n", ppc_md.name ? "" : ppc_md.name);
142 
143 	print_modules();
144 	show_regs(regs);
145 	bust_spinlocks(0);
146 	spin_unlock_irq(&die_lock);
147 
148 	if (kexec_should_crash(current) ||
149 		kexec_sr_activated(smp_processor_id()))
150 		crash_kexec(regs);
151 	crash_kexec_secondary(regs);
152 
153 	if (in_interrupt())
154 		panic("Fatal exception in interrupt");
155 
156 	if (panic_on_oops)
157 		panic("Fatal exception");
158 
159 	oops_exit();
160 	do_exit(err);
161 
162 	return 0;
163 }
164 
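/*
 * Deliver a signal for a synchronous exception.  Exceptions taken in
 * kernel mode are routed to die(); user-mode exceptions become the
 * given signal with an appropriate siginfo (si_code and fault address).
 */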
165 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
166 {
167 	siginfo_t info;
168 
169 	if (!user_mode(regs)) {
170 		if (die("Exception in kernel mode", regs, signr))
171 			return;
172 	}
173 
174 	memset(&info, 0, sizeof(info));
175 	info.si_signo = signr;
176 	info.si_code = code;
177 	info.si_addr = (void __user *) addr;
178 	force_sig_info(signr, &info, current);
179 
180 	/*
181 	 * Init gets no signals that it doesn't have a handler for.
182 	 * That's all very well, but if it has caused a synchronous
183 	 * exception and we ignore the resulting signal, it will just
184 	 * generate the same exception over and over again and we get
185 	 * nowhere.  Better to kill it and let the kernel panic.
186 	 */
187 	if (is_init(current)) {
188 		__sighandler_t handler;
189 
190 		spin_lock_irq(&current->sighand->siglock);
191 		handler = current->sighand->action[signr-1].sa.sa_handler;
192 		spin_unlock_irq(&current->sighand->siglock);
193 		if (handler == SIG_DFL) {
194 			/* init has generated a synchronous exception
195 			   and it doesn't have a handler for the signal */
196 			printk(KERN_CRIT "init has generated signal %d "
197 			       "but has no handler for it\n", signr);
198 			do_exit(signr);
199 		}
200 	}
201 }
202 
203 #ifdef CONFIG_PPC64
204 void system_reset_exception(struct pt_regs *regs)
205 {
206 	/* See if any machine dependent calls can handle this */
207 	if (ppc_md.system_reset_exception) {
208 		if (ppc_md.system_reset_exception(regs))
209 			return;
210 	}
211 
212 #ifdef CONFIG_KEXEC
213 	cpu_set(smp_processor_id(), cpus_in_sr);
214 #endif
215 
216 	die("System Reset", regs, SIGABRT);
217 
218 	/*
219 	 * Some CPUs when released from the debugger will execute this path.
220 	 * These CPUs entered the debugger via a soft-reset. If the CPU was
221 	 * hung before entering the debugger it will return to the hung
222 	 * state when exiting this function.  This causes a problem in
223 	 * kdump since the hung CPU(s) will not respond to the IPI sent
224 	 * from kdump. To prevent the problem we call crash_kexec_secondary()
225 	 * here. If a kdump had not been initiated or we exit the debugger
226 	 * with the "exit and recover" command (x) crash_kexec_secondary()
227 	 * will return after 5ms and the CPU returns to its previous state.
228 	 */
229 	crash_kexec_secondary(regs);
230 
231 	/* Must die if the interrupt is not recoverable */
232 	if (!(regs->msr & MSR_RI))
233 		panic("Unrecoverable System Reset");
234 
235 	/* What should we do here? We could issue a shutdown or hard reset. */
236 }
237 #endif
238 
239 /*
240  * I/O accesses can cause machine checks on powermacs.
241  * Check if the NIP corresponds to the address of a sync
242  * instruction for which there is an entry in the exception
243  * table.
244  * Note that the 601 only takes a machine check on TEA
245  * (transfer error ack) signal assertion, and does not
246  * set any of the top 16 bits of SRR1.
247  *  -- paulus.
248  */
249 static inline int check_io_access(struct pt_regs *regs)
250 {
251 #ifdef CONFIG_PPC32
252 	unsigned long msr = regs->msr;
253 	const struct exception_table_entry *entry;
254 	unsigned int *nip = (unsigned int *)regs->nip;
255 
256 	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
257 	    && (entry = search_exception_tables(regs->nip)) != NULL) {
258 		/*
259 		 * Check that it's a sync instruction, or somewhere
260 		 * in the twi; isync; nop sequence that inb/inw/inl uses.
261 		 * As the address is in the exception table
262 		 * we should be able to read the instr there.
263 		 * For the debug message, we look at the preceding
264 		 * load or store.
265 		 */
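		/*
		 * A minimal sketch of the sequence we expect here (the exact
		 * code comes from the PPC32 I/O accessors and may vary):
		 *
		 *	lbz	rD,0(rA)	# faulting load (stb for OUT)
		 *	twi	0,rD,0
		 *	isync
		 *	nop
		 *
		 * regs->nip can point anywhere in the twi/isync/nop tail (or
		 * at the trailing sync of an out* accessor), so walk backwards
		 * to the load or store for the debug message.
		 */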
266 		if (*nip == 0x60000000)		/* nop */
267 			nip -= 2;
268 		else if (*nip == 0x4c00012c)	/* isync */
269 			--nip;
270 		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
271 			/* sync or twi */
272 			unsigned int rb;
273 
274 			--nip;
275 			rb = (*nip >> 11) & 0x1f;
276 			printk(KERN_DEBUG "%s bad port %lx at %p\n",
277 			       (*nip & 0x100)? "OUT to": "IN from",
278 			       regs->gpr[rb] - _IO_BASE, nip);
279 			regs->msr |= MSR_RI;
280 			regs->nip = entry->fixup;
281 			return 1;
282 		}
283 	}
284 #endif /* CONFIG_PPC32 */
285 	return 0;
286 }
287 
288 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
289 /* On 4xx and Book-E, the reason for the machine check or program
290    exception is in the ESR. */
291 #define get_reason(regs)	((regs)->dsisr)
292 #ifndef CONFIG_FSL_BOOKE
293 #define get_mc_reason(regs)	((regs)->dsisr)
294 #else
295 #define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
296 #endif
297 #define REASON_FP		ESR_FP
298 #define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
299 #define REASON_PRIVILEGED	ESR_PPR
300 #define REASON_TRAP		ESR_PTR
301 
302 /* single-step stuff */
303 #define single_stepping(regs)	(current->thread.dbcr0 & DBCR0_IC)
304 #define clear_single_step(regs)	(current->thread.dbcr0 &= ~DBCR0_IC)
305 
306 #else
307 /* On classic PPC (non-4xx, non-Book-E), the reason for the machine
308    check or program exception is in the MSR. */
309 #define get_reason(regs)	((regs)->msr)
310 #define get_mc_reason(regs)	((regs)->msr)
311 #define REASON_FP		0x100000
312 #define REASON_ILLEGAL		0x80000
313 #define REASON_PRIVILEGED	0x40000
314 #define REASON_TRAP		0x20000
315 
316 #define single_stepping(regs)	((regs)->msr & MSR_SE)
317 #define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
318 #endif
319 
320 /*
321  * This is "fall-back" implementation for configurations
322  * which don't provide platform-specific machine check info
323  */
324 void __attribute__ ((weak))
325 platform_machine_check(struct pt_regs *regs)
326 {
327 }
328 
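/*
 * Decode and report a machine check.  Platform code gets the first
 * chance to recover; user-mode faults are turned into SIGBUS, while
 * kernel-mode faults have their cause decoded from MCSR or SRR1
 * (depending on the core family) before the kernel dies.
 */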
329 void machine_check_exception(struct pt_regs *regs)
330 {
331 	int recover = 0;
332 	unsigned long reason = get_mc_reason(regs);
333 
334 	/* See if any machine dependent calls can handle this */
335 	if (ppc_md.machine_check_exception)
336 		recover = ppc_md.machine_check_exception(regs);
337 
338 	if (recover)
339 		return;
340 
341 	if (user_mode(regs)) {
342 		regs->msr |= MSR_RI;
343 		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
344 		return;
345 	}
346 
347 #if defined(CONFIG_8xx) && defined(CONFIG_PCI)
348 	/* the qspan pci read routines can cause machine checks -- Cort */
349 	bad_page_fault(regs, regs->dar, SIGBUS);
350 	return;
351 #endif
352 
353 	if (debugger_fault_handler(regs)) {
354 		regs->msr |= MSR_RI;
355 		return;
356 	}
357 
358 	if (check_io_access(regs))
359 		return;
360 
361 #if defined(CONFIG_4xx) && !defined(CONFIG_440A)
362 	if (reason & ESR_IMCP) {
363 		printk("Instruction");
364 		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
365 	} else
366 		printk("Data");
367 	printk(" machine check in kernel mode.\n");
368 #elif defined(CONFIG_440A)
369 	printk("Machine check in kernel mode.\n");
370 	if (reason & ESR_IMCP){
371 		printk("Instruction Synchronous Machine Check exception\n");
372 		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
373 	}
374 	else {
375 		u32 mcsr = mfspr(SPRN_MCSR);
376 		if (mcsr & MCSR_IB)
377 			printk("Instruction Read PLB Error\n");
378 		if (mcsr & MCSR_DRB)
379 			printk("Data Read PLB Error\n");
380 		if (mcsr & MCSR_DWB)
381 			printk("Data Write PLB Error\n");
382 		if (mcsr & MCSR_TLBP)
383 			printk("TLB Parity Error\n");
384 		if (mcsr & MCSR_ICP){
385 			flush_instruction_cache();
386 			printk("I-Cache Parity Error\n");
387 		}
388 		if (mcsr & MCSR_DCSP)
389 			printk("D-Cache Search Parity Error\n");
390 		if (mcsr & MCSR_DCFP)
391 			printk("D-Cache Flush Parity Error\n");
392 		if (mcsr & MCSR_IMPE)
393 			printk("Machine Check exception is imprecise\n");
394 
395 		/* Clear MCSR */
396 		mtspr(SPRN_MCSR, mcsr);
397 	}
398 #elif defined (CONFIG_E500)
399 	printk("Machine check in kernel mode.\n");
400 	printk("Caused by (from MCSR=%lx): ", reason);
401 
402 	if (reason & MCSR_MCP)
403 		printk("Machine Check Signal\n");
404 	if (reason & MCSR_ICPERR)
405 		printk("Instruction Cache Parity Error\n");
406 	if (reason & MCSR_DCP_PERR)
407 		printk("Data Cache Push Parity Error\n");
408 	if (reason & MCSR_DCPERR)
409 		printk("Data Cache Parity Error\n");
410 	if (reason & MCSR_GL_CI)
411 		printk("Guarded Load or Cache-Inhibited stwcx.\n");
412 	if (reason & MCSR_BUS_IAERR)
413 		printk("Bus - Instruction Address Error\n");
414 	if (reason & MCSR_BUS_RAERR)
415 		printk("Bus - Read Address Error\n");
416 	if (reason & MCSR_BUS_WAERR)
417 		printk("Bus - Write Address Error\n");
418 	if (reason & MCSR_BUS_IBERR)
419 		printk("Bus - Instruction Data Error\n");
420 	if (reason & MCSR_BUS_RBERR)
421 		printk("Bus - Read Data Bus Error\n");
422 	if (reason & MCSR_BUS_WBERR)
423 		printk("Bus - Read Data Bus Error\n");
424 	if (reason & MCSR_BUS_IPERR)
425 		printk("Bus - Instruction Parity Error\n");
426 	if (reason & MCSR_BUS_RPERR)
427 		printk("Bus - Read Parity Error\n");
428 #elif defined (CONFIG_E200)
429 	printk("Machine check in kernel mode.\n");
430 	printk("Caused by (from MCSR=%lx): ", reason);
431 
432 	if (reason & MCSR_MCP)
433 		printk("Machine Check Signal\n");
434 	if (reason & MCSR_CP_PERR)
435 		printk("Cache Push Parity Error\n");
436 	if (reason & MCSR_CPERR)
437 		printk("Cache Parity Error\n");
438 	if (reason & MCSR_EXCP_ERR)
439 		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
440 	if (reason & MCSR_BUS_IRERR)
441 		printk("Bus - Read Bus Error on instruction fetch\n");
442 	if (reason & MCSR_BUS_DRERR)
443 		printk("Bus - Read Bus Error on data load\n");
444 	if (reason & MCSR_BUS_WRERR)
445 		printk("Bus - Write Bus Error on buffered store or cache line push\n");
446 #else /* !CONFIG_4xx && !CONFIG_E500 && !CONFIG_E200 */
447 	printk("Machine check in kernel mode.\n");
448 	printk("Caused by (from SRR1=%lx): ", reason);
449 	switch (reason & 0x601F0000) {
450 	case 0x80000:
451 		printk("Machine check signal\n");
452 		break;
453 	case 0:		/* for 601 */
454 	case 0x40000:
455 	case 0x140000:	/* 7450 MSS error and TEA */
456 		printk("Transfer error ack signal\n");
457 		break;
458 	case 0x20000:
459 		printk("Data parity error signal\n");
460 		break;
461 	case 0x10000:
462 		printk("Address parity error signal\n");
463 		break;
464 	case 0x20000000:
465 		printk("L1 Data Cache error\n");
466 		break;
467 	case 0x40000000:
468 		printk("L1 Instruction Cache error\n");
469 		break;
470 	case 0x00100000:
471 		printk("L2 data cache parity error\n");
472 		break;
473 	default:
474 		printk("Unknown values in msr\n");
475 	}
476 #endif /* CONFIG_4xx */
477 
478 	/*
479 	 * Optional platform-provided routine to print out
480 	 * additional info, e.g. bus error registers.
481 	 */
482 	platform_machine_check(regs);
483 
484 	if (debugger_fault_handler(regs))
485 		return;
486 	die("Machine check", regs, SIGBUS);
487 
488 	/* Must die if the interrupt is not recoverable */
489 	if (!(regs->msr & MSR_RI))
490 		panic("Unrecoverable Machine check");
491 }
492 
493 void SMIException(struct pt_regs *regs)
494 {
495 	die("System Management Interrupt", regs, SIGABRT);
496 }
497 
498 void unknown_exception(struct pt_regs *regs)
499 {
500 	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
501 	       regs->nip, regs->msr, regs->trap);
502 
503 	_exception(SIGTRAP, regs, 0, 0);
504 }
505 
506 void instruction_breakpoint_exception(struct pt_regs *regs)
507 {
508 	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
509 					5, SIGTRAP) == NOTIFY_STOP)
510 		return;
511 	if (debugger_iabr_match(regs))
512 		return;
513 	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
514 }
515 
516 void RunModeException(struct pt_regs *regs)
517 {
518 	_exception(SIGTRAP, regs, 0, 0);
519 }
520 
521 void __kprobes single_step_exception(struct pt_regs *regs)
522 {
523 	regs->msr &= ~(MSR_SE | MSR_BE);  /* Turn off 'trace' bits */
524 
525 	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
526 					5, SIGTRAP) == NOTIFY_STOP)
527 		return;
528 	if (debugger_sstep(regs))
529 		return;
530 
531 	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
532 }
533 
534 /*
535  * After we have successfully emulated an instruction, we have to
536  * check if the instruction was being single-stepped, and if so,
537  * pretend we got a single-step exception.  This was pointed out
538  * by Kumar Gala.  -- paulus
539  */
540 static void emulate_single_step(struct pt_regs *regs)
541 {
542 	if (single_stepping(regs)) {
543 		clear_single_step(regs);
544 		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
545 	}
546 }
547 
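/*
 * Translate an FPSCR image into an FPE_* si_code.  An exception type
 * is reported only when both its enable bit (e.g. FPSCR_OE) and the
 * corresponding status bit (e.g. FPSCR_OX) are set.
 */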
548 static inline int __parse_fpscr(unsigned long fpscr)
549 {
550 	int ret = 0;
551 
552 	/* Invalid operation */
553 	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
554 		ret = FPE_FLTINV;
555 
556 	/* Overflow */
557 	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
558 		ret = FPE_FLTOVF;
559 
560 	/* Underflow */
561 	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
562 		ret = FPE_FLTUND;
563 
564 	/* Divide by zero */
565 	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
566 		ret = FPE_FLTDIV;
567 
568 	/* Inexact result */
569 	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
570 		ret = FPE_FLTRES;
571 
572 	return ret;
573 }
574 
575 static void parse_fpe(struct pt_regs *regs)
576 {
577 	int code = 0;
578 
579 	flush_fp_to_thread(current);
580 
581 	code = __parse_fpscr(current->thread.fpscr.val);
582 
583 	_exception(SIGFPE, regs, code, regs->nip);
584 }
585 
586 /*
587  * Illegal instruction emulation support.  Originally written to
588  * provide the PVR to user applications using the mfspr rd, PVR.
589  * Return non-zero if we can't emulate, or -EFAULT if the associated
590  * memory access caused an access fault.  Return zero on success.
591  *
592  * There are a couple of ways to do this, either "decode" the instruction
593  * or directly match lots of bits.  In this case, matching lots of
594  * bits is faster and easier.
595  *
596  */
597 #define INST_MFSPR_PVR		0x7c1f42a6
598 #define INST_MFSPR_PVR_MASK	0xfc1fffff
599 
600 #define INST_DCBA		0x7c0005ec
601 #define INST_DCBA_MASK		0xfc0007fe
602 
603 #define INST_MCRXR		0x7c000400
604 #define INST_MCRXR_MASK		0xfc0007fe
605 
606 #define INST_STRING		0x7c00042a
607 #define INST_STRING_MASK	0xfc0007fe
608 #define INST_STRING_GEN_MASK	0xfc00067e
609 #define INST_LSWI		0x7c0004aa
610 #define INST_LSWX		0x7c00042a
611 #define INST_STSWI		0x7c0005aa
612 #define INST_STSWX		0x7c00052a
613 
614 #define INST_POPCNTB		0x7c0000f4
615 #define INST_POPCNTB_MASK	0xfc0007fe
616 
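/*
 * Emulate the load/store string instructions in software.  As a worked
 * example (operands chosen purely for illustration), "lswi r5,r4,8"
 * loads eight bytes starting at the address in r4 into r5 and r6,
 * four bytes per register, most-significant byte first.
 */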
617 static int emulate_string_inst(struct pt_regs *regs, u32 instword)
618 {
619 	u8 rT = (instword >> 21) & 0x1f;
620 	u8 rA = (instword >> 16) & 0x1f;
621 	u8 NB_RB = (instword >> 11) & 0x1f;
622 	u32 num_bytes;
623 	unsigned long EA;
624 	int pos = 0;
625 
626 	/* Early out if we are an invalid form of lswx */
627 	if ((instword & INST_STRING_MASK) == INST_LSWX)
628 		if ((rT == rA) || (rT == NB_RB))
629 			return -EINVAL;
630 
631 	EA = (rA == 0) ? 0 : regs->gpr[rA];
632 
633 	switch (instword & INST_STRING_MASK) {
634 		case INST_LSWX:
635 		case INST_STSWX:
636 			EA += NB_RB;
637 			num_bytes = regs->xer & 0x7f;
638 			break;
639 		case INST_LSWI:
640 		case INST_STSWI:
641 			num_bytes = (NB_RB == 0) ? 32 : NB_RB;
642 			break;
643 		default:
644 			return -EINVAL;
645 	}
646 
647 	while (num_bytes != 0)
648 	{
649 		u8 val;
650 		u32 shift = 8 * (3 - (pos & 0x3));
651 
652 		switch ((instword & INST_STRING_MASK)) {
653 			case INST_LSWX:
654 			case INST_LSWI:
655 				if (get_user(val, (u8 __user *)EA))
656 					return -EFAULT;
657 				/* first time updating this reg,
658 				 * zero it out */
659 				if (pos == 0)
660 					regs->gpr[rT] = 0;
661 				regs->gpr[rT] |= val << shift;
662 				break;
663 			case INST_STSWI:
664 			case INST_STSWX:
665 				val = regs->gpr[rT] >> shift;
666 				if (put_user(val, (u8 __user *)EA))
667 					return -EFAULT;
668 				break;
669 		}
670 		/* move EA to next address */
671 		EA += 1;
672 		num_bytes--;
673 
674 		/* manage our position within the register */
675 		if (++pos == 4) {
676 			pos = 0;
677 			if (++rT == 32)
678 				rT = 0;
679 		}
680 	}
681 
682 	return 0;
683 }
684 
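/*
 * Software popcntb: each byte of the destination receives the number
 * of set bits in the corresponding byte of the source.  The constants
 * below implement the classic parallel ("SWAR") bit count: sum bits in
 * 2-bit groups, then 4-bit groups, then mask down to one count per byte.
 */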
685 static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
686 {
687 	u32 ra,rs;
688 	unsigned long tmp;
689 
690 	ra = (instword >> 16) & 0x1f;
691 	rs = (instword >> 21) & 0x1f;
692 
693 	tmp = regs->gpr[rs];
694 	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
695 	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
696 	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
697 	regs->gpr[ra] = tmp;
698 
699 	return 0;
700 }
701 
702 static int emulate_instruction(struct pt_regs *regs)
703 {
704 	u32 instword;
705 	u32 rd;
706 
707 	if (!user_mode(regs) || (regs->msr & MSR_LE))
708 		return -EINVAL;
709 	CHECK_FULL_REGS(regs);
710 
711 	if (get_user(instword, (u32 __user *)(regs->nip)))
712 		return -EFAULT;
713 
714 	/* Emulate the mfspr rD, PVR. */
715 	if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
716 		rd = (instword >> 21) & 0x1f;
717 		regs->gpr[rd] = mfspr(SPRN_PVR);
718 		return 0;
719 	}
720 
721 	/* Emulating the dcba insn is just a no-op.  */
722 	if ((instword & INST_DCBA_MASK) == INST_DCBA)
723 		return 0;
724 
725 	/* Emulate the mcrxr insn.  */
726 	if ((instword & INST_MCRXR_MASK) == INST_MCRXR) {
727 		int shift = (instword >> 21) & 0x1c;
728 		unsigned long msk = 0xf0000000UL >> shift;
729 
730 		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
731 		regs->xer &= ~0xf0000000UL;
732 		return 0;
733 	}
734 
735 	/* Emulate load/store string insn. */
736 	if ((instword & INST_STRING_GEN_MASK) == INST_STRING)
737 		return emulate_string_inst(regs, instword);
738 
739 	/* Emulate the popcntb (Population Count Bytes) instruction. */
740 	if ((instword & INST_POPCNTB_MASK) == INST_POPCNTB) {
741 		return emulate_popcntb_inst(regs, instword);
742 	}
743 
744 	return -EINVAL;
745 }
746 
747 int is_valid_bugaddr(unsigned long addr)
748 {
749 	return is_kernel_addr(addr);
750 }
751 
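/*
 * Program check: IEEE FP-enabled exceptions, trap instructions
 * (breakpoints, BUG()/WARN()) and illegal or privileged instructions.
 * Some illegal instructions can be emulated before a signal is raised.
 */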
752 void __kprobes program_check_exception(struct pt_regs *regs)
753 {
754 	unsigned int reason = get_reason(regs);
755 	extern int do_mathemu(struct pt_regs *regs);
756 
757 	/* We can now get here via a FP Unavailable exception if the core
758 	 * has no FPU; in that case the reason flags will be 0. */
759 
760 	if (reason & REASON_FP) {
761 		/* IEEE FP exception */
762 		parse_fpe(regs);
763 		return;
764 	}
765 	if (reason & REASON_TRAP) {
766 		/* trap exception */
767 		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
768 				== NOTIFY_STOP)
769 			return;
770 		if (debugger_bpt(regs))
771 			return;
772 
773 		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
774 		    report_bug(regs->nip) == BUG_TRAP_TYPE_WARN) {
775 			regs->nip += 4;
776 			return;
777 		}
778 		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
779 		return;
780 	}
781 
782 	local_irq_enable();
783 
784 #ifdef CONFIG_MATH_EMULATION
785 	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
786 	 * but there seems to be a hardware bug on the 405GP (RevD)
787 	 * that means ESR is sometimes set incorrectly - either to
788 	 * ESR_DST (!?) or 0.  In the process of chasing this with the
789 	 * hardware people - not sure if it can happen on any illegal
790 	 * instruction or only on FP instructions, whether there is a
791 	 * pattern to occurrences etc. -dgibson 31/Mar/2003 */
792 	switch (do_mathemu(regs)) {
793 	case 0:
794 		emulate_single_step(regs);
795 		return;
796 	case 1: {
797 			int code = 0;
798 			code = __parse_fpscr(current->thread.fpscr.val);
799 			_exception(SIGFPE, regs, code, regs->nip);
800 			return;
801 		}
802 	case -EFAULT:
803 		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
804 		return;
805 	}
806 	/* fall through on any other errors */
807 #endif /* CONFIG_MATH_EMULATION */
808 
809 	/* Try to emulate it if we should. */
810 	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
811 		switch (emulate_instruction(regs)) {
812 		case 0:
813 			regs->nip += 4;
814 			emulate_single_step(regs);
815 			return;
816 		case -EFAULT:
817 			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
818 			return;
819 		}
820 	}
821 
822 	if (reason & REASON_PRIVILEGED)
823 		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
824 	else
825 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
826 }
827 
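/*
 * Alignment exception: try to emulate the unaligned access unless the
 * task asked for SIGBUS via prctl(PR_SET_UNALIGN).  fix_alignment()
 * returns 1 when it emulated the access and -EFAULT when the operand
 * address was bad; anything else ends up as a bus error.
 */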
828 void alignment_exception(struct pt_regs *regs)
829 {
830 	int sig, code, fixed = 0;
831 
832 	/* we don't implement logging of alignment exceptions */
833 	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
834 		fixed = fix_alignment(regs);
835 
836 	if (fixed == 1) {
837 		regs->nip += 4;	/* skip over emulated instruction */
838 		emulate_single_step(regs);
839 		return;
840 	}
841 
842 	/* Operand address was bad */
843 	if (fixed == -EFAULT) {
844 		sig = SIGSEGV;
845 		code = SEGV_ACCERR;
846 	} else {
847 		sig = SIGBUS;
848 		code = BUS_ADRALN;
849 	}
850 	if (user_mode(regs))
851 		_exception(sig, regs, code, regs->dar);
852 	else
853 		bad_page_fault(regs, regs->dar, sig);
854 }
855 
856 void StackOverflow(struct pt_regs *regs)
857 {
858 	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
859 	       current, regs->gpr[1]);
860 	debugger(regs);
861 	show_regs(regs);
862 	panic("kernel stack overflow");
863 }
864 
865 void nonrecoverable_exception(struct pt_regs *regs)
866 {
867 	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
868 	       regs->nip, regs->msr);
869 	debugger(regs);
870 	die("nonrecoverable exception", regs, SIGKILL);
871 }
872 
873 void trace_syscall(struct pt_regs *regs)
874 {
875 	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld    %s\n",
876 	       current, current->pid, regs->nip, regs->link, regs->gpr[0],
877 	       regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
878 }
879 
880 void kernel_fp_unavailable_exception(struct pt_regs *regs)
881 {
882 	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
883 			  "%lx at %lx\n", regs->trap, regs->nip);
884 	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
885 }
886 
887 void altivec_unavailable_exception(struct pt_regs *regs)
888 {
889 	if (user_mode(regs)) {
890 		/* A user program has executed an altivec instruction,
891 		   but this kernel doesn't support altivec. */
892 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
893 		return;
894 	}
895 
896 	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
897 			"%lx at %lx\n", regs->trap, regs->nip);
898 	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
899 }
900 
901 void performance_monitor_exception(struct pt_regs *regs)
902 {
903 	perf_irq(regs);
904 }
905 
906 #ifdef CONFIG_8xx
907 void SoftwareEmulation(struct pt_regs *regs)
908 {
909 	extern int do_mathemu(struct pt_regs *);
910 	extern int Soft_emulate_8xx(struct pt_regs *);
911 	int errcode;
912 
913 	CHECK_FULL_REGS(regs);
914 
915 	if (!user_mode(regs)) {
916 		debugger(regs);
917 		die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
918 	}
919 
920 #ifdef CONFIG_MATH_EMULATION
921 	errcode = do_mathemu(regs);
922 
923 	switch (errcode) {
924 	case 0:
925 		emulate_single_step(regs);
926 		return;
927 	case 1: {
928 			int code = 0;
929 			code = __parse_fpscr(current->thread.fpscr.val);
930 			_exception(SIGFPE, regs, code, regs->nip);
931 			return;
932 		}
933 	case -EFAULT:
934 		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
935 		return;
936 	default:
937 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
938 		return;
939 	}
940 
941 #else
942 	errcode = Soft_emulate_8xx(regs);
943 	switch (errcode) {
944 	case 0:
945 		emulate_single_step(regs);
946 		return;
947 	case 1:
948 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
949 		return;
950 	case -EFAULT:
951 		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
952 		return;
953 	}
954 #endif
955 }
956 #endif /* CONFIG_8xx */
957 
958 #if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
959 
960 void DebugException(struct pt_regs *regs, unsigned long debug_status)
961 {
962 	if (debug_status & DBSR_IC) {	/* instruction completion */
963 		regs->msr &= ~MSR_DE;
964 		if (user_mode(regs)) {
965 			current->thread.dbcr0 &= ~DBCR0_IC;
966 		} else {
967 			/* Disable instruction completion */
968 			mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
969 			/* Clear the instruction completion event */
970 			mtspr(SPRN_DBSR, DBSR_IC);
971 			if (debugger_sstep(regs))
972 				return;
973 		}
974 		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
975 	}
976 }
977 #endif /* CONFIG_40x || CONFIG_BOOKE */
978 
979 #if !defined(CONFIG_TAU_INT)
980 void TAUException(struct pt_regs *regs)
981 {
982 	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
983 	       regs->nip, regs->msr, regs->trap, print_tainted());
984 }
985 #endif /* CONFIG_TAU_INT */
986 
987 #ifdef CONFIG_ALTIVEC
988 void altivec_assist_exception(struct pt_regs *regs)
989 {
990 	int err;
991 
992 	if (!user_mode(regs)) {
993 		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
994 		       " at %lx\n", regs->nip);
995 		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
996 	}
997 
998 	flush_altivec_to_thread(current);
999 
1000 	err = emulate_altivec(regs);
1001 	if (err == 0) {
1002 		regs->nip += 4;		/* skip emulated instruction */
1003 		emulate_single_step(regs);
1004 		return;
1005 	}
1006 
1007 	if (err == -EFAULT) {
1008 		/* got an error reading the instruction */
1009 		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
1010 	} else {
1011 		/* didn't recognize the instruction */
1012 		/* XXX quick hack for now: set the non-Java bit in the VSCR */
1013 		if (printk_ratelimit())
1014 			printk(KERN_ERR "Unrecognized altivec instruction "
1015 			       "in %s at %lx\n", current->comm, regs->nip);
1016 		current->thread.vscr.u[3] |= 0x10000;
1017 	}
1018 }
1019 #endif /* CONFIG_ALTIVEC */
1020 
1021 #ifdef CONFIG_FSL_BOOKE
1022 void CacheLockingException(struct pt_regs *regs, unsigned long address,
1023 			   unsigned long error_code)
1024 {
1025 	/* We treat cache locking instructions from the user
1026 	 * as priv ops, in the future we could try to do
1027 	 * something smarter
1028 	 */
1029 	if (error_code & (ESR_DLK|ESR_ILK))
1030 		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
1031 	return;
1032 }
1033 #endif /* CONFIG_FSL_BOOKE */
1034 
1035 #ifdef CONFIG_SPE
1036 void SPEFloatingPointException(struct pt_regs *regs)
1037 {
1038 	unsigned long spefscr;
1039 	int fpexc_mode;
1040 	int code = 0;
1041 
1042 	spefscr = current->thread.spefscr;
1043 	fpexc_mode = current->thread.fpexc_mode;
1044 
1045 	/* Hardware does not necessarily set sticky
1046 	 * underflow/overflow/invalid flags */
1047 	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
1048 		code = FPE_FLTOVF;
1049 		spefscr |= SPEFSCR_FOVFS;
1050 	}
1051 	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
1052 		code = FPE_FLTUND;
1053 		spefscr |= SPEFSCR_FUNFS;
1054 	}
1055 	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
1056 		code = FPE_FLTDIV;
1057 	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
1058 		code = FPE_FLTINV;
1059 		spefscr |= SPEFSCR_FINVS;
1060 	}
1061 	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
1062 		code = FPE_FLTRES;
1063 
1064 	current->thread.spefscr = spefscr;
1065 
1066 	_exception(SIGFPE, regs, code, regs->nip);
1067 	return;
1068 }
1069 #endif
1070 
1071 /*
1072  * We enter here if we get an unrecoverable exception, that is, one
1073  * that happened at a point where the RI (recoverable interrupt) bit
1074  * in the MSR is 0.  This indicates that SRR0/1 are live, and that
1075  * we therefore lost state by taking this exception.
1076  */
1077 void unrecoverable_exception(struct pt_regs *regs)
1078 {
1079 	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
1080 	       regs->trap, regs->nip);
1081 	die("Unrecoverable exception", regs, SIGABRT);
1082 }
1083 
1084 #ifdef CONFIG_BOOKE_WDT
1085 /*
1086  * Default handler for a Watchdog exception: it simply masks further
1087  * watchdog interrupts; boards are expected to override it with their own.
1088  */
1089 void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
1090 {
1091 	/* Generic WatchdogHandler, implement your own */
1092 	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
1093 	return;
1094 }
1095 
1096 void WatchdogException(struct pt_regs *regs)
1097 {
1098 	printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
1099 	WatchdogHandler(regs);
1100 }
1101 #endif
1102 
1103 /*
1104  * We enter here if we discover during exception entry that we are
1105  * running in supervisor mode with a userspace value in the stack pointer.
1106  */
1107 void kernel_bad_stack(struct pt_regs *regs)
1108 {
1109 	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
1110 	       regs->gpr[1], regs->nip);
1111 	die("Bad kernel stack pointer", regs, SIGABRT);
1112 }
1113 
1114 void __init trap_init(void)
1115 {
1116 }
1117