1 /*
2  *  Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
3  *  Copyright 2007-2010 Freescale Semiconductor, Inc.
4  *
5  *  This program is free software; you can redistribute it and/or
6  *  modify it under the terms of the GNU General Public License
7  *  as published by the Free Software Foundation; either version
8  *  2 of the License, or (at your option) any later version.
9  *
10  *  Modified by Cort Dougan (cort@cs.nmt.edu)
11  *  and Paul Mackerras (paulus@samba.org)
12  */
13 
14 /*
15  * This file handles the architecture-dependent parts of hardware exceptions
16  */
17 
18 #include <linux/errno.h>
19 #include <linux/sched.h>
20 #include <linux/sched/debug.h>
21 #include <linux/kernel.h>
22 #include <linux/mm.h>
23 #include <linux/stddef.h>
24 #include <linux/unistd.h>
25 #include <linux/ptrace.h>
26 #include <linux/user.h>
27 #include <linux/interrupt.h>
28 #include <linux/init.h>
29 #include <linux/extable.h>
30 #include <linux/module.h>	/* print_modules */
31 #include <linux/prctl.h>
32 #include <linux/delay.h>
33 #include <linux/kprobes.h>
34 #include <linux/kexec.h>
35 #include <linux/backlight.h>
36 #include <linux/bug.h>
37 #include <linux/kdebug.h>
38 #include <linux/ratelimit.h>
39 #include <linux/context_tracking.h>
40 
41 #include <asm/emulated_ops.h>
42 #include <asm/pgtable.h>
43 #include <linux/uaccess.h>
44 #include <asm/debugfs.h>
45 #include <asm/io.h>
46 #include <asm/machdep.h>
47 #include <asm/rtas.h>
48 #include <asm/pmc.h>
49 #include <asm/reg.h>
50 #ifdef CONFIG_PMAC_BACKLIGHT
51 #include <asm/backlight.h>
52 #endif
53 #ifdef CONFIG_PPC64
54 #include <asm/firmware.h>
55 #include <asm/processor.h>
56 #include <asm/tm.h>
57 #endif
58 #include <asm/kexec.h>
59 #include <asm/ppc-opcode.h>
60 #include <asm/rio.h>
61 #include <asm/fadump.h>
62 #include <asm/switch_to.h>
63 #include <asm/tm.h>
64 #include <asm/debug.h>
65 #include <asm/asm-prototypes.h>
66 #include <asm/hmi.h>
67 #include <sysdev/fsl_pci.h>
68 #include <asm/kprobes.h>
69 
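/*
 * Hooks through which a kernel debugger (such as xmon) can claim
 * exceptions before the normal handlers run; they are populated when
 * the debugger registers itself.
 */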
70 #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
71 int (*__debugger)(struct pt_regs *regs) __read_mostly;
72 int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
73 int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
74 int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
75 int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
76 int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
77 int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;
78 
79 EXPORT_SYMBOL(__debugger);
80 EXPORT_SYMBOL(__debugger_ipi);
81 EXPORT_SYMBOL(__debugger_bpt);
82 EXPORT_SYMBOL(__debugger_sstep);
83 EXPORT_SYMBOL(__debugger_iabr_match);
84 EXPORT_SYMBOL(__debugger_break_match);
85 EXPORT_SYMBOL(__debugger_fault_handler);
86 #endif
87 
88 /* Transactional Memory trap debug */
89 #ifdef TM_DEBUG_SW
90 #define TM_DEBUG(x...) printk(KERN_INFO x)
91 #else
#define TM_DEBUG(x...) do { } while (0)
93 #endif
94 
95 /*
96  * Trap & Exception support
97  */
98 
99 #ifdef CONFIG_PMAC_BACKLIGHT
100 static void pmac_backlight_unblank(void)
101 {
102 	mutex_lock(&pmac_backlight_mutex);
103 	if (pmac_backlight) {
104 		struct backlight_properties *props;
105 
106 		props = &pmac_backlight->props;
107 		props->brightness = props->max_brightness;
108 		props->power = FB_BLANK_UNBLANK;
109 		backlight_update_status(pmac_backlight);
110 	}
111 	mutex_unlock(&pmac_backlight_mutex);
112 }
113 #else
114 static inline void pmac_backlight_unblank(void) { }
115 #endif
116 
117 static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
118 static int die_owner = -1;
119 static unsigned int die_nest_count;
120 static int die_counter;
121 
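/*
 * The die() path may be entered concurrently from several CPUs and may
 * recurse on one CPU (an oops while oopsing).  die_lock serialises the
 * output; die_owner lets a nested oops on the lock-holding CPU proceed
 * instead of self-deadlocking; die_nest_count makes sure only the
 * outermost oops_end() releases the lock.
 */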
122 static unsigned long oops_begin(struct pt_regs *regs)
123 {
124 	int cpu;
125 	unsigned long flags;
126 
127 	oops_enter();
128 
129 	/* racy, but better than risking deadlock. */
130 	raw_local_irq_save(flags);
131 	cpu = smp_processor_id();
132 	if (!arch_spin_trylock(&die_lock)) {
133 		if (cpu == die_owner)
			/* Nested oops on this CPU: let it through; it should stop eventually */;
135 		else
136 			arch_spin_lock(&die_lock);
137 	}
138 	die_nest_count++;
139 	die_owner = cpu;
140 	console_verbose();
141 	bust_spinlocks(1);
142 	if (machine_is(powermac))
143 		pmac_backlight_unblank();
144 	return flags;
145 }
146 NOKPROBE_SYMBOL(oops_begin);
147 
148 static void oops_end(unsigned long flags, struct pt_regs *regs,
149 			       int signr)
150 {
151 	bust_spinlocks(0);
152 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
153 	die_nest_count--;
154 	oops_exit();
155 	printk("\n");
156 	if (!die_nest_count) {
157 		/* Nest count reaches zero, release the lock. */
158 		die_owner = -1;
159 		arch_spin_unlock(&die_lock);
160 	}
161 	raw_local_irq_restore(flags);
162 
163 	crash_fadump(regs, "die oops");
164 
165 	if (kexec_should_crash(current))
166 		crash_kexec(regs);
167 
168 	if (!signr)
169 		return;
170 
171 	/*
172 	 * While our oops output is serialised by a spinlock, output
173 	 * from panic() called below can race and corrupt it. If we
174 	 * know we are going to panic, delay for 1 second so we have a
175 	 * chance to get clean backtraces from all CPUs that are oopsing.
176 	 */
177 	if (in_interrupt() || panic_on_oops || !current->pid ||
178 	    is_global_init(current)) {
179 		mdelay(MSEC_PER_SEC);
180 	}
181 
182 	if (in_interrupt())
183 		panic("Fatal exception in interrupt");
184 	if (panic_on_oops)
185 		panic("Fatal exception");
186 	do_exit(signr);
187 }
188 NOKPROBE_SYMBOL(oops_end);
189 
190 static int __die(const char *str, struct pt_regs *regs, long err)
191 {
192 	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
193 
194 	if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
195 		printk("LE ");
196 	else
197 		printk("BE ");
198 
199 	if (IS_ENABLED(CONFIG_PREEMPT))
200 		pr_cont("PREEMPT ");
201 
202 	if (IS_ENABLED(CONFIG_SMP))
203 		pr_cont("SMP NR_CPUS=%d ", NR_CPUS);
204 
205 	if (debug_pagealloc_enabled())
206 		pr_cont("DEBUG_PAGEALLOC ");
207 
208 	if (IS_ENABLED(CONFIG_NUMA))
209 		pr_cont("NUMA ");
210 
211 	pr_cont("%s\n", ppc_md.name ? ppc_md.name : "");
212 
213 	if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
214 		return 1;
215 
216 	print_modules();
217 	show_regs(regs);
218 
219 	return 0;
220 }
221 NOKPROBE_SYMBOL(__die);
222 
223 void die(const char *str, struct pt_regs *regs, long err)
224 {
225 	unsigned long flags;
226 
227 	if (debugger(regs))
228 		return;
229 
230 	flags = oops_begin(regs);
231 	if (__die(str, regs, err))
232 		err = 0;
233 	oops_end(flags, regs, err);
234 }
235 NOKPROBE_SYMBOL(die);
236 
237 void user_single_step_siginfo(struct task_struct *tsk,
238 				struct pt_regs *regs, siginfo_t *info)
239 {
240 	memset(info, 0, sizeof(*info));
241 	info->si_signo = SIGTRAP;
242 	info->si_code = TRAP_TRACE;
243 	info->si_addr = (void __user *)regs->nip;
244 }
245 
246 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
247 {
248 	siginfo_t info;
249 	const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
250 			"at %08lx nip %08lx lr %08lx code %x\n";
251 	const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
252 			"at %016lx nip %016lx lr %016lx code %x\n";
253 
254 	if (!user_mode(regs)) {
255 		die("Exception in kernel mode", regs, signr);
256 		return;
257 	}
258 
259 	if (show_unhandled_signals && unhandled_signal(current, signr)) {
260 		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
261 				   current->comm, current->pid, signr,
262 				   addr, regs->nip, regs->link, code);
263 	}
264 
265 	if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
266 		local_irq_enable();
267 
268 	current->thread.trap_nr = code;
269 	memset(&info, 0, sizeof(info));
270 	info.si_signo = signr;
271 	info.si_code = code;
272 	info.si_addr = (void __user *) addr;
273 	force_sig_info(signr, &info, current);
274 }
275 
276 void system_reset_exception(struct pt_regs *regs)
277 {
278 	/*
279 	 * Avoid crashes in case of nested NMI exceptions. Recoverability
	 * is determined by the MSR RI bit and by in_nmi.
281 	 */
282 	bool nested = in_nmi();
283 	if (!nested)
284 		nmi_enter();
285 
286 	__this_cpu_inc(irq_stat.sreset_irqs);
287 
	/* See if any machine-dependent code wants to handle this first */
289 	if (ppc_md.system_reset_exception) {
290 		if (ppc_md.system_reset_exception(regs))
291 			goto out;
292 	}
293 
294 	if (debugger(regs))
295 		goto out;
296 
297 	/*
298 	 * A system reset is a request to dump, so we always send
299 	 * it through the crashdump code (if fadump or kdump are
300 	 * registered).
301 	 */
302 	crash_fadump(regs, "System Reset");
303 
304 	crash_kexec(regs);
305 
306 	/*
	 * We aren't the primary crash CPU; send this CPU into a
	 * holding pattern so it doesn't end up in the panic code.
310 	 */
311 	crash_kexec_secondary(regs);
312 
313 	/*
314 	 * No debugger or crash dump registered, print logs then
315 	 * panic.
316 	 */
317 	__die("System Reset", regs, SIGABRT);
318 
319 	mdelay(2*MSEC_PER_SEC); /* Wait a little while for others to print */
320 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
321 	nmi_panic(regs, "System Reset");
322 
323 out:
324 #ifdef CONFIG_PPC_BOOK3S_64
325 	BUG_ON(get_paca()->in_nmi == 0);
326 	if (get_paca()->in_nmi > 1)
327 		nmi_panic(regs, "Unrecoverable nested System Reset");
328 #endif
329 	/* Must die if the interrupt is not recoverable */
330 	if (!(regs->msr & MSR_RI))
331 		nmi_panic(regs, "Unrecoverable System Reset");
332 
333 	if (!nested)
334 		nmi_exit();
335 
336 	/* What should we do here? We could issue a shutdown or hard reset. */
337 }
338 
339 /*
340  * I/O accesses can cause machine checks on powermacs.
341  * Check if the NIP corresponds to the address of a sync
342  * instruction for which there is an entry in the exception
343  * table.
344  * Note that the 601 only takes a machine check on TEA
345  * (transfer error ack) signal assertion, and does not
346  * set any of the top 16 bits of SRR1.
347  *  -- paulus.
348  */
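/*
 * For reference, the I/O accessor epilogue being matched below looks
 * roughly like this (a sketch of what inb()/inw()/inl() emit, not an
 * exact listing):
 *
 *	lbzx	rT,0,rB		<- the actual I/O load (a store for out*)
 *	twi	0,rT,0
 *	isync
 *	nop
 *
 * so starting from the faulting NIP we step back over the nop and the
 * isync to the trap/sync, and one more instruction back to the load or
 * store whose B register names the port.
 */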
349 static inline int check_io_access(struct pt_regs *regs)
350 {
351 #ifdef CONFIG_PPC32
352 	unsigned long msr = regs->msr;
353 	const struct exception_table_entry *entry;
354 	unsigned int *nip = (unsigned int *)regs->nip;
355 
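
	/*
	 * SRR1 reason bits for a PPC32 machine check: all clear is the
	 * 601 TEA-only case, 0x80000 is the machine check signal and
	 * 0x40000 the transfer error ack (compare machine_check_generic()
	 * below).
	 */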
356 	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
357 	    && (entry = search_exception_tables(regs->nip)) != NULL) {
358 		/*
359 		 * Check that it's a sync instruction, or somewhere
360 		 * in the twi; isync; nop sequence that inb/inw/inl uses.
361 		 * As the address is in the exception table
362 		 * we should be able to read the instr there.
363 		 * For the debug message, we look at the preceding
364 		 * load or store.
365 		 */
366 		if (*nip == PPC_INST_NOP)
367 			nip -= 2;
368 		else if (*nip == PPC_INST_ISYNC)
369 			--nip;
370 		if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) {
371 			unsigned int rb;
372 
373 			--nip;
374 			rb = (*nip >> 11) & 0x1f;
375 			printk(KERN_DEBUG "%s bad port %lx at %p\n",
376 			       (*nip & 0x100)? "OUT to": "IN from",
377 			       regs->gpr[rb] - _IO_BASE, nip);
378 			regs->msr |= MSR_RI;
379 			regs->nip = extable_fixup(entry);
380 			return 1;
381 		}
382 	}
383 #endif /* CONFIG_PPC32 */
384 	return 0;
385 }
386 
387 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
388 /* On 4xx, the reason for the machine check or program exception
389    is in the ESR. */
390 #define get_reason(regs)	((regs)->dsisr)
391 #define REASON_FP		ESR_FP
392 #define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
393 #define REASON_PRIVILEGED	ESR_PPR
394 #define REASON_TRAP		ESR_PTR
395 
396 /* single-step stuff */
397 #define single_stepping(regs)	(current->thread.debug.dbcr0 & DBCR0_IC)
398 #define clear_single_step(regs)	(current->thread.debug.dbcr0 &= ~DBCR0_IC)
399 
400 #else
401 /* On non-4xx, the reason for the machine check or program
402    exception is in the MSR. */
403 #define get_reason(regs)	((regs)->msr)
404 #define REASON_TM		SRR1_PROGTM
405 #define REASON_FP		SRR1_PROGFPE
406 #define REASON_ILLEGAL		SRR1_PROGILL
407 #define REASON_PRIVILEGED	SRR1_PROGPRIV
408 #define REASON_TRAP		SRR1_PROGTRAP
409 
410 #define single_stepping(regs)	((regs)->msr & MSR_SE)
411 #define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
412 #endif
413 
414 #if defined(CONFIG_E500)
415 int machine_check_e500mc(struct pt_regs *regs)
416 {
417 	unsigned long mcsr = mfspr(SPRN_MCSR);
418 	unsigned long reason = mcsr;
419 	int recoverable = 1;
420 
421 	if (reason & MCSR_LD) {
422 		recoverable = fsl_rio_mcheck_exception(regs);
423 		if (recoverable == 1)
424 			goto silent_out;
425 	}
426 
427 	printk("Machine check in kernel mode.\n");
428 	printk("Caused by (from MCSR=%lx): ", reason);
429 
430 	if (reason & MCSR_MCP)
431 		printk("Machine Check Signal\n");
432 
433 	if (reason & MCSR_ICPERR) {
434 		printk("Instruction Cache Parity Error\n");
435 
436 		/*
437 		 * This is recoverable by invalidating the i-cache.
438 		 */
439 		mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
440 		while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
441 			;
442 
443 		/*
444 		 * This will generally be accompanied by an instruction
445 		 * fetch error report -- only treat MCSR_IF as fatal
446 		 * if it wasn't due to an L1 parity error.
447 		 */
448 		reason &= ~MCSR_IF;
449 	}
450 
451 	if (reason & MCSR_DCPERR_MC) {
452 		printk("Data Cache Parity Error\n");
453 
454 		/*
455 		 * In write shadow mode we auto-recover from the error, but it
456 		 * may still get logged and cause a machine check.  We should
457 		 * only treat the non-write shadow case as non-recoverable.
458 		 */
459 		if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
460 			recoverable = 0;
461 	}
462 
463 	if (reason & MCSR_L2MMU_MHIT) {
464 		printk("Hit on multiple TLB entries\n");
465 		recoverable = 0;
466 	}
467 
468 	if (reason & MCSR_NMI)
469 		printk("Non-maskable interrupt\n");
470 
471 	if (reason & MCSR_IF) {
472 		printk("Instruction Fetch Error Report\n");
473 		recoverable = 0;
474 	}
475 
476 	if (reason & MCSR_LD) {
477 		printk("Load Error Report\n");
478 		recoverable = 0;
479 	}
480 
481 	if (reason & MCSR_ST) {
482 		printk("Store Error Report\n");
483 		recoverable = 0;
484 	}
485 
486 	if (reason & MCSR_LDG) {
487 		printk("Guarded Load Error Report\n");
488 		recoverable = 0;
489 	}
490 
491 	if (reason & MCSR_TLBSYNC)
492 		printk("Simultaneous tlbsync operations\n");
493 
494 	if (reason & MCSR_BSL2_ERR) {
495 		printk("Level 2 Cache Error\n");
496 		recoverable = 0;
497 	}
498 
499 	if (reason & MCSR_MAV) {
500 		u64 addr;
501 
502 		addr = mfspr(SPRN_MCAR);
503 		addr |= (u64)mfspr(SPRN_MCARU) << 32;
504 
505 		printk("Machine Check %s Address: %#llx\n",
506 		       reason & MCSR_MEA ? "Effective" : "Physical", addr);
507 	}
508 
509 silent_out:
510 	mtspr(SPRN_MCSR, mcsr);
511 	return mfspr(SPRN_MCSR) == 0 && recoverable;
512 }
513 
514 int machine_check_e500(struct pt_regs *regs)
515 {
516 	unsigned long reason = mfspr(SPRN_MCSR);
517 
518 	if (reason & MCSR_BUS_RBERR) {
519 		if (fsl_rio_mcheck_exception(regs))
520 			return 1;
521 		if (fsl_pci_mcheck_exception(regs))
522 			return 1;
523 	}
524 
525 	printk("Machine check in kernel mode.\n");
526 	printk("Caused by (from MCSR=%lx): ", reason);
527 
528 	if (reason & MCSR_MCP)
529 		printk("Machine Check Signal\n");
530 	if (reason & MCSR_ICPERR)
531 		printk("Instruction Cache Parity Error\n");
532 	if (reason & MCSR_DCP_PERR)
533 		printk("Data Cache Push Parity Error\n");
534 	if (reason & MCSR_DCPERR)
535 		printk("Data Cache Parity Error\n");
536 	if (reason & MCSR_BUS_IAERR)
537 		printk("Bus - Instruction Address Error\n");
538 	if (reason & MCSR_BUS_RAERR)
539 		printk("Bus - Read Address Error\n");
540 	if (reason & MCSR_BUS_WAERR)
541 		printk("Bus - Write Address Error\n");
542 	if (reason & MCSR_BUS_IBERR)
543 		printk("Bus - Instruction Data Error\n");
544 	if (reason & MCSR_BUS_RBERR)
545 		printk("Bus - Read Data Bus Error\n");
546 	if (reason & MCSR_BUS_WBERR)
547 		printk("Bus - Write Data Bus Error\n");
548 	if (reason & MCSR_BUS_IPERR)
549 		printk("Bus - Instruction Parity Error\n");
550 	if (reason & MCSR_BUS_RPERR)
551 		printk("Bus - Read Parity Error\n");
552 
553 	return 0;
554 }
555 
556 int machine_check_generic(struct pt_regs *regs)
557 {
558 	return 0;
559 }
560 #elif defined(CONFIG_E200)
561 int machine_check_e200(struct pt_regs *regs)
562 {
563 	unsigned long reason = mfspr(SPRN_MCSR);
564 
565 	printk("Machine check in kernel mode.\n");
566 	printk("Caused by (from MCSR=%lx): ", reason);
567 
568 	if (reason & MCSR_MCP)
569 		printk("Machine Check Signal\n");
570 	if (reason & MCSR_CP_PERR)
571 		printk("Cache Push Parity Error\n");
572 	if (reason & MCSR_CPERR)
573 		printk("Cache Parity Error\n");
574 	if (reason & MCSR_EXCP_ERR)
575 		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
576 	if (reason & MCSR_BUS_IRERR)
577 		printk("Bus - Read Bus Error on instruction fetch\n");
578 	if (reason & MCSR_BUS_DRERR)
579 		printk("Bus - Read Bus Error on data load\n");
580 	if (reason & MCSR_BUS_WRERR)
581 		printk("Bus - Write Bus Error on buffered store or cache line push\n");
582 
583 	return 0;
584 }
585 #elif defined(CONFIG_PPC32)
586 int machine_check_generic(struct pt_regs *regs)
587 {
588 	unsigned long reason = regs->msr;
589 
590 	printk("Machine check in kernel mode.\n");
591 	printk("Caused by (from SRR1=%lx): ", reason);
592 	switch (reason & 0x601F0000) {
593 	case 0x80000:
594 		printk("Machine check signal\n");
595 		break;
596 	case 0:		/* for 601 */
597 	case 0x40000:
598 	case 0x140000:	/* 7450 MSS error and TEA */
599 		printk("Transfer error ack signal\n");
600 		break;
601 	case 0x20000:
602 		printk("Data parity error signal\n");
603 		break;
604 	case 0x10000:
605 		printk("Address parity error signal\n");
606 		break;
607 	case 0x20000000:
608 		printk("L1 Data Cache error\n");
609 		break;
610 	case 0x40000000:
611 		printk("L1 Instruction Cache error\n");
612 		break;
613 	case 0x00100000:
614 		printk("L2 data cache parity error\n");
615 		break;
616 	default:
617 		printk("Unknown values in msr\n");
618 	}
619 	return 0;
620 }
621 #endif /* everything else */
622 
623 void machine_check_exception(struct pt_regs *regs)
624 {
625 	enum ctx_state prev_state = exception_enter();
626 	int recover = 0;
627 
628 	/* 64s accounts the mce in machine_check_early when in HVMODE */
629 	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !cpu_has_feature(CPU_FTR_HVMODE))
630 		__this_cpu_inc(irq_stat.mce_exceptions);
631 
632 	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
633 
	/* See if any machine-dependent code wants to handle this. In theory
	 * we would want to call the CPU handler first and only call the
	 * ppc_md one if the CPU handler returns a positive number. However,
	 * there is existing code that assumes the board gets a first chance,
	 * so let's keep it that way for now and fix things later. --BenH.
	 */
640 	if (ppc_md.machine_check_exception)
641 		recover = ppc_md.machine_check_exception(regs);
642 	else if (cur_cpu_spec->machine_check)
643 		recover = cur_cpu_spec->machine_check(regs);
644 
645 	if (recover > 0)
646 		goto bail;
647 
648 	if (debugger_fault_handler(regs))
649 		goto bail;
650 
651 	if (check_io_access(regs))
652 		goto bail;
653 
654 	die("Machine check", regs, SIGBUS);
655 
656 	/* Must die if the interrupt is not recoverable */
657 	if (!(regs->msr & MSR_RI))
658 		panic("Unrecoverable Machine check");
659 
660 bail:
661 	exception_exit(prev_state);
662 }
663 
664 void SMIException(struct pt_regs *regs)
665 {
666 	die("System Management Interrupt", regs, SIGABRT);
667 }
668 
669 void handle_hmi_exception(struct pt_regs *regs)
670 {
671 	struct pt_regs *old_regs;
672 
673 	old_regs = set_irq_regs(regs);
674 	irq_enter();
675 
676 	if (ppc_md.handle_hmi_exception)
677 		ppc_md.handle_hmi_exception(regs);
678 
679 	irq_exit();
680 	set_irq_regs(old_regs);
681 }
682 
683 void unknown_exception(struct pt_regs *regs)
684 {
685 	enum ctx_state prev_state = exception_enter();
686 
687 	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
688 	       regs->nip, regs->msr, regs->trap);
689 
690 	_exception(SIGTRAP, regs, 0, 0);
691 
692 	exception_exit(prev_state);
693 }
694 
695 void instruction_breakpoint_exception(struct pt_regs *regs)
696 {
697 	enum ctx_state prev_state = exception_enter();
698 
699 	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
700 					5, SIGTRAP) == NOTIFY_STOP)
701 		goto bail;
702 	if (debugger_iabr_match(regs))
703 		goto bail;
704 	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
705 
706 bail:
707 	exception_exit(prev_state);
708 }
709 
710 void RunModeException(struct pt_regs *regs)
711 {
712 	_exception(SIGTRAP, regs, 0, 0);
713 }
714 
715 void single_step_exception(struct pt_regs *regs)
716 {
717 	enum ctx_state prev_state = exception_enter();
718 
719 	clear_single_step(regs);
720 
721 	if (kprobe_post_handler(regs))
722 		return;
723 
724 	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
725 					5, SIGTRAP) == NOTIFY_STOP)
726 		goto bail;
727 	if (debugger_sstep(regs))
728 		goto bail;
729 
730 	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
731 
732 bail:
733 	exception_exit(prev_state);
734 }
735 NOKPROBE_SYMBOL(single_step_exception);
736 
737 /*
738  * After we have successfully emulated an instruction, we have to
739  * check if the instruction was being single-stepped, and if so,
740  * pretend we got a single-step exception.  This was pointed out
741  * by Kumar Gala.  -- paulus
742  */
743 static void emulate_single_step(struct pt_regs *regs)
744 {
745 	if (single_stepping(regs))
746 		single_step_exception(regs);
747 }
748 
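/*
 * Map FPSCR state to a siginfo FPE_* code.  Each IEEE exception is
 * reported only when both its status bit (e.g. FPSCR_VX) and the
 * matching enable bit (FPSCR_VE) are set, and the checks are ordered
 * so that invalid-operation takes priority over the milder causes.
 */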
749 static inline int __parse_fpscr(unsigned long fpscr)
750 {
751 	int ret = 0;
752 
753 	/* Invalid operation */
754 	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
755 		ret = FPE_FLTINV;
756 
757 	/* Overflow */
758 	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
759 		ret = FPE_FLTOVF;
760 
761 	/* Underflow */
762 	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
763 		ret = FPE_FLTUND;
764 
765 	/* Divide by zero */
766 	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
767 		ret = FPE_FLTDIV;
768 
769 	/* Inexact result */
770 	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
771 		ret = FPE_FLTRES;
772 
773 	return ret;
774 }
775 
776 static void parse_fpe(struct pt_regs *regs)
777 {
778 	int code = 0;
779 
780 	flush_fp_to_thread(current);
781 
782 	code = __parse_fpscr(current->thread.fp_state.fpscr);
783 
784 	_exception(SIGFPE, regs, code, regs->nip);
785 }
786 
787 /*
788  * Illegal instruction emulation support.  Originally written to
789  * provide the PVR to user applications using the mfspr rd, PVR.
790  * Return non-zero if we can't emulate, or -EFAULT if the associated
791  * memory access caused an access fault.  Return zero on success.
792  *
793  * There are a couple of ways to do this, either "decode" the instruction
794  * or directly match lots of bits.  In this case, matching lots of
795  * bits is faster and easier.
796  *
797  */
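/*
 * As an illustration of the bit-matching approach (a sketch; the
 * authoritative encodings and masks live in asm/ppc-opcode.h):
 * mfspr rD,PVR encodes as 0x7c1f42a6 with only the rD field (bits
 * 6-10 in IBM numbering) free to vary, so
 *
 *	(instword & 0xfc1fffff) == 0x7c1f42a6
 *
 * identifies the instruction and (instword >> 21) & 0x1f extracts rD.
 */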
798 static int emulate_string_inst(struct pt_regs *regs, u32 instword)
799 {
800 	u8 rT = (instword >> 21) & 0x1f;
801 	u8 rA = (instword >> 16) & 0x1f;
802 	u8 NB_RB = (instword >> 11) & 0x1f;
803 	u32 num_bytes;
804 	unsigned long EA;
805 	int pos = 0;
806 
807 	/* Early out if we are an invalid form of lswx */
808 	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
809 		if ((rT == rA) || (rT == NB_RB))
810 			return -EINVAL;
811 
812 	EA = (rA == 0) ? 0 : regs->gpr[rA];
813 
814 	switch (instword & PPC_INST_STRING_MASK) {
815 		case PPC_INST_LSWX:
816 		case PPC_INST_STSWX:
817 			EA += NB_RB;
818 			num_bytes = regs->xer & 0x7f;
819 			break;
820 		case PPC_INST_LSWI:
821 		case PPC_INST_STSWI:
822 			num_bytes = (NB_RB == 0) ? 32 : NB_RB;
823 			break;
824 		default:
825 			return -EINVAL;
826 	}
827 
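	/*
	 * Transfer one byte per iteration.  Bytes fill each GPR from the
	 * most significant end (shifts 24, 16, 8, 0), and after every four
	 * bytes we move on to the next register, wrapping from r31 back to
	 * r0 as the string instructions require.
	 */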
	while (num_bytes != 0) {
830 		u8 val;
831 		u32 shift = 8 * (3 - (pos & 0x3));
832 
833 		/* if process is 32-bit, clear upper 32 bits of EA */
834 		if ((regs->msr & MSR_64BIT) == 0)
835 			EA &= 0xFFFFFFFF;
836 
837 		switch ((instword & PPC_INST_STRING_MASK)) {
838 			case PPC_INST_LSWX:
839 			case PPC_INST_LSWI:
840 				if (get_user(val, (u8 __user *)EA))
841 					return -EFAULT;
842 				/* first time updating this reg,
843 				 * zero it out */
844 				if (pos == 0)
845 					regs->gpr[rT] = 0;
846 				regs->gpr[rT] |= val << shift;
847 				break;
848 			case PPC_INST_STSWI:
849 			case PPC_INST_STSWX:
850 				val = regs->gpr[rT] >> shift;
851 				if (put_user(val, (u8 __user *)EA))
852 					return -EFAULT;
853 				break;
854 		}
855 		/* move EA to next address */
856 		EA += 1;
857 		num_bytes--;
858 
859 		/* manage our position within the register */
860 		if (++pos == 4) {
861 			pos = 0;
862 			if (++rT == 32)
863 				rT = 0;
864 		}
865 	}
866 
867 	return 0;
868 }
869 
870 static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
871 {
	u32 ra, rs;
873 	unsigned long tmp;
874 
875 	ra = (instword >> 16) & 0x1f;
876 	rs = (instword >> 21) & 0x1f;
877 
878 	tmp = regs->gpr[rs];
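	/*
	 * Classic SWAR bit count, kept per byte, which is exactly the
	 * popcntb semantic: reduce each pair of bits to a 2-bit count,
	 * then each nibble to a 4-bit count, then fold the two nibbles
	 * of every byte together; the masks keep each count inside its
	 * own byte.
	 */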
879 	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
880 	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
881 	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
882 	regs->gpr[ra] = tmp;
883 
884 	return 0;
885 }
886 
887 static int emulate_isel(struct pt_regs *regs, u32 instword)
888 {
889 	u8 rT = (instword >> 21) & 0x1f;
890 	u8 rA = (instword >> 16) & 0x1f;
891 	u8 rB = (instword >> 11) & 0x1f;
892 	u8 BC = (instword >> 6) & 0x1f;
893 	u8 bit;
894 	unsigned long tmp;
895 
896 	tmp = (rA == 0) ? 0 : regs->gpr[rA];
897 	bit = (regs->ccr >> (31 - BC)) & 0x1;
898 
899 	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];
900 
901 	return 0;
902 }
903 
904 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
905 static inline bool tm_abort_check(struct pt_regs *regs, int cause)
906 {
	/* If we're emulating a load/store in an active transaction, we cannot
	 * emulate it as the kernel operates in transaction suspended context.
	 * We need to abort the transaction.  This creates a persistent TM
	 * abort so tell the user what caused it with a new code.
911 	 */
912 	if (MSR_TM_TRANSACTIONAL(regs->msr)) {
913 		tm_enable();
914 		tm_abort(cause);
915 		return true;
916 	}
917 	return false;
918 }
919 #else
920 static inline bool tm_abort_check(struct pt_regs *regs, int reason)
921 {
922 	return false;
923 }
924 #endif
925 
926 static int emulate_instruction(struct pt_regs *regs)
927 {
928 	u32 instword;
929 	u32 rd;
930 
931 	if (!user_mode(regs))
932 		return -EINVAL;
933 	CHECK_FULL_REGS(regs);
934 
935 	if (get_user(instword, (u32 __user *)(regs->nip)))
936 		return -EFAULT;
937 
938 	/* Emulate the mfspr rD, PVR. */
939 	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
940 		PPC_WARN_EMULATED(mfpvr, regs);
941 		rd = (instword >> 21) & 0x1f;
942 		regs->gpr[rd] = mfspr(SPRN_PVR);
943 		return 0;
944 	}
945 
946 	/* Emulating the dcba insn is just a no-op.  */
947 	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
948 		PPC_WARN_EMULATED(dcba, regs);
949 		return 0;
950 	}
951 
952 	/* Emulate the mcrxr insn.  */
953 	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
954 		int shift = (instword >> 21) & 0x1c;
955 		unsigned long msk = 0xf0000000UL >> shift;
956 
957 		PPC_WARN_EMULATED(mcrxr, regs);
958 		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
959 		regs->xer &= ~0xf0000000UL;
960 		return 0;
961 	}
962 
963 	/* Emulate load/store string insn. */
964 	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
965 		if (tm_abort_check(regs,
966 				   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
967 			return -EINVAL;
968 		PPC_WARN_EMULATED(string, regs);
969 		return emulate_string_inst(regs, instword);
970 	}
971 
972 	/* Emulate the popcntb (Population Count Bytes) instruction. */
973 	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
974 		PPC_WARN_EMULATED(popcntb, regs);
975 		return emulate_popcntb_inst(regs, instword);
976 	}
977 
978 	/* Emulate isel (Integer Select) instruction */
979 	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
980 		PPC_WARN_EMULATED(isel, regs);
981 		return emulate_isel(regs, instword);
982 	}
983 
984 	/* Emulate sync instruction variants */
985 	if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
986 		PPC_WARN_EMULATED(sync, regs);
987 		asm volatile("sync");
988 		return 0;
989 	}
990 
991 #ifdef CONFIG_PPC64
992 	/* Emulate the mfspr rD, DSCR. */
993 	if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
994 		PPC_INST_MFSPR_DSCR_USER) ||
995 	     ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
996 		PPC_INST_MFSPR_DSCR)) &&
997 			cpu_has_feature(CPU_FTR_DSCR)) {
998 		PPC_WARN_EMULATED(mfdscr, regs);
999 		rd = (instword >> 21) & 0x1f;
1000 		regs->gpr[rd] = mfspr(SPRN_DSCR);
1001 		return 0;
1002 	}
1003 	/* Emulate the mtspr DSCR, rD. */
1004 	if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
1005 		PPC_INST_MTSPR_DSCR_USER) ||
1006 	     ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
1007 		PPC_INST_MTSPR_DSCR)) &&
1008 			cpu_has_feature(CPU_FTR_DSCR)) {
1009 		PPC_WARN_EMULATED(mtdscr, regs);
1010 		rd = (instword >> 21) & 0x1f;
1011 		current->thread.dscr = regs->gpr[rd];
1012 		current->thread.dscr_inherit = 1;
1013 		mtspr(SPRN_DSCR, current->thread.dscr);
1014 		return 0;
1015 	}
1016 #endif
1017 
1018 	return -EINVAL;
1019 }
1020 
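/*
 * Called by the generic report_bug() code: a trap-based BUG()/WARN()
 * site is only plausible if its address is in kernel space.
 */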
1021 int is_valid_bugaddr(unsigned long addr)
1022 {
1023 	return is_kernel_addr(addr);
1024 }
1025 
1026 #ifdef CONFIG_MATH_EMULATION
1027 static int emulate_math(struct pt_regs *regs)
1028 {
1029 	int ret;
1030 	extern int do_mathemu(struct pt_regs *regs);
1031 
1032 	ret = do_mathemu(regs);
1033 	if (ret >= 0)
1034 		PPC_WARN_EMULATED(math, regs);
1035 
1036 	switch (ret) {
1037 	case 0:
1038 		emulate_single_step(regs);
1039 		return 0;
1040 	case 1: {
1041 			int code = 0;
1042 			code = __parse_fpscr(current->thread.fp_state.fpscr);
1043 			_exception(SIGFPE, regs, code, regs->nip);
1044 			return 0;
1045 		}
1046 	case -EFAULT:
1047 		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1048 		return 0;
1049 	}
1050 
1051 	return -1;
1052 }
1053 #else
1054 static inline int emulate_math(struct pt_regs *regs) { return -1; }
1055 #endif
1056 
1057 void program_check_exception(struct pt_regs *regs)
1058 {
1059 	enum ctx_state prev_state = exception_enter();
1060 	unsigned int reason = get_reason(regs);
1061 
	/* We can now get here via an FP Unavailable exception if the core
	 * has no FPU; in that case the reason flags will be 0. */
1064 
1065 	if (reason & REASON_FP) {
1066 		/* IEEE FP exception */
1067 		parse_fpe(regs);
1068 		goto bail;
1069 	}
1070 	if (reason & REASON_TRAP) {
1071 		unsigned long bugaddr;
1072 		/* Debugger is first in line to stop recursive faults in
1073 		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
1074 		if (debugger_bpt(regs))
1075 			goto bail;
1076 
1077 		if (kprobe_handler(regs))
1078 			goto bail;
1079 
1080 		/* trap exception */
1081 		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
1082 				== NOTIFY_STOP)
1083 			goto bail;
1084 
1085 		bugaddr = regs->nip;
1086 		/*
1087 		 * Fixup bugaddr for BUG_ON() in real mode
1088 		 */
1089 		if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR))
1090 			bugaddr += PAGE_OFFSET;
1091 
1092 		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
1093 		    report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
1094 			regs->nip += 4;
1095 			goto bail;
1096 		}
1097 		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
1098 		goto bail;
1099 	}
1100 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1101 	if (reason & REASON_TM) {
1102 		/* This is a TM "Bad Thing Exception" program check.
1103 		 * This occurs when:
1104 		 * -  An rfid/hrfid/mtmsrd attempts to cause an illegal
1105 		 *    transition in TM states.
1106 		 * -  A trechkpt is attempted when transactional.
		 * -  A treclaim is attempted when non-transactional.
		 * -  A tend is illegally attempted.
		 * -  A TM SPR is written while transactional.
1110 		 */
1111 		if (!user_mode(regs) &&
1112 		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
1113 			regs->nip += 4;
1114 			goto bail;
1115 		}
1116 		/* If usermode caused this, it's done something illegal and
1117 		 * gets a SIGILL slap on the wrist.  We call it an illegal
1118 		 * operand to distinguish from the instruction just being bad
1119 		 * (e.g. executing a 'tend' on a CPU without TM!); it's an
1120 		 * illegal /placement/ of a valid instruction.
1121 		 */
1122 		if (user_mode(regs)) {
1123 			_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
1124 			goto bail;
1125 		} else {
1126 			printk(KERN_EMERG "Unexpected TM Bad Thing exception "
1127 			       "at %lx (msr 0x%x)\n", regs->nip, reason);
1128 			die("Unrecoverable exception", regs, SIGABRT);
1129 		}
1130 	}
1131 #endif
1132 
1133 	/*
	 * If we took the program check in the kernel, skip down to sending a
	 * SIGILL. The subsequent cases all relate to emulating instructions,
	 * which we should only do for userspace. We also do not want to enable
	 * interrupts for kernel faults, because that might lead to further
	 * faults and lose the context of the original exception.
1139 	 */
1140 	if (!user_mode(regs))
1141 		goto sigill;
1142 
1143 	/* We restore the interrupt state now */
1144 	if (!arch_irq_disabled_regs(regs))
1145 		local_irq_enable();
1146 
1147 	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
1148 	 * but there seems to be a hardware bug on the 405GP (RevD)
1149 	 * that means ESR is sometimes set incorrectly - either to
1150 	 * ESR_DST (!?) or 0.  In the process of chasing this with the
1151 	 * hardware people - not sure if it can happen on any illegal
1152 	 * instruction or only on FP instructions, whether there is a
1153 	 * pattern to occurrences etc. -dgibson 31/Mar/2003
1154 	 */
1155 	if (!emulate_math(regs))
1156 		goto bail;
1157 
1158 	/* Try to emulate it if we should. */
1159 	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
1160 		switch (emulate_instruction(regs)) {
1161 		case 0:
1162 			regs->nip += 4;
1163 			emulate_single_step(regs);
1164 			goto bail;
1165 		case -EFAULT:
1166 			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1167 			goto bail;
1168 		}
1169 	}
1170 
1171 sigill:
1172 	if (reason & REASON_PRIVILEGED)
1173 		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
1174 	else
1175 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1176 
1177 bail:
1178 	exception_exit(prev_state);
1179 }
1180 NOKPROBE_SYMBOL(program_check_exception);
1181 
1182 /*
1183  * This occurs when running in hypervisor mode on POWER6 or later
1184  * and an illegal instruction is encountered.
1185  */
1186 void emulation_assist_interrupt(struct pt_regs *regs)
1187 {
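	/* Present this to program_check_exception() as an illegal-operation program check. */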
1188 	regs->msr |= REASON_ILLEGAL;
1189 	program_check_exception(regs);
1190 }
1191 NOKPROBE_SYMBOL(emulation_assist_interrupt);
1192 
1193 void alignment_exception(struct pt_regs *regs)
1194 {
1195 	enum ctx_state prev_state = exception_enter();
1196 	int sig, code, fixed = 0;
1197 
1198 	/* We restore the interrupt state now */
1199 	if (!arch_irq_disabled_regs(regs))
1200 		local_irq_enable();
1201 
1202 	if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
1203 		goto bail;
1204 
1205 	/* we don't implement logging of alignment exceptions */
1206 	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
1207 		fixed = fix_alignment(regs);
1208 
1209 	if (fixed == 1) {
1210 		regs->nip += 4;	/* skip over emulated instruction */
1211 		emulate_single_step(regs);
1212 		goto bail;
1213 	}
1214 
1215 	/* Operand address was bad */
1216 	if (fixed == -EFAULT) {
1217 		sig = SIGSEGV;
1218 		code = SEGV_ACCERR;
1219 	} else {
1220 		sig = SIGBUS;
1221 		code = BUS_ADRALN;
1222 	}
1223 	if (user_mode(regs))
1224 		_exception(sig, regs, code, regs->dar);
1225 	else
1226 		bad_page_fault(regs, regs->dar, sig);
1227 
1228 bail:
1229 	exception_exit(prev_state);
1230 }
1231 
1232 void slb_miss_bad_addr(struct pt_regs *regs)
1233 {
1234 	enum ctx_state prev_state = exception_enter();
1235 
1236 	if (user_mode(regs))
1237 		_exception(SIGSEGV, regs, SEGV_BNDERR, regs->dar);
1238 	else
1239 		bad_page_fault(regs, regs->dar, SIGSEGV);
1240 
1241 	exception_exit(prev_state);
1242 }
1243 
1244 void StackOverflow(struct pt_regs *regs)
1245 {
1246 	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
1247 	       current, regs->gpr[1]);
1248 	debugger(regs);
1249 	show_regs(regs);
1250 	panic("kernel stack overflow");
1251 }
1252 
1253 void nonrecoverable_exception(struct pt_regs *regs)
1254 {
1255 	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
1256 	       regs->nip, regs->msr);
1257 	debugger(regs);
1258 	die("nonrecoverable exception", regs, SIGKILL);
1259 }
1260 
1261 void kernel_fp_unavailable_exception(struct pt_regs *regs)
1262 {
1263 	enum ctx_state prev_state = exception_enter();
1264 
1265 	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
1266 			  "%lx at %lx\n", regs->trap, regs->nip);
1267 	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
1268 
1269 	exception_exit(prev_state);
1270 }
1271 
1272 void altivec_unavailable_exception(struct pt_regs *regs)
1273 {
1274 	enum ctx_state prev_state = exception_enter();
1275 
1276 	if (user_mode(regs)) {
1277 		/* A user program has executed an altivec instruction,
1278 		   but this kernel doesn't support altivec. */
1279 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1280 		goto bail;
1281 	}
1282 
1283 	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
1284 			"%lx at %lx\n", regs->trap, regs->nip);
1285 	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
1286 
1287 bail:
1288 	exception_exit(prev_state);
1289 }
1290 
1291 void vsx_unavailable_exception(struct pt_regs *regs)
1292 {
1293 	if (user_mode(regs)) {
		/* A user program has executed a VSX instruction,
		   but this kernel doesn't support VSX. */
1296 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1297 		return;
1298 	}
1299 
1300 	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
1301 			"%lx at %lx\n", regs->trap, regs->nip);
1302 	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
1303 }
1304 
1305 #ifdef CONFIG_PPC64
1306 static void tm_unavailable(struct pt_regs *regs)
1307 {
1308 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1309 	if (user_mode(regs)) {
1310 		current->thread.load_tm++;
1311 		regs->msr |= MSR_TM;
1312 		tm_enable();
1313 		tm_restore_sprs(&current->thread);
1314 		return;
1315 	}
1316 #endif
1317 	pr_emerg("Unrecoverable TM Unavailable Exception "
1318 			"%lx at %lx\n", regs->trap, regs->nip);
1319 	die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
1320 }
1321 
1322 void facility_unavailable_exception(struct pt_regs *regs)
1323 {
1324 	static char *facility_strings[] = {
1325 		[FSCR_FP_LG] = "FPU",
1326 		[FSCR_VECVSX_LG] = "VMX/VSX",
1327 		[FSCR_DSCR_LG] = "DSCR",
1328 		[FSCR_PM_LG] = "PMU SPRs",
1329 		[FSCR_BHRB_LG] = "BHRB",
1330 		[FSCR_TM_LG] = "TM",
1331 		[FSCR_EBB_LG] = "EBB",
1332 		[FSCR_TAR_LG] = "TAR",
1333 		[FSCR_MSGP_LG] = "MSGP",
1334 		[FSCR_SCV_LG] = "SCV",
1335 	};
1336 	char *facility = "unknown";
1337 	u64 value;
1338 	u32 instword, rd;
1339 	u8 status;
1340 	bool hv;
1341 
1342 	hv = (regs->trap == 0xf80);
1343 	if (hv)
1344 		value = mfspr(SPRN_HFSCR);
1345 	else
1346 		value = mfspr(SPRN_FSCR);
1347 
1348 	status = value >> 56;
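	/* The top byte of the (H)FSCR identifies which facility trapped. */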
1349 	if (status == FSCR_DSCR_LG) {
1350 		/*
1351 		 * User is accessing the DSCR register using the problem
1352 		 * state only SPR number (0x03) either through a mfspr or
1353 		 * a mtspr instruction. If it is a write attempt through
1354 		 * a mtspr, then we set the inherit bit. This also allows
1355 		 * the user to write or read the register directly in the
1356 		 * future by setting via the FSCR DSCR bit. But in case it
1357 		 * is a read DSCR attempt through a mfspr instruction, we
1358 		 * just emulate the instruction instead. This code path will
1359 		 * always emulate all the mfspr instructions till the user
1360 		 * has attempted at least one mtspr instruction. This way it
1361 		 * preserves the same behaviour when the user is accessing
1362 		 * the DSCR through privilege level only SPR number (0x11)
1363 		 * which is emulated through illegal instruction exception.
1364 		 * We always leave HFSCR DSCR set.
1365 		 */
1366 		if (get_user(instword, (u32 __user *)(regs->nip))) {
1367 			pr_err("Failed to fetch the user instruction\n");
1368 			return;
1369 		}
1370 
1371 		/* Write into DSCR (mtspr 0x03, RS) */
1372 		if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
1373 				== PPC_INST_MTSPR_DSCR_USER) {
1374 			rd = (instword >> 21) & 0x1f;
1375 			current->thread.dscr = regs->gpr[rd];
1376 			current->thread.dscr_inherit = 1;
1377 			current->thread.fscr |= FSCR_DSCR;
1378 			mtspr(SPRN_FSCR, current->thread.fscr);
1379 		}
1380 
1381 		/* Read from DSCR (mfspr RT, 0x03) */
1382 		if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
1383 				== PPC_INST_MFSPR_DSCR_USER) {
1384 			if (emulate_instruction(regs)) {
1385 				pr_err("DSCR based mfspr emulation failed\n");
1386 				return;
1387 			}
1388 			regs->nip += 4;
1389 			emulate_single_step(regs);
1390 		}
1391 		return;
1392 	}
1393 
1394 	if (status == FSCR_TM_LG) {
1395 		/*
1396 		 * If we're here then the hardware is TM aware because it
		 * generated an exception with FSCR_TM set.
1398 		 *
1399 		 * If cpu_has_feature(CPU_FTR_TM) is false, then either firmware
1400 		 * told us not to do TM, or the kernel is not built with TM
1401 		 * support.
1402 		 *
1403 		 * If both of those things are true, then userspace can spam the
1404 		 * console by triggering the printk() below just by continually
1405 		 * doing tbegin (or any TM instruction). So in that case just
1406 		 * send the process a SIGILL immediately.
1407 		 */
1408 		if (!cpu_has_feature(CPU_FTR_TM))
1409 			goto out;
1410 
1411 		tm_unavailable(regs);
1412 		return;
1413 	}
1414 
1415 	if ((hv || status >= 2) &&
1416 	    (status < ARRAY_SIZE(facility_strings)) &&
1417 	    facility_strings[status])
1418 		facility = facility_strings[status];
1419 
1420 	/* We restore the interrupt state now */
1421 	if (!arch_irq_disabled_regs(regs))
1422 		local_irq_enable();
1423 
1424 	pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n",
1425 		hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr);
1426 
1427 out:
1428 	if (user_mode(regs)) {
1429 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1430 		return;
1431 	}
1432 
1433 	die("Unexpected facility unavailable exception", regs, SIGABRT);
1434 }
1435 #endif
1436 
1437 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1438 
1439 void fp_unavailable_tm(struct pt_regs *regs)
1440 {
1441 	/* Note:  This does not handle any kind of FP laziness. */
1442 
1443 	TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
1444 		 regs->nip, regs->msr);
1445 
	/* We can only have got here if the task started using FP after
	 * beginning the transaction.  So, the transactional regs are just a
	 * copy of the checkpointed ones.  But, we still need to recheckpoint
	 * as we're enabling FP for the process; it will return, abort the
	 * transaction, and probably retry but now with FP enabled.  So the
	 * checkpointed FP registers need to be loaded.
1452 	 */
1453 	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1454 	/* Reclaim didn't save out any FPRs to transact_fprs. */
1455 
1456 	/* Enable FP for the task: */
1457 	regs->msr |= (MSR_FP | current->thread.fpexc_mode);
1458 
1459 	/* This loads and recheckpoints the FP registers from
1460 	 * thread.fpr[].  They will remain in registers after the
1461 	 * checkpoint so we don't need to reload them after.
1462 	 * If VMX is in use, the VRs now hold checkpointed values,
1463 	 * so we don't want to load the VRs from the thread_struct.
1464 	 */
1465 	tm_recheckpoint(&current->thread, MSR_FP);
1466 
1467 	/* If VMX is in use, get the transactional values back */
1468 	if (regs->msr & MSR_VEC) {
1469 		msr_check_and_set(MSR_VEC);
1470 		load_vr_state(&current->thread.vr_state);
1471 		/* At this point all the VSX state is loaded, so enable it */
1472 		regs->msr |= MSR_VSX;
1473 	}
1474 }
1475 
1476 void altivec_unavailable_tm(struct pt_regs *regs)
1477 {
1478 	/* See the comments in fp_unavailable_tm().  This function operates
1479 	 * the same way.
1480 	 */
1481 
	TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx, "
		 "MSR=%lx\n",
1484 		 regs->nip, regs->msr);
1485 	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1486 	regs->msr |= MSR_VEC;
1487 	tm_recheckpoint(&current->thread, MSR_VEC);
1488 	current->thread.used_vr = 1;
1489 
1490 	if (regs->msr & MSR_FP) {
1491 		msr_check_and_set(MSR_FP);
1492 		load_fp_state(&current->thread.fp_state);
1493 		regs->msr |= MSR_VSX;
1494 	}
1495 }
1496 
1497 void vsx_unavailable_tm(struct pt_regs *regs)
1498 {
1499 	unsigned long orig_msr = regs->msr;
1500 
1501 	/* See the comments in fp_unavailable_tm().  This works similarly,
1502 	 * though we're loading both FP and VEC registers in here.
1503 	 *
1504 	 * If FP isn't in use, load FP regs.  If VEC isn't in use, load VEC
1505 	 * regs.  Either way, set MSR_VSX.
1506 	 */
1507 
	TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx, "
		 "MSR=%lx\n",
1510 		 regs->nip, regs->msr);
1511 
1512 	current->thread.used_vsr = 1;
1513 
1514 	/* If FP and VMX are already loaded, we have all the state we need */
1515 	if ((orig_msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) {
1516 		regs->msr |= MSR_VSX;
1517 		return;
1518 	}
1519 
1520 	/* This reclaims FP and/or VR regs if they're already enabled */
1521 	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1522 
1523 	regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode |
1524 		MSR_VSX;
1525 
1526 	/* This loads & recheckpoints FP and VRs; but we have
1527 	 * to be sure not to overwrite previously-valid state.
1528 	 */
1529 	tm_recheckpoint(&current->thread, regs->msr & ~orig_msr);
1530 
1531 	msr_check_and_set(orig_msr & (MSR_FP | MSR_VEC));
1532 
1533 	if (orig_msr & MSR_FP)
1534 		load_fp_state(&current->thread.fp_state);
1535 	if (orig_msr & MSR_VEC)
1536 		load_vr_state(&current->thread.vr_state);
1537 }
1538 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1539 
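/*
 * Just count the PMU interrupt and hand it to whoever registered
 * perf_irq (normally the perf events subsystem).
 */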
1540 void performance_monitor_exception(struct pt_regs *regs)
1541 {
1542 	__this_cpu_inc(irq_stat.pmu_irqs);
1543 
1544 	perf_irq(regs);
1545 }
1546 
1547 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1548 static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
1549 {
1550 	int changed = 0;
1551 	/*
1552 	 * Determine the cause of the debug event, clear the
1553 	 * event flags and send a trap to the handler. Torez
1554 	 */
1555 	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
1556 		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
1557 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1558 		current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
1559 #endif
1560 		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
1561 			     5);
1562 		changed |= 0x01;
1563 	}  else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
1564 		dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
1565 		do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT,
1566 			     6);
1567 		changed |= 0x01;
1568 	}  else if (debug_status & DBSR_IAC1) {
1569 		current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
1570 		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
1571 		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
1572 			     1);
1573 		changed |= 0x01;
1574 	}  else if (debug_status & DBSR_IAC2) {
1575 		current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
1576 		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
1577 			     2);
1578 		changed |= 0x01;
1579 	}  else if (debug_status & DBSR_IAC3) {
1580 		current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
1581 		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
1582 		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
1583 			     3);
1584 		changed |= 0x01;
1585 	}  else if (debug_status & DBSR_IAC4) {
1586 		current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
1587 		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
1588 			     4);
1589 		changed |= 0x01;
1590 	}
1591 	/*
	 * By the time this routine is called, MSR[DE] has already been
	 * cleared.  Check the remaining debug flags and decide whether it
	 * needs to be turned back on.
1595 	 */
1596 	if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
1597 			       current->thread.debug.dbcr1))
1598 		regs->msr |= MSR_DE;
1599 	else
1600 		/* Make sure the IDM flag is off */
1601 		current->thread.debug.dbcr0 &= ~DBCR0_IDM;
1602 
1603 	if (changed & 0x01)
1604 		mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
1605 }
1606 
1607 void DebugException(struct pt_regs *regs, unsigned long debug_status)
1608 {
1609 	current->thread.debug.dbsr = debug_status;
1610 
1611 	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
1612 	 * on server, it stops on the target of the branch. In order to simulate
1613 	 * the server behaviour, we thus restart right away with a single step
1614 	 * instead of stopping here when hitting a BT
1615 	 */
1616 	if (debug_status & DBSR_BT) {
1617 		regs->msr &= ~MSR_DE;
1618 
1619 		/* Disable BT */
1620 		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
1621 		/* Clear the BT event */
1622 		mtspr(SPRN_DBSR, DBSR_BT);
1623 
1624 		/* Do the single step trick only when coming from userspace */
1625 		if (user_mode(regs)) {
1626 			current->thread.debug.dbcr0 &= ~DBCR0_BT;
1627 			current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
1628 			regs->msr |= MSR_DE;
1629 			return;
1630 		}
1631 
1632 		if (kprobe_post_handler(regs))
1633 			return;
1634 
1635 		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
1636 			       5, SIGTRAP) == NOTIFY_STOP) {
1637 			return;
1638 		}
1639 		if (debugger_sstep(regs))
1640 			return;
1641 	} else if (debug_status & DBSR_IC) { 	/* Instruction complete */
1642 		regs->msr &= ~MSR_DE;
1643 
1644 		/* Disable instruction completion */
1645 		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
1646 		/* Clear the instruction completion event */
1647 		mtspr(SPRN_DBSR, DBSR_IC);
1648 
1649 		if (kprobe_post_handler(regs))
1650 			return;
1651 
1652 		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
1653 			       5, SIGTRAP) == NOTIFY_STOP) {
1654 			return;
1655 		}
1656 
1657 		if (debugger_sstep(regs))
1658 			return;
1659 
1660 		if (user_mode(regs)) {
1661 			current->thread.debug.dbcr0 &= ~DBCR0_IC;
1662 			if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
1663 					       current->thread.debug.dbcr1))
1664 				regs->msr |= MSR_DE;
1665 			else
1666 				/* Make sure the IDM bit is off */
1667 				current->thread.debug.dbcr0 &= ~DBCR0_IDM;
1668 		}
1669 
1670 		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
1671 	} else
1672 		handle_debug(regs, debug_status);
1673 }
1674 NOKPROBE_SYMBOL(DebugException);
1675 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
1676 
1677 #if !defined(CONFIG_TAU_INT)
1678 void TAUException(struct pt_regs *regs)
1679 {
1680 	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
1681 	       regs->nip, regs->msr, regs->trap, print_tainted());
1682 }
#endif /* CONFIG_TAU_INT */
1684 
1685 #ifdef CONFIG_ALTIVEC
1686 void altivec_assist_exception(struct pt_regs *regs)
1687 {
1688 	int err;
1689 
1690 	if (!user_mode(regs)) {
1691 		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
1692 		       " at %lx\n", regs->nip);
1693 		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
1694 	}
1695 
1696 	flush_altivec_to_thread(current);
1697 
1698 	PPC_WARN_EMULATED(altivec, regs);
1699 	err = emulate_altivec(regs);
1700 	if (err == 0) {
1701 		regs->nip += 4;		/* skip emulated instruction */
1702 		emulate_single_step(regs);
1703 		return;
1704 	}
1705 
1706 	if (err == -EFAULT) {
1707 		/* got an error reading the instruction */
1708 		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
1709 	} else {
1710 		/* didn't recognize the instruction */
1711 		/* XXX quick hack for now: set the non-Java bit in the VSCR */
1712 		printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
1713 				   "in %s at %lx\n", current->comm, regs->nip);
1714 		current->thread.vr_state.vscr.u[3] |= 0x10000;
1715 	}
1716 }
1717 #endif /* CONFIG_ALTIVEC */
1718 
1719 #ifdef CONFIG_FSL_BOOKE
1720 void CacheLockingException(struct pt_regs *regs, unsigned long address,
1721 			   unsigned long error_code)
1722 {
	/* We treat cache locking instructions from the user
	 * as privileged ops; in the future we could try to do
	 * something smarter.
1726 	 */
	if (error_code & (ESR_DLK | ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
1730 }
1731 #endif /* CONFIG_FSL_BOOKE */
1732 
1733 #ifdef CONFIG_SPE
1734 void SPEFloatingPointException(struct pt_regs *regs)
1735 {
1736 	extern int do_spe_mathemu(struct pt_regs *regs);
1737 	unsigned long spefscr;
1738 	int fpexc_mode;
1739 	int code = 0;
1740 	int err;
1741 
1742 	flush_spe_to_thread(current);
1743 
1744 	spefscr = current->thread.spefscr;
1745 	fpexc_mode = current->thread.fpexc_mode;
1746 
	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF))
		code = FPE_FLTOVF;
	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND))
		code = FPE_FLTUND;
	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
		code = FPE_FLTDIV;
	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV))
		code = FPE_FLTINV;
	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
		code = FPE_FLTRES;
1760 
1761 	err = do_spe_mathemu(regs);
1762 	if (err == 0) {
1763 		regs->nip += 4;		/* skip emulated instruction */
1764 		emulate_single_step(regs);
1765 		return;
1766 	}
1767 
1768 	if (err == -EFAULT) {
1769 		/* got an error reading the instruction */
1770 		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
1771 	} else if (err == -EINVAL) {
1772 		/* didn't recognize the instruction */
1773 		printk(KERN_ERR "unrecognized spe instruction "
1774 		       "in %s at %lx\n", current->comm, regs->nip);
1775 	} else {
1776 		_exception(SIGFPE, regs, code, regs->nip);
1777 	}
1780 }
1781 
1782 void SPEFloatingPointRoundException(struct pt_regs *regs)
1783 {
1784 	extern int speround_handler(struct pt_regs *regs);
1785 	int err;
1786 
1787 	preempt_disable();
1788 	if (regs->msr & MSR_SPE)
1789 		giveup_spe(current);
1790 	preempt_enable();
1791 
1792 	regs->nip -= 4;
1793 	err = speround_handler(regs);
1794 	if (err == 0) {
1795 		regs->nip += 4;		/* skip emulated instruction */
1796 		emulate_single_step(regs);
1797 		return;
1798 	}
1799 
1800 	if (err == -EFAULT) {
1801 		/* got an error reading the instruction */
1802 		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
1803 	} else if (err == -EINVAL) {
1804 		/* didn't recognize the instruction */
1805 		printk(KERN_ERR "unrecognized spe instruction "
1806 		       "in %s at %lx\n", current->comm, regs->nip);
1807 	} else {
1808 		_exception(SIGFPE, regs, 0, regs->nip);
1809 		return;
1810 	}
1811 }
1812 #endif
1813 
1814 /*
1815  * We enter here if we get an unrecoverable exception, that is, one
1816  * that happened at a point where the RI (recoverable interrupt) bit
1817  * in the MSR is 0.  This indicates that SRR0/1 are live, and that
1818  * we therefore lost state by taking this exception.
1819  */
1820 void unrecoverable_exception(struct pt_regs *regs)
1821 {
1822 	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
1823 	       regs->trap, regs->nip);
1824 	die("Unrecoverable exception", regs, SIGABRT);
1825 }
1826 NOKPROBE_SYMBOL(unrecoverable_exception);
1827 
1828 #if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
1829 /*
1830  * Default handler for a Watchdog exception,
1831  * spins until a reboot occurs
1832  */
1833 void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
1834 {
1835 	/* Generic WatchdogHandler, implement your own */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR) & ~TCR_WIE);
1838 }
1839 
1840 void WatchdogException(struct pt_regs *regs)
1841 {
	printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
1843 	WatchdogHandler(regs);
1844 }
1845 #endif
1846 
1847 /*
1848  * We enter here if we discover during exception entry that we are
1849  * running in supervisor mode with a userspace value in the stack pointer.
1850  */
1851 void kernel_bad_stack(struct pt_regs *regs)
1852 {
1853 	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
1854 	       regs->gpr[1], regs->nip);
1855 	die("Bad kernel stack pointer", regs, SIGABRT);
1856 }
1857 NOKPROBE_SYMBOL(kernel_bad_stack);
1858 
1859 void __init trap_init(void)
1860 {
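	/* Nothing to do: powerpc exception vectors are set up in the early boot assembly. */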
1861 }
1862 
1863 
1864 #ifdef CONFIG_PPC_EMULATED_STATS
1865 
1866 #define WARN_EMULATED_SETUP(type)	.type = { .name = #type }
1867 
1868 struct ppc_emulated ppc_emulated = {
1869 #ifdef CONFIG_ALTIVEC
1870 	WARN_EMULATED_SETUP(altivec),
1871 #endif
1872 	WARN_EMULATED_SETUP(dcba),
1873 	WARN_EMULATED_SETUP(dcbz),
1874 	WARN_EMULATED_SETUP(fp_pair),
1875 	WARN_EMULATED_SETUP(isel),
1876 	WARN_EMULATED_SETUP(mcrxr),
1877 	WARN_EMULATED_SETUP(mfpvr),
1878 	WARN_EMULATED_SETUP(multiple),
1879 	WARN_EMULATED_SETUP(popcntb),
1880 	WARN_EMULATED_SETUP(spe),
1881 	WARN_EMULATED_SETUP(string),
1882 	WARN_EMULATED_SETUP(sync),
1883 	WARN_EMULATED_SETUP(unaligned),
1884 #ifdef CONFIG_MATH_EMULATION
1885 	WARN_EMULATED_SETUP(math),
1886 #endif
1887 #ifdef CONFIG_VSX
1888 	WARN_EMULATED_SETUP(vsx),
1889 #endif
1890 #ifdef CONFIG_PPC64
1891 	WARN_EMULATED_SETUP(mfdscr),
1892 	WARN_EMULATED_SETUP(mtdscr),
1893 	WARN_EMULATED_SETUP(lq_stq),
1894 #endif
1895 };
1896 
1897 u32 ppc_warn_emulated;
1898 
1899 void ppc_warn_emulated_print(const char *type)
1900 {
1901 	pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
1902 			    type);
1903 }
1904 
1905 static int __init ppc_warn_emulated_init(void)
1906 {
1907 	struct dentry *dir, *d;
1908 	unsigned int i;
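	/* ppc_emulated is a struct of identical entries; view it as an array to iterate. */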
1909 	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;
1910 
1911 	if (!powerpc_debugfs_root)
1912 		return -ENODEV;
1913 
1914 	dir = debugfs_create_dir("emulated_instructions",
1915 				 powerpc_debugfs_root);
1916 	if (!dir)
1917 		return -ENOMEM;
1918 
1919 	d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
1920 			       &ppc_warn_emulated);
1921 	if (!d)
1922 		goto fail;
1923 
1924 	for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
1925 		d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
1926 				       (u32 *)&entries[i].val.counter);
1927 		if (!d)
1928 			goto fail;
1929 	}
1930 
1931 	return 0;
1932 
1933 fail:
1934 	debugfs_remove_recursive(dir);
1935 	return -ENOMEM;
1936 }
1937 
1938 device_initcall(ppc_warn_emulated_init);
1939 
1940 #endif /* CONFIG_PPC_EMULATED_STATS */
1941