// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright 2007-2010 Freescale Semiconductor, Inc.
 *
 * Modified by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras (paulus@samba.org)
 */

/*
 * This file handles the architecture-dependent parts of hardware exceptions
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pkeys.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/extable.h>
#include <linux/module.h>	/* print_modules */
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <linux/smp.h>
#include <linux/console.h>
#include <linux/kmsg_dump.h>
#include <linux/debugfs.h>

#include <asm/emulated_ops.h>
#include <linux/uaccess.h>
#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#include <asm/reg.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/rio.h>
#include <asm/fadump.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#include <asm/asm-prototypes.h>
#include <asm/hmi.h>
#include <sysdev/fsl_pci.h>
#include <asm/kprobes.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/disassemble.h>
#include <asm/udbg.h>

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_break_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

/* Transactional Memory trap debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while(0)
#endif

static const char *signame(int signr)
{
	switch (signr) {
	case SIGBUS:	return "bus error";
	case SIGFPE:	return "floating point exception";
	case SIGILL:	return "illegal instruction";
	case SIGSEGV:	return "segfault";
	case SIGTRAP:	return "unhandled trap";
	}

	return "unknown signal";
}

/*
 * Trap & Exception support
 */

#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
	mutex_lock(&pmac_backlight_mutex);
	if (pmac_backlight) {
		struct backlight_properties *props;

		props = &pmac_backlight->props;
		props->brightness = props->max_brightness;
		props->power = FB_BLANK_UNBLANK;
		backlight_update_status(pmac_backlight);
	}
	mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif

/*
 * If oops/die is expected to crash the machine, return true here.
 *
 * This should not be expected to be 100% accurate, there may be
 * notifiers registered or other unexpected conditions that may bring
 * down the kernel. Or if the current process in the kernel is holding
 * locks or has other critical state, the kernel may become effectively
 * unusable anyway.
 */
bool die_will_crash(void)
{
	if (should_fadump_crash())
		return true;
	if (kexec_should_crash(current))
		return true;
	if (in_interrupt() || panic_on_oops ||
	    !current->pid || is_global_init(current))
		return true;

	return false;
}

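/*
 * die_lock serialises oops output across CPUs; die_owner and
 * die_nest_count track which CPU currently owns the lock and how
 * deeply its oopses are nested.
 */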
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;

extern void panic_flush_kmsg_start(void)
{
	/*
	 * These are mostly taken from kernel/panic.c, but try to do
	 * relatively minimal work. Don't use delay functions (TB may
	 * be broken), don't crash dump (need to set a firmware log),
	 * don't run notifiers. We do want to get some information to
	 * Linux console.
	 */
	console_verbose();
	bust_spinlocks(1);
}

extern void panic_flush_kmsg_end(void)
{
	kmsg_dump(KMSG_DUMP_PANIC);
	bust_spinlocks(0);
	debug_locks_off();
	console_flush_on_panic(CONSOLE_FLUSH_PENDING);
}

static unsigned long oops_begin(struct pt_regs *regs)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	if (machine_is(powermac))
		pmac_backlight_unblank();
	return flags;
}
NOKPROBE_SYMBOL(oops_begin);

static void oops_end(unsigned long flags, struct pt_regs *regs,
		     int signr)
{
	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	oops_exit();
	printk("\n");
	if (!die_nest_count) {
		/* Nest count reaches zero, release the lock. */
		die_owner = -1;
		arch_spin_unlock(&die_lock);
	}
	raw_local_irq_restore(flags);

	/*
	 * system_reset_exception handles debugger, crash dump, panic, for 0x100
	 */
	if (TRAP(regs) == INTERRUPT_SYSTEM_RESET)
		return;

	crash_fadump(regs, "die oops");

	if (kexec_should_crash(current))
		crash_kexec(regs);

	if (!signr)
		return;

	/*
	 * While our oops output is serialised by a spinlock, output
	 * from panic() called below can race and corrupt it. If we
	 * know we are going to panic, delay for 1 second so we have a
	 * chance to get clean backtraces from all CPUs that are oopsing.
	 */
	if (in_interrupt() || panic_on_oops || !current->pid ||
	    is_global_init(current)) {
		mdelay(MSEC_PER_SEC);
	}

	if (panic_on_oops)
		panic("Fatal exception");
	make_task_dead(signr);
}
NOKPROBE_SYMBOL(oops_end);

static char *get_mmu_str(void)
{
	if (early_radix_enabled())
		return " MMU=Radix";
	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return " MMU=Hash";
	return "";
}

static int __die(const char *str, struct pt_regs *regs, long err)
{
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);

	printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s %s\n",
	       IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE",
	       PAGE_SIZE / 1024, get_mmu_str(),
	       IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "",
	       IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
	       IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "",
	       debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
	       IS_ENABLED(CONFIG_NUMA) ? " NUMA" : "",
	       ppc_md.name ? ppc_md.name : "");

	if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
		return 1;

	print_modules();
	show_regs(regs);

	return 0;
}
NOKPROBE_SYMBOL(__die);

void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags;

	/*
	 * system_reset_exception handles debugger, crash dump, panic, for 0x100
	 */
	if (TRAP(regs) != INTERRUPT_SYSTEM_RESET) {
		if (debugger(regs))
			return;
	}

	flags = oops_begin(regs);
	if (__die(str, regs, err))
		err = 0;
	oops_end(flags, regs, err);
}
NOKPROBE_SYMBOL(die);

void user_single_step_report(struct pt_regs *regs)
{
	force_sig_fault(SIGTRAP, TRAP_TRACE, (void __user *)regs->nip);
}

static void show_signal_msg(int signr, struct pt_regs *regs, int code,
			    unsigned long addr)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (!show_unhandled_signals)
		return;

	if (!unhandled_signal(current, signr))
		return;

	if (!__ratelimit(&rs))
		return;

	pr_info("%s[%d]: %s (%d) at %lx nip %lx lr %lx code %x",
		current->comm, current->pid, signame(signr), signr,
		addr, regs->nip, regs->link, code);

	print_vma_addr(KERN_CONT " in ", regs->nip);

	pr_cont("\n");

	show_user_instructions(regs);
}

static bool exception_common(int signr, struct pt_regs *regs, int code,
			     unsigned long addr)
{
	if (!user_mode(regs)) {
		die("Exception in kernel mode", regs, signr);
		return false;
	}

	/*
	 * Must not enable interrupts even for user-mode exception, because
	 * this can be called from machine check, which may be a NMI or IRQ
	 * which don't like interrupts being enabled. Could check for
	 * in_hardirq || in_nmi perhaps, but there doesn't seem to be a good
	 * reason why _exception() should enable irqs for an exception handler,
	 * the handlers themselves do that directly.
	 */

	show_signal_msg(signr, regs, code, addr);

	current->thread.trap_nr = code;

	return true;
}

void _exception_pkey(struct pt_regs *regs, unsigned long addr, int key)
{
	if (!exception_common(SIGSEGV, regs, SEGV_PKUERR, addr))
		return;

	force_sig_pkuerr((void __user *) addr, key);
}

void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	if (!exception_common(signr, regs, code, addr))
		return;

	force_sig_fault(signr, code, (void __user *)addr);
}

/*
 * The interrupt architecture has a quirk in that the HV interrupts excluding
 * the NMIs (0x100 and 0x200) do not clear MSR[RI] at entry. The first thing
 * that an interrupt handler must do is save off a GPR into a scratch register,
 * and all interrupts on POWERNV (HV=1) use the HSPRG1 register as scratch.
 * Therefore an NMI can clobber an HV interrupt's live HSPRG1 without noticing
 * that it is non-reentrant, which leads to random data corruption.
 *
 * The solution is for NMI interrupts in HV mode to check if they originated
 * from these critical HV interrupt regions. If so, then mark them not
 * recoverable.
 *
 * An alternative would be for HV NMIs to use SPRG for scratch to avoid the
 * HSPRG1 clobber, however this would cause guest SPRG to be clobbered. Linux
 * guests should always have MSR[RI]=0 when its scratch SPRG is in use, so
 * that would work. However any other guest OS that may have the SPRG live
 * and MSR[RI]=1 could encounter silent corruption.
 *
 * Builds that do not support KVM could take this second option to increase
 * the recoverability of NMIs.
 */
noinstr void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_POWERNV
	unsigned long kbase = (unsigned long)_stext;
	unsigned long nip = regs->nip;

	if (!(regs->msr & MSR_RI))
		return;
	if (!(regs->msr & MSR_HV))
		return;
	if (regs->msr & MSR_PR)
		return;

	/*
	 * Now test if the interrupt has hit a range that may be using
	 * HSPRG1 without having RI=0 (i.e., an HSRR interrupt). The
	 * problem ranges all run un-relocated. Test real and virt modes
	 * at the same time by dropping the high bit of the nip (virt mode
	 * entry points still have the +0x4000 offset).
	 */
	nip &= ~0xc000000000000000ULL;
	if ((nip >= 0x500 && nip < 0x600) || (nip >= 0x4500 && nip < 0x4600))
		goto nonrecoverable;
	if ((nip >= 0x980 && nip < 0xa00) || (nip >= 0x4980 && nip < 0x4a00))
		goto nonrecoverable;
	if ((nip >= 0xe00 && nip < 0xec0) || (nip >= 0x4e00 && nip < 0x4ec0))
		goto nonrecoverable;
	if ((nip >= 0xf80 && nip < 0xfa0) || (nip >= 0x4f80 && nip < 0x4fa0))
		goto nonrecoverable;

	/* Trampoline code runs un-relocated so subtract kbase. */
	if (nip >= (unsigned long)(start_real_trampolines - kbase) &&
	    nip < (unsigned long)(end_real_trampolines - kbase))
		goto nonrecoverable;
	if (nip >= (unsigned long)(start_virt_trampolines - kbase) &&
	    nip < (unsigned long)(end_virt_trampolines - kbase))
		goto nonrecoverable;
	return;

nonrecoverable:
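	/*
	 * Clear MSR[RI] in the saved regs so the interrupted context is
	 * treated as unrecoverable, and invalidate the cached (H)SRR
	 * values since regs->msr is changed without regs_set_return_msr().
	 */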
	regs->msr &= ~MSR_RI;
	local_paca->hsrr_valid = 0;
	local_paca->srr_valid = 0;
#endif
}

DEFINE_INTERRUPT_HANDLER_NMI(system_reset_exception)
{
	unsigned long hsrr0, hsrr1;
	bool saved_hsrrs = false;

	/*
	 * System reset can interrupt code where HSRRs are live and MSR[RI]=1.
	 * The system reset interrupt itself may clobber HSRRs (e.g., to call
	 * OPAL), so save them here and restore them before returning.
	 *
	 * Machine checks don't need to save HSRRs, as the real mode handler
	 * is careful to avoid them, and the regular handler is not delivered
	 * as an NMI.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		hsrr0 = mfspr(SPRN_HSRR0);
		hsrr1 = mfspr(SPRN_HSRR1);
		saved_hsrrs = true;
	}

	hv_nmi_check_nonrecoverable(regs);

	__this_cpu_inc(irq_stat.sreset_irqs);

	/* See if any machine dependent calls */
	if (ppc_md.system_reset_exception) {
		if (ppc_md.system_reset_exception(regs))
			goto out;
	}

	if (debugger(regs))
		goto out;

	kmsg_dump(KMSG_DUMP_OOPS);
	/*
	 * A system reset is a request to dump, so we always send
	 * it through the crashdump code (if fadump or kdump are
	 * registered).
	 */
	crash_fadump(regs, "System Reset");

	crash_kexec(regs);

	/*
	 * We aren't the primary crash CPU. We need to send it
	 * to a holding pattern to avoid it ending up in the panic
	 * code.
	 */
	crash_kexec_secondary(regs);

	/*
	 * No debugger or crash dump registered, print logs then
	 * panic.
	 */
	die("System Reset", regs, SIGABRT);

	mdelay(2*MSEC_PER_SEC); /* Wait a little while for others to print */
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	nmi_panic(regs, "System Reset");

out:
#ifdef CONFIG_PPC_BOOK3S_64
	BUG_ON(get_paca()->in_nmi == 0);
	if (get_paca()->in_nmi > 1)
		die("Unrecoverable nested System Reset", regs, SIGABRT);
#endif
	/* Must die if the interrupt is not recoverable */
	if (regs_is_unrecoverable(regs)) {
		/* For the reason explained in die_mce, nmi_exit before die */
		nmi_exit();
		die("Unrecoverable System Reset", regs, SIGABRT);
	}

	if (saved_hsrrs) {
		mtspr(SPRN_HSRR0, hsrr0);
		mtspr(SPRN_HSRR1, hsrr1);
	}

	/* What should we do here? We could issue a shutdown or hard reset. */

	return 0;
}

/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * -- paulus.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

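	/*
	 * Only attempt a fixup when the cause was a machine check signal
	 * (0x80000) or a transfer error ack (0x40000) in SRR1, or when no
	 * reason bits are set at all (see machine_check_generic() below).
	 */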
	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == PPC_RAW_NOP())
			nip -= 2;
		else if (*nip == PPC_RAW_ISYNC())
			--nip;
		if (*nip == PPC_RAW_SYNC() || get_op(*nip) == OP_TRAP) {
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100)? "OUT to": "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			regs_set_recoverable(regs);
			regs_set_return_ip(regs, extable_fixup(entry));
			return 1;
		}
	}
#endif /* CONFIG_PPC32 */
	return 0;
}

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)	((regs)->esr)
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR
#define REASON_PREFIXED		0
#define REASON_BOUNDARY		0

/* single-step stuff */
#define single_stepping(regs)	(current->thread.debug.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.debug.dbcr0 &= ~DBCR0_IC)
#define clear_br_trace(regs)	do {} while(0)
#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define REASON_TM		SRR1_PROGTM
#define REASON_FP		SRR1_PROGFPE
#define REASON_ILLEGAL		SRR1_PROGILL
#define REASON_PRIVILEGED	SRR1_PROGPRIV
#define REASON_TRAP		SRR1_PROGTRAP
#define REASON_PREFIXED		SRR1_PREFIXED
#define REASON_BOUNDARY		SRR1_BOUNDARY

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	(regs_set_return_msr((regs), (regs)->msr & ~MSR_SE))
#define clear_br_trace(regs)	(regs_set_return_msr((regs), (regs)->msr & ~MSR_BE))
#endif

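/* ISA v3.1 prefixed instructions are 8 bytes long; all others are 4. */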
#define inst_length(reason)	(((reason) & REASON_PREFIXED) ? 8 : 4)

#if defined(CONFIG_PPC_E500)
int machine_check_e500mc(struct pt_regs *regs)
{
	unsigned long mcsr = mfspr(SPRN_MCSR);
	unsigned long pvr = mfspr(SPRN_PVR);
	unsigned long reason = mcsr;
	int recoverable = 1;

	if (reason & MCSR_LD) {
		recoverable = fsl_rio_mcheck_exception(regs);
		if (recoverable == 1)
			goto silent_out;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		pr_cont("Machine Check Signal\n");

	if (reason & MCSR_ICPERR) {
		pr_cont("Instruction Cache Parity Error\n");

		/*
		 * This is recoverable by invalidating the i-cache.
		 */
		mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
		while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
			;

		/*
		 * This will generally be accompanied by an instruction
		 * fetch error report -- only treat MCSR_IF as fatal
		 * if it wasn't due to an L1 parity error.
		 */
		reason &= ~MCSR_IF;
	}

	if (reason & MCSR_DCPERR_MC) {
		pr_cont("Data Cache Parity Error\n");

		/*
		 * In write shadow mode we auto-recover from the error, but it
		 * may still get logged and cause a machine check. We should
		 * only treat the non-write shadow case as non-recoverable.
		 */
		/* On e6500 core, L1 DCWS (Data cache write shadow mode) bit
		 * is not implemented but L1 data cache always runs in write
		 * shadow mode. Hence on data cache parity errors HW will
		 * automatically invalidate the L1 Data Cache.
		 */
		if (PVR_VER(pvr) != PVR_VER_E6500) {
			if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
				recoverable = 0;
		}
	}

	if (reason & MCSR_L2MMU_MHIT) {
		pr_cont("Hit on multiple TLB entries\n");
		recoverable = 0;
	}

	if (reason & MCSR_NMI)
		pr_cont("Non-maskable interrupt\n");

	if (reason & MCSR_IF) {
		pr_cont("Instruction Fetch Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LD) {
		pr_cont("Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_ST) {
		pr_cont("Store Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LDG) {
		pr_cont("Guarded Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_TLBSYNC)
		pr_cont("Simultaneous tlbsync operations\n");

	if (reason & MCSR_BSL2_ERR) {
		pr_cont("Level 2 Cache Error\n");
		recoverable = 0;
	}

	if (reason & MCSR_MAV) {
		u64 addr;

		addr = mfspr(SPRN_MCAR);
		addr |= (u64)mfspr(SPRN_MCARU) << 32;

		pr_cont("Machine Check %s Address: %#llx\n",
			reason & MCSR_MEA ? "Effective" : "Physical", addr);
	}

silent_out:
	mtspr(SPRN_MCSR, mcsr);
	return mfspr(SPRN_MCSR) == 0 && recoverable;
}

int machine_check_e500(struct pt_regs *regs)
{
	unsigned long reason = mfspr(SPRN_MCSR);

	if (reason & MCSR_BUS_RBERR) {
		if (fsl_rio_mcheck_exception(regs))
			return 1;
		if (fsl_pci_mcheck_exception(regs))
			return 1;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		pr_cont("Machine Check Signal\n");
	if (reason & MCSR_ICPERR)
		pr_cont("Instruction Cache Parity Error\n");
	if (reason & MCSR_DCP_PERR)
		pr_cont("Data Cache Push Parity Error\n");
	if (reason & MCSR_DCPERR)
		pr_cont("Data Cache Parity Error\n");
	if (reason & MCSR_BUS_IAERR)
		pr_cont("Bus - Instruction Address Error\n");
	if (reason & MCSR_BUS_RAERR)
		pr_cont("Bus - Read Address Error\n");
	if (reason & MCSR_BUS_WAERR)
		pr_cont("Bus - Write Address Error\n");
	if (reason & MCSR_BUS_IBERR)
		pr_cont("Bus - Instruction Data Error\n");
	if (reason & MCSR_BUS_RBERR)
		pr_cont("Bus - Read Data Bus Error\n");
	if (reason & MCSR_BUS_WBERR)
		pr_cont("Bus - Write Data Bus Error\n");
	if (reason & MCSR_BUS_IPERR)
		pr_cont("Bus - Instruction Parity Error\n");
	if (reason & MCSR_BUS_RPERR)
		pr_cont("Bus - Read Parity Error\n");

	return 0;
}

int machine_check_generic(struct pt_regs *regs)
{
	return 0;
}
#elif defined(CONFIG_PPC32)
int machine_check_generic(struct pt_regs *regs)
{
	unsigned long reason = regs->msr;

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from SRR1=%lx): ", reason);
	switch (reason & 0x601F0000) {
	case 0x80000:
		pr_cont("Machine check signal\n");
		break;
	case 0x40000:
	case 0x140000:	/* 7450 MSS error and TEA */
		pr_cont("Transfer error ack signal\n");
		break;
	case 0x20000:
		pr_cont("Data parity error signal\n");
		break;
	case 0x10000:
		pr_cont("Address parity error signal\n");
		break;
	case 0x20000000:
		pr_cont("L1 Data Cache error\n");
		break;
	case 0x40000000:
		pr_cont("L1 Instruction Cache error\n");
		break;
	case 0x00100000:
		pr_cont("L2 data cache parity error\n");
		break;
	default:
		pr_cont("Unknown values in msr\n");
	}
	return 0;
}
#endif /* everything else */

void die_mce(const char *str, struct pt_regs *regs, long err)
{
	/*
	 * The machine check wants to kill the interrupted context,
	 * but make_task_dead() checks for in_interrupt() and panics
	 * in that case, so exit the irq/nmi before calling die.
	 */
	if (in_nmi())
		nmi_exit();
	else
		irq_exit();
	die(str, regs, err);
}

/*
 * BOOK3S_64 does not usually call this handler as a non-maskable interrupt
 * (it uses its own early real-mode handler to handle the MCE proper
 * and then raises irq_work to call this handler when interrupts are
 * enabled). The only time when this is not true is if the early handler
 * is unrecoverable, then it does call this directly to try to get a
 * message out.
 */
static void __machine_check_exception(struct pt_regs *regs)
{
	int recover = 0;

	__this_cpu_inc(irq_stat.mce_exceptions);

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	/* See if any machine dependent calls. In theory, we would want
	 * to call the CPU first, and call the ppc_md. one if the CPU
	 * one returns a positive number. However there is existing code
	 * that assumes the board gets a first chance, so let's keep it
	 * that way for now and fix things later. --BenH.
	 */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);
	else if (cur_cpu_spec->machine_check)
		recover = cur_cpu_spec->machine_check(regs);

	if (recover > 0)
		goto bail;

	if (debugger_fault_handler(regs))
		goto bail;

	if (check_io_access(regs))
		goto bail;

	die_mce("Machine check", regs, SIGBUS);

bail:
	/* Must die if the interrupt is not recoverable */
	if (regs_is_unrecoverable(regs))
		die_mce("Unrecoverable Machine check", regs, SIGBUS);
}

#ifdef CONFIG_PPC_BOOK3S_64
DEFINE_INTERRUPT_HANDLER_RAW(machine_check_early_boot)
{
	udbg_printf("Machine check (early boot)\n");
	udbg_printf("SRR0=0x%016lx SRR1=0x%016lx\n", regs->nip, regs->msr);
	udbg_printf(" DAR=0x%016lx DSISR=0x%08lx\n", regs->dar, regs->dsisr);
	udbg_printf("  LR=0x%016lx R1=0x%08lx\n", regs->link, regs->gpr[1]);
	udbg_printf("------\n");
	die("Machine check (early boot)", regs, SIGBUS);
	for (;;)
		;
	return 0;
}

DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async)
{
	__machine_check_exception(regs);
}
#endif
DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
{
	__machine_check_exception(regs);

	return 0;
}

DEFINE_INTERRUPT_HANDLER(SMIException) /* async? */
{
	die("System Management Interrupt", regs, SIGABRT);
}

#ifdef CONFIG_VSX
static void p9_hmi_special_emu(struct pt_regs *regs)
{
	unsigned int ra, rb, t, i, sel, instr, rc;
	const void __user *addr;
	u8 vbuf[16] __aligned(16), *vdst;
	unsigned long ea, msr, msr_mask;
	bool swap;

	if (__get_user(instr, (unsigned int __user *)regs->nip))
		return;

	/*
	 * lxvb16x	opcode: 0x7c0006d8
	 * lxvd2x	opcode: 0x7c000698
	 * lxvh8x	opcode: 0x7c000658
	 * lxvw4x	opcode: 0x7c000618
	 */
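	/*
	 * The mask below ignores the two "sel" bits and the TX bit that
	 * distinguish the four loads above, so one compare matches them all.
	 */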
	if ((instr & 0xfc00073e) != 0x7c000618) {
		pr_devel("HMI vec emu: not vector CI %i:%s[%d] nip=%016lx"
			 " instr=%08x\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr);
		return;
	}

	/* Grab vector registers into the task struct */
	msr = regs->msr; /* Grab msr before we flush the bits */
	flush_vsx_to_thread(current);
	enable_kernel_altivec();

	/*
	 * Is userspace running with a different endian (this is rare but
	 * not impossible)
	 */
	swap = (msr & MSR_LE) != (MSR_KERNEL & MSR_LE);

	/* Decode the instruction */
	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	t = (instr >> 21) & 0x1f;
	if (instr & 1)
		vdst = (u8 *)&current->thread.vr_state.vr[t];
	else
		vdst = (u8 *)&current->thread.fp_state.fpr[t][0];

	/* Grab the vector address */
	ea = regs->gpr[rb] + (ra ? regs->gpr[ra] : 0);
	if (is_32bit_task())
		ea &= 0xfffffffful;
	addr = (__force const void __user *)ea;

	/* Check it */
	if (!access_ok(addr, 16)) {
		pr_devel("HMI vec emu: bad access %i:%s[%d] nip=%016lx"
			 " instr=%08x addr=%016lx\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr, (unsigned long)addr);
		return;
	}

	/* Read the vector */
	rc = 0;
	if ((unsigned long)addr & 0xfUL)
		/* unaligned case */
		rc = __copy_from_user_inatomic(vbuf, addr, 16);
	else
		__get_user_atomic_128_aligned(vbuf, addr, rc);
	if (rc) {
		pr_devel("HMI vec emu: page fault %i:%s[%d] nip=%016lx"
			 " instr=%08x addr=%016lx\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr, (unsigned long)addr);
		return;
	}

	pr_devel("HMI vec emu: emulated vector CI %i:%s[%d] nip=%016lx"
		 " instr=%08x addr=%016lx\n",
		 smp_processor_id(), current->comm, current->pid, regs->nip,
		 instr, (unsigned long) addr);

	/* Grab instruction "selector" */
	sel = (instr >> 6) & 3;

	/*
	 * Check to make sure the facility is actually enabled. This
	 * could happen if we get a false positive hit.
	 *
	 * lxvd2x/lxvw4x always check MSR VSX sel = 0,2
	 * lxvh8x/lxvb16x check MSR VSX or VEC depending on VSR used sel = 1,3
	 */
	msr_mask = MSR_VSX;
	if ((sel & 1) && (instr & 1)) /* lxvh8x & lxvb16x + VSR >= 32 */
		msr_mask = MSR_VEC;
	if (!(msr & msr_mask)) {
		pr_devel("HMI vec emu: MSR fac clear %i:%s[%d] nip=%016lx"
			 " instr=%08x msr:%016lx\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr, msr);
		return;
	}

	/* Do logging here before we modify sel based on endian */
	switch (sel) {
	case 0:	/* lxvw4x */
		PPC_WARN_EMULATED(lxvw4x, regs);
		break;
	case 1: /* lxvh8x */
		PPC_WARN_EMULATED(lxvh8x, regs);
		break;
	case 2: /* lxvd2x */
		PPC_WARN_EMULATED(lxvd2x, regs);
		break;
	case 3: /* lxvb16x */
		PPC_WARN_EMULATED(lxvb16x, regs);
		break;
	}

#ifdef __LITTLE_ENDIAN__
	/*
	 * An LE kernel stores the vector in the task struct as an LE
	 * byte array (effectively swapping both the components and
	 * the content of the components). Those instructions expect
	 * the components to remain in ascending address order, so we
	 * swap them back.
	 *
	 * If we are running a BE user space, the expectation is that
	 * of a simple memcpy, so forcing the emulation to look like
	 * a lxvb16x should do the trick.
	 */
	if (swap)
		sel = 3;

	switch (sel) {
	case 0:	/* lxvw4x */
		for (i = 0; i < 4; i++)
			((u32 *)vdst)[i] = ((u32 *)vbuf)[3-i];
		break;
	case 1: /* lxvh8x */
		for (i = 0; i < 8; i++)
			((u16 *)vdst)[i] = ((u16 *)vbuf)[7-i];
		break;
	case 2: /* lxvd2x */
		for (i = 0; i < 2; i++)
			((u64 *)vdst)[i] = ((u64 *)vbuf)[1-i];
		break;
	case 3: /* lxvb16x */
		for (i = 0; i < 16; i++)
			vdst[i] = vbuf[15-i];
		break;
	}
#else /* __LITTLE_ENDIAN__ */
	/* On a big endian kernel, a BE userspace only needs a memcpy */
	if (!swap)
		sel = 3;

	/* Otherwise, we need to swap the content of the components */
	switch (sel) {
	case 0:	/* lxvw4x */
		for (i = 0; i < 4; i++)
			((u32 *)vdst)[i] = cpu_to_le32(((u32 *)vbuf)[i]);
		break;
	case 1: /* lxvh8x */
		for (i = 0; i < 8; i++)
			((u16 *)vdst)[i] = cpu_to_le16(((u16 *)vbuf)[i]);
		break;
	case 2: /* lxvd2x */
		for (i = 0; i < 2; i++)
			((u64 *)vdst)[i] = cpu_to_le64(((u64 *)vbuf)[i]);
		break;
	case 3: /* lxvb16x */
		memcpy(vdst, vbuf, 16);
		break;
	}
#endif /* !__LITTLE_ENDIAN__ */

	/* Go to next instruction */
	regs_add_return_ip(regs, 4);
}
#endif /* CONFIG_VSX */
10645080332cSMichael Neuling
10653a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER_ASYNC(handle_hmi_exception)
10660869b6fdSMahesh Salgaonkar {
10670869b6fdSMahesh Salgaonkar struct pt_regs *old_regs;
10680869b6fdSMahesh Salgaonkar
10690869b6fdSMahesh Salgaonkar old_regs = set_irq_regs(regs);
10700869b6fdSMahesh Salgaonkar
10715080332cSMichael Neuling #ifdef CONFIG_VSX
10725080332cSMichael Neuling /* The real-mode handler flagged that P9 special emulation is needed */
10735080332cSMichael Neuling if (local_paca->hmi_p9_special_emu) {
10745080332cSMichael Neuling local_paca->hmi_p9_special_emu = 0;
10755080332cSMichael Neuling
10765080332cSMichael Neuling /*
10775080332cSMichael Neuling * We don't want to take page faults while doing the
10785080332cSMichael Neuling * emulation, we just replay the instruction if necessary.
10795080332cSMichael Neuling */
10805080332cSMichael Neuling pagefault_disable();
10815080332cSMichael Neuling p9_hmi_special_emu(regs);
10825080332cSMichael Neuling pagefault_enable();
10835080332cSMichael Neuling }
10845080332cSMichael Neuling #endif /* CONFIG_VSX */
10855080332cSMichael Neuling
10860869b6fdSMahesh Salgaonkar if (ppc_md.handle_hmi_exception)
10870869b6fdSMahesh Salgaonkar ppc_md.handle_hmi_exception(regs);
10880869b6fdSMahesh Salgaonkar
10890869b6fdSMahesh Salgaonkar set_irq_regs(old_regs);
10900869b6fdSMahesh Salgaonkar }
10910869b6fdSMahesh Salgaonkar
10923a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(unknown_exception)
109314cf11afSPaul Mackerras {
109414cf11afSPaul Mackerras printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
109514cf11afSPaul Mackerras regs->nip, regs->msr, regs->trap);
109614cf11afSPaul Mackerras
1097e821fa42SEric W. Biederman _exception(SIGTRAP, regs, TRAP_UNK, 0);
109814cf11afSPaul Mackerras }
109914cf11afSPaul Mackerras
11003a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception)
11016c6aee00SNicholas Piggin {
11026c6aee00SNicholas Piggin printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
11036c6aee00SNicholas Piggin regs->nip, regs->msr, regs->trap);
11046c6aee00SNicholas Piggin
11056c6aee00SNicholas Piggin _exception(SIGTRAP, regs, TRAP_UNK, 0);
11066c6aee00SNicholas Piggin }
11076c6aee00SNicholas Piggin
11083db8aa10SNicholas Piggin DEFINE_INTERRUPT_HANDLER_NMI(unknown_nmi_exception)
11093db8aa10SNicholas Piggin {
11103db8aa10SNicholas Piggin printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
11113db8aa10SNicholas Piggin regs->nip, regs->msr, regs->trap);
11123db8aa10SNicholas Piggin
11133db8aa10SNicholas Piggin _exception(SIGTRAP, regs, TRAP_UNK, 0);
11143db8aa10SNicholas Piggin
11153db8aa10SNicholas Piggin return 0;
11163db8aa10SNicholas Piggin }
11173db8aa10SNicholas Piggin
11183a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(instruction_breakpoint_exception)
111914cf11afSPaul Mackerras {
112014cf11afSPaul Mackerras if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
112114cf11afSPaul Mackerras 5, SIGTRAP) == NOTIFY_STOP)
1122540d4d34SNicholas Piggin return;
112314cf11afSPaul Mackerras if (debugger_iabr_match(regs))
1124540d4d34SNicholas Piggin return;
112514cf11afSPaul Mackerras _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
112614cf11afSPaul Mackerras }
112714cf11afSPaul Mackerras
11283a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(RunModeException)
112914cf11afSPaul Mackerras {
1130e821fa42SEric W. Biederman _exception(SIGTRAP, regs, TRAP_UNK, 0);
113114cf11afSPaul Mackerras }
113214cf11afSPaul Mackerras
113301fcac8eSChristophe Leroy static void __single_step_exception(struct pt_regs *regs)
113414cf11afSPaul Mackerras {
11352538c2d0SK.Prasad clear_single_step(regs);
11360e524e76SMatt Evans clear_br_trace(regs);
113714cf11afSPaul Mackerras
11386cc89badSNaveen N. Rao if (kprobe_post_handler(regs))
11396cc89badSNaveen N. Rao return;
11406cc89badSNaveen N. Rao
114114cf11afSPaul Mackerras if (notify_die(DIE_SSTEP, "single_step", regs, 5,
114214cf11afSPaul Mackerras 5, SIGTRAP) == NOTIFY_STOP)
1143540d4d34SNicholas Piggin return;
114414cf11afSPaul Mackerras if (debugger_sstep(regs))
1145540d4d34SNicholas Piggin return;
114614cf11afSPaul Mackerras
114714cf11afSPaul Mackerras _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
114814cf11afSPaul Mackerras }
114914cf11afSPaul Mackerras
115001fcac8eSChristophe Leroy DEFINE_INTERRUPT_HANDLER(single_step_exception)
115101fcac8eSChristophe Leroy {
115201fcac8eSChristophe Leroy __single_step_exception(regs);
115301fcac8eSChristophe Leroy }
115401fcac8eSChristophe Leroy
115514cf11afSPaul Mackerras /*
115614cf11afSPaul Mackerras * After we have successfully emulated an instruction, we have to
115714cf11afSPaul Mackerras * check if the instruction was being single-stepped, and if so,
115814cf11afSPaul Mackerras * pretend we got a single-step exception. This was pointed out
115914cf11afSPaul Mackerras * by Kumar Gala. -- paulus
116014cf11afSPaul Mackerras */
11615222a1d5SChristophe Leroy void emulate_single_step(struct pt_regs *regs)
116214cf11afSPaul Mackerras {
11632538c2d0SK.Prasad if (single_stepping(regs))
116401fcac8eSChristophe Leroy __single_step_exception(regs);
116514cf11afSPaul Mackerras }
116614cf11afSPaul Mackerras
11678d5e9875SChristophe Leroy #ifdef CONFIG_PPC_FPU_REGS
11685fad293bSKumar Gala static inline int __parse_fpscr(unsigned long fpscr)
1169dc1c1ca3SStephen Rothwell {
1170aeb1c0f6SEric W. Biederman int ret = FPE_FLTUNK;
1171dc1c1ca3SStephen Rothwell
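	/*
	 * Each check below pairs an FPSCR exception enable bit with its
	 * corresponding sticky exception bit, so only exceptions the task
	 * actually enabled are mapped to a specific FPE si_code; any other
	 * cause falls through as FPE_FLTUNK.
	 */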
1172dc1c1ca3SStephen Rothwell /* Invalid operation */
1173dc1c1ca3SStephen Rothwell if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
11745fad293bSKumar Gala ret = FPE_FLTINV;
1175dc1c1ca3SStephen Rothwell
1176dc1c1ca3SStephen Rothwell /* Overflow */
1177dc1c1ca3SStephen Rothwell else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
11785fad293bSKumar Gala ret = FPE_FLTOVF;
1179dc1c1ca3SStephen Rothwell
1180dc1c1ca3SStephen Rothwell /* Underflow */
1181dc1c1ca3SStephen Rothwell else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
11825fad293bSKumar Gala ret = FPE_FLTUND;
1183dc1c1ca3SStephen Rothwell
1184dc1c1ca3SStephen Rothwell /* Divide by zero */
1185dc1c1ca3SStephen Rothwell else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
11865fad293bSKumar Gala ret = FPE_FLTDIV;
1187dc1c1ca3SStephen Rothwell
1188dc1c1ca3SStephen Rothwell /* Inexact result */
1189dc1c1ca3SStephen Rothwell else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
11905fad293bSKumar Gala ret = FPE_FLTRES;
11915fad293bSKumar Gala
11925fad293bSKumar Gala return ret;
11935fad293bSKumar Gala }
11948d5e9875SChristophe Leroy #endif
11955fad293bSKumar Gala
11965fad293bSKumar Gala static void parse_fpe(struct pt_regs *regs)
11975fad293bSKumar Gala {
11985fad293bSKumar Gala int code = 0;
11995fad293bSKumar Gala
12005fad293bSKumar Gala flush_fp_to_thread(current);
12015fad293bSKumar Gala
1202b6254cedSChristophe Leroy #ifdef CONFIG_PPC_FPU_REGS
1203de79f7b9SPaul Mackerras code = __parse_fpscr(current->thread.fp_state.fpscr);
1204b6254cedSChristophe Leroy #endif
1205dc1c1ca3SStephen Rothwell
1206dc1c1ca3SStephen Rothwell _exception(SIGFPE, regs, code, regs->nip);
1207dc1c1ca3SStephen Rothwell }
1208dc1c1ca3SStephen Rothwell
1209dc1c1ca3SStephen Rothwell /*
1210dc1c1ca3SStephen Rothwell * Illegal instruction emulation support. Originally written to
121114cf11afSPaul Mackerras * provide the PVR to user applications using the mfspr rd, PVR.
121214cf11afSPaul Mackerras * Return non-zero if we can't emulate, or -EFAULT if the associated
121314cf11afSPaul Mackerras * memory access caused an access fault. Return zero on success.
121414cf11afSPaul Mackerras *
121514cf11afSPaul Mackerras * There are a couple of ways to do this, either "decode" the instruction
121614cf11afSPaul Mackerras * or directly match lots of bits. In this case, matching lots of
121714cf11afSPaul Mackerras * bits is faster and easier.
121886417780SPaul Mackerras *
121914cf11afSPaul Mackerras */
122014cf11afSPaul Mackerras static int emulate_string_inst(struct pt_regs *regs, u32 instword)
122114cf11afSPaul Mackerras {
122214cf11afSPaul Mackerras u8 rT = (instword >> 21) & 0x1f;
122314cf11afSPaul Mackerras u8 rA = (instword >> 16) & 0x1f;
122414cf11afSPaul Mackerras u8 NB_RB = (instword >> 11) & 0x1f;
122514cf11afSPaul Mackerras u32 num_bytes;
122614cf11afSPaul Mackerras unsigned long EA;
122714cf11afSPaul Mackerras int pos = 0;
122814cf11afSPaul Mackerras
122914cf11afSPaul Mackerras /* Early out if we are an invalid form of lswx */
123016c57b36SKumar Gala if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
123114cf11afSPaul Mackerras if ((rT == rA) || (rT == NB_RB))
123214cf11afSPaul Mackerras return -EINVAL;
123314cf11afSPaul Mackerras
123414cf11afSPaul Mackerras EA = (rA == 0) ? 0 : regs->gpr[rA];
123514cf11afSPaul Mackerras
123616c57b36SKumar Gala switch (instword & PPC_INST_STRING_MASK) {
123716c57b36SKumar Gala case PPC_INST_LSWX:
123816c57b36SKumar Gala case PPC_INST_STSWX:
123914cf11afSPaul Mackerras EA += NB_RB;
124014cf11afSPaul Mackerras num_bytes = regs->xer & 0x7f;
124114cf11afSPaul Mackerras break;
124216c57b36SKumar Gala case PPC_INST_LSWI:
124316c57b36SKumar Gala case PPC_INST_STSWI:
124414cf11afSPaul Mackerras num_bytes = (NB_RB == 0) ? 32 : NB_RB;
124514cf11afSPaul Mackerras break;
124614cf11afSPaul Mackerras default:
124714cf11afSPaul Mackerras return -EINVAL;
124814cf11afSPaul Mackerras }
124914cf11afSPaul Mackerras
125014cf11afSPaul Mackerras while (num_bytes != 0)
125114cf11afSPaul Mackerras {
125214cf11afSPaul Mackerras u8 val;
125314cf11afSPaul Mackerras u32 shift = 8 * (3 - (pos & 0x3));
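		/*
		 * String instructions fill (or drain) each GPR four bytes at
		 * a time, most significant byte first, so byte 'pos' of the
		 * current register lives at bit offset 8 * (3 - pos); the
		 * same shift is used for both the load and store cases below.
		 */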
125414cf11afSPaul Mackerras
125580aa0fb4SJames Yang /* if process is 32-bit, clear upper 32 bits of EA */
125680aa0fb4SJames Yang if ((regs->msr & MSR_64BIT) == 0)
125780aa0fb4SJames Yang EA &= 0xFFFFFFFF;
125880aa0fb4SJames Yang
125916c57b36SKumar Gala switch ((instword & PPC_INST_STRING_MASK)) {
126016c57b36SKumar Gala case PPC_INST_LSWX:
126116c57b36SKumar Gala case PPC_INST_LSWI:
126214cf11afSPaul Mackerras if (get_user(val, (u8 __user *)EA))
126314cf11afSPaul Mackerras return -EFAULT;
126414cf11afSPaul Mackerras /* first time updating this reg,
126514cf11afSPaul Mackerras * zero it out */
126614cf11afSPaul Mackerras if (pos == 0)
126714cf11afSPaul Mackerras regs->gpr[rT] = 0;
126814cf11afSPaul Mackerras regs->gpr[rT] |= val << shift;
126914cf11afSPaul Mackerras break;
127016c57b36SKumar Gala case PPC_INST_STSWI:
127116c57b36SKumar Gala case PPC_INST_STSWX:
127214cf11afSPaul Mackerras val = regs->gpr[rT] >> shift;
127314cf11afSPaul Mackerras if (put_user(val, (u8 __user *)EA))
127414cf11afSPaul Mackerras return -EFAULT;
127514cf11afSPaul Mackerras break;
127614cf11afSPaul Mackerras }
127714cf11afSPaul Mackerras /* move EA to next address */
127814cf11afSPaul Mackerras EA += 1;
127914cf11afSPaul Mackerras num_bytes--;
128014cf11afSPaul Mackerras
128114cf11afSPaul Mackerras /* manage our position within the register */
128214cf11afSPaul Mackerras if (++pos == 4) {
128314cf11afSPaul Mackerras pos = 0;
128414cf11afSPaul Mackerras if (++rT == 32)
128514cf11afSPaul Mackerras rT = 0;
128614cf11afSPaul Mackerras }
128714cf11afSPaul Mackerras }
128814cf11afSPaul Mackerras
128914cf11afSPaul Mackerras return 0;
129014cf11afSPaul Mackerras }
129114cf11afSPaul Mackerras
1292c3412dcbSWill Schmidt static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
1293c3412dcbSWill Schmidt {
1294c3412dcbSWill Schmidt u32 ra,rs;
1295c3412dcbSWill Schmidt unsigned long tmp;
1296c3412dcbSWill Schmidt
1297c3412dcbSWill Schmidt ra = (instword >> 16) & 0x1f;
1298c3412dcbSWill Schmidt rs = (instword >> 21) & 0x1f;
1299c3412dcbSWill Schmidt
1300c3412dcbSWill Schmidt tmp = regs->gpr[rs];
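	/*
	 * Parallel (SWAR) bit count: fold 1-bit, then 2-bit, then 4-bit
	 * fields so that each byte of 'tmp' ends up holding the number of
	 * 1-bits in the corresponding byte of rS.  The per-byte counts are
	 * deliberately not summed across bytes, matching popcntb semantics.
	 */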
1301c3412dcbSWill Schmidt tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
1302c3412dcbSWill Schmidt tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
1303c3412dcbSWill Schmidt tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
1304c3412dcbSWill Schmidt regs->gpr[ra] = tmp;
1305c3412dcbSWill Schmidt
1306c3412dcbSWill Schmidt return 0;
1307c3412dcbSWill Schmidt }
1308c3412dcbSWill Schmidt
1309c1469f13SKumar Gala static int emulate_isel(struct pt_regs *regs, u32 instword)
1310c1469f13SKumar Gala {
1311c1469f13SKumar Gala u8 rT = (instword >> 21) & 0x1f;
1312c1469f13SKumar Gala u8 rA = (instword >> 16) & 0x1f;
1313c1469f13SKumar Gala u8 rB = (instword >> 11) & 0x1f;
1314c1469f13SKumar Gala u8 BC = (instword >> 6) & 0x1f;
1315c1469f13SKumar Gala u8 bit;
1316c1469f13SKumar Gala unsigned long tmp;
1317c1469f13SKumar Gala
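	/*
	 * isel rT,rA,rB,BC: if CR bit BC is set, rT gets rA (or the literal
	 * value 0 when rA is r0), otherwise rT gets rB.  CR bits are
	 * numbered from the most significant end of the 32-bit CR, hence
	 * the (31 - BC) shift.
	 */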
1318c1469f13SKumar Gala tmp = (rA == 0) ? 0 : regs->gpr[rA];
1319c1469f13SKumar Gala bit = (regs->ccr >> (31 - BC)) & 0x1;
1320c1469f13SKumar Gala
1321c1469f13SKumar Gala regs->gpr[rT] = bit ? tmp : regs->gpr[rB];
1322c1469f13SKumar Gala
1323c1469f13SKumar Gala return 0;
1324c1469f13SKumar Gala }
1325c1469f13SKumar Gala
13266ce6c629SMichael Neuling #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
13276ce6c629SMichael Neuling static inline bool tm_abort_check(struct pt_regs *regs, int cause)
13286ce6c629SMichael Neuling {
13296ce6c629SMichael Neuling /* If we're emulating a load/store in an active transaction, we cannot
13306ce6c629SMichael Neuling * emulate it as the kernel operates in transaction suspended context.
13316ce6c629SMichael Neuling * We need to abort the transaction. This creates a persistent TM
13326ce6c629SMichael Neuling * abort so tell the user what caused it with a new code.
13336ce6c629SMichael Neuling */
13346ce6c629SMichael Neuling if (MSR_TM_TRANSACTIONAL(regs->msr)) {
13356ce6c629SMichael Neuling tm_enable();
13366ce6c629SMichael Neuling tm_abort(cause);
13376ce6c629SMichael Neuling return true;
13386ce6c629SMichael Neuling }
13396ce6c629SMichael Neuling return false;
13406ce6c629SMichael Neuling }
13416ce6c629SMichael Neuling #else
13426ce6c629SMichael Neuling static inline bool tm_abort_check(struct pt_regs *regs, int reason)
13436ce6c629SMichael Neuling {
13446ce6c629SMichael Neuling return false;
13456ce6c629SMichael Neuling }
13466ce6c629SMichael Neuling #endif
13476ce6c629SMichael Neuling
134814cf11afSPaul Mackerras static int emulate_instruction(struct pt_regs *regs)
134914cf11afSPaul Mackerras {
135014cf11afSPaul Mackerras u32 instword;
135114cf11afSPaul Mackerras u32 rd;
135214cf11afSPaul Mackerras
13534288e343SAnton Blanchard if (!user_mode(regs))
135414cf11afSPaul Mackerras return -EINVAL;
135514cf11afSPaul Mackerras
135614cf11afSPaul Mackerras if (get_user(instword, (u32 __user *)(regs->nip)))
135714cf11afSPaul Mackerras return -EFAULT;
135814cf11afSPaul Mackerras
135914cf11afSPaul Mackerras /* Emulate the mfspr rD, PVR. */
136016c57b36SKumar Gala if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
1361eecff81dSAnton Blanchard PPC_WARN_EMULATED(mfpvr, regs);
136214cf11afSPaul Mackerras rd = (instword >> 21) & 0x1f;
136314cf11afSPaul Mackerras regs->gpr[rd] = mfspr(SPRN_PVR);
136414cf11afSPaul Mackerras return 0;
136514cf11afSPaul Mackerras }
136614cf11afSPaul Mackerras
136714cf11afSPaul Mackerras /* Emulating the dcba insn is just a no-op. */
136880947e7cSGeert Uytterhoeven if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
1369eecff81dSAnton Blanchard PPC_WARN_EMULATED(dcba, regs);
137014cf11afSPaul Mackerras return 0;
137180947e7cSGeert Uytterhoeven }
137214cf11afSPaul Mackerras
137314cf11afSPaul Mackerras /* Emulate the mcrxr insn. */
137416c57b36SKumar Gala if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
137586417780SPaul Mackerras int shift = (instword >> 21) & 0x1c;
137614cf11afSPaul Mackerras unsigned long msk = 0xf0000000UL >> shift;
137714cf11afSPaul Mackerras
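		/*
		 * 'shift' is 4 * crfD, so 'msk' selects the destination CR
		 * field: the top nibble of XER (SO, OV, CA plus a reserved
		 * bit) is copied into that field and then cleared in XER.
		 */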
1378eecff81dSAnton Blanchard PPC_WARN_EMULATED(mcrxr, regs);
137914cf11afSPaul Mackerras regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
138014cf11afSPaul Mackerras regs->xer &= ~0xf0000000UL;
138114cf11afSPaul Mackerras return 0;
138214cf11afSPaul Mackerras }
138314cf11afSPaul Mackerras
138414cf11afSPaul Mackerras /* Emulate load/store string insn. */
138580947e7cSGeert Uytterhoeven if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
13866ce6c629SMichael Neuling if (tm_abort_check(regs,
13876ce6c629SMichael Neuling TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
13886ce6c629SMichael Neuling return -EINVAL;
1389eecff81dSAnton Blanchard PPC_WARN_EMULATED(string, regs);
139014cf11afSPaul Mackerras return emulate_string_inst(regs, instword);
139180947e7cSGeert Uytterhoeven }
139214cf11afSPaul Mackerras
1393c3412dcbSWill Schmidt /* Emulate the popcntb (Population Count Bytes) instruction. */
139416c57b36SKumar Gala if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
1395eecff81dSAnton Blanchard PPC_WARN_EMULATED(popcntb, regs);
1396c3412dcbSWill Schmidt return emulate_popcntb_inst(regs, instword);
1397c3412dcbSWill Schmidt }
1398c3412dcbSWill Schmidt
1399c1469f13SKumar Gala /* Emulate isel (Integer Select) instruction */
140016c57b36SKumar Gala if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
1401eecff81dSAnton Blanchard PPC_WARN_EMULATED(isel, regs);
1402c1469f13SKumar Gala return emulate_isel(regs, instword);
1403c1469f13SKumar Gala }
1404c1469f13SKumar Gala
14059863c28aSJames Yang /* Emulate sync instruction variants */
14069863c28aSJames Yang if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
14079863c28aSJames Yang PPC_WARN_EMULATED(sync, regs);
14089863c28aSJames Yang asm volatile("sync");
14099863c28aSJames Yang return 0;
14109863c28aSJames Yang }
14119863c28aSJames Yang
1412efcac658SAlexey Kardashevskiy #ifdef CONFIG_PPC64
1413efcac658SAlexey Kardashevskiy /* Emulate the mfspr rD, DSCR. */
141473d2fb75SAnton Blanchard if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
141573d2fb75SAnton Blanchard PPC_INST_MFSPR_DSCR_USER) ||
141673d2fb75SAnton Blanchard ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
141773d2fb75SAnton Blanchard PPC_INST_MFSPR_DSCR)) &&
1418efcac658SAlexey Kardashevskiy cpu_has_feature(CPU_FTR_DSCR)) {
1419efcac658SAlexey Kardashevskiy PPC_WARN_EMULATED(mfdscr, regs);
1420efcac658SAlexey Kardashevskiy rd = (instword >> 21) & 0x1f;
1421efcac658SAlexey Kardashevskiy regs->gpr[rd] = mfspr(SPRN_DSCR);
1422efcac658SAlexey Kardashevskiy return 0;
1423efcac658SAlexey Kardashevskiy }
1424efcac658SAlexey Kardashevskiy /* Emulate the mtspr DSCR, rD. */
142573d2fb75SAnton Blanchard if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
142673d2fb75SAnton Blanchard PPC_INST_MTSPR_DSCR_USER) ||
142773d2fb75SAnton Blanchard ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
142873d2fb75SAnton Blanchard PPC_INST_MTSPR_DSCR)) &&
1429efcac658SAlexey Kardashevskiy cpu_has_feature(CPU_FTR_DSCR)) {
1430efcac658SAlexey Kardashevskiy PPC_WARN_EMULATED(mtdscr, regs);
1431efcac658SAlexey Kardashevskiy rd = (instword >> 21) & 0x1f;
143200ca0de0SAnton Blanchard current->thread.dscr = regs->gpr[rd];
1433efcac658SAlexey Kardashevskiy current->thread.dscr_inherit = 1;
143400ca0de0SAnton Blanchard mtspr(SPRN_DSCR, current->thread.dscr);
1435efcac658SAlexey Kardashevskiy return 0;
1436efcac658SAlexey Kardashevskiy }
1437efcac658SAlexey Kardashevskiy #endif
1438efcac658SAlexey Kardashevskiy
143914cf11afSPaul Mackerras return -EINVAL;
144014cf11afSPaul Mackerras }
144114cf11afSPaul Mackerras
1442*641c67d5SMichael Ellerman #ifdef CONFIG_GENERIC_BUG
144373c9ceabSJeremy Fitzhardinge int is_valid_bugaddr(unsigned long addr)
144414cf11afSPaul Mackerras {
144573c9ceabSJeremy Fitzhardinge return is_kernel_addr(addr);
144614cf11afSPaul Mackerras }
1447*641c67d5SMichael Ellerman #endif
144814cf11afSPaul Mackerras
14493a3b5aa6SKevin Hao #ifdef CONFIG_MATH_EMULATION
14503a3b5aa6SKevin Hao static int emulate_math(struct pt_regs *regs)
14513a3b5aa6SKevin Hao {
14523a3b5aa6SKevin Hao int ret;
14533a3b5aa6SKevin Hao
14543a3b5aa6SKevin Hao ret = do_mathemu(regs);
14553a3b5aa6SKevin Hao if (ret >= 0)
14563a3b5aa6SKevin Hao PPC_WARN_EMULATED(math, regs);
14573a3b5aa6SKevin Hao
14583a3b5aa6SKevin Hao switch (ret) {
14593a3b5aa6SKevin Hao case 0:
14603a3b5aa6SKevin Hao emulate_single_step(regs);
14613a3b5aa6SKevin Hao return 0;
14623a3b5aa6SKevin Hao case 1: {
14633a3b5aa6SKevin Hao int code = 0;
1464de79f7b9SPaul Mackerras code = __parse_fpscr(current->thread.fp_state.fpscr);
14653a3b5aa6SKevin Hao _exception(SIGFPE, regs, code, regs->nip);
14663a3b5aa6SKevin Hao return 0;
14673a3b5aa6SKevin Hao }
14683a3b5aa6SKevin Hao case -EFAULT:
14693a3b5aa6SKevin Hao _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
14703a3b5aa6SKevin Hao return 0;
14713a3b5aa6SKevin Hao }
14723a3b5aa6SKevin Hao
14733a3b5aa6SKevin Hao return -1;
14743a3b5aa6SKevin Hao }
14753a3b5aa6SKevin Hao #else
14763a3b5aa6SKevin Hao static inline int emulate_math(struct pt_regs *regs) { return -1; }
14773a3b5aa6SKevin Hao #endif
14783a3b5aa6SKevin Hao
1479fd3f1e0fSNicholas Piggin static void do_program_check(struct pt_regs *regs)
148014cf11afSPaul Mackerras {
148114cf11afSPaul Mackerras unsigned int reason = get_reason(regs);
148214cf11afSPaul Mackerras
1483aa42c69cSKim Phillips /* We can now get here via an FP Unavailable exception if the core
148404903a30SKumar Gala * has no FPU; in that case the reason flags will be 0 */
148514cf11afSPaul Mackerras
148614cf11afSPaul Mackerras if (reason & REASON_FP) {
148714cf11afSPaul Mackerras /* IEEE FP exception */
1488dc1c1ca3SStephen Rothwell parse_fpe(regs);
1489fd3f1e0fSNicholas Piggin return;
14908dad3f92SPaul Mackerras }
14918dad3f92SPaul Mackerras if (reason & REASON_TRAP) {
1492a4c3f909SBalbir Singh unsigned long bugaddr;
1493ba797b28SJason Wessel /* Debugger is first in line to stop recursive faults in
1494ba797b28SJason Wessel * rcu_lock, notify_die, or atomic_notifier_call_chain */
1495ba797b28SJason Wessel if (debugger_bpt(regs))
1496fd3f1e0fSNicholas Piggin return;
1497ba797b28SJason Wessel
14986cc89badSNaveen N. Rao if (kprobe_handler(regs))
1499fd3f1e0fSNicholas Piggin return;
15006cc89badSNaveen N. Rao
150114cf11afSPaul Mackerras /* trap exception */
1502dc1c1ca3SStephen Rothwell if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
1503dc1c1ca3SStephen Rothwell == NOTIFY_STOP)
1504fd3f1e0fSNicholas Piggin return;
150573c9ceabSJeremy Fitzhardinge
1506a4c3f909SBalbir Singh bugaddr = regs->nip;
1507a4c3f909SBalbir Singh /*
1508a4c3f909SBalbir Singh * Fixup bugaddr for BUG_ON() in real mode
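	 * (with MSR_IR clear the NIP is a real address, so adding
	 * PAGE_OFFSET recovers the linear-map virtual address that the
	 * bug table records)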
1509a4c3f909SBalbir Singh */
1510a4c3f909SBalbir Singh if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR))
1511a4c3f909SBalbir Singh bugaddr += PAGE_OFFSET;
1512a4c3f909SBalbir Singh
151373c9ceabSJeremy Fitzhardinge if (!(regs->msr & MSR_PR) && /* not user-mode */
1514a4c3f909SBalbir Singh report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
1515b49e578bSChristophe Leroy regs_add_return_ip(regs, 4);
1516fd3f1e0fSNicholas Piggin return;
151714cf11afSPaul Mackerras }
15185bcba4e6SBenjamin Gray
1519c3f43096SBenjamin Gray /* User mode considers other cases after enabling IRQs */
1520c3f43096SBenjamin Gray if (!user_mode(regs)) {
15218dad3f92SPaul Mackerras _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
1522fd3f1e0fSNicholas Piggin return;
15238dad3f92SPaul Mackerras }
1524c3f43096SBenjamin Gray }
1525bc2a9408SMichael Neuling #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1526bc2a9408SMichael Neuling if (reason & REASON_TM) {
1527bc2a9408SMichael Neuling /* This is a TM "Bad Thing Exception" program check.
1528bc2a9408SMichael Neuling * This occurs when:
1529bc2a9408SMichael Neuling * - An rfid/hrfid/mtmsrd attempts to cause an illegal
1530bc2a9408SMichael Neuling * transition in TM states.
1531bc2a9408SMichael Neuling * - A trechkpt is attempted when transactional.
1532bc2a9408SMichael Neuling * - A treclaim is attempted when non transactional.
1533bc2a9408SMichael Neuling * - A tend is illegally attempted.
1534bc2a9408SMichael Neuling * - writing a TM SPR when transactional.
1535632f0574SMichael Ellerman *
1536632f0574SMichael Ellerman * If usermode caused this, it's done something illegal and
1537bc2a9408SMichael Neuling * gets a SIGILL slap on the wrist. We call it an illegal
1538bc2a9408SMichael Neuling * operand to distinguish from the instruction just being bad
1539bc2a9408SMichael Neuling * (e.g. executing a 'tend' on a CPU without TM!); it's an
1540bc2a9408SMichael Neuling * illegal /placement/ of a valid instruction.
1541bc2a9408SMichael Neuling */
1542bc2a9408SMichael Neuling if (user_mode(regs)) {
1543bc2a9408SMichael Neuling _exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
1544fd3f1e0fSNicholas Piggin return;
1545bc2a9408SMichael Neuling } else {
1546bc2a9408SMichael Neuling printk(KERN_EMERG "Unexpected TM Bad Thing exception "
154711be3958SBreno Leitao "at %lx (msr 0x%lx) tm_scratch=%llx\n",
154811be3958SBreno Leitao regs->nip, regs->msr, get_paca()->tm_scratch);
1549bc2a9408SMichael Neuling die("Unrecoverable exception", regs, SIGABRT);
1550bc2a9408SMichael Neuling }
1551bc2a9408SMichael Neuling }
1552bc2a9408SMichael Neuling #endif
15538dad3f92SPaul Mackerras
1554b3f6a459SMichael Ellerman /*
1555b3f6a459SMichael Ellerman * If we took the program check in the kernel skip down to sending a
1556c3f43096SBenjamin Gray * SIGILL. The subsequent cases all relate to user space, such as
1557c3f43096SBenjamin Gray * emulating instructions which we should only do for user space. We
1558c3f43096SBenjamin Gray * also do not want to enable interrupts for kernel faults because that
1559c3f43096SBenjamin Gray * might lead to further faults, and lose the context of the original
1560c3f43096SBenjamin Gray * exception.
1561b3f6a459SMichael Ellerman */
1562b3f6a459SMichael Ellerman if (!user_mode(regs))
1563b3f6a459SMichael Ellerman goto sigill;
1564b3f6a459SMichael Ellerman
1565e6f8a6c8SNicholas Piggin interrupt_cond_local_irq_enable(regs);
1566cd8a5673SPaul Mackerras
1567c3f43096SBenjamin Gray /*
1568c3f43096SBenjamin Gray * (reason & REASON_TRAP) is mostly handled before enabling IRQs,
1569c3f43096SBenjamin Gray * except get_user_instr() can sleep so we cannot reliably inspect the
1570c3f43096SBenjamin Gray * current instruction in that context. Now that we know we are
1571c3f43096SBenjamin Gray * handling a user space trap and can sleep, we can check if the trap
1572c3f43096SBenjamin Gray * was a hashchk failure.
1573c3f43096SBenjamin Gray */
1574c3f43096SBenjamin Gray if (reason & REASON_TRAP) {
1575c3f43096SBenjamin Gray if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE)) {
1576c3f43096SBenjamin Gray ppc_inst_t insn;
1577c3f43096SBenjamin Gray
1578c3f43096SBenjamin Gray if (get_user_instr(insn, (void __user *)regs->nip)) {
1579c3f43096SBenjamin Gray _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1580c3f43096SBenjamin Gray return;
1581c3f43096SBenjamin Gray }
1582c3f43096SBenjamin Gray
1583c3f43096SBenjamin Gray if (ppc_inst_primary_opcode(insn) == 31 &&
1584c3f43096SBenjamin Gray get_xop(ppc_inst_val(insn)) == OP_31_XOP_HASHCHK) {
1585c3f43096SBenjamin Gray _exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
1586c3f43096SBenjamin Gray return;
1587c3f43096SBenjamin Gray }
1588c3f43096SBenjamin Gray }
1589c3f43096SBenjamin Gray
1590c3f43096SBenjamin Gray _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
1591c3f43096SBenjamin Gray return;
1592c3f43096SBenjamin Gray }
1593c3f43096SBenjamin Gray
159404903a30SKumar Gala /* (reason & REASON_ILLEGAL) would be the obvious thing here,
159504903a30SKumar Gala * but there seems to be a hardware bug on the 405GP (RevD)
159604903a30SKumar Gala * that means ESR is sometimes set incorrectly - either to
159704903a30SKumar Gala * ESR_DST (!?) or 0. In the process of chasing this with the
159804903a30SKumar Gala * hardware people - not sure if it can happen on any illegal
159904903a30SKumar Gala * instruction or only on FP instructions, whether there is a
16004e63f8edSBenjamin Herrenschmidt * pattern to occurrences etc. -dgibson 31/Mar/2003
16014e63f8edSBenjamin Herrenschmidt */
16023a3b5aa6SKevin Hao if (!emulate_math(regs))
1603fd3f1e0fSNicholas Piggin return;
160404903a30SKumar Gala
16058dad3f92SPaul Mackerras /* Try to emulate it if we should. */
16068dad3f92SPaul Mackerras if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
160714cf11afSPaul Mackerras switch (emulate_instruction(regs)) {
160814cf11afSPaul Mackerras case 0:
160959dc5bfcSNicholas Piggin regs_add_return_ip(regs, 4);
161014cf11afSPaul Mackerras emulate_single_step(regs);
1611fd3f1e0fSNicholas Piggin return;
161214cf11afSPaul Mackerras case -EFAULT:
161314cf11afSPaul Mackerras _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1614fd3f1e0fSNicholas Piggin return;
16158dad3f92SPaul Mackerras }
16168dad3f92SPaul Mackerras }
16178dad3f92SPaul Mackerras
1618b3f6a459SMichael Ellerman sigill:
161914cf11afSPaul Mackerras if (reason & REASON_PRIVILEGED)
162014cf11afSPaul Mackerras _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
162114cf11afSPaul Mackerras else
162214cf11afSPaul Mackerras _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1623ba12eedeSLi Zhong
1624fd3f1e0fSNicholas Piggin }
1625fd3f1e0fSNicholas Piggin
16263a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(program_check_exception)
1627fd3f1e0fSNicholas Piggin {
1628fd3f1e0fSNicholas Piggin do_program_check(regs);
162914cf11afSPaul Mackerras }
163014cf11afSPaul Mackerras
1631bf593907SPaul Mackerras /*
1632bf593907SPaul Mackerras * This occurs when running in hypervisor mode on POWER6 or later
1633bf593907SPaul Mackerras * and an illegal instruction is encountered.
1634bf593907SPaul Mackerras */
16353a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(emulation_assist_interrupt)
1636bf593907SPaul Mackerras {
163759dc5bfcSNicholas Piggin regs_set_return_msr(regs, regs->msr | REASON_ILLEGAL);
1638fd3f1e0fSNicholas Piggin do_program_check(regs);
1639bf593907SPaul Mackerras }
1640bf593907SPaul Mackerras
16413a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(alignment_exception)
164214cf11afSPaul Mackerras {
16434393c4f6SBenjamin Herrenschmidt int sig, code, fixed = 0;
16449409d2f9SJordan Niethe unsigned long reason;
164514cf11afSPaul Mackerras
1646e6f8a6c8SNicholas Piggin interrupt_cond_local_irq_enable(regs);
1647a3512b2dSBenjamin Herrenschmidt
16489409d2f9SJordan Niethe reason = get_reason(regs);
16499409d2f9SJordan Niethe if (reason & REASON_BOUNDARY) {
16509409d2f9SJordan Niethe sig = SIGBUS;
16519409d2f9SJordan Niethe code = BUS_ADRALN;
16529409d2f9SJordan Niethe goto bad;
16539409d2f9SJordan Niethe }
16549409d2f9SJordan Niethe
16556ce6c629SMichael Neuling if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
1656540d4d34SNicholas Piggin return;
16576ce6c629SMichael Neuling
1658e9370ae1SPaul Mackerras /* we don't implement logging of alignment exceptions */
1659e9370ae1SPaul Mackerras if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
166014cf11afSPaul Mackerras fixed = fix_alignment(regs);
166114cf11afSPaul Mackerras
166214cf11afSPaul Mackerras if (fixed == 1) {
16639409d2f9SJordan Niethe /* skip over emulated instruction */
166459dc5bfcSNicholas Piggin regs_add_return_ip(regs, inst_length(reason));
166514cf11afSPaul Mackerras emulate_single_step(regs);
1666540d4d34SNicholas Piggin return;
166714cf11afSPaul Mackerras }
166814cf11afSPaul Mackerras
166914cf11afSPaul Mackerras /* Operand address was bad */
167014cf11afSPaul Mackerras if (fixed == -EFAULT) {
16714393c4f6SBenjamin Herrenschmidt sig = SIGSEGV;
16724393c4f6SBenjamin Herrenschmidt code = SEGV_ACCERR;
16734393c4f6SBenjamin Herrenschmidt } else {
16744393c4f6SBenjamin Herrenschmidt sig = SIGBUS;
16754393c4f6SBenjamin Herrenschmidt code = BUS_ADRALN;
167614cf11afSPaul Mackerras }
16779409d2f9SJordan Niethe bad:
16784393c4f6SBenjamin Herrenschmidt if (user_mode(regs))
16794393c4f6SBenjamin Herrenschmidt _exception(sig, regs, code, regs->dar);
16804393c4f6SBenjamin Herrenschmidt else
16818458c628SNicholas Piggin bad_page_fault(regs, sig);
168214cf11afSPaul Mackerras }
168314cf11afSPaul Mackerras
16843a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(stack_overflow_exception)
16853978eb78SChristophe Leroy {
16863978eb78SChristophe Leroy die("Kernel stack overflow", regs, SIGSEGV);
16873978eb78SChristophe Leroy }
16883978eb78SChristophe Leroy
16893a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception)
1690dc1c1ca3SStephen Rothwell {
1691dc1c1ca3SStephen Rothwell printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
1692dc1c1ca3SStephen Rothwell "%lx at %lx\n", regs->trap, regs->nip);
1693dc1c1ca3SStephen Rothwell die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
1694dc1c1ca3SStephen Rothwell }
1695dc1c1ca3SStephen Rothwell
16963a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(altivec_unavailable_exception)
1697dc1c1ca3SStephen Rothwell {
1698dc1c1ca3SStephen Rothwell if (user_mode(regs)) {
1699dc1c1ca3SStephen Rothwell /* A user program has executed an altivec instruction,
1700dc1c1ca3SStephen Rothwell but this kernel doesn't support altivec. */
1701dc1c1ca3SStephen Rothwell _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1702540d4d34SNicholas Piggin return;
1703dc1c1ca3SStephen Rothwell }
17046c4841c2SAnton Blanchard
1705dc1c1ca3SStephen Rothwell printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
1706dc1c1ca3SStephen Rothwell "%lx at %lx\n", regs->trap, regs->nip);
1707dc1c1ca3SStephen Rothwell die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
1708dc1c1ca3SStephen Rothwell }
1709dc1c1ca3SStephen Rothwell
17103a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(vsx_unavailable_exception)
1711ce48b210SMichael Neuling {
1712ce48b210SMichael Neuling if (user_mode(regs)) {
1713ce48b210SMichael Neuling /* A user program has executed a VSX instruction,
1714ce48b210SMichael Neuling but this kernel doesn't support VSX. */
1715ce48b210SMichael Neuling _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1716ce48b210SMichael Neuling return;
1717ce48b210SMichael Neuling }
1718ce48b210SMichael Neuling
1719ce48b210SMichael Neuling printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
1720ce48b210SMichael Neuling "%lx at %lx\n", regs->trap, regs->nip);
1721ce48b210SMichael Neuling die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
1722ce48b210SMichael Neuling }
1723ce48b210SMichael Neuling
1724fcdb758cSRashmica Gupta #ifdef CONFIG_PPC_BOOK3S_64
1725172f7aaaSCyril Bur static void tm_unavailable(struct pt_regs *regs)
1726172f7aaaSCyril Bur {
17275d176f75SCyril Bur #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
17285d176f75SCyril Bur if (user_mode(regs)) {
17295d176f75SCyril Bur current->thread.load_tm++;
173059dc5bfcSNicholas Piggin regs_set_return_msr(regs, regs->msr | MSR_TM);
17315d176f75SCyril Bur tm_enable();
17325d176f75SCyril Bur tm_restore_sprs(&current->thread);
17335d176f75SCyril Bur return;
17345d176f75SCyril Bur }
17355d176f75SCyril Bur #endif
1736172f7aaaSCyril Bur pr_emerg("Unrecoverable TM Unavailable Exception "
1737172f7aaaSCyril Bur "%lx at %lx\n", regs->trap, regs->nip);
1738172f7aaaSCyril Bur die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
1739172f7aaaSCyril Bur }
1740172f7aaaSCyril Bur
17413a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(facility_unavailable_exception)
1742d0c0c9a1SMichael Neuling {
1743021424a1SMichael Ellerman static char *facility_strings[] = {
17442517617eSMichael Neuling [FSCR_FP_LG] = "FPU",
17452517617eSMichael Neuling [FSCR_VECVSX_LG] = "VMX/VSX",
17462517617eSMichael Neuling [FSCR_DSCR_LG] = "DSCR",
17472517617eSMichael Neuling [FSCR_PM_LG] = "PMU SPRs",
17482517617eSMichael Neuling [FSCR_BHRB_LG] = "BHRB",
17492517617eSMichael Neuling [FSCR_TM_LG] = "TM",
17502517617eSMichael Neuling [FSCR_EBB_LG] = "EBB",
17512517617eSMichael Neuling [FSCR_TAR_LG] = "TAR",
1752794464f4SNicholas Piggin [FSCR_MSGP_LG] = "MSGP",
17539b7ff0c6SNicholas Piggin [FSCR_SCV_LG] = "SCV",
17542aa6195eSAlistair Popple [FSCR_PREFIX_LG] = "PREFIX",
1755021424a1SMichael Ellerman };
17562517617eSMichael Neuling char *facility = "unknown";
1757021424a1SMichael Ellerman u64 value;
1758c952c1c4SAnshuman Khandual u32 instword, rd;
17592517617eSMichael Neuling u8 status;
17602517617eSMichael Neuling bool hv;
1761021424a1SMichael Ellerman
17627153d4bfSXiongwei Song hv = (TRAP(regs) == INTERRUPT_H_FAC_UNAVAIL);
17632517617eSMichael Neuling if (hv)
1764b14b6260SMichael Ellerman value = mfspr(SPRN_HFSCR);
17652517617eSMichael Neuling else
17662517617eSMichael Neuling value = mfspr(SPRN_FSCR);
17672517617eSMichael Neuling
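	/*
	 * The facility that caused the interrupt is recorded as a number in
	 * the most significant byte of the FSCR/HFSCR, hence the shift by
	 * 56 to extract the status below.
	 */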
17682517617eSMichael Neuling status = value >> 56;
1769709b973cSAnshuman Khandual if ((hv || status >= 2) &&
1770709b973cSAnshuman Khandual (status < ARRAY_SIZE(facility_strings)) &&
1771709b973cSAnshuman Khandual facility_strings[status])
1772709b973cSAnshuman Khandual facility = facility_strings[status];
1773709b973cSAnshuman Khandual
1774709b973cSAnshuman Khandual /* We should not have taken this interrupt in kernel */
1775709b973cSAnshuman Khandual if (!user_mode(regs)) {
1776709b973cSAnshuman Khandual pr_emerg("Facility '%s' unavailable (%d) exception in kernel mode at %lx\n",
1777709b973cSAnshuman Khandual facility, status, regs->nip);
1778709b973cSAnshuman Khandual die("Unexpected facility unavailable exception", regs, SIGABRT);
1779709b973cSAnshuman Khandual }
1780709b973cSAnshuman Khandual
1781e6f8a6c8SNicholas Piggin interrupt_cond_local_irq_enable(regs);
1782709b973cSAnshuman Khandual
17832517617eSMichael Neuling if (status == FSCR_DSCR_LG) {
1784c952c1c4SAnshuman Khandual /*
1785c952c1c4SAnshuman Khandual * User is accessing the DSCR register using the problem
1786c952c1c4SAnshuman Khandual * state only SPR number (0x03) either through a mfspr or
1787c952c1c4SAnshuman Khandual * a mtspr instruction. If it is a write attempt through
1788c952c1c4SAnshuman Khandual * a mtspr, then we set the inherit bit. This also allows
1789c952c1c4SAnshuman Khandual * the user to write or read the register directly in the
1790c952c1c4SAnshuman Khandual * future by setting via the FSCR DSCR bit. But in case it
1791c952c1c4SAnshuman Khandual * is a read DSCR attempt through a mfspr instruction, we
1792c952c1c4SAnshuman Khandual * just emulate the instruction instead. This code path will
1793c952c1c4SAnshuman Khandual * always emulate all the mfspr instructions till the user
1794c952c1c4SAnshuman Khandual * has attempted at least one mtspr instruction. This way it
1795c952c1c4SAnshuman Khandual * preserves the same behaviour when the user is accessing
1796c952c1c4SAnshuman Khandual * the DSCR through privilege level only SPR number (0x11)
1797c952c1c4SAnshuman Khandual * which is emulated through illegal instruction exception.
1798c952c1c4SAnshuman Khandual * We always leave HFSCR DSCR set.
17992517617eSMichael Neuling */
1800c952c1c4SAnshuman Khandual if (get_user(instword, (u32 __user *)(regs->nip))) {
1801c952c1c4SAnshuman Khandual pr_err("Failed to fetch the user instruction\n");
1802c952c1c4SAnshuman Khandual return;
1803c952c1c4SAnshuman Khandual }
1804c952c1c4SAnshuman Khandual
1805c952c1c4SAnshuman Khandual /* Write into DSCR (mtspr 0x03, RS) */
1806c952c1c4SAnshuman Khandual if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
1807c952c1c4SAnshuman Khandual == PPC_INST_MTSPR_DSCR_USER) {
1808c952c1c4SAnshuman Khandual rd = (instword >> 21) & 0x1f;
1809c952c1c4SAnshuman Khandual current->thread.dscr = regs->gpr[rd];
18102517617eSMichael Neuling current->thread.dscr_inherit = 1;
1811b57bd2deSMichael Neuling current->thread.fscr |= FSCR_DSCR;
1812b57bd2deSMichael Neuling mtspr(SPRN_FSCR, current->thread.fscr);
1813c952c1c4SAnshuman Khandual }
1814c952c1c4SAnshuman Khandual
1815c952c1c4SAnshuman Khandual /* Read from DSCR (mfspr RT, 0x03) */
1816c952c1c4SAnshuman Khandual if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
1817c952c1c4SAnshuman Khandual == PPC_INST_MFSPR_DSCR_USER) {
1818c952c1c4SAnshuman Khandual if (emulate_instruction(regs)) {
1819c952c1c4SAnshuman Khandual pr_err("DSCR based mfspr emulation failed\n");
1820c952c1c4SAnshuman Khandual return;
1821c952c1c4SAnshuman Khandual }
182259dc5bfcSNicholas Piggin regs_add_return_ip(regs, 4);
1823c952c1c4SAnshuman Khandual emulate_single_step(regs);
1824c952c1c4SAnshuman Khandual }
18252517617eSMichael Neuling return;
1826b14b6260SMichael Ellerman }
1827b14b6260SMichael Ellerman
1828172f7aaaSCyril Bur if (status == FSCR_TM_LG) {
1829172f7aaaSCyril Bur /*
1830172f7aaaSCyril Bur * If we're here then the hardware is TM aware because it
1831172f7aaaSCyril Bur * generated an exception with FSCR_TM set.
1832172f7aaaSCyril Bur *
1833172f7aaaSCyril Bur * If cpu_has_feature(CPU_FTR_TM) is false, then either firmware
1834172f7aaaSCyril Bur * told us not to do TM, or the kernel is not built with TM
1835172f7aaaSCyril Bur * support.
1836172f7aaaSCyril Bur *
1837172f7aaaSCyril Bur * If both of those things are true, then userspace can spam the
1838172f7aaaSCyril Bur * console by triggering the printk() below just by continually
1839172f7aaaSCyril Bur * doing tbegin (or any TM instruction). So in that case just
1840172f7aaaSCyril Bur * send the process a SIGILL immediately.
1841172f7aaaSCyril Bur */
1842172f7aaaSCyril Bur if (!cpu_has_feature(CPU_FTR_TM))
1843172f7aaaSCyril Bur goto out;
1844172f7aaaSCyril Bur
1845172f7aaaSCyril Bur tm_unavailable(regs);
1846172f7aaaSCyril Bur return;
1847172f7aaaSCyril Bur }
1848172f7aaaSCyril Bur
184993c2ec0fSBalbir Singh pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n",
185093c2ec0fSBalbir Singh hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr);
1851d0c0c9a1SMichael Neuling
1852172f7aaaSCyril Bur out:
1853d0c0c9a1SMichael Neuling _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1854d0c0c9a1SMichael Neuling }
18552517617eSMichael Neuling #endif
1856d0c0c9a1SMichael Neuling
1857f54db641SMichael Neuling #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1858f54db641SMichael Neuling
18593a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(fp_unavailable_tm)
1860f54db641SMichael Neuling {
1861f54db641SMichael Neuling /* Note: This does not handle any kind of FP laziness. */
1862f54db641SMichael Neuling
1863f54db641SMichael Neuling TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
1864f54db641SMichael Neuling regs->nip, regs->msr);
1865f54db641SMichael Neuling
1866f54db641SMichael Neuling /* We can only have got here if the task started using FP after
1867f54db641SMichael Neuling * beginning the transaction. So, the transactional regs are just a
1868f54db641SMichael Neuling * copy of the checkpointed ones. But, we still need to recheckpoint
1869f54db641SMichael Neuling * as we're enabling FP for the process; it will return, abort the
1870f54db641SMichael Neuling * transaction, and probably retry but now with FP enabled. So the
1871f54db641SMichael Neuling * checkpointed FP registers need to be loaded.
1872f54db641SMichael Neuling */
1873d31626f7SPaul Mackerras tm_reclaim_current(TM_CAUSE_FAC_UNAV);
187496695563SBreno Leitao
187596695563SBreno Leitao /*
187696695563SBreno Leitao * The reclaim initially saved the bogus (lazy) FPRs out to ckfp_state,
187796695563SBreno Leitao * which was then overwritten with thr->fp_state by tm_reclaim_thread().
187896695563SBreno Leitao *
187996695563SBreno Leitao * At this point, ck{fp,vr}_state contains the exact values we want to
188096695563SBreno Leitao * recheckpoint.
188196695563SBreno Leitao */
1882f54db641SMichael Neuling
1883f54db641SMichael Neuling /* Enable FP for the task: */
1884a7771176SCyril Bur current->thread.load_fp = 1;
1885f54db641SMichael Neuling
188696695563SBreno Leitao /*
188796695563SBreno Leitao * Recheckpoint all the checkpointed ckpt, ck{fp, vr}_state registers.
1888f54db641SMichael Neuling */
1889eb5c3f1cSCyril Bur tm_recheckpoint(&current->thread);
1890f54db641SMichael Neuling }
1891f54db641SMichael Neuling
18923a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(altivec_unavailable_tm)
1893f54db641SMichael Neuling {
1894f54db641SMichael Neuling /* See the comments in fp_unavailable_tm(). This function operates
1895f54db641SMichael Neuling * the same way.
1896f54db641SMichael Neuling */
1897f54db641SMichael Neuling
1898f54db641SMichael Neuling TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
1899f54db641SMichael Neuling "MSR=%lx\n",
1900f54db641SMichael Neuling regs->nip, regs->msr);
1901d31626f7SPaul Mackerras tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1902a7771176SCyril Bur current->thread.load_vec = 1;
1903eb5c3f1cSCyril Bur tm_recheckpoint(&current->thread);
1904f54db641SMichael Neuling current->thread.used_vr = 1;
19053ac8ff1cSPaul Mackerras }
19063ac8ff1cSPaul Mackerras
19073a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(vsx_unavailable_tm)
1908f54db641SMichael Neuling {
1909f54db641SMichael Neuling /* See the comments in fp_unavailable_tm(). This works similarly,
1910f54db641SMichael Neuling * though we're loading both FP and VEC registers in here.
1911f54db641SMichael Neuling *
1912f54db641SMichael Neuling * If FP isn't in use, load FP regs. If VEC isn't in use, load VEC
1913f54db641SMichael Neuling * regs. Either way, set MSR_VSX.
1914f54db641SMichael Neuling */
1915f54db641SMichael Neuling
1916f54db641SMichael Neuling TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx,"
1917f54db641SMichael Neuling "MSR=%lx\n",
1918f54db641SMichael Neuling regs->nip, regs->msr);
1919f54db641SMichael Neuling
19203ac8ff1cSPaul Mackerras current->thread.used_vsr = 1;
19213ac8ff1cSPaul Mackerras
1922f54db641SMichael Neuling /* This reclaims FP and/or VR regs if they're already enabled */
1923d31626f7SPaul Mackerras tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1924f54db641SMichael Neuling
1925a7771176SCyril Bur current->thread.load_vec = 1;
1926a7771176SCyril Bur current->thread.load_fp = 1;
19273ac8ff1cSPaul Mackerras
1928eb5c3f1cSCyril Bur tm_recheckpoint(&current->thread);
1929f54db641SMichael Neuling }
1930f54db641SMichael Neuling #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1931f54db641SMichael Neuling
19323a96570fSNicholas Piggin #ifdef CONFIG_PPC64
19333a96570fSNicholas Piggin DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
19343a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi)
1935dc1c1ca3SStephen Rothwell {
193669111bacSChristoph Lameter __this_cpu_inc(irq_stat.pmu_irqs);
193789713ed1SAnton Blanchard
1938dc1c1ca3SStephen Rothwell perf_irq(regs);
1939156b5371SNicholas Piggin
19403a96570fSNicholas Piggin return 0;
19413a96570fSNicholas Piggin }
19423a96570fSNicholas Piggin #endif
19433a96570fSNicholas Piggin
19443a96570fSNicholas Piggin DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
19453a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async)
1946156b5371SNicholas Piggin {
1947156b5371SNicholas Piggin __this_cpu_inc(irq_stat.pmu_irqs);
1948156b5371SNicholas Piggin
1949156b5371SNicholas Piggin perf_irq(regs);
1950156b5371SNicholas Piggin }
1951156b5371SNicholas Piggin
19523a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER_RAW(performance_monitor_exception)
1953156b5371SNicholas Piggin {
1954156b5371SNicholas Piggin /*
1955156b5371SNicholas Piggin * On 64-bit, if perf interrupts hit in a local_irq_disable
1956156b5371SNicholas Piggin * (soft-masked) region, we consider them as NMIs. This is required to
1957156b5371SNicholas Piggin * prevent hash faults on user addresses when reading callchains (and
1958156b5371SNicholas Piggin * looks better from an irq tracing perspective).
1959156b5371SNicholas Piggin */
1960156b5371SNicholas Piggin if (IS_ENABLED(CONFIG_PPC64) && unlikely(arch_irq_disabled_regs(regs)))
1961156b5371SNicholas Piggin performance_monitor_exception_nmi(regs);
1962156b5371SNicholas Piggin else
1963156b5371SNicholas Piggin performance_monitor_exception_async(regs);
19643a96570fSNicholas Piggin
19653a96570fSNicholas Piggin return 0;
1966dc1c1ca3SStephen Rothwell }
1967dc1c1ca3SStephen Rothwell
1968172ae2e7SDave Kleikamp #ifdef CONFIG_PPC_ADV_DEBUG_REGS
19693bffb652SDave Kleikamp static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
19703bffb652SDave Kleikamp {
19713bffb652SDave Kleikamp int changed = 0;
19723bffb652SDave Kleikamp /*
19733bffb652SDave Kleikamp * Determine the cause of the debug event, clear the
19743bffb652SDave Kleikamp * event flags and send a trap to the handler. Torez
19753bffb652SDave Kleikamp */
19763bffb652SDave Kleikamp if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
19773bffb652SDave Kleikamp dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
19783bffb652SDave Kleikamp #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
197951ae8d4aSBharat Bhushan current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
19803bffb652SDave Kleikamp #endif
198147355040SEric W. Biederman do_send_trap(regs, mfspr(SPRN_DAC1), debug_status,
19823bffb652SDave Kleikamp 5);
19833bffb652SDave Kleikamp changed |= 0x01;
19843bffb652SDave Kleikamp } else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
19853bffb652SDave Kleikamp dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
198647355040SEric W. Biederman do_send_trap(regs, mfspr(SPRN_DAC2), debug_status,
19873bffb652SDave Kleikamp 6);
19883bffb652SDave Kleikamp changed |= 0x01;
19893bffb652SDave Kleikamp } else if (debug_status & DBSR_IAC1) {
199051ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
19913bffb652SDave Kleikamp dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
199247355040SEric W. Biederman do_send_trap(regs, mfspr(SPRN_IAC1), debug_status,
19933bffb652SDave Kleikamp 1);
19943bffb652SDave Kleikamp changed |= 0x01;
19953bffb652SDave Kleikamp } else if (debug_status & DBSR_IAC2) {
199651ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
199747355040SEric W. Biederman do_send_trap(regs, mfspr(SPRN_IAC2), debug_status,
19983bffb652SDave Kleikamp 2);
19993bffb652SDave Kleikamp changed |= 0x01;
20003bffb652SDave Kleikamp } else if (debug_status & DBSR_IAC3) {
200151ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
20023bffb652SDave Kleikamp dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
200347355040SEric W. Biederman do_send_trap(regs, mfspr(SPRN_IAC3), debug_status,
20043bffb652SDave Kleikamp 3);
20053bffb652SDave Kleikamp changed |= 0x01;
20063bffb652SDave Kleikamp } else if (debug_status & DBSR_IAC4) {
200751ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
200847355040SEric W. Biederman do_send_trap(regs, mfspr(SPRN_IAC4), debug_status,
20093bffb652SDave Kleikamp 4);
20103bffb652SDave Kleikamp changed |= 0x01;
20113bffb652SDave Kleikamp }
20123bffb652SDave Kleikamp /*
20133bffb652SDave Kleikamp * At the point this routine was called, the MSR(DE) was turned off.
20143bffb652SDave Kleikamp * Check all other debug flags and see if that bit needs to be turned
20153bffb652SDave Kleikamp * back on or not.
20163bffb652SDave Kleikamp */
201751ae8d4aSBharat Bhushan if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
201851ae8d4aSBharat Bhushan current->thread.debug.dbcr1))
201959dc5bfcSNicholas Piggin regs_set_return_msr(regs, regs->msr | MSR_DE);
20203bffb652SDave Kleikamp else
20213bffb652SDave Kleikamp /* Make sure the IDM flag is off */
202251ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_IDM;
20233bffb652SDave Kleikamp
20243bffb652SDave Kleikamp if (changed & 0x01)
202551ae8d4aSBharat Bhushan mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
20263bffb652SDave Kleikamp }
202714cf11afSPaul Mackerras
20283a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(DebugException)
202914cf11afSPaul Mackerras {
2030755d6641SNicholas Piggin unsigned long debug_status = regs->dsisr;
2031755d6641SNicholas Piggin
203251ae8d4aSBharat Bhushan current->thread.debug.dbsr = debug_status;
20333bffb652SDave Kleikamp
2034ec097c84SRoland McGrath /* Hack alert: On BookE, Branch Taken stops on the branch itself, while
2035ec097c84SRoland McGrath * on server, it stops on the target of the branch. In order to simulate
2036ec097c84SRoland McGrath * the server behaviour, we thus restart right away with a single step
2037ec097c84SRoland McGrath * instead of stopping here when hitting a BT
2038ec097c84SRoland McGrath */
2039ec097c84SRoland McGrath if (debug_status & DBSR_BT) {
204059dc5bfcSNicholas Piggin regs_set_return_msr(regs, regs->msr & ~MSR_DE);
2041ec097c84SRoland McGrath
2042ec097c84SRoland McGrath /* Disable BT */
2043ec097c84SRoland McGrath mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
2044ec097c84SRoland McGrath /* Clear the BT event */
2045ec097c84SRoland McGrath mtspr(SPRN_DBSR, DBSR_BT);
2046ec097c84SRoland McGrath
2047ec097c84SRoland McGrath /* Do the single step trick only when coming from userspace */
2048ec097c84SRoland McGrath if (user_mode(regs)) {
204951ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_BT;
205051ae8d4aSBharat Bhushan current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
205159dc5bfcSNicholas Piggin regs_set_return_msr(regs, regs->msr | MSR_DE);
2052ec097c84SRoland McGrath return;
2053ec097c84SRoland McGrath }
2054ec097c84SRoland McGrath
20556cc89badSNaveen N. Rao if (kprobe_post_handler(regs))
20566cc89badSNaveen N. Rao return;
20576cc89badSNaveen N. Rao
2058ec097c84SRoland McGrath if (notify_die(DIE_SSTEP, "block_step", regs, 5,
2059ec097c84SRoland McGrath 5, SIGTRAP) == NOTIFY_STOP) {
2060ec097c84SRoland McGrath return;
2061ec097c84SRoland McGrath }
2062ec097c84SRoland McGrath if (debugger_sstep(regs))
2063ec097c84SRoland McGrath return;
2064ec097c84SRoland McGrath } else if (debug_status & DBSR_IC) { /* Instruction complete */
206559dc5bfcSNicholas Piggin regs_set_return_msr(regs, regs->msr & ~MSR_DE);
2066f8279621SKumar Gala
206714cf11afSPaul Mackerras /* Disable instruction completion */
206814cf11afSPaul Mackerras mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
206914cf11afSPaul Mackerras /* Clear the instruction completion event */
207014cf11afSPaul Mackerras mtspr(SPRN_DBSR, DBSR_IC);
2071f8279621SKumar Gala
20726cc89badSNaveen N. Rao if (kprobe_post_handler(regs))
20736cc89badSNaveen N. Rao return;
20746cc89badSNaveen N. Rao
2075f8279621SKumar Gala if (notify_die(DIE_SSTEP, "single_step", regs, 5,
2076f8279621SKumar Gala 5, SIGTRAP) == NOTIFY_STOP) {
207714cf11afSPaul Mackerras return;
207814cf11afSPaul Mackerras }
2079f8279621SKumar Gala
2080f8279621SKumar Gala if (debugger_sstep(regs))
2081f8279621SKumar Gala return;
2082f8279621SKumar Gala
20833bffb652SDave Kleikamp if (user_mode(regs)) {
208451ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_IC;
208551ae8d4aSBharat Bhushan if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
208651ae8d4aSBharat Bhushan current->thread.debug.dbcr1))
208759dc5bfcSNicholas Piggin regs_set_return_msr(regs, regs->msr | MSR_DE);
20883bffb652SDave Kleikamp else
20893bffb652SDave Kleikamp /* Make sure the IDM bit is off */
209051ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_IDM;
20913bffb652SDave Kleikamp }
2092f8279621SKumar Gala
2093f8279621SKumar Gala _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
20943bffb652SDave Kleikamp } else
20953bffb652SDave Kleikamp handle_debug(regs, debug_status);
209614cf11afSPaul Mackerras }
2097172ae2e7SDave Kleikamp #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
209814cf11afSPaul Mackerras
209914cf11afSPaul Mackerras #ifdef CONFIG_ALTIVEC
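/*
 * AltiVec assist exception: the core is asking for software help with a
 * vector instruction (typically denormalized-operand handling).  In kernel
 * mode this is fatal; in user mode we flush the live vector state and try
 * to emulate the instruction, falling back to SIGSEGV when the instruction
 * cannot be read, or to setting the non-Java bit in the VSCR when it is
 * not recognized.
 */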
21003a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(altivec_assist_exception)
210114cf11afSPaul Mackerras {
210214cf11afSPaul Mackerras int err;
210314cf11afSPaul Mackerras
210414cf11afSPaul Mackerras if (!user_mode(regs)) {
210514cf11afSPaul Mackerras printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
210614cf11afSPaul Mackerras " at %lx\n", regs->nip);
21078dad3f92SPaul Mackerras die("Kernel VMX/Altivec assist exception", regs, SIGILL);
210814cf11afSPaul Mackerras }
210914cf11afSPaul Mackerras
2110dc1c1ca3SStephen Rothwell flush_altivec_to_thread(current);
2111dc1c1ca3SStephen Rothwell
2112eecff81dSAnton Blanchard PPC_WARN_EMULATED(altivec, regs);
211314cf11afSPaul Mackerras err = emulate_altivec(regs);
211414cf11afSPaul Mackerras if (err == 0) {
211559dc5bfcSNicholas Piggin regs_add_return_ip(regs, 4); /* skip emulated instruction */
211614cf11afSPaul Mackerras emulate_single_step(regs);
211714cf11afSPaul Mackerras return;
211814cf11afSPaul Mackerras }
211914cf11afSPaul Mackerras
212014cf11afSPaul Mackerras if (err == -EFAULT) {
212114cf11afSPaul Mackerras /* got an error reading the instruction */
212214cf11afSPaul Mackerras _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
212314cf11afSPaul Mackerras } else {
212414cf11afSPaul Mackerras /* didn't recognize the instruction */
212514cf11afSPaul Mackerras /* XXX quick hack for now: set the non-Java bit in the VSCR */
212676462232SChristian Dietrich printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
212714cf11afSPaul Mackerras "in %s at %lx\n", current->comm, regs->nip);
2128de79f7b9SPaul Mackerras current->thread.vr_state.vscr.u[3] |= 0x10000;
212914cf11afSPaul Mackerras }
213014cf11afSPaul Mackerras }
213114cf11afSPaul Mackerras #endif /* CONFIG_ALTIVEC */
213214cf11afSPaul Mackerras
2133dfc3095cSChristophe Leroy #ifdef CONFIG_PPC_85xx
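/*
 * e500 cache-locking exception: ESR_DLK/ESR_ILK flag a user-mode attempt
 * to use the cache-locking instructions, which is currently refused as a
 * privileged operation.
 */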
21343a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(CacheLockingException)
213514cf11afSPaul Mackerras {
2136b4ced803SNicholas Piggin unsigned long error_code = regs->dsisr;
2137b4ced803SNicholas Piggin
213814cf11afSPaul Mackerras /* We treat cache locking instructions from the user
213914cf11afSPaul Mackerras * as privileged ops; in the future we could try to do
214014cf11afSPaul Mackerras * something smarter.
214114cf11afSPaul Mackerras */
214214cf11afSPaul Mackerras if (error_code & (ESR_DLK|ESR_ILK))
214314cf11afSPaul Mackerras _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
214414cf11afSPaul Mackerras return;
214514cf11afSPaul Mackerras }
2146dfc3095cSChristophe Leroy #endif /* CONFIG_PPC_85xx */
214714cf11afSPaul Mackerras
214814cf11afSPaul Mackerras #ifdef CONFIG_SPE
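/*
 * SPE floating-point data exception.  The SPEFSCR status bits are mapped
 * to an FPE si_code, honouring the exception types the task enabled via
 * its fpexc_mode (prctl PR_SET_FPEXC), and do_spe_mathemu() is asked to
 * emulate the instruction; depending on its result the instruction is
 * skipped, a SIGSEGV or SIGFPE is sent, or the failure is just logged.
 */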
21493a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(SPEFloatingPointException)
215014cf11afSPaul Mackerras {
215114cf11afSPaul Mackerras unsigned long spefscr;
215214cf11afSPaul Mackerras int fpexc_mode;
2153aeb1c0f6SEric W. Biederman int code = FPE_FLTUNK;
21546a800f36SLiu Yu int err;
21556a800f36SLiu Yu
2156e6f8a6c8SNicholas Piggin interrupt_cond_local_irq_enable(regs);
2157ef429124SChristophe Leroy
2158685659eeSyu liu flush_spe_to_thread(current);
215914cf11afSPaul Mackerras
216014cf11afSPaul Mackerras spefscr = current->thread.spefscr;
216114cf11afSPaul Mackerras fpexc_mode = current->thread.fpexc_mode;
216214cf11afSPaul Mackerras
216314cf11afSPaul Mackerras if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
216414cf11afSPaul Mackerras code = FPE_FLTOVF;
216514cf11afSPaul Mackerras }
216614cf11afSPaul Mackerras else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
216714cf11afSPaul Mackerras code = FPE_FLTUND;
216814cf11afSPaul Mackerras }
216914cf11afSPaul Mackerras else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
217014cf11afSPaul Mackerras code = FPE_FLTDIV;
217114cf11afSPaul Mackerras else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
217214cf11afSPaul Mackerras code = FPE_FLTINV;
217314cf11afSPaul Mackerras }
217414cf11afSPaul Mackerras else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
217514cf11afSPaul Mackerras code = FPE_FLTRES;
217614cf11afSPaul Mackerras
21776a800f36SLiu Yu err = do_spe_mathemu(regs);
21786a800f36SLiu Yu if (err == 0) {
217959dc5bfcSNicholas Piggin regs_add_return_ip(regs, 4); /* skip emulated instruction */
21806a800f36SLiu Yu emulate_single_step(regs);
218114cf11afSPaul Mackerras return;
218214cf11afSPaul Mackerras }
21836a800f36SLiu Yu
21846a800f36SLiu Yu if (err == -EFAULT) {
21856a800f36SLiu Yu /* got an error reading the instruction */
21866a800f36SLiu Yu _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
21876a800f36SLiu Yu } else if (err == -EINVAL) {
21886a800f36SLiu Yu /* didn't recognize the instruction */
21896a800f36SLiu Yu printk(KERN_ERR "unrecognized spe instruction "
21906a800f36SLiu Yu "in %s at %lx\n", current->comm, regs->nip);
21916a800f36SLiu Yu } else {
21926a800f36SLiu Yu _exception(SIGFPE, regs, code, regs->nip);
21936a800f36SLiu Yu }
21946a800f36SLiu Yu
21956a800f36SLiu Yu return;
21966a800f36SLiu Yu }
21976a800f36SLiu Yu
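/*
 * SPE floating-point round exception.  This interrupt is taken after the
 * offending instruction has completed, so back the return address up by
 * one instruction before letting speround_handler() fix up the rounding
 * of the result.
 */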
21983a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(SPEFloatingPointRoundException)
21996a800f36SLiu Yu {
22006a800f36SLiu Yu int err;
22016a800f36SLiu Yu
2202e6f8a6c8SNicholas Piggin interrupt_cond_local_irq_enable(regs);
2203ef429124SChristophe Leroy
22046a800f36SLiu Yu preempt_disable();
22056a800f36SLiu Yu if (regs->msr & MSR_SPE)
22066a800f36SLiu Yu giveup_spe(current);
22076a800f36SLiu Yu preempt_enable();
22086a800f36SLiu Yu
220959dc5bfcSNicholas Piggin regs_add_return_ip(regs, -4);
22106a800f36SLiu Yu err = speround_handler(regs);
22116a800f36SLiu Yu if (err == 0) {
221259dc5bfcSNicholas Piggin regs_add_return_ip(regs, 4); /* skip emulated instruction */
22136a800f36SLiu Yu emulate_single_step(regs);
22146a800f36SLiu Yu return;
22156a800f36SLiu Yu }
22166a800f36SLiu Yu
22176a800f36SLiu Yu if (err == -EFAULT) {
22186a800f36SLiu Yu /* got an error reading the instruction */
22196a800f36SLiu Yu _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
22206a800f36SLiu Yu } else if (err == -EINVAL) {
22216a800f36SLiu Yu /* didn't recognize the instruction */
22226a800f36SLiu Yu printk(KERN_ERR "unrecognized spe instruction "
22236a800f36SLiu Yu "in %s at %lx\n", current->comm, regs->nip);
22246a800f36SLiu Yu } else {
2225aeb1c0f6SEric W. Biederman _exception(SIGFPE, regs, FPE_FLTUNK, regs->nip);
22266a800f36SLiu Yu return;
22276a800f36SLiu Yu }
22286a800f36SLiu Yu }
222914cf11afSPaul Mackerras #endif
223014cf11afSPaul Mackerras
2231dc1c1ca3SStephen Rothwell /*
2232dc1c1ca3SStephen Rothwell * We enter here if we get an unrecoverable exception, that is, one
2233dc1c1ca3SStephen Rothwell * that happened at a point where the RI (recoverable interrupt) bit
2234dc1c1ca3SStephen Rothwell * in the MSR is 0. This indicates that SRR0/1 are live, and that
2235dc1c1ca3SStephen Rothwell * we therefore lost state by taking this exception.
2236dc1c1ca3SStephen Rothwell */
2237a58cbed6SChristophe Leroy void __noreturn unrecoverable_exception(struct pt_regs *regs)
2238dc1c1ca3SStephen Rothwell {
223951423a9cSChristophe Leroy pr_emerg("Unrecoverable exception %lx at %lx (msr=%lx)\n",
224051423a9cSChristophe Leroy regs->trap, regs->nip, regs->msr);
2241dc1c1ca3SStephen Rothwell die("Unrecoverable exception", regs, SIGABRT);
2242a58cbed6SChristophe Leroy /* die() should not return */
2243a58cbed6SChristophe Leroy for (;;)
2244a58cbed6SChristophe Leroy ;
2245dc1c1ca3SStephen Rothwell }
2246dc1c1ca3SStephen Rothwell
22471e18c17aSJason Gunthorpe #if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
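/*
 * Default Book-E watchdog NMI handler: report the event and clear
 * TCR[WIE] so the watchdog stops raising further interrupts.
 */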
22483db8aa10SNicholas Piggin DEFINE_INTERRUPT_HANDLER_NMI(WatchdogException)
224914cf11afSPaul Mackerras {
225014cf11afSPaul Mackerras printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
2251ca13c130SChristophe Leroy mtspr(SPRN_TCR, mfspr(SPRN_TCR) & ~TCR_WIE);
22523db8aa10SNicholas Piggin return 0;
225314cf11afSPaul Mackerras }
225414cf11afSPaul Mackerras #endif
2255dc1c1ca3SStephen Rothwell
2256dc1c1ca3SStephen Rothwell /*
2257dc1c1ca3SStephen Rothwell * We enter here if we discover during exception entry that we are
2258dc1c1ca3SStephen Rothwell * running in supervisor mode with a userspace value in the stack pointer.
2259dc1c1ca3SStephen Rothwell */
22603a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(kernel_bad_stack)
2261dc1c1ca3SStephen Rothwell {
2262dc1c1ca3SStephen Rothwell printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
2263dc1c1ca3SStephen Rothwell regs->gpr[1], regs->nip);
2264dc1c1ca3SStephen Rothwell die("Bad kernel stack pointer", regs, SIGABRT);
2265dc1c1ca3SStephen Rothwell }
226614cf11afSPaul Mackerras
226780947e7cSGeert Uytterhoeven #ifdef CONFIG_PPC_EMULATED_STATS
226880947e7cSGeert Uytterhoeven
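/*
 * Emulated-instruction statistics.  Each entry below becomes a counter
 * under the "emulated_instructions" debugfs directory, and the "do_warn"
 * knob enables a rate-limited warning each time an instruction is
 * emulated on a task's behalf.
 */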
226980947e7cSGeert Uytterhoeven #define WARN_EMULATED_SETUP(type) .type = { .name = #type }
227080947e7cSGeert Uytterhoeven
227180947e7cSGeert Uytterhoeven struct ppc_emulated ppc_emulated = {
227280947e7cSGeert Uytterhoeven #ifdef CONFIG_ALTIVEC
227380947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(altivec),
227480947e7cSGeert Uytterhoeven #endif
227580947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(dcba),
227680947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(dcbz),
227780947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(fp_pair),
227880947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(isel),
227980947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(mcrxr),
228080947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(mfpvr),
228180947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(multiple),
228280947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(popcntb),
228380947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(spe),
228480947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(string),
2285a3821b2aSScott Wood WARN_EMULATED_SETUP(sync),
228680947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(unaligned),
228780947e7cSGeert Uytterhoeven #ifdef CONFIG_MATH_EMULATION
228880947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(math),
228980947e7cSGeert Uytterhoeven #endif
229080947e7cSGeert Uytterhoeven #ifdef CONFIG_VSX
229180947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(vsx),
229280947e7cSGeert Uytterhoeven #endif
2293efcac658SAlexey Kardashevskiy #ifdef CONFIG_PPC64
2294efcac658SAlexey Kardashevskiy WARN_EMULATED_SETUP(mfdscr),
2295efcac658SAlexey Kardashevskiy WARN_EMULATED_SETUP(mtdscr),
2296f83319d7SAnton Blanchard WARN_EMULATED_SETUP(lq_stq),
22975080332cSMichael Neuling WARN_EMULATED_SETUP(lxvw4x),
22985080332cSMichael Neuling WARN_EMULATED_SETUP(lxvh8x),
22995080332cSMichael Neuling WARN_EMULATED_SETUP(lxvd2x),
23005080332cSMichael Neuling WARN_EMULATED_SETUP(lxvb16x),
2301efcac658SAlexey Kardashevskiy #endif
230280947e7cSGeert Uytterhoeven };
230380947e7cSGeert Uytterhoeven
230480947e7cSGeert Uytterhoeven u32 ppc_warn_emulated;
230580947e7cSGeert Uytterhoeven
230680947e7cSGeert Uytterhoeven void ppc_warn_emulated_print(const char *type)
230780947e7cSGeert Uytterhoeven {
230876462232SChristian Dietrich pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
230980947e7cSGeert Uytterhoeven type);
231080947e7cSGeert Uytterhoeven }
231180947e7cSGeert Uytterhoeven
231280947e7cSGeert Uytterhoeven static int __init ppc_warn_emulated_init(void)
231380947e7cSGeert Uytterhoeven {
2314860286cfSGreg Kroah-Hartman struct dentry *dir;
231580947e7cSGeert Uytterhoeven unsigned int i;
231680947e7cSGeert Uytterhoeven struct ppc_emulated_entry *entries = (void *)&ppc_emulated;
231780947e7cSGeert Uytterhoeven
231880947e7cSGeert Uytterhoeven dir = debugfs_create_dir("emulated_instructions",
2319dbf77fedSAneesh Kumar K.V arch_debugfs_dir);
232080947e7cSGeert Uytterhoeven
2321860286cfSGreg Kroah-Hartman debugfs_create_u32("do_warn", 0644, dir, &ppc_warn_emulated);
232280947e7cSGeert Uytterhoeven
2323860286cfSGreg Kroah-Hartman for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++)
2324860286cfSGreg Kroah-Hartman debugfs_create_u32(entries[i].name, 0644, dir,
232580947e7cSGeert Uytterhoeven (u32 *)&entries[i].val.counter);
232680947e7cSGeert Uytterhoeven
232780947e7cSGeert Uytterhoeven return 0;
232880947e7cSGeert Uytterhoeven }
232980947e7cSGeert Uytterhoeven
233080947e7cSGeert Uytterhoeven device_initcall(ppc_warn_emulated_init);
233180947e7cSGeert Uytterhoeven
233280947e7cSGeert Uytterhoeven #endif /* CONFIG_PPC_EMULATED_STATS */
2333