12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later 214cf11afSPaul Mackerras /* 314cf11afSPaul Mackerras * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 4fe04b112SScott Wood * Copyright 2007-2010 Freescale Semiconductor, Inc. 514cf11afSPaul Mackerras * 614cf11afSPaul Mackerras * Modified by Cort Dougan (cort@cs.nmt.edu) 714cf11afSPaul Mackerras * and Paul Mackerras (paulus@samba.org) 814cf11afSPaul Mackerras */ 914cf11afSPaul Mackerras 1014cf11afSPaul Mackerras /* 1114cf11afSPaul Mackerras * This file handles the architecture-dependent parts of hardware exceptions 1214cf11afSPaul Mackerras */ 1314cf11afSPaul Mackerras 1414cf11afSPaul Mackerras #include <linux/errno.h> 1514cf11afSPaul Mackerras #include <linux/sched.h> 16b17b0153SIngo Molnar #include <linux/sched/debug.h> 1714cf11afSPaul Mackerras #include <linux/kernel.h> 1814cf11afSPaul Mackerras #include <linux/mm.h> 1999cd1302SRam Pai #include <linux/pkeys.h> 2014cf11afSPaul Mackerras #include <linux/stddef.h> 2114cf11afSPaul Mackerras #include <linux/unistd.h> 228dad3f92SPaul Mackerras #include <linux/ptrace.h> 2314cf11afSPaul Mackerras #include <linux/user.h> 2414cf11afSPaul Mackerras #include <linux/interrupt.h> 2514cf11afSPaul Mackerras #include <linux/init.h> 268a39b05fSPaul Gortmaker #include <linux/extable.h> 278a39b05fSPaul Gortmaker #include <linux/module.h> /* print_modules */ 288dad3f92SPaul Mackerras #include <linux/prctl.h> 2914cf11afSPaul Mackerras #include <linux/delay.h> 3014cf11afSPaul Mackerras #include <linux/kprobes.h> 31cc532915SMichael Ellerman #include <linux/kexec.h> 325474c120SMichael Hanselmann #include <linux/backlight.h> 3373c9ceabSJeremy Fitzhardinge #include <linux/bug.h> 341eeb66a1SChristoph Hellwig #include <linux/kdebug.h> 3576462232SChristian Dietrich #include <linux/ratelimit.h> 36ba12eedeSLi Zhong #include <linux/context_tracking.h> 375080332cSMichael Neuling #include <linux/smp.h> 3835adacd6SNicholas Piggin #include <linux/console.h> 3935adacd6SNicholas Piggin #include <linux/kmsg_dump.h> 40dbf77fedSAneesh Kumar K.V #include <linux/debugfs.h> 4114cf11afSPaul Mackerras 4280947e7cSGeert Uytterhoeven #include <asm/emulated_ops.h> 437c0f6ba6SLinus Torvalds #include <linux/uaccess.h> 443a96570fSNicholas Piggin #include <asm/interrupt.h> 4514cf11afSPaul Mackerras #include <asm/io.h> 4686417780SPaul Mackerras #include <asm/machdep.h> 4786417780SPaul Mackerras #include <asm/rtas.h> 48f7f6f4feSDavid Gibson #include <asm/pmc.h> 4914cf11afSPaul Mackerras #include <asm/reg.h> 5014cf11afSPaul Mackerras #ifdef CONFIG_PMAC_BACKLIGHT 5114cf11afSPaul Mackerras #include <asm/backlight.h> 5214cf11afSPaul Mackerras #endif 53dc1c1ca3SStephen Rothwell #ifdef CONFIG_PPC64 5486417780SPaul Mackerras #include <asm/firmware.h> 55dc1c1ca3SStephen Rothwell #include <asm/processor.h> 56dc1c1ca3SStephen Rothwell #endif 57c0ce7d08SDavid Wilder #include <asm/kexec.h> 5816c57b36SKumar Gala #include <asm/ppc-opcode.h> 59cce1f106SShaohui Xie #include <asm/rio.h> 60ebaeb5aeSMahesh Salgaonkar #include <asm/fadump.h> 61ae3a197eSDavid Howells #include <asm/switch_to.h> 62f54db641SMichael Neuling #include <asm/tm.h> 63ae3a197eSDavid Howells #include <asm/debug.h> 6442f5b4caSDaniel Axtens #include <asm/asm-prototypes.h> 65fd7bacbcSMahesh Salgaonkar #include <asm/hmi.h> 664e0e3435SHongtao Jia #include <sysdev/fsl_pci.h> 676cc89badSNaveen N. 
Rao #include <asm/kprobes.h> 68a99b9c5eSMurilo Opsfelder Araujo #include <asm/stacktrace.h> 69de3c83c2SMathieu Malaterre #include <asm/nmi.h> 70deefd0aeSChristophe Leroy #include <asm/disassemble.h> 712f5182cfSNicholas Piggin #include <asm/udbg.h> 72dc1c1ca3SStephen Rothwell 73da665885SThiago Jung Bauermann #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE) 745be3492fSAnton Blanchard int (*__debugger)(struct pt_regs *regs) __read_mostly; 755be3492fSAnton Blanchard int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly; 765be3492fSAnton Blanchard int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly; 775be3492fSAnton Blanchard int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly; 785be3492fSAnton Blanchard int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly; 799422de3eSMichael Neuling int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly; 805be3492fSAnton Blanchard int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly; 8114cf11afSPaul Mackerras 8214cf11afSPaul Mackerras EXPORT_SYMBOL(__debugger); 8314cf11afSPaul Mackerras EXPORT_SYMBOL(__debugger_ipi); 8414cf11afSPaul Mackerras EXPORT_SYMBOL(__debugger_bpt); 8514cf11afSPaul Mackerras EXPORT_SYMBOL(__debugger_sstep); 8614cf11afSPaul Mackerras EXPORT_SYMBOL(__debugger_iabr_match); 879422de3eSMichael Neuling EXPORT_SYMBOL(__debugger_break_match); 8814cf11afSPaul Mackerras EXPORT_SYMBOL(__debugger_fault_handler); 8914cf11afSPaul Mackerras #endif 9014cf11afSPaul Mackerras 918b3c34cfSMichael Neuling /* Transactional Memory trap debug */ 928b3c34cfSMichael Neuling #ifdef TM_DEBUG_SW 938b3c34cfSMichael Neuling #define TM_DEBUG(x...) printk(KERN_INFO x) 948b3c34cfSMichael Neuling #else 958b3c34cfSMichael Neuling #define TM_DEBUG(x...) do { } while(0) 968b3c34cfSMichael Neuling #endif 978b3c34cfSMichael Neuling 980f642d61SMurilo Opsfelder Araujo static const char *signame(int signr) 990f642d61SMurilo Opsfelder Araujo { 1000f642d61SMurilo Opsfelder Araujo switch (signr) { 1010f642d61SMurilo Opsfelder Araujo case SIGBUS: return "bus error"; 1020f642d61SMurilo Opsfelder Araujo case SIGFPE: return "floating point exception"; 1030f642d61SMurilo Opsfelder Araujo case SIGILL: return "illegal instruction"; 1040f642d61SMurilo Opsfelder Araujo case SIGSEGV: return "segfault"; 1050f642d61SMurilo Opsfelder Araujo case SIGTRAP: return "unhandled trap"; 1060f642d61SMurilo Opsfelder Araujo } 1070f642d61SMurilo Opsfelder Araujo 1080f642d61SMurilo Opsfelder Araujo return "unknown signal"; 1090f642d61SMurilo Opsfelder Araujo } 1100f642d61SMurilo Opsfelder Araujo 11114cf11afSPaul Mackerras /* 11214cf11afSPaul Mackerras * Trap & Exception support 11314cf11afSPaul Mackerras */ 11414cf11afSPaul Mackerras 1156031d9d9Santon@samba.org #ifdef CONFIG_PMAC_BACKLIGHT 1166031d9d9Santon@samba.org static void pmac_backlight_unblank(void) 1176031d9d9Santon@samba.org { 1186031d9d9Santon@samba.org mutex_lock(&pmac_backlight_mutex); 1196031d9d9Santon@samba.org if (pmac_backlight) { 1206031d9d9Santon@samba.org struct backlight_properties *props; 1216031d9d9Santon@samba.org 1226031d9d9Santon@samba.org props = &pmac_backlight->props; 1236031d9d9Santon@samba.org props->brightness = props->max_brightness; 1246031d9d9Santon@samba.org props->power = FB_BLANK_UNBLANK; 1256031d9d9Santon@samba.org backlight_update_status(pmac_backlight); 1266031d9d9Santon@samba.org } 1276031d9d9Santon@samba.org mutex_unlock(&pmac_backlight_mutex); 1286031d9d9Santon@samba.org } 1296031d9d9Santon@samba.org #else 1306031d9d9Santon@samba.org static inline 
void pmac_backlight_unblank(void) { } 1316031d9d9Santon@samba.org #endif 1326031d9d9Santon@samba.org 1336fcd6baaSNicholas Piggin /* 1346fcd6baaSNicholas Piggin * If oops/die is expected to crash the machine, return true here. 1356fcd6baaSNicholas Piggin * 1366fcd6baaSNicholas Piggin * This should not be expected to be 100% accurate, there may be 1376fcd6baaSNicholas Piggin * notifiers registered or other unexpected conditions that may bring 1386fcd6baaSNicholas Piggin * down the kernel. Or if the current process in the kernel is holding 1396fcd6baaSNicholas Piggin * locks or has other critical state, the kernel may become effectively 1406fcd6baaSNicholas Piggin * unusable anyway. 1416fcd6baaSNicholas Piggin */ 1426fcd6baaSNicholas Piggin bool die_will_crash(void) 1436fcd6baaSNicholas Piggin { 1446fcd6baaSNicholas Piggin if (should_fadump_crash()) 1456fcd6baaSNicholas Piggin return true; 1466fcd6baaSNicholas Piggin if (kexec_should_crash(current)) 1476fcd6baaSNicholas Piggin return true; 1486fcd6baaSNicholas Piggin if (in_interrupt() || panic_on_oops || 1496fcd6baaSNicholas Piggin !current->pid || is_global_init(current)) 1506fcd6baaSNicholas Piggin return true; 1516fcd6baaSNicholas Piggin 1526fcd6baaSNicholas Piggin return false; 1536fcd6baaSNicholas Piggin } 1546fcd6baaSNicholas Piggin 155760ca4dcSAnton Blanchard static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; 156760ca4dcSAnton Blanchard static int die_owner = -1; 157760ca4dcSAnton Blanchard static unsigned int die_nest_count; 158c0ce7d08SDavid Wilder static int die_counter; 159760ca4dcSAnton Blanchard 16035adacd6SNicholas Piggin extern void panic_flush_kmsg_start(void) 16135adacd6SNicholas Piggin { 16235adacd6SNicholas Piggin /* 16335adacd6SNicholas Piggin * These are mostly taken from kernel/panic.c, but tries to do 16435adacd6SNicholas Piggin * relatively minimal work. Don't use delay functions (TB may 16535adacd6SNicholas Piggin * be broken), don't crash dump (need to set a firmware log), 16635adacd6SNicholas Piggin * don't run notifiers. We do want to get some information to 16735adacd6SNicholas Piggin * Linux console. 16835adacd6SNicholas Piggin */ 16935adacd6SNicholas Piggin console_verbose(); 17035adacd6SNicholas Piggin bust_spinlocks(1); 17135adacd6SNicholas Piggin } 17235adacd6SNicholas Piggin 17335adacd6SNicholas Piggin extern void panic_flush_kmsg_end(void) 17435adacd6SNicholas Piggin { 17535adacd6SNicholas Piggin kmsg_dump(KMSG_DUMP_PANIC); 17635adacd6SNicholas Piggin bust_spinlocks(0); 17735adacd6SNicholas Piggin debug_locks_off(); 178de6da1e8SFeng Tang console_flush_on_panic(CONSOLE_FLUSH_PENDING); 17935adacd6SNicholas Piggin } 18035adacd6SNicholas Piggin 18103465f89SNicholas Piggin static unsigned long oops_begin(struct pt_regs *regs) 182760ca4dcSAnton Blanchard { 183760ca4dcSAnton Blanchard int cpu; 18434c2a14fSanton@samba.org unsigned long flags; 18514cf11afSPaul Mackerras 186293e4688Santon@samba.org oops_enter(); 187293e4688Santon@samba.org 188760ca4dcSAnton Blanchard /* racy, but better than risking deadlock. */ 189760ca4dcSAnton Blanchard raw_local_irq_save(flags); 190760ca4dcSAnton Blanchard cpu = smp_processor_id(); 191760ca4dcSAnton Blanchard if (!arch_spin_trylock(&die_lock)) { 192760ca4dcSAnton Blanchard if (cpu == die_owner) 193760ca4dcSAnton Blanchard /* nested oops. 
should stop eventually */; 194760ca4dcSAnton Blanchard else 195760ca4dcSAnton Blanchard arch_spin_lock(&die_lock); 196760ca4dcSAnton Blanchard } 197760ca4dcSAnton Blanchard die_nest_count++; 198760ca4dcSAnton Blanchard die_owner = cpu; 19914cf11afSPaul Mackerras console_verbose(); 20014cf11afSPaul Mackerras bust_spinlocks(1); 2016031d9d9Santon@samba.org if (machine_is(powermac)) 2026031d9d9Santon@samba.org pmac_backlight_unblank(); 203760ca4dcSAnton Blanchard return flags; 20434c2a14fSanton@samba.org } 20503465f89SNicholas Piggin NOKPROBE_SYMBOL(oops_begin); 2065474c120SMichael Hanselmann 20703465f89SNicholas Piggin static void oops_end(unsigned long flags, struct pt_regs *regs, 208760ca4dcSAnton Blanchard int signr) 209760ca4dcSAnton Blanchard { 21014cf11afSPaul Mackerras bust_spinlocks(0); 211373d4d09SRusty Russell add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); 212760ca4dcSAnton Blanchard die_nest_count--; 21358154c8cSAnton Blanchard oops_exit(); 21458154c8cSAnton Blanchard printk("\n"); 2157458e8b2SNicholas Piggin if (!die_nest_count) { 216760ca4dcSAnton Blanchard /* Nest count reaches zero, release the lock. */ 2177458e8b2SNicholas Piggin die_owner = -1; 218760ca4dcSAnton Blanchard arch_spin_unlock(&die_lock); 2197458e8b2SNicholas Piggin } 220760ca4dcSAnton Blanchard raw_local_irq_restore(flags); 221cc532915SMichael Ellerman 222d40b6768SNicholas Piggin /* 223d40b6768SNicholas Piggin * system_reset_excption handles debugger, crash dump, panic, for 0x100 224d40b6768SNicholas Piggin */ 2257153d4bfSXiongwei Song if (TRAP(regs) == INTERRUPT_SYSTEM_RESET) 226d40b6768SNicholas Piggin return; 227d40b6768SNicholas Piggin 228ebaeb5aeSMahesh Salgaonkar crash_fadump(regs, "die oops"); 229ebaeb5aeSMahesh Salgaonkar 2304388c9b3SNicholas Piggin if (kexec_should_crash(current)) 231cc532915SMichael Ellerman crash_kexec(regs); 2329b00ac06SAnton Blanchard 233760ca4dcSAnton Blanchard if (!signr) 234760ca4dcSAnton Blanchard return; 235760ca4dcSAnton Blanchard 23658154c8cSAnton Blanchard /* 23758154c8cSAnton Blanchard * While our oops output is serialised by a spinlock, output 23858154c8cSAnton Blanchard * from panic() called below can race and corrupt it. If we 23958154c8cSAnton Blanchard * know we are going to panic, delay for 1 second so we have a 24058154c8cSAnton Blanchard * chance to get clean backtraces from all CPUs that are oopsing. 24158154c8cSAnton Blanchard */ 24258154c8cSAnton Blanchard if (in_interrupt() || panic_on_oops || !current->pid || 24358154c8cSAnton Blanchard is_global_init(current)) { 24458154c8cSAnton Blanchard mdelay(MSEC_PER_SEC); 24558154c8cSAnton Blanchard } 24658154c8cSAnton Blanchard 247cea6a4baSHorms if (panic_on_oops) 248012c437dSHorms panic("Fatal exception"); 2490e25498fSEric W. 
Biederman make_task_dead(signr); 250760ca4dcSAnton Blanchard } 25103465f89SNicholas Piggin NOKPROBE_SYMBOL(oops_end); 252cea6a4baSHorms 253d7e02f7bSAneesh Kumar K.V static char *get_mmu_str(void) 254d7e02f7bSAneesh Kumar K.V { 255d7e02f7bSAneesh Kumar K.V if (early_radix_enabled()) 256d7e02f7bSAneesh Kumar K.V return " MMU=Radix"; 257d7e02f7bSAneesh Kumar K.V if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) 258d7e02f7bSAneesh Kumar K.V return " MMU=Hash"; 259d7e02f7bSAneesh Kumar K.V return ""; 260d7e02f7bSAneesh Kumar K.V } 261d7e02f7bSAneesh Kumar K.V 26203465f89SNicholas Piggin static int __die(const char *str, struct pt_regs *regs, long err) 263760ca4dcSAnton Blanchard { 264760ca4dcSAnton Blanchard printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); 2652e82ca3cSMichael Ellerman 266d7e02f7bSAneesh Kumar K.V printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s %s\n", 26778227443SMichael Ellerman IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE", 268d7e02f7bSAneesh Kumar K.V PAGE_SIZE / 1024, get_mmu_str(), 26978227443SMichael Ellerman IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "", 27078227443SMichael Ellerman IS_ENABLED(CONFIG_SMP) ? " SMP" : "", 27178227443SMichael Ellerman IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "", 27278227443SMichael Ellerman debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "", 27378227443SMichael Ellerman IS_ENABLED(CONFIG_NUMA) ? " NUMA" : "", 27478227443SMichael Ellerman ppc_md.name ? ppc_md.name : ""); 275760ca4dcSAnton Blanchard 276760ca4dcSAnton Blanchard if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP) 277760ca4dcSAnton Blanchard return 1; 278760ca4dcSAnton Blanchard 279760ca4dcSAnton Blanchard print_modules(); 280760ca4dcSAnton Blanchard show_regs(regs); 28114cf11afSPaul Mackerras 28214cf11afSPaul Mackerras return 0; 28314cf11afSPaul Mackerras } 28403465f89SNicholas Piggin NOKPROBE_SYMBOL(__die); 28514cf11afSPaul Mackerras 286760ca4dcSAnton Blanchard void die(const char *str, struct pt_regs *regs, long err) 287760ca4dcSAnton Blanchard { 2886f44b20eSNicholas Piggin unsigned long flags; 289760ca4dcSAnton Blanchard 290d40b6768SNicholas Piggin /* 291d40b6768SNicholas Piggin * system_reset_excption handles debugger, crash dump, panic, for 0x100 292d40b6768SNicholas Piggin */ 2937153d4bfSXiongwei Song if (TRAP(regs) != INTERRUPT_SYSTEM_RESET) { 2946f44b20eSNicholas Piggin if (debugger(regs)) 2956f44b20eSNicholas Piggin return; 296d40b6768SNicholas Piggin } 2976f44b20eSNicholas Piggin 2986f44b20eSNicholas Piggin flags = oops_begin(regs); 299760ca4dcSAnton Blanchard if (__die(str, regs, err)) 300760ca4dcSAnton Blanchard err = 0; 301760ca4dcSAnton Blanchard oops_end(flags, regs, err); 302760ca4dcSAnton Blanchard } 30315770a13SNaveen N. Rao NOKPROBE_SYMBOL(die); 304760ca4dcSAnton Blanchard 305efc463adSEric W. Biederman void user_single_step_report(struct pt_regs *regs) 30625baa35bSOleg Nesterov { 3072e1661d2SEric W. 
Biederman force_sig_fault(SIGTRAP, TRAP_TRACE, (void __user *)regs->nip); 30825baa35bSOleg Nesterov } 30925baa35bSOleg Nesterov 310658b0f92SMurilo Opsfelder Araujo static void show_signal_msg(int signr, struct pt_regs *regs, int code, 311658b0f92SMurilo Opsfelder Araujo unsigned long addr) 31214cf11afSPaul Mackerras { 313997dd26cSMichael Ellerman static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, 314997dd26cSMichael Ellerman DEFAULT_RATELIMIT_BURST); 315997dd26cSMichael Ellerman 316997dd26cSMichael Ellerman if (!show_unhandled_signals) 31735a52a10SMurilo Opsfelder Araujo return; 31835a52a10SMurilo Opsfelder Araujo 31935a52a10SMurilo Opsfelder Araujo if (!unhandled_signal(current, signr)) 32035a52a10SMurilo Opsfelder Araujo return; 32135a52a10SMurilo Opsfelder Araujo 322997dd26cSMichael Ellerman if (!__ratelimit(&rs)) 323997dd26cSMichael Ellerman return; 324997dd26cSMichael Ellerman 3250f642d61SMurilo Opsfelder Araujo pr_info("%s[%d]: %s (%d) at %lx nip %lx lr %lx code %x", 3260f642d61SMurilo Opsfelder Araujo current->comm, current->pid, signame(signr), signr, 327d0c3d534SOlof Johansson addr, regs->nip, regs->link, code); 3280f642d61SMurilo Opsfelder Araujo 3290f642d61SMurilo Opsfelder Araujo print_vma_addr(KERN_CONT " in ", regs->nip); 3300f642d61SMurilo Opsfelder Araujo 3310f642d61SMurilo Opsfelder Araujo pr_cont("\n"); 332a99b9c5eSMurilo Opsfelder Araujo 333a99b9c5eSMurilo Opsfelder Araujo show_user_instructions(regs); 33414cf11afSPaul Mackerras } 335658b0f92SMurilo Opsfelder Araujo 3362c44ce28SEric W. Biederman static bool exception_common(int signr, struct pt_regs *regs, int code, 3372c44ce28SEric W. Biederman unsigned long addr) 338658b0f92SMurilo Opsfelder Araujo { 339658b0f92SMurilo Opsfelder Araujo if (!user_mode(regs)) { 340658b0f92SMurilo Opsfelder Araujo die("Exception in kernel mode", regs, signr); 3412c44ce28SEric W. Biederman return false; 342658b0f92SMurilo Opsfelder Araujo } 343658b0f92SMurilo Opsfelder Araujo 344d0afd44cSNicholas Piggin /* 345d0afd44cSNicholas Piggin * Must not enable interrupts even for user-mode exception, because 346d0afd44cSNicholas Piggin * this can be called from machine check, which may be a NMI or IRQ 347d0afd44cSNicholas Piggin * which don't like interrupts being enabled. Could check for 348d0afd44cSNicholas Piggin * in_hardirq || in_nmi perhaps, but there doesn't seem to be a good 349d0afd44cSNicholas Piggin * reason why _exception() should enable irqs for an exception handler, 350d0afd44cSNicholas Piggin * the handlers themselves do that directly. 351d0afd44cSNicholas Piggin */ 35214cf11afSPaul Mackerras 353d0afd44cSNicholas Piggin show_signal_msg(signr, regs, code, addr); 3549f2f79e3SBenjamin Herrenschmidt 35541ab5266SAnanth N Mavinakayanahalli current->thread.trap_nr = code; 356c5cc1f4dSThiago Jung Bauermann 3572c44ce28SEric W. Biederman return true; 3582c44ce28SEric W. Biederman } 3592c44ce28SEric W. Biederman 3605d8fb8a5SEric W. Biederman void _exception_pkey(struct pt_regs *regs, unsigned long addr, int key) 3612c44ce28SEric W. Biederman { 3625d8fb8a5SEric W. Biederman if (!exception_common(SIGSEGV, regs, SEGV_PKUERR, addr)) 3632c44ce28SEric W. Biederman return; 3642c44ce28SEric W. Biederman 36577c70728SEric W. Biederman force_sig_pkuerr((void __user *) addr, key); 36614cf11afSPaul Mackerras } 36714cf11afSPaul Mackerras 36899cd1302SRam Pai void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) 36999cd1302SRam Pai { 370c1c7c85cSEric W. 
Biederman if (!exception_common(signr, regs, code, addr)) 371c1c7c85cSEric W. Biederman return; 372c1c7c85cSEric W. Biederman 3732e1661d2SEric W. Biederman force_sig_fault(signr, code, (void __user *)addr); 37499cd1302SRam Pai } 37599cd1302SRam Pai 376ccd47702SNicholas Piggin /* 377ccd47702SNicholas Piggin * The interrupt architecture has a quirk in that the HV interrupts excluding 378ccd47702SNicholas Piggin * the NMIs (0x100 and 0x200) do not clear MSR[RI] at entry. The first thing 379ccd47702SNicholas Piggin * that an interrupt handler must do is save off a GPR into a scratch register, 380ccd47702SNicholas Piggin * and all interrupts on POWERNV (HV=1) use the HSPRG1 register as scratch. 381ccd47702SNicholas Piggin * Therefore an NMI can clobber an HV interrupt's live HSPRG1 without noticing 382ccd47702SNicholas Piggin * that it is non-reentrant, which leads to random data corruption. 383ccd47702SNicholas Piggin * 384ccd47702SNicholas Piggin * The solution is for NMI interrupts in HV mode to check if they originated 385ccd47702SNicholas Piggin * from these critical HV interrupt regions. If so, then mark them not 386ccd47702SNicholas Piggin * recoverable. 387ccd47702SNicholas Piggin * 388ccd47702SNicholas Piggin * An alternative would be for HV NMIs to use SPRG for scratch to avoid the 389ccd47702SNicholas Piggin * HSPRG1 clobber, however this would cause guest SPRG to be clobbered. Linux 390ccd47702SNicholas Piggin * guests should always have MSR[RI]=0 when its scratch SPRG is in use, so 391ccd47702SNicholas Piggin * that would work. However any other guest OS that may have the SPRG live 392ccd47702SNicholas Piggin * and MSR[RI]=1 could encounter silent corruption. 393ccd47702SNicholas Piggin * 394ccd47702SNicholas Piggin * Builds that do not support KVM could take this second option to increase 395ccd47702SNicholas Piggin * the recoverability of NMIs. 396ccd47702SNicholas Piggin */ 3975352090aSDaniel Axtens noinstr void hv_nmi_check_nonrecoverable(struct pt_regs *regs) 398ccd47702SNicholas Piggin { 399ccd47702SNicholas Piggin #ifdef CONFIG_PPC_POWERNV 400ccd47702SNicholas Piggin unsigned long kbase = (unsigned long)_stext; 401ccd47702SNicholas Piggin unsigned long nip = regs->nip; 402ccd47702SNicholas Piggin 403ccd47702SNicholas Piggin if (!(regs->msr & MSR_RI)) 404ccd47702SNicholas Piggin return; 405ccd47702SNicholas Piggin if (!(regs->msr & MSR_HV)) 406ccd47702SNicholas Piggin return; 407ccd47702SNicholas Piggin if (regs->msr & MSR_PR) 408ccd47702SNicholas Piggin return; 409ccd47702SNicholas Piggin 410ccd47702SNicholas Piggin /* 411ccd47702SNicholas Piggin * Now test if the interrupt has hit a range that may be using 412ccd47702SNicholas Piggin * HSPRG1 without having RI=0 (i.e., an HSRR interrupt). The 413ccd47702SNicholas Piggin * problem ranges all run un-relocated. Test real and virt modes 4145c4a4802SBhaskar Chowdhury * at the same time by dropping the high bit of the nip (virt mode 415ccd47702SNicholas Piggin * entry points still have the +0x4000 offset). 
416ccd47702SNicholas Piggin */
417ccd47702SNicholas Piggin nip &= ~0xc000000000000000ULL;
418ccd47702SNicholas Piggin if ((nip >= 0x500 && nip < 0x600) || (nip >= 0x4500 && nip < 0x4600))
419ccd47702SNicholas Piggin goto nonrecoverable;
420ccd47702SNicholas Piggin if ((nip >= 0x980 && nip < 0xa00) || (nip >= 0x4980 && nip < 0x4a00))
421ccd47702SNicholas Piggin goto nonrecoverable;
422ccd47702SNicholas Piggin if ((nip >= 0xe00 && nip < 0xec0) || (nip >= 0x4e00 && nip < 0x4ec0))
423ccd47702SNicholas Piggin goto nonrecoverable;
424ccd47702SNicholas Piggin if ((nip >= 0xf80 && nip < 0xfa0) || (nip >= 0x4f80 && nip < 0x4fa0))
425ccd47702SNicholas Piggin goto nonrecoverable;
426bd3524feSNicholas Piggin
427ccd47702SNicholas Piggin /* Trampoline code runs un-relocated so subtract kbase. */
428bd3524feSNicholas Piggin if (nip >= (unsigned long)(start_real_trampolines - kbase) &&
429bd3524feSNicholas Piggin nip < (unsigned long)(end_real_trampolines - kbase))
430ccd47702SNicholas Piggin goto nonrecoverable;
431bd3524feSNicholas Piggin if (nip >= (unsigned long)(start_virt_trampolines - kbase) &&
432bd3524feSNicholas Piggin nip < (unsigned long)(end_virt_trampolines - kbase))
433ccd47702SNicholas Piggin goto nonrecoverable;
434ccd47702SNicholas Piggin return;
435ccd47702SNicholas Piggin
436ccd47702SNicholas Piggin nonrecoverable:
4375352090aSDaniel Axtens regs->msr &= ~MSR_RI;
4385352090aSDaniel Axtens local_paca->hsrr_valid = 0;
4395352090aSDaniel Axtens local_paca->srr_valid = 0;
440ccd47702SNicholas Piggin #endif
441ccd47702SNicholas Piggin }
4423a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER_NMI(system_reset_exception)
44314cf11afSPaul Mackerras {
444cbf2ba95SNicholas Piggin unsigned long hsrr0, hsrr1;
445cbf2ba95SNicholas Piggin bool saved_hsrrs = false;
4462b4f3ac5SNicholas Piggin
447cbf2ba95SNicholas Piggin /*
448cbf2ba95SNicholas Piggin * System reset can interrupt code where HSRRs are live and MSR[RI]=1.
449cbf2ba95SNicholas Piggin * The system reset interrupt itself may clobber HSRRs (e.g., to call
450cbf2ba95SNicholas Piggin * OPAL), so save them here and restore them before returning.
451cbf2ba95SNicholas Piggin *
452cbf2ba95SNicholas Piggin * Machine checks don't need to save HSRRs, as the real mode handler
453cbf2ba95SNicholas Piggin * is careful to avoid them, and the regular handler is not delivered
454cbf2ba95SNicholas Piggin * as an NMI.
455cbf2ba95SNicholas Piggin */ 456cbf2ba95SNicholas Piggin if (cpu_has_feature(CPU_FTR_HVMODE)) { 457cbf2ba95SNicholas Piggin hsrr0 = mfspr(SPRN_HSRR0); 458cbf2ba95SNicholas Piggin hsrr1 = mfspr(SPRN_HSRR1); 459cbf2ba95SNicholas Piggin saved_hsrrs = true; 460cbf2ba95SNicholas Piggin } 461cbf2ba95SNicholas Piggin 462ccd47702SNicholas Piggin hv_nmi_check_nonrecoverable(regs); 463ccd47702SNicholas Piggin 464ca41ad43SNicholas Piggin __this_cpu_inc(irq_stat.sreset_irqs); 465ca41ad43SNicholas Piggin 46614cf11afSPaul Mackerras /* See if any machine dependent calls */ 467c902be71SArnd Bergmann if (ppc_md.system_reset_exception) { 468c902be71SArnd Bergmann if (ppc_md.system_reset_exception(regs)) 469c4f3b52cSNicholas Piggin goto out; 470c902be71SArnd Bergmann } 47114cf11afSPaul Mackerras 4724388c9b3SNicholas Piggin if (debugger(regs)) 4734388c9b3SNicholas Piggin goto out; 4744388c9b3SNicholas Piggin 475e7ca44edSGanesh Goudar kmsg_dump(KMSG_DUMP_OOPS); 4764388c9b3SNicholas Piggin /* 4774388c9b3SNicholas Piggin * A system reset is a request to dump, so we always send 4784388c9b3SNicholas Piggin * it through the crashdump code (if fadump or kdump are 4794388c9b3SNicholas Piggin * registered). 4804388c9b3SNicholas Piggin */ 4814388c9b3SNicholas Piggin crash_fadump(regs, "System Reset"); 4824388c9b3SNicholas Piggin 4834388c9b3SNicholas Piggin crash_kexec(regs); 4844388c9b3SNicholas Piggin 4854388c9b3SNicholas Piggin /* 4864388c9b3SNicholas Piggin * We aren't the primary crash CPU. We need to send it 4874388c9b3SNicholas Piggin * to a holding pattern to avoid it ending up in the panic 4884388c9b3SNicholas Piggin * code. 4894388c9b3SNicholas Piggin */ 4904388c9b3SNicholas Piggin crash_kexec_secondary(regs); 4914388c9b3SNicholas Piggin 4924388c9b3SNicholas Piggin /* 4934388c9b3SNicholas Piggin * No debugger or crash dump registered, print logs then 4944388c9b3SNicholas Piggin * panic. 4954388c9b3SNicholas Piggin */ 4964552d128SNicholas Piggin die("System Reset", regs, SIGABRT); 4974388c9b3SNicholas Piggin 4984388c9b3SNicholas Piggin mdelay(2*MSEC_PER_SEC); /* Wait a little while for others to print */ 4994388c9b3SNicholas Piggin add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); 5004388c9b3SNicholas Piggin nmi_panic(regs, "System Reset"); 50114cf11afSPaul Mackerras 502c4f3b52cSNicholas Piggin out: 503c4f3b52cSNicholas Piggin #ifdef CONFIG_PPC_BOOK3S_64 504c4f3b52cSNicholas Piggin BUG_ON(get_paca()->in_nmi == 0); 505c4f3b52cSNicholas Piggin if (get_paca()->in_nmi > 1) 506265d6e58SNicholas Piggin die("Unrecoverable nested System Reset", regs, SIGABRT); 507c4f3b52cSNicholas Piggin #endif 50814cf11afSPaul Mackerras /* Must die if the interrupt is not recoverable */ 509806c0e6eSChristophe Leroy if (regs_is_unrecoverable(regs)) { 51011cb0a25SNicholas Piggin /* For the reason explained in die_mce, nmi_exit before die */ 51111cb0a25SNicholas Piggin nmi_exit(); 512265d6e58SNicholas Piggin die("Unrecoverable System Reset", regs, SIGABRT); 51311cb0a25SNicholas Piggin } 51414cf11afSPaul Mackerras 515cbf2ba95SNicholas Piggin if (saved_hsrrs) { 516cbf2ba95SNicholas Piggin mtspr(SPRN_HSRR0, hsrr0); 517cbf2ba95SNicholas Piggin mtspr(SPRN_HSRR1, hsrr1); 518cbf2ba95SNicholas Piggin } 519cbf2ba95SNicholas Piggin 52014cf11afSPaul Mackerras /* What should we do here? We could issue a shutdown or hard reset. 
*/ 5213a96570fSNicholas Piggin 5223a96570fSNicholas Piggin return 0; 52314cf11afSPaul Mackerras } 5241e9b4507SMahesh Salgaonkar 52514cf11afSPaul Mackerras /* 52614cf11afSPaul Mackerras * I/O accesses can cause machine checks on powermacs. 52714cf11afSPaul Mackerras * Check if the NIP corresponds to the address of a sync 52814cf11afSPaul Mackerras * instruction for which there is an entry in the exception 52914cf11afSPaul Mackerras * table. 53014cf11afSPaul Mackerras * -- paulus. 53114cf11afSPaul Mackerras */ 53214cf11afSPaul Mackerras static inline int check_io_access(struct pt_regs *regs) 53314cf11afSPaul Mackerras { 53468a64357SBenjamin Herrenschmidt #ifdef CONFIG_PPC32 53514cf11afSPaul Mackerras unsigned long msr = regs->msr; 53614cf11afSPaul Mackerras const struct exception_table_entry *entry; 53714cf11afSPaul Mackerras unsigned int *nip = (unsigned int *)regs->nip; 53814cf11afSPaul Mackerras 53914cf11afSPaul Mackerras if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000))) 54014cf11afSPaul Mackerras && (entry = search_exception_tables(regs->nip)) != NULL) { 54114cf11afSPaul Mackerras /* 54214cf11afSPaul Mackerras * Check that it's a sync instruction, or somewhere 54314cf11afSPaul Mackerras * in the twi; isync; nop sequence that inb/inw/inl uses. 54414cf11afSPaul Mackerras * As the address is in the exception table 54514cf11afSPaul Mackerras * we should be able to read the instr there. 54614cf11afSPaul Mackerras * For the debug message, we look at the preceding 54714cf11afSPaul Mackerras * load or store. 54814cf11afSPaul Mackerras */ 549deefd0aeSChristophe Leroy if (*nip == PPC_RAW_NOP()) 55014cf11afSPaul Mackerras nip -= 2; 551deefd0aeSChristophe Leroy else if (*nip == PPC_RAW_ISYNC()) 55214cf11afSPaul Mackerras --nip; 553deefd0aeSChristophe Leroy if (*nip == PPC_RAW_SYNC() || get_op(*nip) == OP_TRAP) { 55414cf11afSPaul Mackerras unsigned int rb; 55514cf11afSPaul Mackerras 55614cf11afSPaul Mackerras --nip; 55714cf11afSPaul Mackerras rb = (*nip >> 11) & 0x1f; 55814cf11afSPaul Mackerras printk(KERN_DEBUG "%s bad port %lx at %p\n", 55914cf11afSPaul Mackerras (*nip & 0x100)? "OUT to": "IN from", 56014cf11afSPaul Mackerras regs->gpr[rb] - _IO_BASE, nip); 561806c0e6eSChristophe Leroy regs_set_recoverable(regs); 56259dc5bfcSNicholas Piggin regs_set_return_ip(regs, extable_fixup(entry)); 56314cf11afSPaul Mackerras return 1; 56414cf11afSPaul Mackerras } 56514cf11afSPaul Mackerras } 56668a64357SBenjamin Herrenschmidt #endif /* CONFIG_PPC32 */ 56714cf11afSPaul Mackerras return 0; 56814cf11afSPaul Mackerras } 56914cf11afSPaul Mackerras 570172ae2e7SDave Kleikamp #ifdef CONFIG_PPC_ADV_DEBUG_REGS 57114cf11afSPaul Mackerras /* On 4xx, the reason for the machine check or program exception 57214cf11afSPaul Mackerras is in the ESR. 
*/ 5734f8e78c0SXiongwei Song #define get_reason(regs) ((regs)->esr) 57414cf11afSPaul Mackerras #define REASON_FP ESR_FP 57514cf11afSPaul Mackerras #define REASON_ILLEGAL (ESR_PIL | ESR_PUO) 57614cf11afSPaul Mackerras #define REASON_PRIVILEGED ESR_PPR 57714cf11afSPaul Mackerras #define REASON_TRAP ESR_PTR 5789409d2f9SJordan Niethe #define REASON_PREFIXED 0 5799409d2f9SJordan Niethe #define REASON_BOUNDARY 0 58014cf11afSPaul Mackerras 58114cf11afSPaul Mackerras /* single-step stuff */ 58251ae8d4aSBharat Bhushan #define single_stepping(regs) (current->thread.debug.dbcr0 & DBCR0_IC) 58351ae8d4aSBharat Bhushan #define clear_single_step(regs) (current->thread.debug.dbcr0 &= ~DBCR0_IC) 5840e524e76SMatt Evans #define clear_br_trace(regs) do {} while(0) 58514cf11afSPaul Mackerras #else 58614cf11afSPaul Mackerras /* On non-4xx, the reason for the machine check or program 58714cf11afSPaul Mackerras exception is in the MSR. */ 58814cf11afSPaul Mackerras #define get_reason(regs) ((regs)->msr) 589d30a5a52SMichael Ellerman #define REASON_TM SRR1_PROGTM 590d30a5a52SMichael Ellerman #define REASON_FP SRR1_PROGFPE 591d30a5a52SMichael Ellerman #define REASON_ILLEGAL SRR1_PROGILL 592d30a5a52SMichael Ellerman #define REASON_PRIVILEGED SRR1_PROGPRIV 593d30a5a52SMichael Ellerman #define REASON_TRAP SRR1_PROGTRAP 5949409d2f9SJordan Niethe #define REASON_PREFIXED SRR1_PREFIXED 5959409d2f9SJordan Niethe #define REASON_BOUNDARY SRR1_BOUNDARY 59614cf11afSPaul Mackerras 59714cf11afSPaul Mackerras #define single_stepping(regs) ((regs)->msr & MSR_SE) 59859dc5bfcSNicholas Piggin #define clear_single_step(regs) (regs_set_return_msr((regs), (regs)->msr & ~MSR_SE)) 59959dc5bfcSNicholas Piggin #define clear_br_trace(regs) (regs_set_return_msr((regs), (regs)->msr & ~MSR_BE)) 60014cf11afSPaul Mackerras #endif 60114cf11afSPaul Mackerras 6029409d2f9SJordan Niethe #define inst_length(reason) (((reason) & REASON_PREFIXED) ? 8 : 4) 6039409d2f9SJordan Niethe 604688de017SChristophe Leroy #if defined(CONFIG_PPC_E500) 605fe04b112SScott Wood int machine_check_e500mc(struct pt_regs *regs) 606fe04b112SScott Wood { 607fe04b112SScott Wood unsigned long mcsr = mfspr(SPRN_MCSR); 608a4e89ffbSMatt Weber unsigned long pvr = mfspr(SPRN_PVR); 609fe04b112SScott Wood unsigned long reason = mcsr; 610fe04b112SScott Wood int recoverable = 1; 611fe04b112SScott Wood 61282a9a480SScott Wood if (reason & MCSR_LD) { 613cce1f106SShaohui Xie recoverable = fsl_rio_mcheck_exception(regs); 614cce1f106SShaohui Xie if (recoverable == 1) 615cce1f106SShaohui Xie goto silent_out; 616cce1f106SShaohui Xie } 617cce1f106SShaohui Xie 618fe04b112SScott Wood printk("Machine check in kernel mode.\n"); 619fe04b112SScott Wood printk("Caused by (from MCSR=%lx): ", reason); 620fe04b112SScott Wood 621fe04b112SScott Wood if (reason & MCSR_MCP) 622422123ccSChristophe Leroy pr_cont("Machine Check Signal\n"); 623fe04b112SScott Wood 624fe04b112SScott Wood if (reason & MCSR_ICPERR) { 625422123ccSChristophe Leroy pr_cont("Instruction Cache Parity Error\n"); 626fe04b112SScott Wood 627fe04b112SScott Wood /* 628fe04b112SScott Wood * This is recoverable by invalidating the i-cache. 
629fe04b112SScott Wood */ 630fe04b112SScott Wood mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI); 631fe04b112SScott Wood while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI) 632fe04b112SScott Wood ; 633fe04b112SScott Wood 634fe04b112SScott Wood /* 635fe04b112SScott Wood * This will generally be accompanied by an instruction 636fe04b112SScott Wood * fetch error report -- only treat MCSR_IF as fatal 637fe04b112SScott Wood * if it wasn't due to an L1 parity error. 638fe04b112SScott Wood */ 639fe04b112SScott Wood reason &= ~MCSR_IF; 640fe04b112SScott Wood } 641fe04b112SScott Wood 642fe04b112SScott Wood if (reason & MCSR_DCPERR_MC) { 643422123ccSChristophe Leroy pr_cont("Data Cache Parity Error\n"); 64437caf9f2SKumar Gala 64537caf9f2SKumar Gala /* 64637caf9f2SKumar Gala * In write shadow mode we auto-recover from the error, but it 64737caf9f2SKumar Gala * may still get logged and cause a machine check. We should 64837caf9f2SKumar Gala * only treat the non-write shadow case as non-recoverable. 64937caf9f2SKumar Gala */ 650a4e89ffbSMatt Weber /* On e6500 core, L1 DCWS (Data cache write shadow mode) bit 651a4e89ffbSMatt Weber * is not implemented but L1 data cache always runs in write 652a4e89ffbSMatt Weber * shadow mode. Hence on data cache parity errors HW will 653a4e89ffbSMatt Weber * automatically invalidate the L1 Data Cache. 654a4e89ffbSMatt Weber */ 655a4e89ffbSMatt Weber if (PVR_VER(pvr) != PVR_VER_E6500) { 65637caf9f2SKumar Gala if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS)) 657fe04b112SScott Wood recoverable = 0; 658fe04b112SScott Wood } 659a4e89ffbSMatt Weber } 660fe04b112SScott Wood 661fe04b112SScott Wood if (reason & MCSR_L2MMU_MHIT) { 662422123ccSChristophe Leroy pr_cont("Hit on multiple TLB entries\n"); 663fe04b112SScott Wood recoverable = 0; 664fe04b112SScott Wood } 665fe04b112SScott Wood 666fe04b112SScott Wood if (reason & MCSR_NMI) 667422123ccSChristophe Leroy pr_cont("Non-maskable interrupt\n"); 668fe04b112SScott Wood 669fe04b112SScott Wood if (reason & MCSR_IF) { 670422123ccSChristophe Leroy pr_cont("Instruction Fetch Error Report\n"); 671fe04b112SScott Wood recoverable = 0; 672fe04b112SScott Wood } 673fe04b112SScott Wood 674fe04b112SScott Wood if (reason & MCSR_LD) { 675422123ccSChristophe Leroy pr_cont("Load Error Report\n"); 676fe04b112SScott Wood recoverable = 0; 677fe04b112SScott Wood } 678fe04b112SScott Wood 679fe04b112SScott Wood if (reason & MCSR_ST) { 680422123ccSChristophe Leroy pr_cont("Store Error Report\n"); 681fe04b112SScott Wood recoverable = 0; 682fe04b112SScott Wood } 683fe04b112SScott Wood 684fe04b112SScott Wood if (reason & MCSR_LDG) { 685422123ccSChristophe Leroy pr_cont("Guarded Load Error Report\n"); 686fe04b112SScott Wood recoverable = 0; 687fe04b112SScott Wood } 688fe04b112SScott Wood 689fe04b112SScott Wood if (reason & MCSR_TLBSYNC) 690422123ccSChristophe Leroy pr_cont("Simultaneous tlbsync operations\n"); 691fe04b112SScott Wood 692fe04b112SScott Wood if (reason & MCSR_BSL2_ERR) { 693422123ccSChristophe Leroy pr_cont("Level 2 Cache Error\n"); 694fe04b112SScott Wood recoverable = 0; 695fe04b112SScott Wood } 696fe04b112SScott Wood 697fe04b112SScott Wood if (reason & MCSR_MAV) { 698fe04b112SScott Wood u64 addr; 699fe04b112SScott Wood 700fe04b112SScott Wood addr = mfspr(SPRN_MCAR); 701fe04b112SScott Wood addr |= (u64)mfspr(SPRN_MCARU) << 32; 702fe04b112SScott Wood 703422123ccSChristophe Leroy pr_cont("Machine Check %s Address: %#llx\n", 704fe04b112SScott Wood reason & MCSR_MEA ? 
"Effective" : "Physical", addr); 705fe04b112SScott Wood } 706fe04b112SScott Wood 707cce1f106SShaohui Xie silent_out: 708fe04b112SScott Wood mtspr(SPRN_MCSR, mcsr); 709fe04b112SScott Wood return mfspr(SPRN_MCSR) == 0 && recoverable; 710fe04b112SScott Wood } 711fe04b112SScott Wood 71247c0bd1aSBenjamin Herrenschmidt int machine_check_e500(struct pt_regs *regs) 71347c0bd1aSBenjamin Herrenschmidt { 71442bff234SMichael Ellerman unsigned long reason = mfspr(SPRN_MCSR); 71547c0bd1aSBenjamin Herrenschmidt 716cce1f106SShaohui Xie if (reason & MCSR_BUS_RBERR) { 717cce1f106SShaohui Xie if (fsl_rio_mcheck_exception(regs)) 718cce1f106SShaohui Xie return 1; 7194e0e3435SHongtao Jia if (fsl_pci_mcheck_exception(regs)) 7204e0e3435SHongtao Jia return 1; 721cce1f106SShaohui Xie } 722cce1f106SShaohui Xie 72314cf11afSPaul Mackerras printk("Machine check in kernel mode.\n"); 72414cf11afSPaul Mackerras printk("Caused by (from MCSR=%lx): ", reason); 72514cf11afSPaul Mackerras 72614cf11afSPaul Mackerras if (reason & MCSR_MCP) 727422123ccSChristophe Leroy pr_cont("Machine Check Signal\n"); 72814cf11afSPaul Mackerras if (reason & MCSR_ICPERR) 729422123ccSChristophe Leroy pr_cont("Instruction Cache Parity Error\n"); 73014cf11afSPaul Mackerras if (reason & MCSR_DCP_PERR) 731422123ccSChristophe Leroy pr_cont("Data Cache Push Parity Error\n"); 73214cf11afSPaul Mackerras if (reason & MCSR_DCPERR) 733422123ccSChristophe Leroy pr_cont("Data Cache Parity Error\n"); 73414cf11afSPaul Mackerras if (reason & MCSR_BUS_IAERR) 735422123ccSChristophe Leroy pr_cont("Bus - Instruction Address Error\n"); 73614cf11afSPaul Mackerras if (reason & MCSR_BUS_RAERR) 737422123ccSChristophe Leroy pr_cont("Bus - Read Address Error\n"); 73814cf11afSPaul Mackerras if (reason & MCSR_BUS_WAERR) 739422123ccSChristophe Leroy pr_cont("Bus - Write Address Error\n"); 74014cf11afSPaul Mackerras if (reason & MCSR_BUS_IBERR) 741422123ccSChristophe Leroy pr_cont("Bus - Instruction Data Error\n"); 74214cf11afSPaul Mackerras if (reason & MCSR_BUS_RBERR) 743422123ccSChristophe Leroy pr_cont("Bus - Read Data Bus Error\n"); 74414cf11afSPaul Mackerras if (reason & MCSR_BUS_WBERR) 745422123ccSChristophe Leroy pr_cont("Bus - Write Data Bus Error\n"); 74614cf11afSPaul Mackerras if (reason & MCSR_BUS_IPERR) 747422123ccSChristophe Leroy pr_cont("Bus - Instruction Parity Error\n"); 74814cf11afSPaul Mackerras if (reason & MCSR_BUS_RPERR) 749422123ccSChristophe Leroy pr_cont("Bus - Read Parity Error\n"); 75047c0bd1aSBenjamin Herrenschmidt 75147c0bd1aSBenjamin Herrenschmidt return 0; 75247c0bd1aSBenjamin Herrenschmidt } 7534490c06bSKumar Gala 7544490c06bSKumar Gala int machine_check_generic(struct pt_regs *regs) 7554490c06bSKumar Gala { 7564490c06bSKumar Gala return 0; 7574490c06bSKumar Gala } 7587f3f819eSMichael Ellerman #elif defined(CONFIG_PPC32) 75947c0bd1aSBenjamin Herrenschmidt int machine_check_generic(struct pt_regs *regs) 76047c0bd1aSBenjamin Herrenschmidt { 76142bff234SMichael Ellerman unsigned long reason = regs->msr; 76247c0bd1aSBenjamin Herrenschmidt 76314cf11afSPaul Mackerras printk("Machine check in kernel mode.\n"); 76414cf11afSPaul Mackerras printk("Caused by (from SRR1=%lx): ", reason); 76514cf11afSPaul Mackerras switch (reason & 0x601F0000) { 76614cf11afSPaul Mackerras case 0x80000: 767422123ccSChristophe Leroy pr_cont("Machine check signal\n"); 76814cf11afSPaul Mackerras break; 76914cf11afSPaul Mackerras case 0x40000: 77014cf11afSPaul Mackerras case 0x140000: /* 7450 MSS error and TEA */ 771422123ccSChristophe Leroy pr_cont("Transfer error ack 
signal\n"); 77214cf11afSPaul Mackerras break; 77314cf11afSPaul Mackerras case 0x20000: 774422123ccSChristophe Leroy pr_cont("Data parity error signal\n"); 77514cf11afSPaul Mackerras break; 77614cf11afSPaul Mackerras case 0x10000: 777422123ccSChristophe Leroy pr_cont("Address parity error signal\n"); 77814cf11afSPaul Mackerras break; 77914cf11afSPaul Mackerras case 0x20000000: 780422123ccSChristophe Leroy pr_cont("L1 Data Cache error\n"); 78114cf11afSPaul Mackerras break; 78214cf11afSPaul Mackerras case 0x40000000: 783422123ccSChristophe Leroy pr_cont("L1 Instruction Cache error\n"); 78414cf11afSPaul Mackerras break; 78514cf11afSPaul Mackerras case 0x00100000: 786422123ccSChristophe Leroy pr_cont("L2 data cache parity error\n"); 78714cf11afSPaul Mackerras break; 78814cf11afSPaul Mackerras default: 789422123ccSChristophe Leroy pr_cont("Unknown values in msr\n"); 79014cf11afSPaul Mackerras } 79175918a4bSOlof Johansson return 0; 79275918a4bSOlof Johansson } 79347c0bd1aSBenjamin Herrenschmidt #endif /* everything else */ 79475918a4bSOlof Johansson 795209e9d50SNicholas Piggin void die_mce(const char *str, struct pt_regs *regs, long err) 796209e9d50SNicholas Piggin { 797209e9d50SNicholas Piggin /* 7980e25498fSEric W. Biederman * The machine check wants to kill the interrupted context, 7990e25498fSEric W. Biederman * but make_task_dead() checks for in_interrupt() and panics 8000e25498fSEric W. Biederman * in that case, so exit the irq/nmi before calling die. 801209e9d50SNicholas Piggin */ 802f08fb25bSNicholas Piggin if (in_nmi()) 803209e9d50SNicholas Piggin nmi_exit(); 804f08fb25bSNicholas Piggin else 805f08fb25bSNicholas Piggin irq_exit(); 806209e9d50SNicholas Piggin die(str, regs, err); 807209e9d50SNicholas Piggin } 808209e9d50SNicholas Piggin 809118178e6SNicholas Piggin /* 810f08fb25bSNicholas Piggin * BOOK3S_64 does not usually call this handler as a non-maskable interrupt 811118178e6SNicholas Piggin * (it uses its own early real-mode handler to handle the MCE proper 812118178e6SNicholas Piggin * and then raises irq_work to call this handler when interrupts are 813f08fb25bSNicholas Piggin * enabled). The only time when this is not true is if the early handler 814f08fb25bSNicholas Piggin * is unrecoverable, then it does call this directly to try to get a 815f08fb25bSNicholas Piggin * message out. 816118178e6SNicholas Piggin */ 817f08fb25bSNicholas Piggin static void __machine_check_exception(struct pt_regs *regs) 81875918a4bSOlof Johansson { 81975918a4bSOlof Johansson int recover = 0; 82069ea03b5SPeter Zijlstra 82169111bacSChristoph Lameter __this_cpu_inc(irq_stat.mce_exceptions); 82289713ed1SAnton Blanchard 823d93b0ac0SMahesh Salgaonkar add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); 824d93b0ac0SMahesh Salgaonkar 82547c0bd1aSBenjamin Herrenschmidt /* See if any machine dependent calls. In theory, we would want 82647c0bd1aSBenjamin Herrenschmidt * to call the CPU first, and call the ppc_md. one if the CPU 82747c0bd1aSBenjamin Herrenschmidt * one returns a positive number. However there is existing code 82847c0bd1aSBenjamin Herrenschmidt * that assumes the board gets a first chance, so let's keep it 82947c0bd1aSBenjamin Herrenschmidt * that way for now and fix things later. --BenH. 
83047c0bd1aSBenjamin Herrenschmidt */ 83175918a4bSOlof Johansson if (ppc_md.machine_check_exception) 83275918a4bSOlof Johansson recover = ppc_md.machine_check_exception(regs); 83347c0bd1aSBenjamin Herrenschmidt else if (cur_cpu_spec->machine_check) 83447c0bd1aSBenjamin Herrenschmidt recover = cur_cpu_spec->machine_check(regs); 83575918a4bSOlof Johansson 83647c0bd1aSBenjamin Herrenschmidt if (recover > 0) 837ba12eedeSLi Zhong goto bail; 83875918a4bSOlof Johansson 839a443506bSAnton Blanchard if (debugger_fault_handler(regs)) 840ba12eedeSLi Zhong goto bail; 84175918a4bSOlof Johansson 84275918a4bSOlof Johansson if (check_io_access(regs)) 843ba12eedeSLi Zhong goto bail; 84475918a4bSOlof Johansson 845209e9d50SNicholas Piggin die_mce("Machine check", regs, SIGBUS); 846daf00ae7SChristophe Leroy 847c538938fSNicholas Piggin bail: 8480bbea75cSChristophe Leroy /* Must die if the interrupt is not recoverable */ 849806c0e6eSChristophe Leroy if (regs_is_unrecoverable(regs)) 850209e9d50SNicholas Piggin die_mce("Unrecoverable Machine check", regs, SIGBUS); 851f08fb25bSNicholas Piggin } 852daf00ae7SChristophe Leroy 8533a96570fSNicholas Piggin #ifdef CONFIG_PPC_BOOK3S_64 8542f5182cfSNicholas Piggin DEFINE_INTERRUPT_HANDLER_RAW(machine_check_early_boot) 8552f5182cfSNicholas Piggin { 8562f5182cfSNicholas Piggin udbg_printf("Machine check (early boot)\n"); 8572f5182cfSNicholas Piggin udbg_printf("SRR0=0x%016lx SRR1=0x%016lx\n", regs->nip, regs->msr); 8582f5182cfSNicholas Piggin udbg_printf(" DAR=0x%016lx DSISR=0x%08lx\n", regs->dar, regs->dsisr); 8592f5182cfSNicholas Piggin udbg_printf(" LR=0x%016lx R1=0x%08lx\n", regs->link, regs->gpr[1]); 8602f5182cfSNicholas Piggin udbg_printf("------\n"); 8612f5182cfSNicholas Piggin die("Machine check (early boot)", regs, SIGBUS); 8622f5182cfSNicholas Piggin for (;;) 8632f5182cfSNicholas Piggin ; 8642f5182cfSNicholas Piggin return 0; 8652f5182cfSNicholas Piggin } 8662f5182cfSNicholas Piggin 867f08fb25bSNicholas Piggin DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async) 868f08fb25bSNicholas Piggin { 869f08fb25bSNicholas Piggin __machine_check_exception(regs); 870f08fb25bSNicholas Piggin } 8713a96570fSNicholas Piggin #endif 872f08fb25bSNicholas Piggin DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception) 873f08fb25bSNicholas Piggin { 874f08fb25bSNicholas Piggin __machine_check_exception(regs); 875f08fb25bSNicholas Piggin 876f08fb25bSNicholas Piggin return 0; 87714cf11afSPaul Mackerras } 87814cf11afSPaul Mackerras 8793a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(SMIException) /* async? 
*/ 88014cf11afSPaul Mackerras { 88114cf11afSPaul Mackerras die("System Management Interrupt", regs, SIGABRT); 88214cf11afSPaul Mackerras } 88314cf11afSPaul Mackerras 8845080332cSMichael Neuling #ifdef CONFIG_VSX 8855080332cSMichael Neuling static void p9_hmi_special_emu(struct pt_regs *regs) 8865080332cSMichael Neuling { 8875080332cSMichael Neuling unsigned int ra, rb, t, i, sel, instr, rc; 8885080332cSMichael Neuling const void __user *addr; 8891da4a027SMichael Neuling u8 vbuf[16] __aligned(16), *vdst; 8905080332cSMichael Neuling unsigned long ea, msr, msr_mask; 8915080332cSMichael Neuling bool swap; 8925080332cSMichael Neuling 893bad956b8SChristophe Leroy if (__get_user(instr, (unsigned int __user *)regs->nip)) 8945080332cSMichael Neuling return; 8955080332cSMichael Neuling 8965080332cSMichael Neuling /* 8975080332cSMichael Neuling * lxvb16x opcode: 0x7c0006d8 8985080332cSMichael Neuling * lxvd2x opcode: 0x7c000698 8995080332cSMichael Neuling * lxvh8x opcode: 0x7c000658 9005080332cSMichael Neuling * lxvw4x opcode: 0x7c000618 9015080332cSMichael Neuling */ 9025080332cSMichael Neuling if ((instr & 0xfc00073e) != 0x7c000618) { 9035080332cSMichael Neuling pr_devel("HMI vec emu: not vector CI %i:%s[%d] nip=%016lx" 9045080332cSMichael Neuling " instr=%08x\n", 9055080332cSMichael Neuling smp_processor_id(), current->comm, current->pid, 9065080332cSMichael Neuling regs->nip, instr); 9075080332cSMichael Neuling return; 9085080332cSMichael Neuling } 9095080332cSMichael Neuling 9105080332cSMichael Neuling /* Grab vector registers into the task struct */ 9115080332cSMichael Neuling msr = regs->msr; /* Grab msr before we flush the bits */ 9125080332cSMichael Neuling flush_vsx_to_thread(current); 9135080332cSMichael Neuling enable_kernel_altivec(); 9145080332cSMichael Neuling 9155080332cSMichael Neuling /* 9165080332cSMichael Neuling * Is userspace running with a different endian (this is rare but 9175080332cSMichael Neuling * not impossible) 9185080332cSMichael Neuling */ 9195080332cSMichael Neuling swap = (msr & MSR_LE) != (MSR_KERNEL & MSR_LE); 9205080332cSMichael Neuling 9215080332cSMichael Neuling /* Decode the instruction */ 9225080332cSMichael Neuling ra = (instr >> 16) & 0x1f; 9235080332cSMichael Neuling rb = (instr >> 11) & 0x1f; 9245080332cSMichael Neuling t = (instr >> 21) & 0x1f; 9255080332cSMichael Neuling if (instr & 1) 9265080332cSMichael Neuling vdst = (u8 *)¤t->thread.vr_state.vr[t]; 9275080332cSMichael Neuling else 9285080332cSMichael Neuling vdst = (u8 *)¤t->thread.fp_state.fpr[t][0]; 9295080332cSMichael Neuling 9305080332cSMichael Neuling /* Grab the vector address */ 9315080332cSMichael Neuling ea = regs->gpr[rb] + (ra ? 
regs->gpr[ra] : 0); 9325080332cSMichael Neuling if (is_32bit_task()) 9335080332cSMichael Neuling ea &= 0xfffffffful; 9345080332cSMichael Neuling addr = (__force const void __user *)ea; 9355080332cSMichael Neuling 9365080332cSMichael Neuling /* Check it */ 93796d4f267SLinus Torvalds if (!access_ok(addr, 16)) { 9385080332cSMichael Neuling pr_devel("HMI vec emu: bad access %i:%s[%d] nip=%016lx" 9395080332cSMichael Neuling " instr=%08x addr=%016lx\n", 9405080332cSMichael Neuling smp_processor_id(), current->comm, current->pid, 9415080332cSMichael Neuling regs->nip, instr, (unsigned long)addr); 9425080332cSMichael Neuling return; 9435080332cSMichael Neuling } 9445080332cSMichael Neuling 9455080332cSMichael Neuling /* Read the vector */ 9465080332cSMichael Neuling rc = 0; 9475080332cSMichael Neuling if ((unsigned long)addr & 0xfUL) 9485080332cSMichael Neuling /* unaligned case */ 9495080332cSMichael Neuling rc = __copy_from_user_inatomic(vbuf, addr, 16); 9505080332cSMichael Neuling else 9515080332cSMichael Neuling __get_user_atomic_128_aligned(vbuf, addr, rc); 9525080332cSMichael Neuling if (rc) { 9535080332cSMichael Neuling pr_devel("HMI vec emu: page fault %i:%s[%d] nip=%016lx" 9545080332cSMichael Neuling " instr=%08x addr=%016lx\n", 9555080332cSMichael Neuling smp_processor_id(), current->comm, current->pid, 9565080332cSMichael Neuling regs->nip, instr, (unsigned long)addr); 9575080332cSMichael Neuling return; 9585080332cSMichael Neuling } 9595080332cSMichael Neuling 9605080332cSMichael Neuling pr_devel("HMI vec emu: emulated vector CI %i:%s[%d] nip=%016lx" 9615080332cSMichael Neuling " instr=%08x addr=%016lx\n", 9625080332cSMichael Neuling smp_processor_id(), current->comm, current->pid, regs->nip, 9635080332cSMichael Neuling instr, (unsigned long) addr); 9645080332cSMichael Neuling 9655080332cSMichael Neuling /* Grab instruction "selector" */ 9665080332cSMichael Neuling sel = (instr >> 6) & 3; 9675080332cSMichael Neuling 9685080332cSMichael Neuling /* 9695080332cSMichael Neuling * Check to make sure the facility is actually enabled. This 9705080332cSMichael Neuling * could happen if we get a false positive hit. 
9715080332cSMichael Neuling *
9725080332cSMichael Neuling * lxvd2x/lxvw4x always check MSR VSX sel = 0,2
9735080332cSMichael Neuling * lxvh8x/lxvb16x check MSR VSX or VEC depending on VSR used sel = 1,3
9745080332cSMichael Neuling */
9755080332cSMichael Neuling msr_mask = MSR_VSX;
9765080332cSMichael Neuling if ((sel & 1) && (instr & 1)) /* lxvh8x & lxvb16x + VSR >= 32 */
9775080332cSMichael Neuling msr_mask = MSR_VEC;
9785080332cSMichael Neuling if (!(msr & msr_mask)) {
9795080332cSMichael Neuling pr_devel("HMI vec emu: MSR fac clear %i:%s[%d] nip=%016lx"
9805080332cSMichael Neuling " instr=%08x msr:%016lx\n",
9815080332cSMichael Neuling smp_processor_id(), current->comm, current->pid,
9825080332cSMichael Neuling regs->nip, instr, msr);
9835080332cSMichael Neuling return;
9845080332cSMichael Neuling }
9855080332cSMichael Neuling
9865080332cSMichael Neuling /* Do logging here before we modify sel based on endian */
9875080332cSMichael Neuling switch (sel) {
9885080332cSMichael Neuling case 0: /* lxvw4x */
9895080332cSMichael Neuling PPC_WARN_EMULATED(lxvw4x, regs);
9905080332cSMichael Neuling break;
9915080332cSMichael Neuling case 1: /* lxvh8x */
9925080332cSMichael Neuling PPC_WARN_EMULATED(lxvh8x, regs);
9935080332cSMichael Neuling break;
9945080332cSMichael Neuling case 2: /* lxvd2x */
9955080332cSMichael Neuling PPC_WARN_EMULATED(lxvd2x, regs);
9965080332cSMichael Neuling break;
9975080332cSMichael Neuling case 3: /* lxvb16x */
9985080332cSMichael Neuling PPC_WARN_EMULATED(lxvb16x, regs);
9995080332cSMichael Neuling break;
10005080332cSMichael Neuling }
10015080332cSMichael Neuling
10025080332cSMichael Neuling #ifdef __LITTLE_ENDIAN__
10035080332cSMichael Neuling /*
10045080332cSMichael Neuling * An LE kernel stores the vector in the task struct as an LE
10055080332cSMichael Neuling * byte array (effectively swapping both the components and
10065080332cSMichael Neuling * the content of the components). Those instructions expect
10075080332cSMichael Neuling * the components to remain in ascending address order, so we
10085080332cSMichael Neuling * swap them back.
10095080332cSMichael Neuling *
10105080332cSMichael Neuling * If we are running a BE user space, the expectation is that
10115080332cSMichael Neuling * of a simple memcpy, so forcing the emulation to look like
10125080332cSMichael Neuling * a lxvb16x should do the trick.
10135080332cSMichael Neuling */
10145080332cSMichael Neuling if (swap)
10155080332cSMichael Neuling sel = 3;
10165080332cSMichael Neuling
10175080332cSMichael Neuling switch (sel) {
10185080332cSMichael Neuling case 0: /* lxvw4x */
10195080332cSMichael Neuling for (i = 0; i < 4; i++)
10205080332cSMichael Neuling ((u32 *)vdst)[i] = ((u32 *)vbuf)[3-i];
10215080332cSMichael Neuling break;
10225080332cSMichael Neuling case 1: /* lxvh8x */
10235080332cSMichael Neuling for (i = 0; i < 8; i++)
10245080332cSMichael Neuling ((u16 *)vdst)[i] = ((u16 *)vbuf)[7-i];
10255080332cSMichael Neuling break;
10265080332cSMichael Neuling case 2: /* lxvd2x */
10275080332cSMichael Neuling for (i = 0; i < 2; i++)
10285080332cSMichael Neuling ((u64 *)vdst)[i] = ((u64 *)vbuf)[1-i];
10295080332cSMichael Neuling break;
10305080332cSMichael Neuling case 3: /* lxvb16x */
10315080332cSMichael Neuling for (i = 0; i < 16; i++)
10325080332cSMichael Neuling vdst[i] = vbuf[15-i];
10335080332cSMichael Neuling break;
10345080332cSMichael Neuling }
10355080332cSMichael Neuling #else /* __LITTLE_ENDIAN__ */
10365080332cSMichael Neuling /* On a big endian kernel, a BE userspace only needs a memcpy */
10375080332cSMichael Neuling if (!swap)
10385080332cSMichael Neuling sel = 3;
10395080332cSMichael Neuling
10405080332cSMichael Neuling /* Otherwise, we need to swap the content of the components */
10415080332cSMichael Neuling switch (sel) {
10425080332cSMichael Neuling case 0: /* lxvw4x */
10435080332cSMichael Neuling for (i = 0; i < 4; i++)
10445080332cSMichael Neuling ((u32 *)vdst)[i] = cpu_to_le32(((u32 *)vbuf)[i]);
10455080332cSMichael Neuling break;
10465080332cSMichael Neuling case 1: /* lxvh8x */
10475080332cSMichael Neuling for (i = 0; i < 8; i++)
10485080332cSMichael Neuling ((u16 *)vdst)[i] = cpu_to_le16(((u16 *)vbuf)[i]);
10495080332cSMichael Neuling break;
10505080332cSMichael Neuling case 2: /* lxvd2x */
10515080332cSMichael Neuling for (i = 0; i < 2; i++)
10525080332cSMichael Neuling ((u64 *)vdst)[i] = cpu_to_le64(((u64 *)vbuf)[i]);
10535080332cSMichael Neuling break;
10545080332cSMichael Neuling case 3: /* lxvb16x */
10555080332cSMichael Neuling memcpy(vdst, vbuf, 16);
10565080332cSMichael Neuling break;
10575080332cSMichael Neuling }
10585080332cSMichael Neuling #endif /* !__LITTLE_ENDIAN__ */
10595080332cSMichael Neuling
10605080332cSMichael Neuling /* Go to next instruction */
106159dc5bfcSNicholas Piggin regs_add_return_ip(regs, 4);
10625080332cSMichael Neuling }
10635080332cSMichael Neuling #endif /* CONFIG_VSX */
10645080332cSMichael Neuling
10653a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER_ASYNC(handle_hmi_exception)
10660869b6fdSMahesh Salgaonkar {
10670869b6fdSMahesh Salgaonkar struct pt_regs *old_regs;
10680869b6fdSMahesh Salgaonkar
10690869b6fdSMahesh Salgaonkar old_regs = set_irq_regs(regs);
10700869b6fdSMahesh Salgaonkar
10715080332cSMichael Neuling #ifdef CONFIG_VSX
10725080332cSMichael Neuling /* Real mode flagged P9 special emu is needed */
10735080332cSMichael Neuling if (local_paca->hmi_p9_special_emu) {
10745080332cSMichael Neuling local_paca->hmi_p9_special_emu = 0;
10755080332cSMichael Neuling
10765080332cSMichael Neuling /*
10775080332cSMichael Neuling * We don't want to take page faults while doing the
10785080332cSMichael Neuling * emulation, we just replay the instruction if necessary.
10795080332cSMichael Neuling */ 10805080332cSMichael Neuling pagefault_disable(); 10815080332cSMichael Neuling p9_hmi_special_emu(regs); 10825080332cSMichael Neuling pagefault_enable(); 10835080332cSMichael Neuling } 10845080332cSMichael Neuling #endif /* CONFIG_VSX */ 10855080332cSMichael Neuling 10860869b6fdSMahesh Salgaonkar if (ppc_md.handle_hmi_exception) 10870869b6fdSMahesh Salgaonkar ppc_md.handle_hmi_exception(regs); 10880869b6fdSMahesh Salgaonkar 10890869b6fdSMahesh Salgaonkar set_irq_regs(old_regs); 10900869b6fdSMahesh Salgaonkar } 10910869b6fdSMahesh Salgaonkar 10923a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(unknown_exception) 109314cf11afSPaul Mackerras { 109414cf11afSPaul Mackerras printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", 109514cf11afSPaul Mackerras regs->nip, regs->msr, regs->trap); 109614cf11afSPaul Mackerras 1097e821fa42SEric W. Biederman _exception(SIGTRAP, regs, TRAP_UNK, 0); 109814cf11afSPaul Mackerras } 109914cf11afSPaul Mackerras 11003a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception) 11016c6aee00SNicholas Piggin { 11026c6aee00SNicholas Piggin printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", 11036c6aee00SNicholas Piggin regs->nip, regs->msr, regs->trap); 11046c6aee00SNicholas Piggin 11056c6aee00SNicholas Piggin _exception(SIGTRAP, regs, TRAP_UNK, 0); 11066c6aee00SNicholas Piggin } 11076c6aee00SNicholas Piggin 11083db8aa10SNicholas Piggin DEFINE_INTERRUPT_HANDLER_NMI(unknown_nmi_exception) 11093db8aa10SNicholas Piggin { 11103db8aa10SNicholas Piggin printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", 11113db8aa10SNicholas Piggin regs->nip, regs->msr, regs->trap); 11123db8aa10SNicholas Piggin 11133db8aa10SNicholas Piggin _exception(SIGTRAP, regs, TRAP_UNK, 0); 11143db8aa10SNicholas Piggin 11153db8aa10SNicholas Piggin return 0; 11163db8aa10SNicholas Piggin } 11173db8aa10SNicholas Piggin 11183a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(instruction_breakpoint_exception) 111914cf11afSPaul Mackerras { 112014cf11afSPaul Mackerras if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5, 112114cf11afSPaul Mackerras 5, SIGTRAP) == NOTIFY_STOP) 1122540d4d34SNicholas Piggin return; 112314cf11afSPaul Mackerras if (debugger_iabr_match(regs)) 1124540d4d34SNicholas Piggin return; 112514cf11afSPaul Mackerras _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); 112614cf11afSPaul Mackerras } 112714cf11afSPaul Mackerras 11283a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(RunModeException) 112914cf11afSPaul Mackerras { 1130e821fa42SEric W. Biederman _exception(SIGTRAP, regs, TRAP_UNK, 0); 113114cf11afSPaul Mackerras } 113214cf11afSPaul Mackerras 113301fcac8eSChristophe Leroy static void __single_step_exception(struct pt_regs *regs) 113414cf11afSPaul Mackerras { 11352538c2d0SK.Prasad clear_single_step(regs); 11360e524e76SMatt Evans clear_br_trace(regs); 113714cf11afSPaul Mackerras 11386cc89badSNaveen N. Rao if (kprobe_post_handler(regs)) 11396cc89badSNaveen N. Rao return; 11406cc89badSNaveen N. 
Rao 114114cf11afSPaul Mackerras if (notify_die(DIE_SSTEP, "single_step", regs, 5, 114214cf11afSPaul Mackerras 5, SIGTRAP) == NOTIFY_STOP) 1143540d4d34SNicholas Piggin return; 114414cf11afSPaul Mackerras if (debugger_sstep(regs)) 1145540d4d34SNicholas Piggin return; 114614cf11afSPaul Mackerras 114714cf11afSPaul Mackerras _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); 114814cf11afSPaul Mackerras } 114914cf11afSPaul Mackerras 115001fcac8eSChristophe Leroy DEFINE_INTERRUPT_HANDLER(single_step_exception) 115101fcac8eSChristophe Leroy { 115201fcac8eSChristophe Leroy __single_step_exception(regs); 115301fcac8eSChristophe Leroy } 115401fcac8eSChristophe Leroy 115514cf11afSPaul Mackerras /* 115614cf11afSPaul Mackerras * After we have successfully emulated an instruction, we have to 115714cf11afSPaul Mackerras * check if the instruction was being single-stepped, and if so, 115814cf11afSPaul Mackerras * pretend we got a single-step exception. This was pointed out 115914cf11afSPaul Mackerras * by Kumar Gala. -- paulus 116014cf11afSPaul Mackerras */ 11615222a1d5SChristophe Leroy void emulate_single_step(struct pt_regs *regs) 116214cf11afSPaul Mackerras { 11632538c2d0SK.Prasad if (single_stepping(regs)) 116401fcac8eSChristophe Leroy __single_step_exception(regs); 116514cf11afSPaul Mackerras } 116614cf11afSPaul Mackerras 1167*8d5e9875SChristophe Leroy #ifdef CONFIG_PPC_FPU_REGS 11685fad293bSKumar Gala static inline int __parse_fpscr(unsigned long fpscr) 1169dc1c1ca3SStephen Rothwell { 1170aeb1c0f6SEric W. Biederman int ret = FPE_FLTUNK; 1171dc1c1ca3SStephen Rothwell 1172dc1c1ca3SStephen Rothwell /* Invalid operation */ 1173dc1c1ca3SStephen Rothwell if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX)) 11745fad293bSKumar Gala ret = FPE_FLTINV; 1175dc1c1ca3SStephen Rothwell 1176dc1c1ca3SStephen Rothwell /* Overflow */ 1177dc1c1ca3SStephen Rothwell else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX)) 11785fad293bSKumar Gala ret = FPE_FLTOVF; 1179dc1c1ca3SStephen Rothwell 1180dc1c1ca3SStephen Rothwell /* Underflow */ 1181dc1c1ca3SStephen Rothwell else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX)) 11825fad293bSKumar Gala ret = FPE_FLTUND; 1183dc1c1ca3SStephen Rothwell 1184dc1c1ca3SStephen Rothwell /* Divide by zero */ 1185dc1c1ca3SStephen Rothwell else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX)) 11865fad293bSKumar Gala ret = FPE_FLTDIV; 1187dc1c1ca3SStephen Rothwell 1188dc1c1ca3SStephen Rothwell /* Inexact result */ 1189dc1c1ca3SStephen Rothwell else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX)) 11905fad293bSKumar Gala ret = FPE_FLTRES; 11915fad293bSKumar Gala 11925fad293bSKumar Gala return ret; 11935fad293bSKumar Gala } 1194*8d5e9875SChristophe Leroy #endif 11955fad293bSKumar Gala 11965fad293bSKumar Gala static void parse_fpe(struct pt_regs *regs) 11975fad293bSKumar Gala { 11985fad293bSKumar Gala int code = 0; 11995fad293bSKumar Gala 12005fad293bSKumar Gala flush_fp_to_thread(current); 12015fad293bSKumar Gala 1202b6254cedSChristophe Leroy #ifdef CONFIG_PPC_FPU_REGS 1203de79f7b9SPaul Mackerras code = __parse_fpscr(current->thread.fp_state.fpscr); 1204b6254cedSChristophe Leroy #endif 1205dc1c1ca3SStephen Rothwell 1206dc1c1ca3SStephen Rothwell _exception(SIGFPE, regs, code, regs->nip); 1207dc1c1ca3SStephen Rothwell } 1208dc1c1ca3SStephen Rothwell 1209dc1c1ca3SStephen Rothwell /* 1210dc1c1ca3SStephen Rothwell * Illegal instruction emulation support. Originally written to 121114cf11afSPaul Mackerras * provide the PVR to user applications using the mfspr rd, PVR. 
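 *
 * For instance (a user-space sketch, illustration only), the classic
 * consumer is simply:
 *
 *	unsigned long pvr;
 *	asm volatile("mfspr %0, 287" : "=r" (pvr));	(SPRN_PVR = 0x11f)
 *
 * which is a privileged read, so it traps as a program check and is
 * fixed up transparently by emulate_instruction() below.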
121214cf11afSPaul Mackerras * Return non-zero if we can't emulate, or -EFAULT if the associated 121314cf11afSPaul Mackerras * memory access caused an access fault. Return zero on success. 121414cf11afSPaul Mackerras * 121514cf11afSPaul Mackerras * There are a couple of ways to do this, either "decode" the instruction 121614cf11afSPaul Mackerras * or directly match lots of bits. In this case, matching lots of 121714cf11afSPaul Mackerras * bits is faster and easier. 121886417780SPaul Mackerras * 121914cf11afSPaul Mackerras */ 122014cf11afSPaul Mackerras static int emulate_string_inst(struct pt_regs *regs, u32 instword) 122114cf11afSPaul Mackerras { 122214cf11afSPaul Mackerras u8 rT = (instword >> 21) & 0x1f; 122314cf11afSPaul Mackerras u8 rA = (instword >> 16) & 0x1f; 122414cf11afSPaul Mackerras u8 NB_RB = (instword >> 11) & 0x1f; 122514cf11afSPaul Mackerras u32 num_bytes; 122614cf11afSPaul Mackerras unsigned long EA; 122714cf11afSPaul Mackerras int pos = 0; 122814cf11afSPaul Mackerras 122914cf11afSPaul Mackerras /* Early out if we are an invalid form of lswx */ 123016c57b36SKumar Gala if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX) 123114cf11afSPaul Mackerras if ((rT == rA) || (rT == NB_RB)) 123214cf11afSPaul Mackerras return -EINVAL; 123314cf11afSPaul Mackerras 123414cf11afSPaul Mackerras EA = (rA == 0) ? 0 : regs->gpr[rA]; 123514cf11afSPaul Mackerras 123616c57b36SKumar Gala switch (instword & PPC_INST_STRING_MASK) { 123716c57b36SKumar Gala case PPC_INST_LSWX: 123816c57b36SKumar Gala case PPC_INST_STSWX: 123914cf11afSPaul Mackerras EA += NB_RB; 124014cf11afSPaul Mackerras num_bytes = regs->xer & 0x7f; 124114cf11afSPaul Mackerras break; 124216c57b36SKumar Gala case PPC_INST_LSWI: 124316c57b36SKumar Gala case PPC_INST_STSWI: 124414cf11afSPaul Mackerras num_bytes = (NB_RB == 0) ? 
32 : NB_RB; 124514cf11afSPaul Mackerras break; 124614cf11afSPaul Mackerras default: 124714cf11afSPaul Mackerras return -EINVAL; 124814cf11afSPaul Mackerras } 124914cf11afSPaul Mackerras 125014cf11afSPaul Mackerras while (num_bytes != 0) 125114cf11afSPaul Mackerras { 125214cf11afSPaul Mackerras u8 val; 125314cf11afSPaul Mackerras u32 shift = 8 * (3 - (pos & 0x3)); 125414cf11afSPaul Mackerras 125580aa0fb4SJames Yang /* if process is 32-bit, clear upper 32 bits of EA */ 125680aa0fb4SJames Yang if ((regs->msr & MSR_64BIT) == 0) 125780aa0fb4SJames Yang EA &= 0xFFFFFFFF; 125880aa0fb4SJames Yang 125916c57b36SKumar Gala switch ((instword & PPC_INST_STRING_MASK)) { 126016c57b36SKumar Gala case PPC_INST_LSWX: 126116c57b36SKumar Gala case PPC_INST_LSWI: 126214cf11afSPaul Mackerras if (get_user(val, (u8 __user *)EA)) 126314cf11afSPaul Mackerras return -EFAULT; 126414cf11afSPaul Mackerras /* first time updating this reg, 126514cf11afSPaul Mackerras * zero it out */ 126614cf11afSPaul Mackerras if (pos == 0) 126714cf11afSPaul Mackerras regs->gpr[rT] = 0; 126814cf11afSPaul Mackerras regs->gpr[rT] |= val << shift; 126914cf11afSPaul Mackerras break; 127016c57b36SKumar Gala case PPC_INST_STSWI: 127116c57b36SKumar Gala case PPC_INST_STSWX: 127214cf11afSPaul Mackerras val = regs->gpr[rT] >> shift; 127314cf11afSPaul Mackerras if (put_user(val, (u8 __user *)EA)) 127414cf11afSPaul Mackerras return -EFAULT; 127514cf11afSPaul Mackerras break; 127614cf11afSPaul Mackerras } 127714cf11afSPaul Mackerras /* move EA to next address */ 127814cf11afSPaul Mackerras EA += 1; 127914cf11afSPaul Mackerras num_bytes--; 128014cf11afSPaul Mackerras 128114cf11afSPaul Mackerras /* manage our position within the register */ 128214cf11afSPaul Mackerras if (++pos == 4) { 128314cf11afSPaul Mackerras pos = 0; 128414cf11afSPaul Mackerras if (++rT == 32) 128514cf11afSPaul Mackerras rT = 0; 128614cf11afSPaul Mackerras } 128714cf11afSPaul Mackerras } 128814cf11afSPaul Mackerras 128914cf11afSPaul Mackerras return 0; 129014cf11afSPaul Mackerras } 129114cf11afSPaul Mackerras 1292c3412dcbSWill Schmidt static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword) 1293c3412dcbSWill Schmidt { 1294c3412dcbSWill Schmidt u32 ra,rs; 1295c3412dcbSWill Schmidt unsigned long tmp; 1296c3412dcbSWill Schmidt 1297c3412dcbSWill Schmidt ra = (instword >> 16) & 0x1f; 1298c3412dcbSWill Schmidt rs = (instword >> 21) & 0x1f; 1299c3412dcbSWill Schmidt 1300c3412dcbSWill Schmidt tmp = regs->gpr[rs]; 1301c3412dcbSWill Schmidt tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL); 1302c3412dcbSWill Schmidt tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL); 1303c3412dcbSWill Schmidt tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL; 1304c3412dcbSWill Schmidt regs->gpr[ra] = tmp; 1305c3412dcbSWill Schmidt 1306c3412dcbSWill Schmidt return 0; 1307c3412dcbSWill Schmidt } 1308c3412dcbSWill Schmidt 1309c1469f13SKumar Gala static int emulate_isel(struct pt_regs *regs, u32 instword) 1310c1469f13SKumar Gala { 1311c1469f13SKumar Gala u8 rT = (instword >> 21) & 0x1f; 1312c1469f13SKumar Gala u8 rA = (instword >> 16) & 0x1f; 1313c1469f13SKumar Gala u8 rB = (instword >> 11) & 0x1f; 1314c1469f13SKumar Gala u8 BC = (instword >> 6) & 0x1f; 1315c1469f13SKumar Gala u8 bit; 1316c1469f13SKumar Gala unsigned long tmp; 1317c1469f13SKumar Gala 1318c1469f13SKumar Gala tmp = (rA == 0) ? 0 : regs->gpr[rA]; 1319c1469f13SKumar Gala bit = (regs->ccr >> (31 - BC)) & 0x1; 1320c1469f13SKumar Gala 1321c1469f13SKumar Gala regs->gpr[rT] = bit ? 
tmp : regs->gpr[rB]; 1322c1469f13SKumar Gala 1323c1469f13SKumar Gala return 0; 1324c1469f13SKumar Gala } 1325c1469f13SKumar Gala 13266ce6c629SMichael Neuling #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 13276ce6c629SMichael Neuling static inline bool tm_abort_check(struct pt_regs *regs, int cause) 13286ce6c629SMichael Neuling { 13296ce6c629SMichael Neuling /* If we're emulating a load/store in an active transaction, we cannot 13306ce6c629SMichael Neuling * emulate it as the kernel operates in transaction suspended context. 13316ce6c629SMichael Neuling * We need to abort the transaction. This creates a persistent TM 13326ce6c629SMichael Neuling * abort so tell the user what caused it with a new code. 13336ce6c629SMichael Neuling */ 13346ce6c629SMichael Neuling if (MSR_TM_TRANSACTIONAL(regs->msr)) { 13356ce6c629SMichael Neuling tm_enable(); 13366ce6c629SMichael Neuling tm_abort(cause); 13376ce6c629SMichael Neuling return true; 13386ce6c629SMichael Neuling } 13396ce6c629SMichael Neuling return false; 13406ce6c629SMichael Neuling } 13416ce6c629SMichael Neuling #else 13426ce6c629SMichael Neuling static inline bool tm_abort_check(struct pt_regs *regs, int reason) 13436ce6c629SMichael Neuling { 13446ce6c629SMichael Neuling return false; 13456ce6c629SMichael Neuling } 13466ce6c629SMichael Neuling #endif 13476ce6c629SMichael Neuling 134814cf11afSPaul Mackerras static int emulate_instruction(struct pt_regs *regs) 134914cf11afSPaul Mackerras { 135014cf11afSPaul Mackerras u32 instword; 135114cf11afSPaul Mackerras u32 rd; 135214cf11afSPaul Mackerras 13534288e343SAnton Blanchard if (!user_mode(regs)) 135414cf11afSPaul Mackerras return -EINVAL; 135514cf11afSPaul Mackerras 135614cf11afSPaul Mackerras if (get_user(instword, (u32 __user *)(regs->nip))) 135714cf11afSPaul Mackerras return -EFAULT; 135814cf11afSPaul Mackerras 135914cf11afSPaul Mackerras /* Emulate the mfspr rD, PVR. */ 136016c57b36SKumar Gala if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) { 1361eecff81dSAnton Blanchard PPC_WARN_EMULATED(mfpvr, regs); 136214cf11afSPaul Mackerras rd = (instword >> 21) & 0x1f; 136314cf11afSPaul Mackerras regs->gpr[rd] = mfspr(SPRN_PVR); 136414cf11afSPaul Mackerras return 0; 136514cf11afSPaul Mackerras } 136614cf11afSPaul Mackerras 136714cf11afSPaul Mackerras /* Emulating the dcba insn is just a no-op. */ 136880947e7cSGeert Uytterhoeven if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) { 1369eecff81dSAnton Blanchard PPC_WARN_EMULATED(dcba, regs); 137014cf11afSPaul Mackerras return 0; 137180947e7cSGeert Uytterhoeven } 137214cf11afSPaul Mackerras 137314cf11afSPaul Mackerras /* Emulate the mcrxr insn. */ 137416c57b36SKumar Gala if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) { 137586417780SPaul Mackerras int shift = (instword >> 21) & 0x1c; 137614cf11afSPaul Mackerras unsigned long msk = 0xf0000000UL >> shift; 137714cf11afSPaul Mackerras 1378eecff81dSAnton Blanchard PPC_WARN_EMULATED(mcrxr, regs); 137914cf11afSPaul Mackerras regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk); 138014cf11afSPaul Mackerras regs->xer &= ~0xf0000000UL; 138114cf11afSPaul Mackerras return 0; 138214cf11afSPaul Mackerras } 138314cf11afSPaul Mackerras 138414cf11afSPaul Mackerras /* Emulate load/store string insn. 
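 *
 * e.g. lswi r5,r4,6 copies the six bytes at (r4) into r5 and r6: four
 * bytes per GPR, packed into the low 32 bits with the first byte in the
 * most significant of those four byte slots, and since the emulation
 * above zeroes each target GPR before filling it the unused bytes of r6
 * read as 0 (an NB of 0 means 32 bytes).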
*/ 138580947e7cSGeert Uytterhoeven if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) { 13866ce6c629SMichael Neuling if (tm_abort_check(regs, 13876ce6c629SMichael Neuling TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT)) 13886ce6c629SMichael Neuling return -EINVAL; 1389eecff81dSAnton Blanchard PPC_WARN_EMULATED(string, regs); 139014cf11afSPaul Mackerras return emulate_string_inst(regs, instword); 139180947e7cSGeert Uytterhoeven } 139214cf11afSPaul Mackerras 1393c3412dcbSWill Schmidt /* Emulate the popcntb (Population Count Bytes) instruction. */ 139416c57b36SKumar Gala if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) { 1395eecff81dSAnton Blanchard PPC_WARN_EMULATED(popcntb, regs); 1396c3412dcbSWill Schmidt return emulate_popcntb_inst(regs, instword); 1397c3412dcbSWill Schmidt } 1398c3412dcbSWill Schmidt 1399c1469f13SKumar Gala /* Emulate isel (Integer Select) instruction */ 140016c57b36SKumar Gala if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) { 1401eecff81dSAnton Blanchard PPC_WARN_EMULATED(isel, regs); 1402c1469f13SKumar Gala return emulate_isel(regs, instword); 1403c1469f13SKumar Gala } 1404c1469f13SKumar Gala 14059863c28aSJames Yang /* Emulate sync instruction variants */ 14069863c28aSJames Yang if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) { 14079863c28aSJames Yang PPC_WARN_EMULATED(sync, regs); 14089863c28aSJames Yang asm volatile("sync"); 14099863c28aSJames Yang return 0; 14109863c28aSJames Yang } 14119863c28aSJames Yang 1412efcac658SAlexey Kardashevskiy #ifdef CONFIG_PPC64 1413efcac658SAlexey Kardashevskiy /* Emulate the mfspr rD, DSCR. */ 141473d2fb75SAnton Blanchard if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) == 141573d2fb75SAnton Blanchard PPC_INST_MFSPR_DSCR_USER) || 141673d2fb75SAnton Blanchard ((instword & PPC_INST_MFSPR_DSCR_MASK) == 141773d2fb75SAnton Blanchard PPC_INST_MFSPR_DSCR)) && 1418efcac658SAlexey Kardashevskiy cpu_has_feature(CPU_FTR_DSCR)) { 1419efcac658SAlexey Kardashevskiy PPC_WARN_EMULATED(mfdscr, regs); 1420efcac658SAlexey Kardashevskiy rd = (instword >> 21) & 0x1f; 1421efcac658SAlexey Kardashevskiy regs->gpr[rd] = mfspr(SPRN_DSCR); 1422efcac658SAlexey Kardashevskiy return 0; 1423efcac658SAlexey Kardashevskiy } 1424efcac658SAlexey Kardashevskiy /* Emulate the mtspr DSCR, rD. 
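 * (Both the problem-state SPR number 0x03 and the privileged number 0x11
 * are matched, gated on CPU_FTR_DSCR; a successful write also sets
 * dscr_inherit so the thread keeps its own DSCR value instead of the
 * system default from then on.)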
*/ 142573d2fb75SAnton Blanchard if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) == 142673d2fb75SAnton Blanchard PPC_INST_MTSPR_DSCR_USER) || 142773d2fb75SAnton Blanchard ((instword & PPC_INST_MTSPR_DSCR_MASK) == 142873d2fb75SAnton Blanchard PPC_INST_MTSPR_DSCR)) && 1429efcac658SAlexey Kardashevskiy cpu_has_feature(CPU_FTR_DSCR)) { 1430efcac658SAlexey Kardashevskiy PPC_WARN_EMULATED(mtdscr, regs); 1431efcac658SAlexey Kardashevskiy rd = (instword >> 21) & 0x1f; 143200ca0de0SAnton Blanchard current->thread.dscr = regs->gpr[rd]; 1433efcac658SAlexey Kardashevskiy current->thread.dscr_inherit = 1; 143400ca0de0SAnton Blanchard mtspr(SPRN_DSCR, current->thread.dscr); 1435efcac658SAlexey Kardashevskiy return 0; 1436efcac658SAlexey Kardashevskiy } 1437efcac658SAlexey Kardashevskiy #endif 1438efcac658SAlexey Kardashevskiy 143914cf11afSPaul Mackerras return -EINVAL; 144014cf11afSPaul Mackerras } 144114cf11afSPaul Mackerras 144273c9ceabSJeremy Fitzhardinge int is_valid_bugaddr(unsigned long addr) 144314cf11afSPaul Mackerras { 144473c9ceabSJeremy Fitzhardinge return is_kernel_addr(addr); 144514cf11afSPaul Mackerras } 144614cf11afSPaul Mackerras 14473a3b5aa6SKevin Hao #ifdef CONFIG_MATH_EMULATION 14483a3b5aa6SKevin Hao static int emulate_math(struct pt_regs *regs) 14493a3b5aa6SKevin Hao { 14503a3b5aa6SKevin Hao int ret; 14513a3b5aa6SKevin Hao 14523a3b5aa6SKevin Hao ret = do_mathemu(regs); 14533a3b5aa6SKevin Hao if (ret >= 0) 14543a3b5aa6SKevin Hao PPC_WARN_EMULATED(math, regs); 14553a3b5aa6SKevin Hao 14563a3b5aa6SKevin Hao switch (ret) { 14573a3b5aa6SKevin Hao case 0: 14583a3b5aa6SKevin Hao emulate_single_step(regs); 14593a3b5aa6SKevin Hao return 0; 14603a3b5aa6SKevin Hao case 1: { 14613a3b5aa6SKevin Hao int code = 0; 1462de79f7b9SPaul Mackerras code = __parse_fpscr(current->thread.fp_state.fpscr); 14633a3b5aa6SKevin Hao _exception(SIGFPE, regs, code, regs->nip); 14643a3b5aa6SKevin Hao return 0; 14653a3b5aa6SKevin Hao } 14663a3b5aa6SKevin Hao case -EFAULT: 14673a3b5aa6SKevin Hao _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 14683a3b5aa6SKevin Hao return 0; 14693a3b5aa6SKevin Hao } 14703a3b5aa6SKevin Hao 14713a3b5aa6SKevin Hao return -1; 14723a3b5aa6SKevin Hao } 14733a3b5aa6SKevin Hao #else 14743a3b5aa6SKevin Hao static inline int emulate_math(struct pt_regs *regs) { return -1; } 14753a3b5aa6SKevin Hao #endif 14763a3b5aa6SKevin Hao 1477fd3f1e0fSNicholas Piggin static void do_program_check(struct pt_regs *regs) 147814cf11afSPaul Mackerras { 147914cf11afSPaul Mackerras unsigned int reason = get_reason(regs); 148014cf11afSPaul Mackerras 1481aa42c69cSKim Phillips /* We can now get here via a FP Unavailable exception if the core 148204903a30SKumar Gala * has no FPU, in that case the reason flags will be 0 */ 148314cf11afSPaul Mackerras 148414cf11afSPaul Mackerras if (reason & REASON_FP) { 148514cf11afSPaul Mackerras /* IEEE FP exception */ 1486dc1c1ca3SStephen Rothwell parse_fpe(regs); 1487fd3f1e0fSNicholas Piggin return; 14888dad3f92SPaul Mackerras } 14898dad3f92SPaul Mackerras if (reason & REASON_TRAP) { 1490a4c3f909SBalbir Singh unsigned long bugaddr; 1491ba797b28SJason Wessel /* Debugger is first in line to stop recursive faults in 1492ba797b28SJason Wessel * rcu_lock, notify_die, or atomic_notifier_call_chain */ 1493ba797b28SJason Wessel if (debugger_bpt(regs)) 1494fd3f1e0fSNicholas Piggin return; 1495ba797b28SJason Wessel 14966cc89badSNaveen N. Rao if (kprobe_handler(regs)) 1497fd3f1e0fSNicholas Piggin return; 14986cc89badSNaveen N. 
Rao 149914cf11afSPaul Mackerras /* trap exception */ 1500dc1c1ca3SStephen Rothwell if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP) 1501dc1c1ca3SStephen Rothwell == NOTIFY_STOP) 1502fd3f1e0fSNicholas Piggin return; 150373c9ceabSJeremy Fitzhardinge 1504a4c3f909SBalbir Singh bugaddr = regs->nip; 1505a4c3f909SBalbir Singh /* 1506a4c3f909SBalbir Singh * Fixup bugaddr for BUG_ON() in real mode 1507a4c3f909SBalbir Singh */ 1508a4c3f909SBalbir Singh if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR)) 1509a4c3f909SBalbir Singh bugaddr += PAGE_OFFSET; 1510a4c3f909SBalbir Singh 151173c9ceabSJeremy Fitzhardinge if (!(regs->msr & MSR_PR) && /* not user-mode */ 1512a4c3f909SBalbir Singh report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) { 1513b49e578bSChristophe Leroy regs_add_return_ip(regs, 4); 1514fd3f1e0fSNicholas Piggin return; 151514cf11afSPaul Mackerras } 15165bcba4e6SBenjamin Gray 1517c3f43096SBenjamin Gray /* User mode considers other cases after enabling IRQs */ 1518c3f43096SBenjamin Gray if (!user_mode(regs)) { 15198dad3f92SPaul Mackerras _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); 1520fd3f1e0fSNicholas Piggin return; 15218dad3f92SPaul Mackerras } 1522c3f43096SBenjamin Gray } 1523bc2a9408SMichael Neuling #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1524bc2a9408SMichael Neuling if (reason & REASON_TM) { 1525bc2a9408SMichael Neuling /* This is a TM "Bad Thing Exception" program check. 1526bc2a9408SMichael Neuling * This occurs when: 1527bc2a9408SMichael Neuling * - An rfid/hrfid/mtmsrd attempts to cause an illegal 1528bc2a9408SMichael Neuling * transition in TM states. 1529bc2a9408SMichael Neuling * - A trechkpt is attempted when transactional. 1530bc2a9408SMichael Neuling * - A treclaim is attempted when non transactional. 1531bc2a9408SMichael Neuling * - A tend is illegally attempted. 1532bc2a9408SMichael Neuling * - writing a TM SPR when transactional. 1533632f0574SMichael Ellerman * 1534632f0574SMichael Ellerman * If usermode caused this, it's done something illegal and 1535bc2a9408SMichael Neuling * gets a SIGILL slap on the wrist. We call it an illegal 1536bc2a9408SMichael Neuling * operand to distinguish from the instruction just being bad 1537bc2a9408SMichael Neuling * (e.g. executing a 'tend' on a CPU without TM!); it's an 1538bc2a9408SMichael Neuling * illegal /placement/ of a valid instruction. 1539bc2a9408SMichael Neuling */ 1540bc2a9408SMichael Neuling if (user_mode(regs)) { 1541bc2a9408SMichael Neuling _exception(SIGILL, regs, ILL_ILLOPN, regs->nip); 1542fd3f1e0fSNicholas Piggin return; 1543bc2a9408SMichael Neuling } else { 1544bc2a9408SMichael Neuling printk(KERN_EMERG "Unexpected TM Bad Thing exception " 154511be3958SBreno Leitao "at %lx (msr 0x%lx) tm_scratch=%llx\n", 154611be3958SBreno Leitao regs->nip, regs->msr, get_paca()->tm_scratch); 1547bc2a9408SMichael Neuling die("Unrecoverable exception", regs, SIGABRT); 1548bc2a9408SMichael Neuling } 1549bc2a9408SMichael Neuling } 1550bc2a9408SMichael Neuling #endif 15518dad3f92SPaul Mackerras 1552b3f6a459SMichael Ellerman /* 1553b3f6a459SMichael Ellerman * If we took the program check in the kernel skip down to sending a 1554c3f43096SBenjamin Gray * SIGILL. The subsequent cases all relate to user space, such as 1555c3f43096SBenjamin Gray * emulating instructions which we should only do for user space. 
We 1556c3f43096SBenjamin Gray * also do not want to enable interrupts for kernel faults because that 1557c3f43096SBenjamin Gray * might lead to further faults, and loose the context of the original 1558c3f43096SBenjamin Gray * exception. 1559b3f6a459SMichael Ellerman */ 1560b3f6a459SMichael Ellerman if (!user_mode(regs)) 1561b3f6a459SMichael Ellerman goto sigill; 1562b3f6a459SMichael Ellerman 1563e6f8a6c8SNicholas Piggin interrupt_cond_local_irq_enable(regs); 1564cd8a5673SPaul Mackerras 1565c3f43096SBenjamin Gray /* 1566c3f43096SBenjamin Gray * (reason & REASON_TRAP) is mostly handled before enabling IRQs, 1567c3f43096SBenjamin Gray * except get_user_instr() can sleep so we cannot reliably inspect the 1568c3f43096SBenjamin Gray * current instruction in that context. Now that we know we are 1569c3f43096SBenjamin Gray * handling a user space trap and can sleep, we can check if the trap 1570c3f43096SBenjamin Gray * was a hashchk failure. 1571c3f43096SBenjamin Gray */ 1572c3f43096SBenjamin Gray if (reason & REASON_TRAP) { 1573c3f43096SBenjamin Gray if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE)) { 1574c3f43096SBenjamin Gray ppc_inst_t insn; 1575c3f43096SBenjamin Gray 1576c3f43096SBenjamin Gray if (get_user_instr(insn, (void __user *)regs->nip)) { 1577c3f43096SBenjamin Gray _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 1578c3f43096SBenjamin Gray return; 1579c3f43096SBenjamin Gray } 1580c3f43096SBenjamin Gray 1581c3f43096SBenjamin Gray if (ppc_inst_primary_opcode(insn) == 31 && 1582c3f43096SBenjamin Gray get_xop(ppc_inst_val(insn)) == OP_31_XOP_HASHCHK) { 1583c3f43096SBenjamin Gray _exception(SIGILL, regs, ILL_ILLOPN, regs->nip); 1584c3f43096SBenjamin Gray return; 1585c3f43096SBenjamin Gray } 1586c3f43096SBenjamin Gray } 1587c3f43096SBenjamin Gray 1588c3f43096SBenjamin Gray _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); 1589c3f43096SBenjamin Gray return; 1590c3f43096SBenjamin Gray } 1591c3f43096SBenjamin Gray 159204903a30SKumar Gala /* (reason & REASON_ILLEGAL) would be the obvious thing here, 159304903a30SKumar Gala * but there seems to be a hardware bug on the 405GP (RevD) 159404903a30SKumar Gala * that means ESR is sometimes set incorrectly - either to 159504903a30SKumar Gala * ESR_DST (!?) or 0. In the process of chasing this with the 159604903a30SKumar Gala * hardware people - not sure if it can happen on any illegal 159704903a30SKumar Gala * instruction or only on FP instructions, whether there is a 15984e63f8edSBenjamin Herrenschmidt * pattern to occurrences etc. -dgibson 31/Mar/2003 15994e63f8edSBenjamin Herrenschmidt */ 16003a3b5aa6SKevin Hao if (!emulate_math(regs)) 1601fd3f1e0fSNicholas Piggin return; 160204903a30SKumar Gala 16038dad3f92SPaul Mackerras /* Try to emulate it if we should. 
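 *
 * emulate_instruction() recognises candidates by masking off the
 * variable fields and comparing what is left against a fixed pattern,
 * e.g. its own mfspr-PVR case, shown here as a sketch:
 *
 *	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
 *		rd = (instword >> 21) & 0x1f;
 *		regs->gpr[rd] = mfspr(SPRN_PVR);
 *		return 0;
 *	}
 *
 * It returns 0 on success (we then step NIP past the instruction and
 * fake a single step if one was pending), -EFAULT if reading the
 * instruction word faulted, and -EINVAL for anything it does not
 * handle, which falls through to the SIGILL below.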
*/ 16048dad3f92SPaul Mackerras if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) { 160514cf11afSPaul Mackerras switch (emulate_instruction(regs)) { 160614cf11afSPaul Mackerras case 0: 160759dc5bfcSNicholas Piggin regs_add_return_ip(regs, 4); 160814cf11afSPaul Mackerras emulate_single_step(regs); 1609fd3f1e0fSNicholas Piggin return; 161014cf11afSPaul Mackerras case -EFAULT: 161114cf11afSPaul Mackerras _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 1612fd3f1e0fSNicholas Piggin return; 16138dad3f92SPaul Mackerras } 16148dad3f92SPaul Mackerras } 16158dad3f92SPaul Mackerras 1616b3f6a459SMichael Ellerman sigill: 161714cf11afSPaul Mackerras if (reason & REASON_PRIVILEGED) 161814cf11afSPaul Mackerras _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); 161914cf11afSPaul Mackerras else 162014cf11afSPaul Mackerras _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1621ba12eedeSLi Zhong 1622fd3f1e0fSNicholas Piggin } 1623fd3f1e0fSNicholas Piggin 16243a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(program_check_exception) 1625fd3f1e0fSNicholas Piggin { 1626fd3f1e0fSNicholas Piggin do_program_check(regs); 162714cf11afSPaul Mackerras } 162814cf11afSPaul Mackerras 1629bf593907SPaul Mackerras /* 1630bf593907SPaul Mackerras * This occurs when running in hypervisor mode on POWER6 or later 1631bf593907SPaul Mackerras * and an illegal instruction is encountered. 1632bf593907SPaul Mackerras */ 16333a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(emulation_assist_interrupt) 1634bf593907SPaul Mackerras { 163559dc5bfcSNicholas Piggin regs_set_return_msr(regs, regs->msr | REASON_ILLEGAL); 1636fd3f1e0fSNicholas Piggin do_program_check(regs); 1637bf593907SPaul Mackerras } 1638bf593907SPaul Mackerras 16393a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(alignment_exception) 164014cf11afSPaul Mackerras { 16414393c4f6SBenjamin Herrenschmidt int sig, code, fixed = 0; 16429409d2f9SJordan Niethe unsigned long reason; 164314cf11afSPaul Mackerras 1644e6f8a6c8SNicholas Piggin interrupt_cond_local_irq_enable(regs); 1645a3512b2dSBenjamin Herrenschmidt 16469409d2f9SJordan Niethe reason = get_reason(regs); 16479409d2f9SJordan Niethe if (reason & REASON_BOUNDARY) { 16489409d2f9SJordan Niethe sig = SIGBUS; 16499409d2f9SJordan Niethe code = BUS_ADRALN; 16509409d2f9SJordan Niethe goto bad; 16519409d2f9SJordan Niethe } 16529409d2f9SJordan Niethe 16536ce6c629SMichael Neuling if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT)) 1654540d4d34SNicholas Piggin return; 16556ce6c629SMichael Neuling 1656e9370ae1SPaul Mackerras /* we don't implement logging of alignment exceptions */ 1657e9370ae1SPaul Mackerras if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) 165814cf11afSPaul Mackerras fixed = fix_alignment(regs); 165914cf11afSPaul Mackerras 166014cf11afSPaul Mackerras if (fixed == 1) { 16619409d2f9SJordan Niethe /* skip over emulated instruction */ 166259dc5bfcSNicholas Piggin regs_add_return_ip(regs, inst_length(reason)); 166314cf11afSPaul Mackerras emulate_single_step(regs); 1664540d4d34SNicholas Piggin return; 166514cf11afSPaul Mackerras } 166614cf11afSPaul Mackerras 166714cf11afSPaul Mackerras /* Operand address was bad */ 166814cf11afSPaul Mackerras if (fixed == -EFAULT) { 16694393c4f6SBenjamin Herrenschmidt sig = SIGSEGV; 16704393c4f6SBenjamin Herrenschmidt code = SEGV_ACCERR; 16714393c4f6SBenjamin Herrenschmidt } else { 16724393c4f6SBenjamin Herrenschmidt sig = SIGBUS; 16734393c4f6SBenjamin Herrenschmidt code = BUS_ADRALN; 167414cf11afSPaul Mackerras } 16759409d2f9SJordan Niethe bad: 16764393c4f6SBenjamin 
Herrenschmidt if (user_mode(regs)) 16774393c4f6SBenjamin Herrenschmidt _exception(sig, regs, code, regs->dar); 16784393c4f6SBenjamin Herrenschmidt else 16798458c628SNicholas Piggin bad_page_fault(regs, sig); 168014cf11afSPaul Mackerras } 168114cf11afSPaul Mackerras 16823a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(stack_overflow_exception) 16833978eb78SChristophe Leroy { 16843978eb78SChristophe Leroy die("Kernel stack overflow", regs, SIGSEGV); 16853978eb78SChristophe Leroy } 16863978eb78SChristophe Leroy 16873a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception) 1688dc1c1ca3SStephen Rothwell { 1689dc1c1ca3SStephen Rothwell printk(KERN_EMERG "Unrecoverable FP Unavailable Exception " 1690dc1c1ca3SStephen Rothwell "%lx at %lx\n", regs->trap, regs->nip); 1691dc1c1ca3SStephen Rothwell die("Unrecoverable FP Unavailable Exception", regs, SIGABRT); 1692dc1c1ca3SStephen Rothwell } 1693dc1c1ca3SStephen Rothwell 16943a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(altivec_unavailable_exception) 1695dc1c1ca3SStephen Rothwell { 1696dc1c1ca3SStephen Rothwell if (user_mode(regs)) { 1697dc1c1ca3SStephen Rothwell /* A user program has executed an altivec instruction, 1698dc1c1ca3SStephen Rothwell but this kernel doesn't support altivec. */ 1699dc1c1ca3SStephen Rothwell _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1700540d4d34SNicholas Piggin return; 1701dc1c1ca3SStephen Rothwell } 17026c4841c2SAnton Blanchard 1703dc1c1ca3SStephen Rothwell printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception " 1704dc1c1ca3SStephen Rothwell "%lx at %lx\n", regs->trap, regs->nip); 1705dc1c1ca3SStephen Rothwell die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT); 1706dc1c1ca3SStephen Rothwell } 1707dc1c1ca3SStephen Rothwell 17083a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(vsx_unavailable_exception) 1709ce48b210SMichael Neuling { 1710ce48b210SMichael Neuling if (user_mode(regs)) { 1711ce48b210SMichael Neuling /* A user program has executed an vsx instruction, 1712ce48b210SMichael Neuling but this kernel doesn't support vsx. 
*/ 1713ce48b210SMichael Neuling _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1714ce48b210SMichael Neuling return; 1715ce48b210SMichael Neuling } 1716ce48b210SMichael Neuling 1717ce48b210SMichael Neuling printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception " 1718ce48b210SMichael Neuling "%lx at %lx\n", regs->trap, regs->nip); 1719ce48b210SMichael Neuling die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT); 1720ce48b210SMichael Neuling } 1721ce48b210SMichael Neuling 1722fcdb758cSRashmica Gupta #ifdef CONFIG_PPC_BOOK3S_64 1723172f7aaaSCyril Bur static void tm_unavailable(struct pt_regs *regs) 1724172f7aaaSCyril Bur { 17255d176f75SCyril Bur #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 17265d176f75SCyril Bur if (user_mode(regs)) { 17275d176f75SCyril Bur current->thread.load_tm++; 172859dc5bfcSNicholas Piggin regs_set_return_msr(regs, regs->msr | MSR_TM); 17295d176f75SCyril Bur tm_enable(); 17305d176f75SCyril Bur tm_restore_sprs(¤t->thread); 17315d176f75SCyril Bur return; 17325d176f75SCyril Bur } 17335d176f75SCyril Bur #endif 1734172f7aaaSCyril Bur pr_emerg("Unrecoverable TM Unavailable Exception " 1735172f7aaaSCyril Bur "%lx at %lx\n", regs->trap, regs->nip); 1736172f7aaaSCyril Bur die("Unrecoverable TM Unavailable Exception", regs, SIGABRT); 1737172f7aaaSCyril Bur } 1738172f7aaaSCyril Bur 17393a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(facility_unavailable_exception) 1740d0c0c9a1SMichael Neuling { 1741021424a1SMichael Ellerman static char *facility_strings[] = { 17422517617eSMichael Neuling [FSCR_FP_LG] = "FPU", 17432517617eSMichael Neuling [FSCR_VECVSX_LG] = "VMX/VSX", 17442517617eSMichael Neuling [FSCR_DSCR_LG] = "DSCR", 17452517617eSMichael Neuling [FSCR_PM_LG] = "PMU SPRs", 17462517617eSMichael Neuling [FSCR_BHRB_LG] = "BHRB", 17472517617eSMichael Neuling [FSCR_TM_LG] = "TM", 17482517617eSMichael Neuling [FSCR_EBB_LG] = "EBB", 17492517617eSMichael Neuling [FSCR_TAR_LG] = "TAR", 1750794464f4SNicholas Piggin [FSCR_MSGP_LG] = "MSGP", 17519b7ff0c6SNicholas Piggin [FSCR_SCV_LG] = "SCV", 17522aa6195eSAlistair Popple [FSCR_PREFIX_LG] = "PREFIX", 1753021424a1SMichael Ellerman }; 17542517617eSMichael Neuling char *facility = "unknown"; 1755021424a1SMichael Ellerman u64 value; 1756c952c1c4SAnshuman Khandual u32 instword, rd; 17572517617eSMichael Neuling u8 status; 17582517617eSMichael Neuling bool hv; 1759021424a1SMichael Ellerman 17607153d4bfSXiongwei Song hv = (TRAP(regs) == INTERRUPT_H_FAC_UNAVAIL); 17612517617eSMichael Neuling if (hv) 1762b14b6260SMichael Ellerman value = mfspr(SPRN_HFSCR); 17632517617eSMichael Neuling else 17642517617eSMichael Neuling value = mfspr(SPRN_FSCR); 17652517617eSMichael Neuling 17662517617eSMichael Neuling status = value >> 56; 1767709b973cSAnshuman Khandual if ((hv || status >= 2) && 1768709b973cSAnshuman Khandual (status < ARRAY_SIZE(facility_strings)) && 1769709b973cSAnshuman Khandual facility_strings[status]) 1770709b973cSAnshuman Khandual facility = facility_strings[status]; 1771709b973cSAnshuman Khandual 1772709b973cSAnshuman Khandual /* We should not have taken this interrupt in kernel */ 1773709b973cSAnshuman Khandual if (!user_mode(regs)) { 1774709b973cSAnshuman Khandual pr_emerg("Facility '%s' unavailable (%d) exception in kernel mode at %lx\n", 1775709b973cSAnshuman Khandual facility, status, regs->nip); 1776709b973cSAnshuman Khandual die("Unexpected facility unavailable exception", regs, SIGABRT); 1777709b973cSAnshuman Khandual } 1778709b973cSAnshuman Khandual 1779e6f8a6c8SNicholas Piggin interrupt_cond_local_irq_enable(regs); 
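	/*
	 * status is the interrupt cause held in the topmost byte of the
	 * (H)FSCR, extracted above as value >> 56.  As a rough user-space
	 * illustration (a sketch only), the FSCR_DSCR_LG case below is
	 * what a process hits when it touches the DSCR through its
	 * problem-state SPR number 3 before the facility has been enabled
	 * for it:
	 *
	 *	unsigned long dscr;
	 *	asm volatile("mfspr %0, 3" : "=r" (dscr));
	 *	asm volatile("mtspr 3, %0" : : "r" (dscr));
	 *
	 * It is the write that makes us set dscr_inherit and FSCR[DSCR]
	 * below, so later accesses no longer trap.
	 */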
1780709b973cSAnshuman Khandual 17812517617eSMichael Neuling if (status == FSCR_DSCR_LG) { 1782c952c1c4SAnshuman Khandual /* 1783c952c1c4SAnshuman Khandual * User is accessing the DSCR register using the problem 1784c952c1c4SAnshuman Khandual * state only SPR number (0x03) either through a mfspr or 1785c952c1c4SAnshuman Khandual * a mtspr instruction. If it is a write attempt through 1786c952c1c4SAnshuman Khandual * a mtspr, then we set the inherit bit. This also allows 1787c952c1c4SAnshuman Khandual * the user to write or read the register directly in the 1788c952c1c4SAnshuman Khandual * future by setting via the FSCR DSCR bit. But in case it 1789c952c1c4SAnshuman Khandual * is a read DSCR attempt through a mfspr instruction, we 1790c952c1c4SAnshuman Khandual * just emulate the instruction instead. This code path will 1791c952c1c4SAnshuman Khandual * always emulate all the mfspr instructions till the user 1792c952c1c4SAnshuman Khandual * has attempted at least one mtspr instruction. This way it 1793c952c1c4SAnshuman Khandual * preserves the same behaviour when the user is accessing 1794c952c1c4SAnshuman Khandual * the DSCR through privilege level only SPR number (0x11) 1795c952c1c4SAnshuman Khandual * which is emulated through illegal instruction exception. 1796c952c1c4SAnshuman Khandual * We always leave HFSCR DSCR set. 17972517617eSMichael Neuling */ 1798c952c1c4SAnshuman Khandual if (get_user(instword, (u32 __user *)(regs->nip))) { 1799c952c1c4SAnshuman Khandual pr_err("Failed to fetch the user instruction\n"); 1800c952c1c4SAnshuman Khandual return; 1801c952c1c4SAnshuman Khandual } 1802c952c1c4SAnshuman Khandual 1803c952c1c4SAnshuman Khandual /* Write into DSCR (mtspr 0x03, RS) */ 1804c952c1c4SAnshuman Khandual if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK) 1805c952c1c4SAnshuman Khandual == PPC_INST_MTSPR_DSCR_USER) { 1806c952c1c4SAnshuman Khandual rd = (instword >> 21) & 0x1f; 1807c952c1c4SAnshuman Khandual current->thread.dscr = regs->gpr[rd]; 18082517617eSMichael Neuling current->thread.dscr_inherit = 1; 1809b57bd2deSMichael Neuling current->thread.fscr |= FSCR_DSCR; 1810b57bd2deSMichael Neuling mtspr(SPRN_FSCR, current->thread.fscr); 1811c952c1c4SAnshuman Khandual } 1812c952c1c4SAnshuman Khandual 1813c952c1c4SAnshuman Khandual /* Read from DSCR (mfspr RT, 0x03) */ 1814c952c1c4SAnshuman Khandual if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK) 1815c952c1c4SAnshuman Khandual == PPC_INST_MFSPR_DSCR_USER) { 1816c952c1c4SAnshuman Khandual if (emulate_instruction(regs)) { 1817c952c1c4SAnshuman Khandual pr_err("DSCR based mfspr emulation failed\n"); 1818c952c1c4SAnshuman Khandual return; 1819c952c1c4SAnshuman Khandual } 182059dc5bfcSNicholas Piggin regs_add_return_ip(regs, 4); 1821c952c1c4SAnshuman Khandual emulate_single_step(regs); 1822c952c1c4SAnshuman Khandual } 18232517617eSMichael Neuling return; 1824b14b6260SMichael Ellerman } 1825b14b6260SMichael Ellerman 1826172f7aaaSCyril Bur if (status == FSCR_TM_LG) { 1827172f7aaaSCyril Bur /* 1828172f7aaaSCyril Bur * If we're here then the hardware is TM aware because it 1829172f7aaaSCyril Bur * generated an exception with FSRM_TM set. 1830172f7aaaSCyril Bur * 1831172f7aaaSCyril Bur * If cpu_has_feature(CPU_FTR_TM) is false, then either firmware 1832172f7aaaSCyril Bur * told us not to do TM, or the kernel is not built with TM 1833172f7aaaSCyril Bur * support. 
1834172f7aaaSCyril Bur * 1835172f7aaaSCyril Bur * If both of those things are true, then userspace can spam the 1836172f7aaaSCyril Bur * console by triggering the printk() below just by continually 1837172f7aaaSCyril Bur * doing tbegin (or any TM instruction). So in that case just 1838172f7aaaSCyril Bur * send the process a SIGILL immediately. 1839172f7aaaSCyril Bur */ 1840172f7aaaSCyril Bur if (!cpu_has_feature(CPU_FTR_TM)) 1841172f7aaaSCyril Bur goto out; 1842172f7aaaSCyril Bur 1843172f7aaaSCyril Bur tm_unavailable(regs); 1844172f7aaaSCyril Bur return; 1845172f7aaaSCyril Bur } 1846172f7aaaSCyril Bur 184793c2ec0fSBalbir Singh pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n", 184893c2ec0fSBalbir Singh hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr); 1849d0c0c9a1SMichael Neuling 1850172f7aaaSCyril Bur out: 1851d0c0c9a1SMichael Neuling _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1852d0c0c9a1SMichael Neuling } 18532517617eSMichael Neuling #endif 1854d0c0c9a1SMichael Neuling 1855f54db641SMichael Neuling #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1856f54db641SMichael Neuling 18573a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(fp_unavailable_tm) 1858f54db641SMichael Neuling { 1859f54db641SMichael Neuling /* Note: This does not handle any kind of FP laziness. */ 1860f54db641SMichael Neuling 1861f54db641SMichael Neuling TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n", 1862f54db641SMichael Neuling regs->nip, regs->msr); 1863f54db641SMichael Neuling 1864f54db641SMichael Neuling /* We can only have got here if the task started using FP after 1865f54db641SMichael Neuling * beginning the transaction. So, the transactional regs are just a 1866f54db641SMichael Neuling * copy of the checkpointed ones. But, we still need to recheckpoint 1867f54db641SMichael Neuling * as we're enabling FP for the process; it will return, abort the 1868f54db641SMichael Neuling * transaction, and probably retry but now with FP enabled. So the 1869f54db641SMichael Neuling * checkpointed FP registers need to be loaded. 1870f54db641SMichael Neuling */ 1871d31626f7SPaul Mackerras tm_reclaim_current(TM_CAUSE_FAC_UNAV); 187296695563SBreno Leitao 187396695563SBreno Leitao /* 187496695563SBreno Leitao * Reclaim initially saved out bogus (lazy) FPRs to ckfp_state, and 187596695563SBreno Leitao * then it was overwrite by the thr->fp_state by tm_reclaim_thread(). 187696695563SBreno Leitao * 187796695563SBreno Leitao * At this point, ck{fp,vr}_state contains the exact values we want to 187896695563SBreno Leitao * recheckpoint. 187996695563SBreno Leitao */ 1880f54db641SMichael Neuling 1881f54db641SMichael Neuling /* Enable FP for the task: */ 1882a7771176SCyril Bur current->thread.load_fp = 1; 1883f54db641SMichael Neuling 188496695563SBreno Leitao /* 188596695563SBreno Leitao * Recheckpoint all the checkpointed ckpt, ck{fp, vr}_state registers. 1886f54db641SMichael Neuling */ 1887eb5c3f1cSCyril Bur tm_recheckpoint(¤t->thread); 1888f54db641SMichael Neuling } 1889f54db641SMichael Neuling 18903a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(altivec_unavailable_tm) 1891f54db641SMichael Neuling { 1892f54db641SMichael Neuling /* See the comments in fp_unavailable_tm(). This function operates 1893f54db641SMichael Neuling * the same way. 
1894f54db641SMichael Neuling */ 1895f54db641SMichael Neuling 1896f54db641SMichael Neuling TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx," 1897f54db641SMichael Neuling "MSR=%lx\n", 1898f54db641SMichael Neuling regs->nip, regs->msr); 1899d31626f7SPaul Mackerras tm_reclaim_current(TM_CAUSE_FAC_UNAV); 1900a7771176SCyril Bur current->thread.load_vec = 1; 1901eb5c3f1cSCyril Bur tm_recheckpoint(¤t->thread); 1902f54db641SMichael Neuling current->thread.used_vr = 1; 19033ac8ff1cSPaul Mackerras } 19043ac8ff1cSPaul Mackerras 19053a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(vsx_unavailable_tm) 1906f54db641SMichael Neuling { 1907f54db641SMichael Neuling /* See the comments in fp_unavailable_tm(). This works similarly, 1908f54db641SMichael Neuling * though we're loading both FP and VEC registers in here. 1909f54db641SMichael Neuling * 1910f54db641SMichael Neuling * If FP isn't in use, load FP regs. If VEC isn't in use, load VEC 1911f54db641SMichael Neuling * regs. Either way, set MSR_VSX. 1912f54db641SMichael Neuling */ 1913f54db641SMichael Neuling 1914f54db641SMichael Neuling TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx," 1915f54db641SMichael Neuling "MSR=%lx\n", 1916f54db641SMichael Neuling regs->nip, regs->msr); 1917f54db641SMichael Neuling 19183ac8ff1cSPaul Mackerras current->thread.used_vsr = 1; 19193ac8ff1cSPaul Mackerras 1920f54db641SMichael Neuling /* This reclaims FP and/or VR regs if they're already enabled */ 1921d31626f7SPaul Mackerras tm_reclaim_current(TM_CAUSE_FAC_UNAV); 1922f54db641SMichael Neuling 1923a7771176SCyril Bur current->thread.load_vec = 1; 1924a7771176SCyril Bur current->thread.load_fp = 1; 19253ac8ff1cSPaul Mackerras 1926eb5c3f1cSCyril Bur tm_recheckpoint(¤t->thread); 1927f54db641SMichael Neuling } 1928f54db641SMichael Neuling #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 1929f54db641SMichael Neuling 19303a96570fSNicholas Piggin #ifdef CONFIG_PPC64 19313a96570fSNicholas Piggin DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi); 19323a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi) 1933dc1c1ca3SStephen Rothwell { 193469111bacSChristoph Lameter __this_cpu_inc(irq_stat.pmu_irqs); 193589713ed1SAnton Blanchard 1936dc1c1ca3SStephen Rothwell perf_irq(regs); 1937156b5371SNicholas Piggin 19383a96570fSNicholas Piggin return 0; 19393a96570fSNicholas Piggin } 19403a96570fSNicholas Piggin #endif 19413a96570fSNicholas Piggin 19423a96570fSNicholas Piggin DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async); 19433a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async) 1944156b5371SNicholas Piggin { 1945156b5371SNicholas Piggin __this_cpu_inc(irq_stat.pmu_irqs); 1946156b5371SNicholas Piggin 1947156b5371SNicholas Piggin perf_irq(regs); 1948156b5371SNicholas Piggin } 1949156b5371SNicholas Piggin 19503a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER_RAW(performance_monitor_exception) 1951156b5371SNicholas Piggin { 1952156b5371SNicholas Piggin /* 1953156b5371SNicholas Piggin * On 64-bit, if perf interrupts hit in a local_irq_disable 1954156b5371SNicholas Piggin * (soft-masked) region, we consider them as NMIs. This is required to 1955156b5371SNicholas Piggin * prevent hash faults on user addresses when reading callchains (and 1956156b5371SNicholas Piggin * looks better from an irq tracing perspective). 
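 *
 * (arch_irq_disabled_regs(regs) tests the soft-mask state that was
 * saved in pt_regs for the context we interrupted, not the current hard
 * MSR[EE], so the dispatch below really asks "did the PMI land inside a
 * local_irq_disable() section".)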
1957156b5371SNicholas Piggin */ 1958156b5371SNicholas Piggin if (IS_ENABLED(CONFIG_PPC64) && unlikely(arch_irq_disabled_regs(regs))) 1959156b5371SNicholas Piggin performance_monitor_exception_nmi(regs); 1960156b5371SNicholas Piggin else 1961156b5371SNicholas Piggin performance_monitor_exception_async(regs); 19623a96570fSNicholas Piggin 19633a96570fSNicholas Piggin return 0; 1964dc1c1ca3SStephen Rothwell } 1965dc1c1ca3SStephen Rothwell 1966172ae2e7SDave Kleikamp #ifdef CONFIG_PPC_ADV_DEBUG_REGS 19673bffb652SDave Kleikamp static void handle_debug(struct pt_regs *regs, unsigned long debug_status) 19683bffb652SDave Kleikamp { 19693bffb652SDave Kleikamp int changed = 0; 19703bffb652SDave Kleikamp /* 19713bffb652SDave Kleikamp * Determine the cause of the debug event, clear the 19723bffb652SDave Kleikamp * event flags and send a trap to the handler. Torez 19733bffb652SDave Kleikamp */ 19743bffb652SDave Kleikamp if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) { 19753bffb652SDave Kleikamp dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W); 19763bffb652SDave Kleikamp #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE 197751ae8d4aSBharat Bhushan current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE; 19783bffb652SDave Kleikamp #endif 197947355040SEric W. Biederman do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, 19803bffb652SDave Kleikamp 5); 19813bffb652SDave Kleikamp changed |= 0x01; 19823bffb652SDave Kleikamp } else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) { 19833bffb652SDave Kleikamp dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W); 198447355040SEric W. Biederman do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, 19853bffb652SDave Kleikamp 6); 19863bffb652SDave Kleikamp changed |= 0x01; 19873bffb652SDave Kleikamp } else if (debug_status & DBSR_IAC1) { 198851ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_IAC1; 19893bffb652SDave Kleikamp dbcr_iac_range(current) &= ~DBCR_IAC12MODE; 199047355040SEric W. Biederman do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, 19913bffb652SDave Kleikamp 1); 19923bffb652SDave Kleikamp changed |= 0x01; 19933bffb652SDave Kleikamp } else if (debug_status & DBSR_IAC2) { 199451ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_IAC2; 199547355040SEric W. Biederman do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, 19963bffb652SDave Kleikamp 2); 19973bffb652SDave Kleikamp changed |= 0x01; 19983bffb652SDave Kleikamp } else if (debug_status & DBSR_IAC3) { 199951ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_IAC3; 20003bffb652SDave Kleikamp dbcr_iac_range(current) &= ~DBCR_IAC34MODE; 200147355040SEric W. Biederman do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, 20023bffb652SDave Kleikamp 3); 20033bffb652SDave Kleikamp changed |= 0x01; 20043bffb652SDave Kleikamp } else if (debug_status & DBSR_IAC4) { 200551ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_IAC4; 200647355040SEric W. Biederman do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, 20073bffb652SDave Kleikamp 4); 20083bffb652SDave Kleikamp changed |= 0x01; 20093bffb652SDave Kleikamp } 20103bffb652SDave Kleikamp /* 20113bffb652SDave Kleikamp * At the point this routine was called, the MSR(DE) was turned off. 20123bffb652SDave Kleikamp * Check all other debug flags and see if that bit needs to be turned 20133bffb652SDave Kleikamp * back on or not. 
20143bffb652SDave Kleikamp */ 201551ae8d4aSBharat Bhushan if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0, 201651ae8d4aSBharat Bhushan current->thread.debug.dbcr1)) 201759dc5bfcSNicholas Piggin regs_set_return_msr(regs, regs->msr | MSR_DE); 20183bffb652SDave Kleikamp else 20193bffb652SDave Kleikamp /* Make sure the IDM flag is off */ 202051ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_IDM; 20213bffb652SDave Kleikamp 20223bffb652SDave Kleikamp if (changed & 0x01) 202351ae8d4aSBharat Bhushan mtspr(SPRN_DBCR0, current->thread.debug.dbcr0); 20243bffb652SDave Kleikamp } 202514cf11afSPaul Mackerras 20263a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(DebugException) 202714cf11afSPaul Mackerras { 2028755d6641SNicholas Piggin unsigned long debug_status = regs->dsisr; 2029755d6641SNicholas Piggin 203051ae8d4aSBharat Bhushan current->thread.debug.dbsr = debug_status; 20313bffb652SDave Kleikamp 2032ec097c84SRoland McGrath /* Hack alert: On BookE, Branch Taken stops on the branch itself, while 2033ec097c84SRoland McGrath * on server, it stops on the target of the branch. In order to simulate 2034ec097c84SRoland McGrath * the server behaviour, we thus restart right away with a single step 2035ec097c84SRoland McGrath * instead of stopping here when hitting a BT 2036ec097c84SRoland McGrath */ 2037ec097c84SRoland McGrath if (debug_status & DBSR_BT) { 203859dc5bfcSNicholas Piggin regs_set_return_msr(regs, regs->msr & ~MSR_DE); 2039ec097c84SRoland McGrath 2040ec097c84SRoland McGrath /* Disable BT */ 2041ec097c84SRoland McGrath mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT); 2042ec097c84SRoland McGrath /* Clear the BT event */ 2043ec097c84SRoland McGrath mtspr(SPRN_DBSR, DBSR_BT); 2044ec097c84SRoland McGrath 2045ec097c84SRoland McGrath /* Do the single step trick only when coming from userspace */ 2046ec097c84SRoland McGrath if (user_mode(regs)) { 204751ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_BT; 204851ae8d4aSBharat Bhushan current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC; 204959dc5bfcSNicholas Piggin regs_set_return_msr(regs, regs->msr | MSR_DE); 2050ec097c84SRoland McGrath return; 2051ec097c84SRoland McGrath } 2052ec097c84SRoland McGrath 20536cc89badSNaveen N. Rao if (kprobe_post_handler(regs)) 20546cc89badSNaveen N. Rao return; 20556cc89badSNaveen N. Rao 2056ec097c84SRoland McGrath if (notify_die(DIE_SSTEP, "block_step", regs, 5, 2057ec097c84SRoland McGrath 5, SIGTRAP) == NOTIFY_STOP) { 2058ec097c84SRoland McGrath return; 2059ec097c84SRoland McGrath } 2060ec097c84SRoland McGrath if (debugger_sstep(regs)) 2061ec097c84SRoland McGrath return; 2062ec097c84SRoland McGrath } else if (debug_status & DBSR_IC) { /* Instruction complete */ 206359dc5bfcSNicholas Piggin regs_set_return_msr(regs, regs->msr & ~MSR_DE); 2064f8279621SKumar Gala 206514cf11afSPaul Mackerras /* Disable instruction completion */ 206614cf11afSPaul Mackerras mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC); 206714cf11afSPaul Mackerras /* Clear the instruction completion event */ 206814cf11afSPaul Mackerras mtspr(SPRN_DBSR, DBSR_IC); 2069f8279621SKumar Gala 20706cc89badSNaveen N. Rao if (kprobe_post_handler(regs)) 20716cc89badSNaveen N. Rao return; 20726cc89badSNaveen N. 
Rao 2073f8279621SKumar Gala if (notify_die(DIE_SSTEP, "single_step", regs, 5, 2074f8279621SKumar Gala 5, SIGTRAP) == NOTIFY_STOP) { 207514cf11afSPaul Mackerras return; 207614cf11afSPaul Mackerras } 2077f8279621SKumar Gala 2078f8279621SKumar Gala if (debugger_sstep(regs)) 2079f8279621SKumar Gala return; 2080f8279621SKumar Gala 20813bffb652SDave Kleikamp if (user_mode(regs)) { 208251ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_IC; 208351ae8d4aSBharat Bhushan if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0, 208451ae8d4aSBharat Bhushan current->thread.debug.dbcr1)) 208559dc5bfcSNicholas Piggin regs_set_return_msr(regs, regs->msr | MSR_DE); 20863bffb652SDave Kleikamp else 20873bffb652SDave Kleikamp /* Make sure the IDM bit is off */ 208851ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_IDM; 20893bffb652SDave Kleikamp } 2090f8279621SKumar Gala 2091f8279621SKumar Gala _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); 20923bffb652SDave Kleikamp } else 20933bffb652SDave Kleikamp handle_debug(regs, debug_status); 209414cf11afSPaul Mackerras } 2095172ae2e7SDave Kleikamp #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ 209614cf11afSPaul Mackerras 209714cf11afSPaul Mackerras #ifdef CONFIG_ALTIVEC 20983a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(altivec_assist_exception) 209914cf11afSPaul Mackerras { 210014cf11afSPaul Mackerras int err; 210114cf11afSPaul Mackerras 210214cf11afSPaul Mackerras if (!user_mode(regs)) { 210314cf11afSPaul Mackerras printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode" 210414cf11afSPaul Mackerras " at %lx\n", regs->nip); 21058dad3f92SPaul Mackerras die("Kernel VMX/Altivec assist exception", regs, SIGILL); 210614cf11afSPaul Mackerras } 210714cf11afSPaul Mackerras 2108dc1c1ca3SStephen Rothwell flush_altivec_to_thread(current); 2109dc1c1ca3SStephen Rothwell 2110eecff81dSAnton Blanchard PPC_WARN_EMULATED(altivec, regs); 211114cf11afSPaul Mackerras err = emulate_altivec(regs); 211214cf11afSPaul Mackerras if (err == 0) { 211359dc5bfcSNicholas Piggin regs_add_return_ip(regs, 4); /* skip emulated instruction */ 211414cf11afSPaul Mackerras emulate_single_step(regs); 211514cf11afSPaul Mackerras return; 211614cf11afSPaul Mackerras } 211714cf11afSPaul Mackerras 211814cf11afSPaul Mackerras if (err == -EFAULT) { 211914cf11afSPaul Mackerras /* got an error reading the instruction */ 212014cf11afSPaul Mackerras _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); 212114cf11afSPaul Mackerras } else { 212214cf11afSPaul Mackerras /* didn't recognize the instruction */ 212314cf11afSPaul Mackerras /* XXX quick hack for now: set the non-Java bit in the VSCR */ 212476462232SChristian Dietrich printk_ratelimited(KERN_ERR "Unrecognized altivec instruction " 212514cf11afSPaul Mackerras "in %s at %lx\n", current->comm, regs->nip); 2126de79f7b9SPaul Mackerras current->thread.vr_state.vscr.u[3] |= 0x10000; 212714cf11afSPaul Mackerras } 212814cf11afSPaul Mackerras } 212914cf11afSPaul Mackerras #endif /* CONFIG_ALTIVEC */ 213014cf11afSPaul Mackerras 2131dfc3095cSChristophe Leroy #ifdef CONFIG_PPC_85xx 21323a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(CacheLockingException) 213314cf11afSPaul Mackerras { 2134b4ced803SNicholas Piggin unsigned long error_code = regs->dsisr; 2135b4ced803SNicholas Piggin 213614cf11afSPaul Mackerras /* We treat cache locking instructions from the user 213714cf11afSPaul Mackerras * as priv ops, in the future we could try to do 213814cf11afSPaul Mackerras * something smarter 213914cf11afSPaul Mackerras */ 214014cf11afSPaul Mackerras if 
(error_code & (ESR_DLK|ESR_ILK)) 214114cf11afSPaul Mackerras _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); 214214cf11afSPaul Mackerras return; 214314cf11afSPaul Mackerras } 2144dfc3095cSChristophe Leroy #endif /* CONFIG_PPC_85xx */ 214514cf11afSPaul Mackerras 214614cf11afSPaul Mackerras #ifdef CONFIG_SPE 21473a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(SPEFloatingPointException) 214814cf11afSPaul Mackerras { 214914cf11afSPaul Mackerras unsigned long spefscr; 215014cf11afSPaul Mackerras int fpexc_mode; 2151aeb1c0f6SEric W. Biederman int code = FPE_FLTUNK; 21526a800f36SLiu Yu int err; 21536a800f36SLiu Yu 2154e6f8a6c8SNicholas Piggin interrupt_cond_local_irq_enable(regs); 2155ef429124SChristophe Leroy 2156685659eeSyu liu flush_spe_to_thread(current); 215714cf11afSPaul Mackerras 215814cf11afSPaul Mackerras spefscr = current->thread.spefscr; 215914cf11afSPaul Mackerras fpexc_mode = current->thread.fpexc_mode; 216014cf11afSPaul Mackerras 216114cf11afSPaul Mackerras if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) { 216214cf11afSPaul Mackerras code = FPE_FLTOVF; 216314cf11afSPaul Mackerras } 216414cf11afSPaul Mackerras else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) { 216514cf11afSPaul Mackerras code = FPE_FLTUND; 216614cf11afSPaul Mackerras } 216714cf11afSPaul Mackerras else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV)) 216814cf11afSPaul Mackerras code = FPE_FLTDIV; 216914cf11afSPaul Mackerras else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) { 217014cf11afSPaul Mackerras code = FPE_FLTINV; 217114cf11afSPaul Mackerras } 217214cf11afSPaul Mackerras else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES)) 217314cf11afSPaul Mackerras code = FPE_FLTRES; 217414cf11afSPaul Mackerras 21756a800f36SLiu Yu err = do_spe_mathemu(regs); 21766a800f36SLiu Yu if (err == 0) { 217759dc5bfcSNicholas Piggin regs_add_return_ip(regs, 4); /* skip emulated instruction */ 21786a800f36SLiu Yu emulate_single_step(regs); 217914cf11afSPaul Mackerras return; 218014cf11afSPaul Mackerras } 21816a800f36SLiu Yu 21826a800f36SLiu Yu if (err == -EFAULT) { 21836a800f36SLiu Yu /* got an error reading the instruction */ 21846a800f36SLiu Yu _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); 21856a800f36SLiu Yu } else if (err == -EINVAL) { 21866a800f36SLiu Yu /* didn't recognize the instruction */ 21876a800f36SLiu Yu printk(KERN_ERR "unrecognized spe instruction " 21886a800f36SLiu Yu "in %s at %lx\n", current->comm, regs->nip); 21896a800f36SLiu Yu } else { 21906a800f36SLiu Yu _exception(SIGFPE, regs, code, regs->nip); 21916a800f36SLiu Yu } 21926a800f36SLiu Yu 21936a800f36SLiu Yu return; 21946a800f36SLiu Yu } 21956a800f36SLiu Yu 21963a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(SPEFloatingPointRoundException) 21976a800f36SLiu Yu { 21986a800f36SLiu Yu int err; 21996a800f36SLiu Yu 2200e6f8a6c8SNicholas Piggin interrupt_cond_local_irq_enable(regs); 2201ef429124SChristophe Leroy 22026a800f36SLiu Yu preempt_disable(); 22036a800f36SLiu Yu if (regs->msr & MSR_SPE) 22046a800f36SLiu Yu giveup_spe(current); 22056a800f36SLiu Yu preempt_enable(); 22066a800f36SLiu Yu 220759dc5bfcSNicholas Piggin regs_add_return_ip(regs, -4); 22086a800f36SLiu Yu err = speround_handler(regs); 22096a800f36SLiu Yu if (err == 0) { 221059dc5bfcSNicholas Piggin regs_add_return_ip(regs, 4); /* skip emulated instruction */ 22116a800f36SLiu Yu emulate_single_step(regs); 22126a800f36SLiu Yu return; 22136a800f36SLiu Yu } 22146a800f36SLiu Yu 22156a800f36SLiu Yu if (err == -EFAULT) 
{ 22166a800f36SLiu Yu /* got an error reading the instruction */ 22176a800f36SLiu Yu _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); 22186a800f36SLiu Yu } else if (err == -EINVAL) { 22196a800f36SLiu Yu /* didn't recognize the instruction */ 22206a800f36SLiu Yu printk(KERN_ERR "unrecognized spe instruction " 22216a800f36SLiu Yu "in %s at %lx\n", current->comm, regs->nip); 22226a800f36SLiu Yu } else { 2223aeb1c0f6SEric W. Biederman _exception(SIGFPE, regs, FPE_FLTUNK, regs->nip); 22246a800f36SLiu Yu return; 22256a800f36SLiu Yu } 22266a800f36SLiu Yu } 222714cf11afSPaul Mackerras #endif 222814cf11afSPaul Mackerras 2229dc1c1ca3SStephen Rothwell /* 2230dc1c1ca3SStephen Rothwell * We enter here if we get an unrecoverable exception, that is, one 2231dc1c1ca3SStephen Rothwell * that happened at a point where the RI (recoverable interrupt) bit 2232dc1c1ca3SStephen Rothwell * in the MSR is 0. This indicates that SRR0/1 are live, and that 2233dc1c1ca3SStephen Rothwell * we therefore lost state by taking this exception. 2234dc1c1ca3SStephen Rothwell */ 2235a58cbed6SChristophe Leroy void __noreturn unrecoverable_exception(struct pt_regs *regs) 2236dc1c1ca3SStephen Rothwell { 223751423a9cSChristophe Leroy pr_emerg("Unrecoverable exception %lx at %lx (msr=%lx)\n", 223851423a9cSChristophe Leroy regs->trap, regs->nip, regs->msr); 2239dc1c1ca3SStephen Rothwell die("Unrecoverable exception", regs, SIGABRT); 2240a58cbed6SChristophe Leroy /* die() should not return */ 2241a58cbed6SChristophe Leroy for (;;) 2242a58cbed6SChristophe Leroy ; 2243dc1c1ca3SStephen Rothwell } 2244dc1c1ca3SStephen Rothwell 22451e18c17aSJason Gunthorpe #if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x) 22463db8aa10SNicholas Piggin DEFINE_INTERRUPT_HANDLER_NMI(WatchdogException) 224714cf11afSPaul Mackerras { 224814cf11afSPaul Mackerras printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n"); 2249ca13c130SChristophe Leroy mtspr(SPRN_TCR, mfspr(SPRN_TCR) & ~TCR_WIE); 22503db8aa10SNicholas Piggin return 0; 225114cf11afSPaul Mackerras } 225214cf11afSPaul Mackerras #endif 2253dc1c1ca3SStephen Rothwell 2254dc1c1ca3SStephen Rothwell /* 2255dc1c1ca3SStephen Rothwell * We enter here if we discover during exception entry that we are 2256dc1c1ca3SStephen Rothwell * running in supervisor mode with a userspace value in the stack pointer. 
2257dc1c1ca3SStephen Rothwell */ 22583a96570fSNicholas Piggin DEFINE_INTERRUPT_HANDLER(kernel_bad_stack) 2259dc1c1ca3SStephen Rothwell { 2260dc1c1ca3SStephen Rothwell printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n", 2261dc1c1ca3SStephen Rothwell regs->gpr[1], regs->nip); 2262dc1c1ca3SStephen Rothwell die("Bad kernel stack pointer", regs, SIGABRT); 2263dc1c1ca3SStephen Rothwell } 226414cf11afSPaul Mackerras 226580947e7cSGeert Uytterhoeven #ifdef CONFIG_PPC_EMULATED_STATS 226680947e7cSGeert Uytterhoeven 226780947e7cSGeert Uytterhoeven #define WARN_EMULATED_SETUP(type) .type = { .name = #type } 226880947e7cSGeert Uytterhoeven 226980947e7cSGeert Uytterhoeven struct ppc_emulated ppc_emulated = { 227080947e7cSGeert Uytterhoeven #ifdef CONFIG_ALTIVEC 227180947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(altivec), 227280947e7cSGeert Uytterhoeven #endif 227380947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(dcba), 227480947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(dcbz), 227580947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(fp_pair), 227680947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(isel), 227780947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(mcrxr), 227880947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(mfpvr), 227980947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(multiple), 228080947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(popcntb), 228180947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(spe), 228280947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(string), 2283a3821b2aSScott Wood WARN_EMULATED_SETUP(sync), 228480947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(unaligned), 228580947e7cSGeert Uytterhoeven #ifdef CONFIG_MATH_EMULATION 228680947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(math), 228780947e7cSGeert Uytterhoeven #endif 228880947e7cSGeert Uytterhoeven #ifdef CONFIG_VSX 228980947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(vsx), 229080947e7cSGeert Uytterhoeven #endif 2291efcac658SAlexey Kardashevskiy #ifdef CONFIG_PPC64 2292efcac658SAlexey Kardashevskiy WARN_EMULATED_SETUP(mfdscr), 2293efcac658SAlexey Kardashevskiy WARN_EMULATED_SETUP(mtdscr), 2294f83319d7SAnton Blanchard WARN_EMULATED_SETUP(lq_stq), 22955080332cSMichael Neuling WARN_EMULATED_SETUP(lxvw4x), 22965080332cSMichael Neuling WARN_EMULATED_SETUP(lxvh8x), 22975080332cSMichael Neuling WARN_EMULATED_SETUP(lxvd2x), 22985080332cSMichael Neuling WARN_EMULATED_SETUP(lxvb16x), 2299efcac658SAlexey Kardashevskiy #endif 230080947e7cSGeert Uytterhoeven }; 230180947e7cSGeert Uytterhoeven 230280947e7cSGeert Uytterhoeven u32 ppc_warn_emulated; 230380947e7cSGeert Uytterhoeven 230480947e7cSGeert Uytterhoeven void ppc_warn_emulated_print(const char *type) 230580947e7cSGeert Uytterhoeven { 230676462232SChristian Dietrich pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm, 230780947e7cSGeert Uytterhoeven type); 230880947e7cSGeert Uytterhoeven } 230980947e7cSGeert Uytterhoeven 231080947e7cSGeert Uytterhoeven static int __init ppc_warn_emulated_init(void) 231180947e7cSGeert Uytterhoeven { 2312860286cfSGreg Kroah-Hartman struct dentry *dir; 231380947e7cSGeert Uytterhoeven unsigned int i; 231480947e7cSGeert Uytterhoeven struct ppc_emulated_entry *entries = (void *)&ppc_emulated; 231580947e7cSGeert Uytterhoeven 231680947e7cSGeert Uytterhoeven dir = debugfs_create_dir("emulated_instructions", 2317dbf77fedSAneesh Kumar K.V arch_debugfs_dir); 231880947e7cSGeert Uytterhoeven 2319860286cfSGreg Kroah-Hartman debugfs_create_u32("do_warn", 0644, dir, &ppc_warn_emulated); 232080947e7cSGeert Uytterhoeven 2321860286cfSGreg Kroah-Hartman for (i = 0; i < 
sizeof(ppc_emulated)/sizeof(*entries); i++) 2322860286cfSGreg Kroah-Hartman debugfs_create_u32(entries[i].name, 0644, dir, 232380947e7cSGeert Uytterhoeven (u32 *)&entries[i].val.counter); 232480947e7cSGeert Uytterhoeven 232580947e7cSGeert Uytterhoeven return 0; 232680947e7cSGeert Uytterhoeven } 232780947e7cSGeert Uytterhoeven 232880947e7cSGeert Uytterhoeven device_initcall(ppc_warn_emulated_init); 232980947e7cSGeert Uytterhoeven 233080947e7cSGeert Uytterhoeven #endif /* CONFIG_PPC_EMULATED_STATS */ 2331
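/*
 * Illustrative sketch (not compiled): how the counters registered above are
 * typically fed.  PPC_WARN_EMULATED() from <asm/emulated_ops.h> bumps the
 * matching ppc_emulated entry and, when the "do_warn" knob created in
 * ppc_warn_emulated_init() is non-zero, prints the rate-limited message via
 * ppc_warn_emulated_print().  The helper name below is made up for the
 * example; real call sites are in emulate_instruction() and friends.  With
 * debugfs mounted in the usual place, the per-instruction counters appear
 * under /sys/kernel/debug/powerpc/emulated_instructions/.
 */
#if 0	/* illustrative only */
static void example_count_mfpvr_emulation(struct pt_regs *regs, u32 instword)
{
	int rd = (instword >> 21) & 0x1f;	/* destination GPR from the decoded instruction */

	PPC_WARN_EMULATED(mfpvr, regs);		/* bump ppc_emulated.mfpvr, optionally warn */
	regs->gpr[rd] = mfspr(SPRN_PVR);	/* the emulation itself */
}
#endif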
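/*
 * A minimal userspace sketch (not part of this file's build), assuming the
 * task uses prctl(PR_SET_FPEXC, ...) to populate the thread.fpexc_mode that
 * SPEFloatingPointException above consults.  Only conditions whose
 * PR_FP_EXC_* bit is enabled here are reported with a specific si_code
 * (FPE_FLTDIV, FPE_FLTOVF, ...); everything else falls back to FPE_FLTUNK.
 * The flag combination is an example, not a recommendation.
 */
#if 0	/* userspace sketch, kept out of the kernel build */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>

static void sigfpe_handler(int sig, siginfo_t *info, void *ctx)
{
	/* si_code mirrors the code chosen in SPEFloatingPointException */
	if (info->si_code == FPE_FLTDIV)
		fprintf(stderr, "SPE divide-by-zero at %p\n", info->si_addr);
	_exit(1);
}

int main(void)
{
	struct sigaction sa = {
		.sa_sigaction	= sigfpe_handler,
		.sa_flags	= SA_SIGINFO,
	};

	sigaction(SIGFPE, &sa, NULL);
	/* ask for precise traps on divide-by-zero and overflow only */
	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE | PR_FP_EXC_DIV | PR_FP_EXC_OVF);
	/* ... SPE floating point work that may divide by zero ... */
	return 0;
}
#endif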
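/*
 * Hedged sketch of the kind of call site that ends up in
 * unrecoverable_exception() above: once MSR[RI] is observed clear, SRR0/SRR1
 * may already have been overwritten by the exception just taken, so there is
 * no intact state to return to and the only option is to die.  The check
 * below is schematic; the real tests live in the interrupt entry/exit paths.
 */
#if 0	/* illustrative only */
	if (unlikely(!(regs->msr & MSR_RI)))
		unrecoverable_exception(regs);
#endif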