114cf11afSPaul Mackerras /* 214cf11afSPaul Mackerras * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 3fe04b112SScott Wood * Copyright 2007-2010 Freescale Semiconductor, Inc. 414cf11afSPaul Mackerras * 514cf11afSPaul Mackerras * This program is free software; you can redistribute it and/or 614cf11afSPaul Mackerras * modify it under the terms of the GNU General Public License 714cf11afSPaul Mackerras * as published by the Free Software Foundation; either version 814cf11afSPaul Mackerras * 2 of the License, or (at your option) any later version. 914cf11afSPaul Mackerras * 1014cf11afSPaul Mackerras * Modified by Cort Dougan (cort@cs.nmt.edu) 1114cf11afSPaul Mackerras * and Paul Mackerras (paulus@samba.org) 1214cf11afSPaul Mackerras */ 1314cf11afSPaul Mackerras 1414cf11afSPaul Mackerras /* 1514cf11afSPaul Mackerras * This file handles the architecture-dependent parts of hardware exceptions 1614cf11afSPaul Mackerras */ 1714cf11afSPaul Mackerras 1814cf11afSPaul Mackerras #include <linux/errno.h> 1914cf11afSPaul Mackerras #include <linux/sched.h> 20b17b0153SIngo Molnar #include <linux/sched/debug.h> 2114cf11afSPaul Mackerras #include <linux/kernel.h> 2214cf11afSPaul Mackerras #include <linux/mm.h> 2399cd1302SRam Pai #include <linux/pkeys.h> 2414cf11afSPaul Mackerras #include <linux/stddef.h> 2514cf11afSPaul Mackerras #include <linux/unistd.h> 268dad3f92SPaul Mackerras #include <linux/ptrace.h> 2714cf11afSPaul Mackerras #include <linux/user.h> 2814cf11afSPaul Mackerras #include <linux/interrupt.h> 2914cf11afSPaul Mackerras #include <linux/init.h> 308a39b05fSPaul Gortmaker #include <linux/extable.h> 318a39b05fSPaul Gortmaker #include <linux/module.h> /* print_modules */ 328dad3f92SPaul Mackerras #include <linux/prctl.h> 3314cf11afSPaul Mackerras #include <linux/delay.h> 3414cf11afSPaul Mackerras #include <linux/kprobes.h> 35cc532915SMichael Ellerman #include <linux/kexec.h> 365474c120SMichael Hanselmann #include <linux/backlight.h> 3773c9ceabSJeremy Fitzhardinge #include <linux/bug.h> 381eeb66a1SChristoph Hellwig #include <linux/kdebug.h> 3976462232SChristian Dietrich #include <linux/ratelimit.h> 40ba12eedeSLi Zhong #include <linux/context_tracking.h> 415080332cSMichael Neuling #include <linux/smp.h> 4235adacd6SNicholas Piggin #include <linux/console.h> 4335adacd6SNicholas Piggin #include <linux/kmsg_dump.h> 4414cf11afSPaul Mackerras 4580947e7cSGeert Uytterhoeven #include <asm/emulated_ops.h> 4614cf11afSPaul Mackerras #include <asm/pgtable.h> 477c0f6ba6SLinus Torvalds #include <linux/uaccess.h> 487644d581SMichael Ellerman #include <asm/debugfs.h> 4914cf11afSPaul Mackerras #include <asm/io.h> 5086417780SPaul Mackerras #include <asm/machdep.h> 5186417780SPaul Mackerras #include <asm/rtas.h> 52f7f6f4feSDavid Gibson #include <asm/pmc.h> 5314cf11afSPaul Mackerras #include <asm/reg.h> 5414cf11afSPaul Mackerras #ifdef CONFIG_PMAC_BACKLIGHT 5514cf11afSPaul Mackerras #include <asm/backlight.h> 5614cf11afSPaul Mackerras #endif 57dc1c1ca3SStephen Rothwell #ifdef CONFIG_PPC64 5886417780SPaul Mackerras #include <asm/firmware.h> 59dc1c1ca3SStephen Rothwell #include <asm/processor.h> 606ce6c629SMichael Neuling #include <asm/tm.h> 61dc1c1ca3SStephen Rothwell #endif 62c0ce7d08SDavid Wilder #include <asm/kexec.h> 6316c57b36SKumar Gala #include <asm/ppc-opcode.h> 64cce1f106SShaohui Xie #include <asm/rio.h> 65ebaeb5aeSMahesh Salgaonkar #include <asm/fadump.h> 66ae3a197eSDavid Howells #include <asm/switch_to.h> 67f54db641SMichael Neuling #include <asm/tm.h> 68ae3a197eSDavid Howells #include <asm/debug.h> 
6942f5b4caSDaniel Axtens #include <asm/asm-prototypes.h> 70fd7bacbcSMahesh Salgaonkar #include <asm/hmi.h> 714e0e3435SHongtao Jia #include <sysdev/fsl_pci.h> 726cc89badSNaveen N. Rao #include <asm/kprobes.h> 73dc1c1ca3SStephen Rothwell 74da665885SThiago Jung Bauermann #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE) 755be3492fSAnton Blanchard int (*__debugger)(struct pt_regs *regs) __read_mostly; 765be3492fSAnton Blanchard int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly; 775be3492fSAnton Blanchard int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly; 785be3492fSAnton Blanchard int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly; 795be3492fSAnton Blanchard int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly; 809422de3eSMichael Neuling int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly; 815be3492fSAnton Blanchard int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly; 8214cf11afSPaul Mackerras 8314cf11afSPaul Mackerras EXPORT_SYMBOL(__debugger); 8414cf11afSPaul Mackerras EXPORT_SYMBOL(__debugger_ipi); 8514cf11afSPaul Mackerras EXPORT_SYMBOL(__debugger_bpt); 8614cf11afSPaul Mackerras EXPORT_SYMBOL(__debugger_sstep); 8714cf11afSPaul Mackerras EXPORT_SYMBOL(__debugger_iabr_match); 889422de3eSMichael Neuling EXPORT_SYMBOL(__debugger_break_match); 8914cf11afSPaul Mackerras EXPORT_SYMBOL(__debugger_fault_handler); 9014cf11afSPaul Mackerras #endif 9114cf11afSPaul Mackerras 928b3c34cfSMichael Neuling /* Transactional Memory trap debug */ 938b3c34cfSMichael Neuling #ifdef TM_DEBUG_SW 948b3c34cfSMichael Neuling #define TM_DEBUG(x...) printk(KERN_INFO x) 958b3c34cfSMichael Neuling #else 968b3c34cfSMichael Neuling #define TM_DEBUG(x...) do { } while(0) 978b3c34cfSMichael Neuling #endif 988b3c34cfSMichael Neuling 9914cf11afSPaul Mackerras /* 10014cf11afSPaul Mackerras * Trap & Exception support 10114cf11afSPaul Mackerras */ 10214cf11afSPaul Mackerras 1036031d9d9Santon@samba.org #ifdef CONFIG_PMAC_BACKLIGHT 1046031d9d9Santon@samba.org static void pmac_backlight_unblank(void) 1056031d9d9Santon@samba.org { 1066031d9d9Santon@samba.org mutex_lock(&pmac_backlight_mutex); 1076031d9d9Santon@samba.org if (pmac_backlight) { 1086031d9d9Santon@samba.org struct backlight_properties *props; 1096031d9d9Santon@samba.org 1106031d9d9Santon@samba.org props = &pmac_backlight->props; 1116031d9d9Santon@samba.org props->brightness = props->max_brightness; 1126031d9d9Santon@samba.org props->power = FB_BLANK_UNBLANK; 1136031d9d9Santon@samba.org backlight_update_status(pmac_backlight); 1146031d9d9Santon@samba.org } 1156031d9d9Santon@samba.org mutex_unlock(&pmac_backlight_mutex); 1166031d9d9Santon@samba.org } 1176031d9d9Santon@samba.org #else 1186031d9d9Santon@samba.org static inline void pmac_backlight_unblank(void) { } 1196031d9d9Santon@samba.org #endif 1206031d9d9Santon@samba.org 1216fcd6baaSNicholas Piggin /* 1226fcd6baaSNicholas Piggin * If oops/die is expected to crash the machine, return true here. 1236fcd6baaSNicholas Piggin * 1246fcd6baaSNicholas Piggin * This should not be expected to be 100% accurate, there may be 1256fcd6baaSNicholas Piggin * notifiers registered or other unexpected conditions that may bring 1266fcd6baaSNicholas Piggin * down the kernel. Or if the current process in the kernel is holding 1276fcd6baaSNicholas Piggin * locks or has other critical state, the kernel may become effectively 1286fcd6baaSNicholas Piggin * unusable anyway. 
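 * The checks below mirror the fadump, kexec and panic decisions that
 * oops_end() makes further down, so this is only a best-effort guess
 * at whether die() will come back.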
1296fcd6baaSNicholas Piggin */ 1306fcd6baaSNicholas Piggin bool die_will_crash(void) 1316fcd6baaSNicholas Piggin { 1326fcd6baaSNicholas Piggin if (should_fadump_crash()) 1336fcd6baaSNicholas Piggin return true; 1346fcd6baaSNicholas Piggin if (kexec_should_crash(current)) 1356fcd6baaSNicholas Piggin return true; 1366fcd6baaSNicholas Piggin if (in_interrupt() || panic_on_oops || 1376fcd6baaSNicholas Piggin !current->pid || is_global_init(current)) 1386fcd6baaSNicholas Piggin return true; 1396fcd6baaSNicholas Piggin 1406fcd6baaSNicholas Piggin return false; 1416fcd6baaSNicholas Piggin } 1426fcd6baaSNicholas Piggin 143760ca4dcSAnton Blanchard static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; 144760ca4dcSAnton Blanchard static int die_owner = -1; 145760ca4dcSAnton Blanchard static unsigned int die_nest_count; 146c0ce7d08SDavid Wilder static int die_counter; 147760ca4dcSAnton Blanchard 14835adacd6SNicholas Piggin extern void panic_flush_kmsg_start(void) 14935adacd6SNicholas Piggin { 15035adacd6SNicholas Piggin /* 15135adacd6SNicholas Piggin * These are mostly taken from kernel/panic.c, but tries to do 15235adacd6SNicholas Piggin * relatively minimal work. Don't use delay functions (TB may 15335adacd6SNicholas Piggin * be broken), don't crash dump (need to set a firmware log), 15435adacd6SNicholas Piggin * don't run notifiers. We do want to get some information to 15535adacd6SNicholas Piggin * Linux console. 15635adacd6SNicholas Piggin */ 15735adacd6SNicholas Piggin console_verbose(); 15835adacd6SNicholas Piggin bust_spinlocks(1); 15935adacd6SNicholas Piggin } 16035adacd6SNicholas Piggin 16135adacd6SNicholas Piggin extern void panic_flush_kmsg_end(void) 16235adacd6SNicholas Piggin { 16335adacd6SNicholas Piggin printk_safe_flush_on_panic(); 16435adacd6SNicholas Piggin kmsg_dump(KMSG_DUMP_PANIC); 16535adacd6SNicholas Piggin bust_spinlocks(0); 16635adacd6SNicholas Piggin debug_locks_off(); 16735adacd6SNicholas Piggin console_flush_on_panic(); 16835adacd6SNicholas Piggin } 16935adacd6SNicholas Piggin 17003465f89SNicholas Piggin static unsigned long oops_begin(struct pt_regs *regs) 171760ca4dcSAnton Blanchard { 172760ca4dcSAnton Blanchard int cpu; 17334c2a14fSanton@samba.org unsigned long flags; 17414cf11afSPaul Mackerras 175293e4688Santon@samba.org oops_enter(); 176293e4688Santon@samba.org 177760ca4dcSAnton Blanchard /* racy, but better than risking deadlock. */ 178760ca4dcSAnton Blanchard raw_local_irq_save(flags); 179760ca4dcSAnton Blanchard cpu = smp_processor_id(); 180760ca4dcSAnton Blanchard if (!arch_spin_trylock(&die_lock)) { 181760ca4dcSAnton Blanchard if (cpu == die_owner) 182760ca4dcSAnton Blanchard /* nested oops. 
should stop eventually */; 183760ca4dcSAnton Blanchard else 184760ca4dcSAnton Blanchard arch_spin_lock(&die_lock); 185760ca4dcSAnton Blanchard } 186760ca4dcSAnton Blanchard die_nest_count++; 187760ca4dcSAnton Blanchard die_owner = cpu; 18814cf11afSPaul Mackerras console_verbose(); 18914cf11afSPaul Mackerras bust_spinlocks(1); 1906031d9d9Santon@samba.org if (machine_is(powermac)) 1916031d9d9Santon@samba.org pmac_backlight_unblank(); 192760ca4dcSAnton Blanchard return flags; 19334c2a14fSanton@samba.org } 19403465f89SNicholas Piggin NOKPROBE_SYMBOL(oops_begin); 1955474c120SMichael Hanselmann 19603465f89SNicholas Piggin static void oops_end(unsigned long flags, struct pt_regs *regs, 197760ca4dcSAnton Blanchard int signr) 198760ca4dcSAnton Blanchard { 19914cf11afSPaul Mackerras bust_spinlocks(0); 200373d4d09SRusty Russell add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); 201760ca4dcSAnton Blanchard die_nest_count--; 20258154c8cSAnton Blanchard oops_exit(); 20358154c8cSAnton Blanchard printk("\n"); 2047458e8b2SNicholas Piggin if (!die_nest_count) { 205760ca4dcSAnton Blanchard /* Nest count reaches zero, release the lock. */ 2067458e8b2SNicholas Piggin die_owner = -1; 207760ca4dcSAnton Blanchard arch_spin_unlock(&die_lock); 2087458e8b2SNicholas Piggin } 209760ca4dcSAnton Blanchard raw_local_irq_restore(flags); 210cc532915SMichael Ellerman 211d40b6768SNicholas Piggin /* 212d40b6768SNicholas Piggin * system_reset_exception handles debugger, crash dump, panic, for 0x100 213d40b6768SNicholas Piggin */ 214d40b6768SNicholas Piggin if (TRAP(regs) == 0x100) 215d40b6768SNicholas Piggin return; 216d40b6768SNicholas Piggin 217ebaeb5aeSMahesh Salgaonkar crash_fadump(regs, "die oops"); 218ebaeb5aeSMahesh Salgaonkar 2194388c9b3SNicholas Piggin if (kexec_should_crash(current)) 220cc532915SMichael Ellerman crash_kexec(regs); 2219b00ac06SAnton Blanchard 222760ca4dcSAnton Blanchard if (!signr) 223760ca4dcSAnton Blanchard return; 224760ca4dcSAnton Blanchard 22558154c8cSAnton Blanchard /* 22658154c8cSAnton Blanchard * While our oops output is serialised by a spinlock, output 22758154c8cSAnton Blanchard * from panic() called below can race and corrupt it. If we 22858154c8cSAnton Blanchard * know we are going to panic, delay for 1 second so we have a 22958154c8cSAnton Blanchard * chance to get clean backtraces from all CPUs that are oopsing.
23058154c8cSAnton Blanchard */ 23158154c8cSAnton Blanchard if (in_interrupt() || panic_on_oops || !current->pid || 23258154c8cSAnton Blanchard is_global_init(current)) { 23358154c8cSAnton Blanchard mdelay(MSEC_PER_SEC); 23458154c8cSAnton Blanchard } 23558154c8cSAnton Blanchard 23614cf11afSPaul Mackerras if (in_interrupt()) 23714cf11afSPaul Mackerras panic("Fatal exception in interrupt"); 238cea6a4baSHorms if (panic_on_oops) 239012c437dSHorms panic("Fatal exception"); 240760ca4dcSAnton Blanchard do_exit(signr); 241760ca4dcSAnton Blanchard } 24203465f89SNicholas Piggin NOKPROBE_SYMBOL(oops_end); 243cea6a4baSHorms 24403465f89SNicholas Piggin static int __die(const char *str, struct pt_regs *regs, long err) 245760ca4dcSAnton Blanchard { 246760ca4dcSAnton Blanchard printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); 2472e82ca3cSMichael Ellerman 2482e82ca3cSMichael Ellerman if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN)) 2492e82ca3cSMichael Ellerman printk("LE "); 2502e82ca3cSMichael Ellerman else 2512e82ca3cSMichael Ellerman printk("BE "); 2522e82ca3cSMichael Ellerman 2531c56cd8eSMichael Ellerman if (IS_ENABLED(CONFIG_PREEMPT)) 25472c0d9eeSMichael Ellerman pr_cont("PREEMPT "); 2551c56cd8eSMichael Ellerman 2561c56cd8eSMichael Ellerman if (IS_ENABLED(CONFIG_SMP)) 25772c0d9eeSMichael Ellerman pr_cont("SMP NR_CPUS=%d ", NR_CPUS); 2581c56cd8eSMichael Ellerman 259e7df0d88SJoonsoo Kim if (debug_pagealloc_enabled()) 26072c0d9eeSMichael Ellerman pr_cont("DEBUG_PAGEALLOC "); 2611c56cd8eSMichael Ellerman 2621c56cd8eSMichael Ellerman if (IS_ENABLED(CONFIG_NUMA)) 26372c0d9eeSMichael Ellerman pr_cont("NUMA "); 2641c56cd8eSMichael Ellerman 26572c0d9eeSMichael Ellerman pr_cont("%s\n", ppc_md.name ? ppc_md.name : ""); 266760ca4dcSAnton Blanchard 267760ca4dcSAnton Blanchard if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP) 268760ca4dcSAnton Blanchard return 1; 269760ca4dcSAnton Blanchard 270760ca4dcSAnton Blanchard print_modules(); 271760ca4dcSAnton Blanchard show_regs(regs); 27214cf11afSPaul Mackerras 27314cf11afSPaul Mackerras return 0; 27414cf11afSPaul Mackerras } 27503465f89SNicholas Piggin NOKPROBE_SYMBOL(__die); 27614cf11afSPaul Mackerras 277760ca4dcSAnton Blanchard void die(const char *str, struct pt_regs *regs, long err) 278760ca4dcSAnton Blanchard { 2796f44b20eSNicholas Piggin unsigned long flags; 280760ca4dcSAnton Blanchard 281d40b6768SNicholas Piggin /* 282d40b6768SNicholas Piggin * system_reset_exception handles debugger, crash dump, panic, for 0x100 283d40b6768SNicholas Piggin */ 284d40b6768SNicholas Piggin if (TRAP(regs) != 0x100) { 2856f44b20eSNicholas Piggin if (debugger(regs)) 2866f44b20eSNicholas Piggin return; 287d40b6768SNicholas Piggin } 2886f44b20eSNicholas Piggin 2896f44b20eSNicholas Piggin flags = oops_begin(regs); 290760ca4dcSAnton Blanchard if (__die(str, regs, err)) 291760ca4dcSAnton Blanchard err = 0; 292760ca4dcSAnton Blanchard oops_end(flags, regs, err); 293760ca4dcSAnton Blanchard } 29415770a13SNaveen N.
Rao NOKPROBE_SYMBOL(die); 295760ca4dcSAnton Blanchard 29625baa35bSOleg Nesterov void user_single_step_siginfo(struct task_struct *tsk, 29725baa35bSOleg Nesterov struct pt_regs *regs, siginfo_t *info) 29825baa35bSOleg Nesterov { 29925baa35bSOleg Nesterov info->si_signo = SIGTRAP; 30025baa35bSOleg Nesterov info->si_code = TRAP_TRACE; 30125baa35bSOleg Nesterov info->si_addr = (void __user *)regs->nip; 30225baa35bSOleg Nesterov } 30325baa35bSOleg Nesterov 304*35a52a10SMurilo Opsfelder Araujo static bool show_unhandled_signals_ratelimited(void) 305*35a52a10SMurilo Opsfelder Araujo { 306*35a52a10SMurilo Opsfelder Araujo static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, 307*35a52a10SMurilo Opsfelder Araujo DEFAULT_RATELIMIT_BURST); 308*35a52a10SMurilo Opsfelder Araujo return show_unhandled_signals && __ratelimit(&rs); 309*35a52a10SMurilo Opsfelder Araujo } 310*35a52a10SMurilo Opsfelder Araujo 311658b0f92SMurilo Opsfelder Araujo static void show_signal_msg(int signr, struct pt_regs *regs, int code, 312658b0f92SMurilo Opsfelder Araujo unsigned long addr) 31314cf11afSPaul Mackerras { 314d0c3d534SOlof Johansson const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \ 315d0c3d534SOlof Johansson "at %08lx nip %08lx lr %08lx code %x\n"; 316d0c3d534SOlof Johansson const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \ 317d0c3d534SOlof Johansson "at %016lx nip %016lx lr %016lx code %x\n"; 31814cf11afSPaul Mackerras 319*35a52a10SMurilo Opsfelder Araujo if (!show_unhandled_signals_ratelimited()) 320*35a52a10SMurilo Opsfelder Araujo return; 321*35a52a10SMurilo Opsfelder Araujo 322*35a52a10SMurilo Opsfelder Araujo if (!unhandled_signal(current, signr)) 323*35a52a10SMurilo Opsfelder Araujo return; 324*35a52a10SMurilo Opsfelder Araujo 325*35a52a10SMurilo Opsfelder Araujo printk(regs->msr & MSR_64BIT ? fmt64 : fmt32, 326d0c3d534SOlof Johansson current->comm, current->pid, signr, 327d0c3d534SOlof Johansson addr, regs->nip, regs->link, code); 32814cf11afSPaul Mackerras } 329658b0f92SMurilo Opsfelder Araujo 330658b0f92SMurilo Opsfelder Araujo void _exception_pkey(int signr, struct pt_regs *regs, int code, 331658b0f92SMurilo Opsfelder Araujo unsigned long addr, int key) 332658b0f92SMurilo Opsfelder Araujo { 333658b0f92SMurilo Opsfelder Araujo siginfo_t info; 334658b0f92SMurilo Opsfelder Araujo 335658b0f92SMurilo Opsfelder Araujo if (!user_mode(regs)) { 336658b0f92SMurilo Opsfelder Araujo die("Exception in kernel mode", regs, signr); 337658b0f92SMurilo Opsfelder Araujo return; 338658b0f92SMurilo Opsfelder Araujo } 339658b0f92SMurilo Opsfelder Araujo 340658b0f92SMurilo Opsfelder Araujo show_signal_msg(signr, regs, code, addr); 34114cf11afSPaul Mackerras 342a3512b2dSBenjamin Herrenschmidt if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs)) 3439f2f79e3SBenjamin Herrenschmidt local_irq_enable(); 3449f2f79e3SBenjamin Herrenschmidt 34541ab5266SAnanth N Mavinakayanahalli current->thread.trap_nr = code; 346c5cc1f4dSThiago Jung Bauermann 347c5cc1f4dSThiago Jung Bauermann /* 348c5cc1f4dSThiago Jung Bauermann * Save all the pkey registers AMR/IAMR/UAMOR. Eg: Core dumps need 349c5cc1f4dSThiago Jung Bauermann * to capture the content, if the task gets killed. 350c5cc1f4dSThiago Jung Bauermann */ 351c5cc1f4dSThiago Jung Bauermann thread_pkey_regs_save(&current->thread); 352c5cc1f4dSThiago Jung Bauermann 3533eb0f519SEric W.
Biederman clear_siginfo(&info); 35414cf11afSPaul Mackerras info.si_signo = signr; 35514cf11afSPaul Mackerras info.si_code = code; 35614cf11afSPaul Mackerras info.si_addr = (void __user *) addr; 35799cd1302SRam Pai info.si_pkey = key; 35899cd1302SRam Pai 35914cf11afSPaul Mackerras force_sig_info(signr, &info, current); 36014cf11afSPaul Mackerras } 36114cf11afSPaul Mackerras 36299cd1302SRam Pai void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) 36399cd1302SRam Pai { 36499cd1302SRam Pai _exception_pkey(signr, regs, code, addr, 0); 36599cd1302SRam Pai } 36699cd1302SRam Pai 36714cf11afSPaul Mackerras void system_reset_exception(struct pt_regs *regs) 36814cf11afSPaul Mackerras { 3692b4f3ac5SNicholas Piggin /* 3702b4f3ac5SNicholas Piggin * Avoid crashes in case of nested NMI exceptions. Recoverability 3712b4f3ac5SNicholas Piggin * is determined by RI and in_nmi 3722b4f3ac5SNicholas Piggin */ 3732b4f3ac5SNicholas Piggin bool nested = in_nmi(); 3742b4f3ac5SNicholas Piggin if (!nested) 3752b4f3ac5SNicholas Piggin nmi_enter(); 3762b4f3ac5SNicholas Piggin 377ca41ad43SNicholas Piggin __this_cpu_inc(irq_stat.sreset_irqs); 378ca41ad43SNicholas Piggin 37914cf11afSPaul Mackerras /* See if any machine dependent calls */ 380c902be71SArnd Bergmann if (ppc_md.system_reset_exception) { 381c902be71SArnd Bergmann if (ppc_md.system_reset_exception(regs)) 382c4f3b52cSNicholas Piggin goto out; 383c902be71SArnd Bergmann } 38414cf11afSPaul Mackerras 3854388c9b3SNicholas Piggin if (debugger(regs)) 3864388c9b3SNicholas Piggin goto out; 3874388c9b3SNicholas Piggin 3884388c9b3SNicholas Piggin /* 3894388c9b3SNicholas Piggin * A system reset is a request to dump, so we always send 3904388c9b3SNicholas Piggin * it through the crashdump code (if fadump or kdump are 3914388c9b3SNicholas Piggin * registered). 3924388c9b3SNicholas Piggin */ 3934388c9b3SNicholas Piggin crash_fadump(regs, "System Reset"); 3944388c9b3SNicholas Piggin 3954388c9b3SNicholas Piggin crash_kexec(regs); 3964388c9b3SNicholas Piggin 3974388c9b3SNicholas Piggin /* 3984388c9b3SNicholas Piggin * We aren't the primary crash CPU. We need to send it 3994388c9b3SNicholas Piggin * to a holding pattern to avoid it ending up in the panic 4004388c9b3SNicholas Piggin * code. 4014388c9b3SNicholas Piggin */ 4024388c9b3SNicholas Piggin crash_kexec_secondary(regs); 4034388c9b3SNicholas Piggin 4044388c9b3SNicholas Piggin /* 4054388c9b3SNicholas Piggin * No debugger or crash dump registered, print logs then 4064388c9b3SNicholas Piggin * panic. 
4074388c9b3SNicholas Piggin */ 4084552d128SNicholas Piggin die("System Reset", regs, SIGABRT); 4094388c9b3SNicholas Piggin 4104388c9b3SNicholas Piggin mdelay(2*MSEC_PER_SEC); /* Wait a little while for others to print */ 4114388c9b3SNicholas Piggin add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); 4124388c9b3SNicholas Piggin nmi_panic(regs, "System Reset"); 41314cf11afSPaul Mackerras 414c4f3b52cSNicholas Piggin out: 415c4f3b52cSNicholas Piggin #ifdef CONFIG_PPC_BOOK3S_64 416c4f3b52cSNicholas Piggin BUG_ON(get_paca()->in_nmi == 0); 417c4f3b52cSNicholas Piggin if (get_paca()->in_nmi > 1) 4184388c9b3SNicholas Piggin nmi_panic(regs, "Unrecoverable nested System Reset"); 419c4f3b52cSNicholas Piggin #endif 42014cf11afSPaul Mackerras /* Must die if the interrupt is not recoverable */ 42114cf11afSPaul Mackerras if (!(regs->msr & MSR_RI)) 4224388c9b3SNicholas Piggin nmi_panic(regs, "Unrecoverable System Reset"); 42314cf11afSPaul Mackerras 4242b4f3ac5SNicholas Piggin if (!nested) 4252b4f3ac5SNicholas Piggin nmi_exit(); 4262b4f3ac5SNicholas Piggin 42714cf11afSPaul Mackerras /* What should we do here? We could issue a shutdown or hard reset. */ 42814cf11afSPaul Mackerras } 4291e9b4507SMahesh Salgaonkar 43014cf11afSPaul Mackerras /* 43114cf11afSPaul Mackerras * I/O accesses can cause machine checks on powermacs. 43214cf11afSPaul Mackerras * Check if the NIP corresponds to the address of a sync 43314cf11afSPaul Mackerras * instruction for which there is an entry in the exception 43414cf11afSPaul Mackerras * table. 43514cf11afSPaul Mackerras * Note that the 601 only takes a machine check on TEA 43614cf11afSPaul Mackerras * (transfer error ack) signal assertion, and does not 43714cf11afSPaul Mackerras * set any of the top 16 bits of SRR1. 43814cf11afSPaul Mackerras * -- paulus. 43914cf11afSPaul Mackerras */ 44014cf11afSPaul Mackerras static inline int check_io_access(struct pt_regs *regs) 44114cf11afSPaul Mackerras { 44268a64357SBenjamin Herrenschmidt #ifdef CONFIG_PPC32 44314cf11afSPaul Mackerras unsigned long msr = regs->msr; 44414cf11afSPaul Mackerras const struct exception_table_entry *entry; 44514cf11afSPaul Mackerras unsigned int *nip = (unsigned int *)regs->nip; 44614cf11afSPaul Mackerras 44714cf11afSPaul Mackerras if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000))) 44814cf11afSPaul Mackerras && (entry = search_exception_tables(regs->nip)) != NULL) { 44914cf11afSPaul Mackerras /* 45014cf11afSPaul Mackerras * Check that it's a sync instruction, or somewhere 45114cf11afSPaul Mackerras * in the twi; isync; nop sequence that inb/inw/inl uses. 45214cf11afSPaul Mackerras * As the address is in the exception table 45314cf11afSPaul Mackerras * we should be able to read the instr there. 45414cf11afSPaul Mackerras * For the debug message, we look at the preceding 45514cf11afSPaul Mackerras * load or store. 45614cf11afSPaul Mackerras */ 457ddc6cd0dSChristophe Leroy if (*nip == PPC_INST_NOP) 45814cf11afSPaul Mackerras nip -= 2; 459ddc6cd0dSChristophe Leroy else if (*nip == PPC_INST_ISYNC) 46014cf11afSPaul Mackerras --nip; 461ddc6cd0dSChristophe Leroy if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) { 46214cf11afSPaul Mackerras unsigned int rb; 46314cf11afSPaul Mackerras 46414cf11afSPaul Mackerras --nip; 46514cf11afSPaul Mackerras rb = (*nip >> 11) & 0x1f; 46614cf11afSPaul Mackerras printk(KERN_DEBUG "%s bad port %lx at %p\n", 46714cf11afSPaul Mackerras (*nip & 0x100)? 
"OUT to": "IN from", 46814cf11afSPaul Mackerras regs->gpr[rb] - _IO_BASE, nip); 46914cf11afSPaul Mackerras regs->msr |= MSR_RI; 47061a92f70SNicholas Piggin regs->nip = extable_fixup(entry); 47114cf11afSPaul Mackerras return 1; 47214cf11afSPaul Mackerras } 47314cf11afSPaul Mackerras } 47468a64357SBenjamin Herrenschmidt #endif /* CONFIG_PPC32 */ 47514cf11afSPaul Mackerras return 0; 47614cf11afSPaul Mackerras } 47714cf11afSPaul Mackerras 478172ae2e7SDave Kleikamp #ifdef CONFIG_PPC_ADV_DEBUG_REGS 47914cf11afSPaul Mackerras /* On 4xx, the reason for the machine check or program exception 48014cf11afSPaul Mackerras is in the ESR. */ 48114cf11afSPaul Mackerras #define get_reason(regs) ((regs)->dsisr) 48214cf11afSPaul Mackerras #define REASON_FP ESR_FP 48314cf11afSPaul Mackerras #define REASON_ILLEGAL (ESR_PIL | ESR_PUO) 48414cf11afSPaul Mackerras #define REASON_PRIVILEGED ESR_PPR 48514cf11afSPaul Mackerras #define REASON_TRAP ESR_PTR 48614cf11afSPaul Mackerras 48714cf11afSPaul Mackerras /* single-step stuff */ 48851ae8d4aSBharat Bhushan #define single_stepping(regs) (current->thread.debug.dbcr0 & DBCR0_IC) 48951ae8d4aSBharat Bhushan #define clear_single_step(regs) (current->thread.debug.dbcr0 &= ~DBCR0_IC) 4900e524e76SMatt Evans #define clear_br_trace(regs) do {} while(0) 49114cf11afSPaul Mackerras #else 49214cf11afSPaul Mackerras /* On non-4xx, the reason for the machine check or program 49314cf11afSPaul Mackerras exception is in the MSR. */ 49414cf11afSPaul Mackerras #define get_reason(regs) ((regs)->msr) 495d30a5a52SMichael Ellerman #define REASON_TM SRR1_PROGTM 496d30a5a52SMichael Ellerman #define REASON_FP SRR1_PROGFPE 497d30a5a52SMichael Ellerman #define REASON_ILLEGAL SRR1_PROGILL 498d30a5a52SMichael Ellerman #define REASON_PRIVILEGED SRR1_PROGPRIV 499d30a5a52SMichael Ellerman #define REASON_TRAP SRR1_PROGTRAP 50014cf11afSPaul Mackerras 50114cf11afSPaul Mackerras #define single_stepping(regs) ((regs)->msr & MSR_SE) 50214cf11afSPaul Mackerras #define clear_single_step(regs) ((regs)->msr &= ~MSR_SE) 5030e524e76SMatt Evans #define clear_br_trace(regs) ((regs)->msr &= ~MSR_BE) 50414cf11afSPaul Mackerras #endif 50514cf11afSPaul Mackerras 5060d0935b3SMichael Ellerman #if defined(CONFIG_E500) 507fe04b112SScott Wood int machine_check_e500mc(struct pt_regs *regs) 508fe04b112SScott Wood { 509fe04b112SScott Wood unsigned long mcsr = mfspr(SPRN_MCSR); 510a4e89ffbSMatt Weber unsigned long pvr = mfspr(SPRN_PVR); 511fe04b112SScott Wood unsigned long reason = mcsr; 512fe04b112SScott Wood int recoverable = 1; 513fe04b112SScott Wood 51482a9a480SScott Wood if (reason & MCSR_LD) { 515cce1f106SShaohui Xie recoverable = fsl_rio_mcheck_exception(regs); 516cce1f106SShaohui Xie if (recoverable == 1) 517cce1f106SShaohui Xie goto silent_out; 518cce1f106SShaohui Xie } 519cce1f106SShaohui Xie 520fe04b112SScott Wood printk("Machine check in kernel mode.\n"); 521fe04b112SScott Wood printk("Caused by (from MCSR=%lx): ", reason); 522fe04b112SScott Wood 523fe04b112SScott Wood if (reason & MCSR_MCP) 524fe04b112SScott Wood printk("Machine Check Signal\n"); 525fe04b112SScott Wood 526fe04b112SScott Wood if (reason & MCSR_ICPERR) { 527fe04b112SScott Wood printk("Instruction Cache Parity Error\n"); 528fe04b112SScott Wood 529fe04b112SScott Wood /* 530fe04b112SScott Wood * This is recoverable by invalidating the i-cache. 
531fe04b112SScott Wood */ 532fe04b112SScott Wood mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI); 533fe04b112SScott Wood while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI) 534fe04b112SScott Wood ; 535fe04b112SScott Wood 536fe04b112SScott Wood /* 537fe04b112SScott Wood * This will generally be accompanied by an instruction 538fe04b112SScott Wood * fetch error report -- only treat MCSR_IF as fatal 539fe04b112SScott Wood * if it wasn't due to an L1 parity error. 540fe04b112SScott Wood */ 541fe04b112SScott Wood reason &= ~MCSR_IF; 542fe04b112SScott Wood } 543fe04b112SScott Wood 544fe04b112SScott Wood if (reason & MCSR_DCPERR_MC) { 545fe04b112SScott Wood printk("Data Cache Parity Error\n"); 54637caf9f2SKumar Gala 54737caf9f2SKumar Gala /* 54837caf9f2SKumar Gala * In write shadow mode we auto-recover from the error, but it 54937caf9f2SKumar Gala * may still get logged and cause a machine check. We should 55037caf9f2SKumar Gala * only treat the non-write shadow case as non-recoverable. 55137caf9f2SKumar Gala */ 552a4e89ffbSMatt Weber /* On e6500 core, L1 DCWS (Data cache write shadow mode) bit 553a4e89ffbSMatt Weber * is not implemented but L1 data cache always runs in write 554a4e89ffbSMatt Weber * shadow mode. Hence on data cache parity errors HW will 555a4e89ffbSMatt Weber * automatically invalidate the L1 Data Cache. 556a4e89ffbSMatt Weber */ 557a4e89ffbSMatt Weber if (PVR_VER(pvr) != PVR_VER_E6500) { 55837caf9f2SKumar Gala if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS)) 559fe04b112SScott Wood recoverable = 0; 560fe04b112SScott Wood } 561a4e89ffbSMatt Weber } 562fe04b112SScott Wood 563fe04b112SScott Wood if (reason & MCSR_L2MMU_MHIT) { 564fe04b112SScott Wood printk("Hit on multiple TLB entries\n"); 565fe04b112SScott Wood recoverable = 0; 566fe04b112SScott Wood } 567fe04b112SScott Wood 568fe04b112SScott Wood if (reason & MCSR_NMI) 569fe04b112SScott Wood printk("Non-maskable interrupt\n"); 570fe04b112SScott Wood 571fe04b112SScott Wood if (reason & MCSR_IF) { 572fe04b112SScott Wood printk("Instruction Fetch Error Report\n"); 573fe04b112SScott Wood recoverable = 0; 574fe04b112SScott Wood } 575fe04b112SScott Wood 576fe04b112SScott Wood if (reason & MCSR_LD) { 577fe04b112SScott Wood printk("Load Error Report\n"); 578fe04b112SScott Wood recoverable = 0; 579fe04b112SScott Wood } 580fe04b112SScott Wood 581fe04b112SScott Wood if (reason & MCSR_ST) { 582fe04b112SScott Wood printk("Store Error Report\n"); 583fe04b112SScott Wood recoverable = 0; 584fe04b112SScott Wood } 585fe04b112SScott Wood 586fe04b112SScott Wood if (reason & MCSR_LDG) { 587fe04b112SScott Wood printk("Guarded Load Error Report\n"); 588fe04b112SScott Wood recoverable = 0; 589fe04b112SScott Wood } 590fe04b112SScott Wood 591fe04b112SScott Wood if (reason & MCSR_TLBSYNC) 592fe04b112SScott Wood printk("Simultaneous tlbsync operations\n"); 593fe04b112SScott Wood 594fe04b112SScott Wood if (reason & MCSR_BSL2_ERR) { 595fe04b112SScott Wood printk("Level 2 Cache Error\n"); 596fe04b112SScott Wood recoverable = 0; 597fe04b112SScott Wood } 598fe04b112SScott Wood 599fe04b112SScott Wood if (reason & MCSR_MAV) { 600fe04b112SScott Wood u64 addr; 601fe04b112SScott Wood 602fe04b112SScott Wood addr = mfspr(SPRN_MCAR); 603fe04b112SScott Wood addr |= (u64)mfspr(SPRN_MCARU) << 32; 604fe04b112SScott Wood 605fe04b112SScott Wood printk("Machine Check %s Address: %#llx\n", 606fe04b112SScott Wood reason & MCSR_MEA ? 
"Effective" : "Physical", addr); 607fe04b112SScott Wood } 608fe04b112SScott Wood 609cce1f106SShaohui Xie silent_out: 610fe04b112SScott Wood mtspr(SPRN_MCSR, mcsr); 611fe04b112SScott Wood return mfspr(SPRN_MCSR) == 0 && recoverable; 612fe04b112SScott Wood } 613fe04b112SScott Wood 61447c0bd1aSBenjamin Herrenschmidt int machine_check_e500(struct pt_regs *regs) 61547c0bd1aSBenjamin Herrenschmidt { 61642bff234SMichael Ellerman unsigned long reason = mfspr(SPRN_MCSR); 61747c0bd1aSBenjamin Herrenschmidt 618cce1f106SShaohui Xie if (reason & MCSR_BUS_RBERR) { 619cce1f106SShaohui Xie if (fsl_rio_mcheck_exception(regs)) 620cce1f106SShaohui Xie return 1; 6214e0e3435SHongtao Jia if (fsl_pci_mcheck_exception(regs)) 6224e0e3435SHongtao Jia return 1; 623cce1f106SShaohui Xie } 624cce1f106SShaohui Xie 62514cf11afSPaul Mackerras printk("Machine check in kernel mode.\n"); 62614cf11afSPaul Mackerras printk("Caused by (from MCSR=%lx): ", reason); 62714cf11afSPaul Mackerras 62814cf11afSPaul Mackerras if (reason & MCSR_MCP) 62914cf11afSPaul Mackerras printk("Machine Check Signal\n"); 63014cf11afSPaul Mackerras if (reason & MCSR_ICPERR) 63114cf11afSPaul Mackerras printk("Instruction Cache Parity Error\n"); 63214cf11afSPaul Mackerras if (reason & MCSR_DCP_PERR) 63314cf11afSPaul Mackerras printk("Data Cache Push Parity Error\n"); 63414cf11afSPaul Mackerras if (reason & MCSR_DCPERR) 63514cf11afSPaul Mackerras printk("Data Cache Parity Error\n"); 63614cf11afSPaul Mackerras if (reason & MCSR_BUS_IAERR) 63714cf11afSPaul Mackerras printk("Bus - Instruction Address Error\n"); 63814cf11afSPaul Mackerras if (reason & MCSR_BUS_RAERR) 63914cf11afSPaul Mackerras printk("Bus - Read Address Error\n"); 64014cf11afSPaul Mackerras if (reason & MCSR_BUS_WAERR) 64114cf11afSPaul Mackerras printk("Bus - Write Address Error\n"); 64214cf11afSPaul Mackerras if (reason & MCSR_BUS_IBERR) 64314cf11afSPaul Mackerras printk("Bus - Instruction Data Error\n"); 64414cf11afSPaul Mackerras if (reason & MCSR_BUS_RBERR) 64514cf11afSPaul Mackerras printk("Bus - Read Data Bus Error\n"); 64614cf11afSPaul Mackerras if (reason & MCSR_BUS_WBERR) 647c1528339SWladislav Wiebe printk("Bus - Write Data Bus Error\n"); 64814cf11afSPaul Mackerras if (reason & MCSR_BUS_IPERR) 64914cf11afSPaul Mackerras printk("Bus - Instruction Parity Error\n"); 65014cf11afSPaul Mackerras if (reason & MCSR_BUS_RPERR) 65114cf11afSPaul Mackerras printk("Bus - Read Parity Error\n"); 65247c0bd1aSBenjamin Herrenschmidt 65347c0bd1aSBenjamin Herrenschmidt return 0; 65447c0bd1aSBenjamin Herrenschmidt } 6554490c06bSKumar Gala 6564490c06bSKumar Gala int machine_check_generic(struct pt_regs *regs) 6574490c06bSKumar Gala { 6584490c06bSKumar Gala return 0; 6594490c06bSKumar Gala } 66014cf11afSPaul Mackerras #elif defined(CONFIG_E200) 66147c0bd1aSBenjamin Herrenschmidt int machine_check_e200(struct pt_regs *regs) 66247c0bd1aSBenjamin Herrenschmidt { 66342bff234SMichael Ellerman unsigned long reason = mfspr(SPRN_MCSR); 66447c0bd1aSBenjamin Herrenschmidt 66514cf11afSPaul Mackerras printk("Machine check in kernel mode.\n"); 66614cf11afSPaul Mackerras printk("Caused by (from MCSR=%lx): ", reason); 66714cf11afSPaul Mackerras 66814cf11afSPaul Mackerras if (reason & MCSR_MCP) 66914cf11afSPaul Mackerras printk("Machine Check Signal\n"); 67014cf11afSPaul Mackerras if (reason & MCSR_CP_PERR) 67114cf11afSPaul Mackerras printk("Cache Push Parity Error\n"); 67214cf11afSPaul Mackerras if (reason & MCSR_CPERR) 67314cf11afSPaul Mackerras printk("Cache Parity Error\n"); 67414cf11afSPaul Mackerras if (reason & 
MCSR_EXCP_ERR) 67514cf11afSPaul Mackerras printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n"); 67614cf11afSPaul Mackerras if (reason & MCSR_BUS_IRERR) 67714cf11afSPaul Mackerras printk("Bus - Read Bus Error on instruction fetch\n"); 67814cf11afSPaul Mackerras if (reason & MCSR_BUS_DRERR) 67914cf11afSPaul Mackerras printk("Bus - Read Bus Error on data load\n"); 68014cf11afSPaul Mackerras if (reason & MCSR_BUS_WRERR) 68114cf11afSPaul Mackerras printk("Bus - Write Bus Error on buffered store or cache line push\n"); 68247c0bd1aSBenjamin Herrenschmidt 68347c0bd1aSBenjamin Herrenschmidt return 0; 68447c0bd1aSBenjamin Herrenschmidt } 6857f3f819eSMichael Ellerman #elif defined(CONFIG_PPC32) 68647c0bd1aSBenjamin Herrenschmidt int machine_check_generic(struct pt_regs *regs) 68747c0bd1aSBenjamin Herrenschmidt { 68842bff234SMichael Ellerman unsigned long reason = regs->msr; 68947c0bd1aSBenjamin Herrenschmidt 69014cf11afSPaul Mackerras printk("Machine check in kernel mode.\n"); 69114cf11afSPaul Mackerras printk("Caused by (from SRR1=%lx): ", reason); 69214cf11afSPaul Mackerras switch (reason & 0x601F0000) { 69314cf11afSPaul Mackerras case 0x80000: 69414cf11afSPaul Mackerras printk("Machine check signal\n"); 69514cf11afSPaul Mackerras break; 69614cf11afSPaul Mackerras case 0: /* for 601 */ 69714cf11afSPaul Mackerras case 0x40000: 69814cf11afSPaul Mackerras case 0x140000: /* 7450 MSS error and TEA */ 69914cf11afSPaul Mackerras printk("Transfer error ack signal\n"); 70014cf11afSPaul Mackerras break; 70114cf11afSPaul Mackerras case 0x20000: 70214cf11afSPaul Mackerras printk("Data parity error signal\n"); 70314cf11afSPaul Mackerras break; 70414cf11afSPaul Mackerras case 0x10000: 70514cf11afSPaul Mackerras printk("Address parity error signal\n"); 70614cf11afSPaul Mackerras break; 70714cf11afSPaul Mackerras case 0x20000000: 70814cf11afSPaul Mackerras printk("L1 Data Cache error\n"); 70914cf11afSPaul Mackerras break; 71014cf11afSPaul Mackerras case 0x40000000: 71114cf11afSPaul Mackerras printk("L1 Instruction Cache error\n"); 71214cf11afSPaul Mackerras break; 71314cf11afSPaul Mackerras case 0x00100000: 71414cf11afSPaul Mackerras printk("L2 data cache parity error\n"); 71514cf11afSPaul Mackerras break; 71614cf11afSPaul Mackerras default: 71714cf11afSPaul Mackerras printk("Unknown values in msr\n"); 71814cf11afSPaul Mackerras } 71975918a4bSOlof Johansson return 0; 72075918a4bSOlof Johansson } 72147c0bd1aSBenjamin Herrenschmidt #endif /* everything else */ 72275918a4bSOlof Johansson 72375918a4bSOlof Johansson void machine_check_exception(struct pt_regs *regs) 72475918a4bSOlof Johansson { 72575918a4bSOlof Johansson int recover = 0; 726b96672ddSNicholas Piggin bool nested = in_nmi(); 727b96672ddSNicholas Piggin if (!nested) 728b96672ddSNicholas Piggin nmi_enter(); 72975918a4bSOlof Johansson 730f886f0f6SNicholas Piggin /* 64s accounts the mce in machine_check_early when in HVMODE */ 731f886f0f6SNicholas Piggin if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !cpu_has_feature(CPU_FTR_HVMODE)) 73269111bacSChristoph Lameter __this_cpu_inc(irq_stat.mce_exceptions); 73389713ed1SAnton Blanchard 734d93b0ac0SMahesh Salgaonkar add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); 735d93b0ac0SMahesh Salgaonkar 73647c0bd1aSBenjamin Herrenschmidt /* See if any machine dependent calls. In theory, we would want 73747c0bd1aSBenjamin Herrenschmidt * to call the CPU first, and call the ppc_md. one if the CPU 73847c0bd1aSBenjamin Herrenschmidt * one returns a positive number. 
However there is existing code 73947c0bd1aSBenjamin Herrenschmidt * that assumes the board gets a first chance, so let's keep it 74047c0bd1aSBenjamin Herrenschmidt * that way for now and fix things later. --BenH. 74147c0bd1aSBenjamin Herrenschmidt */ 74275918a4bSOlof Johansson if (ppc_md.machine_check_exception) 74375918a4bSOlof Johansson recover = ppc_md.machine_check_exception(regs); 74447c0bd1aSBenjamin Herrenschmidt else if (cur_cpu_spec->machine_check) 74547c0bd1aSBenjamin Herrenschmidt recover = cur_cpu_spec->machine_check(regs); 74675918a4bSOlof Johansson 74747c0bd1aSBenjamin Herrenschmidt if (recover > 0) 748ba12eedeSLi Zhong goto bail; 74975918a4bSOlof Johansson 750a443506bSAnton Blanchard if (debugger_fault_handler(regs)) 751ba12eedeSLi Zhong goto bail; 75275918a4bSOlof Johansson 75375918a4bSOlof Johansson if (check_io_access(regs)) 754ba12eedeSLi Zhong goto bail; 75575918a4bSOlof Johansson 7568dad3f92SPaul Mackerras die("Machine check", regs, SIGBUS); 75714cf11afSPaul Mackerras 75814cf11afSPaul Mackerras /* Must die if the interrupt is not recoverable */ 75914cf11afSPaul Mackerras if (!(regs->msr & MSR_RI)) 760b96672ddSNicholas Piggin nmi_panic(regs, "Unrecoverable Machine check"); 761ba12eedeSLi Zhong 762ba12eedeSLi Zhong bail: 763b96672ddSNicholas Piggin if (!nested) 764b96672ddSNicholas Piggin nmi_exit(); 76514cf11afSPaul Mackerras } 76614cf11afSPaul Mackerras 76714cf11afSPaul Mackerras void SMIException(struct pt_regs *regs) 76814cf11afSPaul Mackerras { 76914cf11afSPaul Mackerras die("System Management Interrupt", regs, SIGABRT); 77014cf11afSPaul Mackerras } 77114cf11afSPaul Mackerras 7725080332cSMichael Neuling #ifdef CONFIG_VSX 7735080332cSMichael Neuling static void p9_hmi_special_emu(struct pt_regs *regs) 7745080332cSMichael Neuling { 7755080332cSMichael Neuling unsigned int ra, rb, t, i, sel, instr, rc; 7765080332cSMichael Neuling const void __user *addr; 7775080332cSMichael Neuling u8 vbuf[16], *vdst; 7785080332cSMichael Neuling unsigned long ea, msr, msr_mask; 7795080332cSMichael Neuling bool swap; 7805080332cSMichael Neuling 7815080332cSMichael Neuling if (__get_user_inatomic(instr, (unsigned int __user *)regs->nip)) 7825080332cSMichael Neuling return; 7835080332cSMichael Neuling 7845080332cSMichael Neuling /* 7855080332cSMichael Neuling * lxvb16x opcode: 0x7c0006d8 7865080332cSMichael Neuling * lxvd2x opcode: 0x7c000698 7875080332cSMichael Neuling * lxvh8x opcode: 0x7c000658 7885080332cSMichael Neuling * lxvw4x opcode: 0x7c000618 7895080332cSMichael Neuling */ 7905080332cSMichael Neuling if ((instr & 0xfc00073e) != 0x7c000618) { 7915080332cSMichael Neuling pr_devel("HMI vec emu: not vector CI %i:%s[%d] nip=%016lx" 7925080332cSMichael Neuling " instr=%08x\n", 7935080332cSMichael Neuling smp_processor_id(), current->comm, current->pid, 7945080332cSMichael Neuling regs->nip, instr); 7955080332cSMichael Neuling return; 7965080332cSMichael Neuling } 7975080332cSMichael Neuling 7985080332cSMichael Neuling /* Grab vector registers into the task struct */ 7995080332cSMichael Neuling msr = regs->msr; /* Grab msr before we flush the bits */ 8005080332cSMichael Neuling flush_vsx_to_thread(current); 8015080332cSMichael Neuling enable_kernel_altivec(); 8025080332cSMichael Neuling 8035080332cSMichael Neuling /* 8045080332cSMichael Neuling * Is userspace running with a different endian (this is rare but 8055080332cSMichael Neuling * not impossible) 8065080332cSMichael Neuling */ 8075080332cSMichael Neuling swap = (msr & MSR_LE) != (MSR_KERNEL & MSR_LE); 8085080332cSMichael Neuling 
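/*
 * Note on the decode below: RA is taken from bits 16-20 of the
 * instruction word, RB from bits 11-15 and the target register T from
 * bits 21-25, while the low (TX) bit selects between the upper VSRs,
 * which alias the VMX/VR registers saved in vr_state, and the lower
 * VSRs, which alias the FP registers saved in fp_state.
 */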
8095080332cSMichael Neuling /* Decode the instruction */ 8105080332cSMichael Neuling ra = (instr >> 16) & 0x1f; 8115080332cSMichael Neuling rb = (instr >> 11) & 0x1f; 8125080332cSMichael Neuling t = (instr >> 21) & 0x1f; 8135080332cSMichael Neuling if (instr & 1) 8145080332cSMichael Neuling vdst = (u8 *)&current->thread.vr_state.vr[t]; 8155080332cSMichael Neuling else 8165080332cSMichael Neuling vdst = (u8 *)&current->thread.fp_state.fpr[t][0]; 8175080332cSMichael Neuling 8185080332cSMichael Neuling /* Grab the vector address */ 8195080332cSMichael Neuling ea = regs->gpr[rb] + (ra ? regs->gpr[ra] : 0); 8205080332cSMichael Neuling if (is_32bit_task()) 8215080332cSMichael Neuling ea &= 0xfffffffful; 8225080332cSMichael Neuling addr = (__force const void __user *)ea; 8235080332cSMichael Neuling 8245080332cSMichael Neuling /* Check it */ 8255080332cSMichael Neuling if (!access_ok(VERIFY_READ, addr, 16)) { 8265080332cSMichael Neuling pr_devel("HMI vec emu: bad access %i:%s[%d] nip=%016lx" 8275080332cSMichael Neuling " instr=%08x addr=%016lx\n", 8285080332cSMichael Neuling smp_processor_id(), current->comm, current->pid, 8295080332cSMichael Neuling regs->nip, instr, (unsigned long)addr); 8305080332cSMichael Neuling return; 8315080332cSMichael Neuling } 8325080332cSMichael Neuling 8335080332cSMichael Neuling /* Read the vector */ 8345080332cSMichael Neuling rc = 0; 8355080332cSMichael Neuling if ((unsigned long)addr & 0xfUL) 8365080332cSMichael Neuling /* unaligned case */ 8375080332cSMichael Neuling rc = __copy_from_user_inatomic(vbuf, addr, 16); 8385080332cSMichael Neuling else 8395080332cSMichael Neuling __get_user_atomic_128_aligned(vbuf, addr, rc); 8405080332cSMichael Neuling if (rc) { 8415080332cSMichael Neuling pr_devel("HMI vec emu: page fault %i:%s[%d] nip=%016lx" 8425080332cSMichael Neuling " instr=%08x addr=%016lx\n", 8435080332cSMichael Neuling smp_processor_id(), current->comm, current->pid, 8445080332cSMichael Neuling regs->nip, instr, (unsigned long)addr); 8455080332cSMichael Neuling return; 8465080332cSMichael Neuling } 8475080332cSMichael Neuling 8485080332cSMichael Neuling pr_devel("HMI vec emu: emulated vector CI %i:%s[%d] nip=%016lx" 8495080332cSMichael Neuling " instr=%08x addr=%016lx\n", 8505080332cSMichael Neuling smp_processor_id(), current->comm, current->pid, regs->nip, 8515080332cSMichael Neuling instr, (unsigned long) addr); 8525080332cSMichael Neuling 8535080332cSMichael Neuling /* Grab instruction "selector" */ 8545080332cSMichael Neuling sel = (instr >> 6) & 3; 8555080332cSMichael Neuling 8565080332cSMichael Neuling /* 8575080332cSMichael Neuling * Check to make sure the facility is actually enabled. This 8585080332cSMichael Neuling * could happen if we get a false positive hit.
8595080332cSMichael Neuling * 8605080332cSMichael Neuling * lxvd2x/lxvw4x always check MSR VSX sel = 0,2 8615080332cSMichael Neuling * lxvh8x/lxvb16x check MSR VSX or VEC depending on VSR used sel = 1,3 8625080332cSMichael Neuling */ 8635080332cSMichael Neuling msr_mask = MSR_VSX; 8645080332cSMichael Neuling if ((sel & 1) && (instr & 1)) /* lxvh8x & lxvb16x + VSR >= 32 */ 8655080332cSMichael Neuling msr_mask = MSR_VEC; 8665080332cSMichael Neuling if (!(msr & msr_mask)) { 8675080332cSMichael Neuling pr_devel("HMI vec emu: MSR fac clear %i:%s[%d] nip=%016lx" 8685080332cSMichael Neuling " instr=%08x msr:%016lx\n", 8695080332cSMichael Neuling smp_processor_id(), current->comm, current->pid, 8705080332cSMichael Neuling regs->nip, instr, msr); 8715080332cSMichael Neuling return; 8725080332cSMichael Neuling } 8735080332cSMichael Neuling 8745080332cSMichael Neuling /* Do logging here before we modify sel based on endian */ 8755080332cSMichael Neuling switch (sel) { 8765080332cSMichael Neuling case 0: /* lxvw4x */ 8775080332cSMichael Neuling PPC_WARN_EMULATED(lxvw4x, regs); 8785080332cSMichael Neuling break; 8795080332cSMichael Neuling case 1: /* lxvh8x */ 8805080332cSMichael Neuling PPC_WARN_EMULATED(lxvh8x, regs); 8815080332cSMichael Neuling break; 8825080332cSMichael Neuling case 2: /* lxvd2x */ 8835080332cSMichael Neuling PPC_WARN_EMULATED(lxvd2x, regs); 8845080332cSMichael Neuling break; 8855080332cSMichael Neuling case 3: /* lxvb16x */ 8865080332cSMichael Neuling PPC_WARN_EMULATED(lxvb16x, regs); 8875080332cSMichael Neuling break; 8885080332cSMichael Neuling } 8895080332cSMichael Neuling 8905080332cSMichael Neuling #ifdef __LITTLE_ENDIAN__ 8915080332cSMichael Neuling /* 8925080332cSMichael Neuling * An LE kernel stores the vector in the task struct as an LE 8935080332cSMichael Neuling * byte array (effectively swapping both the components and 8945080332cSMichael Neuling * the content of the components). Those instructions expect 8955080332cSMichael Neuling * the components to remain in ascending address order, so we 8965080332cSMichael Neuling * swap them back. 8975080332cSMichael Neuling * 8985080332cSMichael Neuling * If we are running a BE user space, the expectation is that 8995080332cSMichael Neuling * of a simple memcpy, so forcing the emulation to look like 9005080332cSMichael Neuling * a lxvb16x should do the trick. 
9015080332cSMichael Neuling */ 9025080332cSMichael Neuling if (swap) 9035080332cSMichael Neuling sel = 3; 9045080332cSMichael Neuling 9055080332cSMichael Neuling switch (sel) { 9065080332cSMichael Neuling case 0: /* lxvw4x */ 9075080332cSMichael Neuling for (i = 0; i < 4; i++) 9085080332cSMichael Neuling ((u32 *)vdst)[i] = ((u32 *)vbuf)[3-i]; 9095080332cSMichael Neuling break; 9105080332cSMichael Neuling case 1: /* lxvh8x */ 9115080332cSMichael Neuling for (i = 0; i < 8; i++) 9125080332cSMichael Neuling ((u16 *)vdst)[i] = ((u16 *)vbuf)[7-i]; 9135080332cSMichael Neuling break; 9145080332cSMichael Neuling case 2: /* lxvd2x */ 9155080332cSMichael Neuling for (i = 0; i < 2; i++) 9165080332cSMichael Neuling ((u64 *)vdst)[i] = ((u64 *)vbuf)[1-i]; 9175080332cSMichael Neuling break; 9185080332cSMichael Neuling case 3: /* lxvb16x */ 9195080332cSMichael Neuling for (i = 0; i < 16; i++) 9205080332cSMichael Neuling vdst[i] = vbuf[15-i]; 9215080332cSMichael Neuling break; 9225080332cSMichael Neuling } 9235080332cSMichael Neuling #else /* __LITTLE_ENDIAN__ */ 9245080332cSMichael Neuling /* On a big endian kernel, a BE userspace only needs a memcpy */ 9255080332cSMichael Neuling if (!swap) 9265080332cSMichael Neuling sel = 3; 9275080332cSMichael Neuling 9285080332cSMichael Neuling /* Otherwise, we need to swap the content of the components */ 9295080332cSMichael Neuling switch (sel) { 9305080332cSMichael Neuling case 0: /* lxvw4x */ 9315080332cSMichael Neuling for (i = 0; i < 4; i++) 9325080332cSMichael Neuling ((u32 *)vdst)[i] = cpu_to_le32(((u32 *)vbuf)[i]); 9335080332cSMichael Neuling break; 9345080332cSMichael Neuling case 1: /* lxvh8x */ 9355080332cSMichael Neuling for (i = 0; i < 8; i++) 9365080332cSMichael Neuling ((u16 *)vdst)[i] = cpu_to_le16(((u16 *)vbuf)[i]); 9375080332cSMichael Neuling break; 9385080332cSMichael Neuling case 2: /* lxvd2x */ 9395080332cSMichael Neuling for (i = 0; i < 2; i++) 9405080332cSMichael Neuling ((u64 *)vdst)[i] = cpu_to_le64(((u64 *)vbuf)[i]); 9415080332cSMichael Neuling break; 9425080332cSMichael Neuling case 3: /* lxvb16x */ 9435080332cSMichael Neuling memcpy(vdst, vbuf, 16); 9445080332cSMichael Neuling break; 9455080332cSMichael Neuling } 9465080332cSMichael Neuling #endif /* !__LITTLE_ENDIAN__ */ 9475080332cSMichael Neuling 9485080332cSMichael Neuling /* Go to next instruction */ 9495080332cSMichael Neuling regs->nip += 4; 9505080332cSMichael Neuling } 9515080332cSMichael Neuling #endif /* CONFIG_VSX */ 9525080332cSMichael Neuling 9530869b6fdSMahesh Salgaonkar void handle_hmi_exception(struct pt_regs *regs) 9540869b6fdSMahesh Salgaonkar { 9550869b6fdSMahesh Salgaonkar struct pt_regs *old_regs; 9560869b6fdSMahesh Salgaonkar 9570869b6fdSMahesh Salgaonkar old_regs = set_irq_regs(regs); 9580869b6fdSMahesh Salgaonkar irq_enter(); 9590869b6fdSMahesh Salgaonkar 9605080332cSMichael Neuling #ifdef CONFIG_VSX 9615080332cSMichael Neuling /* Real mode flagged P9 special emu is needed */ 9625080332cSMichael Neuling if (local_paca->hmi_p9_special_emu) { 9635080332cSMichael Neuling local_paca->hmi_p9_special_emu = 0; 9645080332cSMichael Neuling 9655080332cSMichael Neuling /* 9665080332cSMichael Neuling * We don't want to take page faults while doing the 9675080332cSMichael Neuling * emulation, we just replay the instruction if necessary. 
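 * (p9_hmi_special_emu() only advances regs->nip when the emulation
 * succeeds; on any failure it simply returns, so the original
 * instruction gets executed again.)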
9685080332cSMichael Neuling */ 9695080332cSMichael Neuling pagefault_disable(); 9705080332cSMichael Neuling p9_hmi_special_emu(regs); 9715080332cSMichael Neuling pagefault_enable(); 9725080332cSMichael Neuling } 9735080332cSMichael Neuling #endif /* CONFIG_VSX */ 9745080332cSMichael Neuling 9750869b6fdSMahesh Salgaonkar if (ppc_md.handle_hmi_exception) 9760869b6fdSMahesh Salgaonkar ppc_md.handle_hmi_exception(regs); 9770869b6fdSMahesh Salgaonkar 9780869b6fdSMahesh Salgaonkar irq_exit(); 9790869b6fdSMahesh Salgaonkar set_irq_regs(old_regs); 9800869b6fdSMahesh Salgaonkar } 9810869b6fdSMahesh Salgaonkar 982dc1c1ca3SStephen Rothwell void unknown_exception(struct pt_regs *regs) 98314cf11afSPaul Mackerras { 984ba12eedeSLi Zhong enum ctx_state prev_state = exception_enter(); 985ba12eedeSLi Zhong 98614cf11afSPaul Mackerras printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", 98714cf11afSPaul Mackerras regs->nip, regs->msr, regs->trap); 98814cf11afSPaul Mackerras 989e821fa42SEric W. Biederman _exception(SIGTRAP, regs, TRAP_UNK, 0); 990ba12eedeSLi Zhong 991ba12eedeSLi Zhong exception_exit(prev_state); 99214cf11afSPaul Mackerras } 99314cf11afSPaul Mackerras 994dc1c1ca3SStephen Rothwell void instruction_breakpoint_exception(struct pt_regs *regs) 99514cf11afSPaul Mackerras { 996ba12eedeSLi Zhong enum ctx_state prev_state = exception_enter(); 997ba12eedeSLi Zhong 99814cf11afSPaul Mackerras if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5, 99914cf11afSPaul Mackerras 5, SIGTRAP) == NOTIFY_STOP) 1000ba12eedeSLi Zhong goto bail; 100114cf11afSPaul Mackerras if (debugger_iabr_match(regs)) 1002ba12eedeSLi Zhong goto bail; 100314cf11afSPaul Mackerras _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); 1004ba12eedeSLi Zhong 1005ba12eedeSLi Zhong bail: 1006ba12eedeSLi Zhong exception_exit(prev_state); 100714cf11afSPaul Mackerras } 100814cf11afSPaul Mackerras 100914cf11afSPaul Mackerras void RunModeException(struct pt_regs *regs) 101014cf11afSPaul Mackerras { 1011e821fa42SEric W. Biederman _exception(SIGTRAP, regs, TRAP_UNK, 0); 101214cf11afSPaul Mackerras } 101314cf11afSPaul Mackerras 101403465f89SNicholas Piggin void single_step_exception(struct pt_regs *regs) 101514cf11afSPaul Mackerras { 1016ba12eedeSLi Zhong enum ctx_state prev_state = exception_enter(); 1017ba12eedeSLi Zhong 10182538c2d0SK.Prasad clear_single_step(regs); 10190e524e76SMatt Evans clear_br_trace(regs); 102014cf11afSPaul Mackerras 10216cc89badSNaveen N. Rao if (kprobe_post_handler(regs)) 10226cc89badSNaveen N. Rao return; 10236cc89badSNaveen N. Rao 102414cf11afSPaul Mackerras if (notify_die(DIE_SSTEP, "single_step", regs, 5, 102514cf11afSPaul Mackerras 5, SIGTRAP) == NOTIFY_STOP) 1026ba12eedeSLi Zhong goto bail; 102714cf11afSPaul Mackerras if (debugger_sstep(regs)) 1028ba12eedeSLi Zhong goto bail; 102914cf11afSPaul Mackerras 103014cf11afSPaul Mackerras _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); 1031ba12eedeSLi Zhong 1032ba12eedeSLi Zhong bail: 1033ba12eedeSLi Zhong exception_exit(prev_state); 103414cf11afSPaul Mackerras } 103503465f89SNicholas Piggin NOKPROBE_SYMBOL(single_step_exception); 103614cf11afSPaul Mackerras 103714cf11afSPaul Mackerras /* 103814cf11afSPaul Mackerras * After we have successfully emulated an instruction, we have to 103914cf11afSPaul Mackerras * check if the instruction was being single-stepped, and if so, 104014cf11afSPaul Mackerras * pretend we got a single-step exception. This was pointed out 104114cf11afSPaul Mackerras * by Kumar Gala. 
-- paulus 104214cf11afSPaul Mackerras */ 10438dad3f92SPaul Mackerras static void emulate_single_step(struct pt_regs *regs) 104414cf11afSPaul Mackerras { 10452538c2d0SK.Prasad if (single_stepping(regs)) 10462538c2d0SK.Prasad single_step_exception(regs); 104714cf11afSPaul Mackerras } 104814cf11afSPaul Mackerras 10495fad293bSKumar Gala static inline int __parse_fpscr(unsigned long fpscr) 1050dc1c1ca3SStephen Rothwell { 1051aeb1c0f6SEric W. Biederman int ret = FPE_FLTUNK; 1052dc1c1ca3SStephen Rothwell 1053dc1c1ca3SStephen Rothwell /* Invalid operation */ 1054dc1c1ca3SStephen Rothwell if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX)) 10555fad293bSKumar Gala ret = FPE_FLTINV; 1056dc1c1ca3SStephen Rothwell 1057dc1c1ca3SStephen Rothwell /* Overflow */ 1058dc1c1ca3SStephen Rothwell else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX)) 10595fad293bSKumar Gala ret = FPE_FLTOVF; 1060dc1c1ca3SStephen Rothwell 1061dc1c1ca3SStephen Rothwell /* Underflow */ 1062dc1c1ca3SStephen Rothwell else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX)) 10635fad293bSKumar Gala ret = FPE_FLTUND; 1064dc1c1ca3SStephen Rothwell 1065dc1c1ca3SStephen Rothwell /* Divide by zero */ 1066dc1c1ca3SStephen Rothwell else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX)) 10675fad293bSKumar Gala ret = FPE_FLTDIV; 1068dc1c1ca3SStephen Rothwell 1069dc1c1ca3SStephen Rothwell /* Inexact result */ 1070dc1c1ca3SStephen Rothwell else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX)) 10715fad293bSKumar Gala ret = FPE_FLTRES; 10725fad293bSKumar Gala 10735fad293bSKumar Gala return ret; 10745fad293bSKumar Gala } 10755fad293bSKumar Gala 10765fad293bSKumar Gala static void parse_fpe(struct pt_regs *regs) 10775fad293bSKumar Gala { 10785fad293bSKumar Gala int code = 0; 10795fad293bSKumar Gala 10805fad293bSKumar Gala flush_fp_to_thread(current); 10815fad293bSKumar Gala 1082de79f7b9SPaul Mackerras code = __parse_fpscr(current->thread.fp_state.fpscr); 1083dc1c1ca3SStephen Rothwell 1084dc1c1ca3SStephen Rothwell _exception(SIGFPE, regs, code, regs->nip); 1085dc1c1ca3SStephen Rothwell } 1086dc1c1ca3SStephen Rothwell 1087dc1c1ca3SStephen Rothwell /* 1088dc1c1ca3SStephen Rothwell * Illegal instruction emulation support. Originally written to 108914cf11afSPaul Mackerras * provide the PVR to user applications using the mfspr rd, PVR. 109014cf11afSPaul Mackerras * Return non-zero if we can't emulate, or -EFAULT if the associated 109114cf11afSPaul Mackerras * memory access caused an access fault. Return zero on success. 109214cf11afSPaul Mackerras * 109314cf11afSPaul Mackerras * There are a couple of ways to do this, either "decode" the instruction 109414cf11afSPaul Mackerras * or directly match lots of bits. In this case, matching lots of 109514cf11afSPaul Mackerras * bits is faster and easier. 
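 * For the string instructions handled by emulate_string_inst() below,
 * lswi/lswx load bytes into successive GPRs starting at rT, four to a
 * register packed from the most significant byte down and wrapping
 * from r31 back to r0; an lswi count of zero means 32 bytes, while
 * lswx takes its byte count from the low bits of XER. stswi/stswx
 * store the same layout back to memory. E.g. a 7-byte lswi into r5
 * fills all of r5 and the top three bytes of r6, with the rest of r6
 * cleared.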
109686417780SPaul Mackerras * 109714cf11afSPaul Mackerras */ 109814cf11afSPaul Mackerras static int emulate_string_inst(struct pt_regs *regs, u32 instword) 109914cf11afSPaul Mackerras { 110014cf11afSPaul Mackerras u8 rT = (instword >> 21) & 0x1f; 110114cf11afSPaul Mackerras u8 rA = (instword >> 16) & 0x1f; 110214cf11afSPaul Mackerras u8 NB_RB = (instword >> 11) & 0x1f; 110314cf11afSPaul Mackerras u32 num_bytes; 110414cf11afSPaul Mackerras unsigned long EA; 110514cf11afSPaul Mackerras int pos = 0; 110614cf11afSPaul Mackerras 110714cf11afSPaul Mackerras /* Early out if we are an invalid form of lswx */ 110816c57b36SKumar Gala if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX) 110914cf11afSPaul Mackerras if ((rT == rA) || (rT == NB_RB)) 111014cf11afSPaul Mackerras return -EINVAL; 111114cf11afSPaul Mackerras 111214cf11afSPaul Mackerras EA = (rA == 0) ? 0 : regs->gpr[rA]; 111314cf11afSPaul Mackerras 111416c57b36SKumar Gala switch (instword & PPC_INST_STRING_MASK) { 111516c57b36SKumar Gala case PPC_INST_LSWX: 111616c57b36SKumar Gala case PPC_INST_STSWX: 111714cf11afSPaul Mackerras EA += NB_RB; 111814cf11afSPaul Mackerras num_bytes = regs->xer & 0x7f; 111914cf11afSPaul Mackerras break; 112016c57b36SKumar Gala case PPC_INST_LSWI: 112116c57b36SKumar Gala case PPC_INST_STSWI: 112214cf11afSPaul Mackerras num_bytes = (NB_RB == 0) ? 32 : NB_RB; 112314cf11afSPaul Mackerras break; 112414cf11afSPaul Mackerras default: 112514cf11afSPaul Mackerras return -EINVAL; 112614cf11afSPaul Mackerras } 112714cf11afSPaul Mackerras 112814cf11afSPaul Mackerras while (num_bytes != 0) 112914cf11afSPaul Mackerras { 113014cf11afSPaul Mackerras u8 val; 113114cf11afSPaul Mackerras u32 shift = 8 * (3 - (pos & 0x3)); 113214cf11afSPaul Mackerras 113380aa0fb4SJames Yang /* if process is 32-bit, clear upper 32 bits of EA */ 113480aa0fb4SJames Yang if ((regs->msr & MSR_64BIT) == 0) 113580aa0fb4SJames Yang EA &= 0xFFFFFFFF; 113680aa0fb4SJames Yang 113716c57b36SKumar Gala switch ((instword & PPC_INST_STRING_MASK)) { 113816c57b36SKumar Gala case PPC_INST_LSWX: 113916c57b36SKumar Gala case PPC_INST_LSWI: 114014cf11afSPaul Mackerras if (get_user(val, (u8 __user *)EA)) 114114cf11afSPaul Mackerras return -EFAULT; 114214cf11afSPaul Mackerras /* first time updating this reg, 114314cf11afSPaul Mackerras * zero it out */ 114414cf11afSPaul Mackerras if (pos == 0) 114514cf11afSPaul Mackerras regs->gpr[rT] = 0; 114614cf11afSPaul Mackerras regs->gpr[rT] |= val << shift; 114714cf11afSPaul Mackerras break; 114816c57b36SKumar Gala case PPC_INST_STSWI: 114916c57b36SKumar Gala case PPC_INST_STSWX: 115014cf11afSPaul Mackerras val = regs->gpr[rT] >> shift; 115114cf11afSPaul Mackerras if (put_user(val, (u8 __user *)EA)) 115214cf11afSPaul Mackerras return -EFAULT; 115314cf11afSPaul Mackerras break; 115414cf11afSPaul Mackerras } 115514cf11afSPaul Mackerras /* move EA to next address */ 115614cf11afSPaul Mackerras EA += 1; 115714cf11afSPaul Mackerras num_bytes--; 115814cf11afSPaul Mackerras 115914cf11afSPaul Mackerras /* manage our position within the register */ 116014cf11afSPaul Mackerras if (++pos == 4) { 116114cf11afSPaul Mackerras pos = 0; 116214cf11afSPaul Mackerras if (++rT == 32) 116314cf11afSPaul Mackerras rT = 0; 116414cf11afSPaul Mackerras } 116514cf11afSPaul Mackerras } 116614cf11afSPaul Mackerras 116714cf11afSPaul Mackerras return 0; 116814cf11afSPaul Mackerras } 116914cf11afSPaul Mackerras 1170c3412dcbSWill Schmidt static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword) 1171c3412dcbSWill Schmidt { 1172c3412dcbSWill Schmidt u32 
ra,rs; 1173c3412dcbSWill Schmidt unsigned long tmp; 1174c3412dcbSWill Schmidt 1175c3412dcbSWill Schmidt ra = (instword >> 16) & 0x1f; 1176c3412dcbSWill Schmidt rs = (instword >> 21) & 0x1f; 1177c3412dcbSWill Schmidt 1178c3412dcbSWill Schmidt tmp = regs->gpr[rs]; 1179c3412dcbSWill Schmidt tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL); 1180c3412dcbSWill Schmidt tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL); 1181c3412dcbSWill Schmidt tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL; 1182c3412dcbSWill Schmidt regs->gpr[ra] = tmp; 1183c3412dcbSWill Schmidt 1184c3412dcbSWill Schmidt return 0; 1185c3412dcbSWill Schmidt } 1186c3412dcbSWill Schmidt 1187c1469f13SKumar Gala static int emulate_isel(struct pt_regs *regs, u32 instword) 1188c1469f13SKumar Gala { 1189c1469f13SKumar Gala u8 rT = (instword >> 21) & 0x1f; 1190c1469f13SKumar Gala u8 rA = (instword >> 16) & 0x1f; 1191c1469f13SKumar Gala u8 rB = (instword >> 11) & 0x1f; 1192c1469f13SKumar Gala u8 BC = (instword >> 6) & 0x1f; 1193c1469f13SKumar Gala u8 bit; 1194c1469f13SKumar Gala unsigned long tmp; 1195c1469f13SKumar Gala 1196c1469f13SKumar Gala tmp = (rA == 0) ? 0 : regs->gpr[rA]; 1197c1469f13SKumar Gala bit = (regs->ccr >> (31 - BC)) & 0x1; 1198c1469f13SKumar Gala 1199c1469f13SKumar Gala regs->gpr[rT] = bit ? tmp : regs->gpr[rB]; 1200c1469f13SKumar Gala 1201c1469f13SKumar Gala return 0; 1202c1469f13SKumar Gala } 1203c1469f13SKumar Gala 12046ce6c629SMichael Neuling #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 12056ce6c629SMichael Neuling static inline bool tm_abort_check(struct pt_regs *regs, int cause) 12066ce6c629SMichael Neuling { 12076ce6c629SMichael Neuling /* If we're emulating a load/store in an active transaction, we cannot 12086ce6c629SMichael Neuling * emulate it as the kernel operates in transaction suspended context. 12096ce6c629SMichael Neuling * We need to abort the transaction. This creates a persistent TM 12106ce6c629SMichael Neuling * abort so tell the user what caused it with a new code. 12116ce6c629SMichael Neuling */ 12126ce6c629SMichael Neuling if (MSR_TM_TRANSACTIONAL(regs->msr)) { 12136ce6c629SMichael Neuling tm_enable(); 12146ce6c629SMichael Neuling tm_abort(cause); 12156ce6c629SMichael Neuling return true; 12166ce6c629SMichael Neuling } 12176ce6c629SMichael Neuling return false; 12186ce6c629SMichael Neuling } 12196ce6c629SMichael Neuling #else 12206ce6c629SMichael Neuling static inline bool tm_abort_check(struct pt_regs *regs, int reason) 12216ce6c629SMichael Neuling { 12226ce6c629SMichael Neuling return false; 12236ce6c629SMichael Neuling } 12246ce6c629SMichael Neuling #endif 12256ce6c629SMichael Neuling 122614cf11afSPaul Mackerras static int emulate_instruction(struct pt_regs *regs) 122714cf11afSPaul Mackerras { 122814cf11afSPaul Mackerras u32 instword; 122914cf11afSPaul Mackerras u32 rd; 123014cf11afSPaul Mackerras 12314288e343SAnton Blanchard if (!user_mode(regs)) 123214cf11afSPaul Mackerras return -EINVAL; 123314cf11afSPaul Mackerras CHECK_FULL_REGS(regs); 123414cf11afSPaul Mackerras 123514cf11afSPaul Mackerras if (get_user(instword, (u32 __user *)(regs->nip))) 123614cf11afSPaul Mackerras return -EFAULT; 123714cf11afSPaul Mackerras 123814cf11afSPaul Mackerras /* Emulate the mfspr rD, PVR. 
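 *
 * The PVR is a privileged SPR, so a user-mode mfspr of it arrives here
 * as a privileged-instruction program check. The mask compare below
 * keeps only the opcode and SPR fields, so any destination register
 * matches; rD is then taken from the RT field of the instruction word.
 * A minimal user-space sketch of an access that ends up being emulated
 * (assuming the assembler accepts a raw SPR number; 287 is the PVR):
 *
 *	unsigned long pvr;
 *	asm volatile("mfspr %0, 287" : "=r" (pvr));
 *
 * On the emulated path the thread resumes with the real PVR value in
 * rD, as if the access had been permitted.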
*/ 123916c57b36SKumar Gala if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) { 1240eecff81dSAnton Blanchard PPC_WARN_EMULATED(mfpvr, regs); 124114cf11afSPaul Mackerras rd = (instword >> 21) & 0x1f; 124214cf11afSPaul Mackerras regs->gpr[rd] = mfspr(SPRN_PVR); 124314cf11afSPaul Mackerras return 0; 124414cf11afSPaul Mackerras } 124514cf11afSPaul Mackerras 124614cf11afSPaul Mackerras /* Emulating the dcba insn is just a no-op. */ 124780947e7cSGeert Uytterhoeven if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) { 1248eecff81dSAnton Blanchard PPC_WARN_EMULATED(dcba, regs); 124914cf11afSPaul Mackerras return 0; 125080947e7cSGeert Uytterhoeven } 125114cf11afSPaul Mackerras 125214cf11afSPaul Mackerras /* Emulate the mcrxr insn. */ 125316c57b36SKumar Gala if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) { 125486417780SPaul Mackerras int shift = (instword >> 21) & 0x1c; 125514cf11afSPaul Mackerras unsigned long msk = 0xf0000000UL >> shift; 125614cf11afSPaul Mackerras 1257eecff81dSAnton Blanchard PPC_WARN_EMULATED(mcrxr, regs); 125814cf11afSPaul Mackerras regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk); 125914cf11afSPaul Mackerras regs->xer &= ~0xf0000000UL; 126014cf11afSPaul Mackerras return 0; 126114cf11afSPaul Mackerras } 126214cf11afSPaul Mackerras 126314cf11afSPaul Mackerras /* Emulate load/store string insn. */ 126480947e7cSGeert Uytterhoeven if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) { 12656ce6c629SMichael Neuling if (tm_abort_check(regs, 12666ce6c629SMichael Neuling TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT)) 12676ce6c629SMichael Neuling return -EINVAL; 1268eecff81dSAnton Blanchard PPC_WARN_EMULATED(string, regs); 126914cf11afSPaul Mackerras return emulate_string_inst(regs, instword); 127080947e7cSGeert Uytterhoeven } 127114cf11afSPaul Mackerras 1272c3412dcbSWill Schmidt /* Emulate the popcntb (Population Count Bytes) instruction. */ 127316c57b36SKumar Gala if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) { 1274eecff81dSAnton Blanchard PPC_WARN_EMULATED(popcntb, regs); 1275c3412dcbSWill Schmidt return emulate_popcntb_inst(regs, instword); 1276c3412dcbSWill Schmidt } 1277c3412dcbSWill Schmidt 1278c1469f13SKumar Gala /* Emulate isel (Integer Select) instruction */ 127916c57b36SKumar Gala if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) { 1280eecff81dSAnton Blanchard PPC_WARN_EMULATED(isel, regs); 1281c1469f13SKumar Gala return emulate_isel(regs, instword); 1282c1469f13SKumar Gala } 1283c1469f13SKumar Gala 12849863c28aSJames Yang /* Emulate sync instruction variants */ 12859863c28aSJames Yang if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) { 12869863c28aSJames Yang PPC_WARN_EMULATED(sync, regs); 12879863c28aSJames Yang asm volatile("sync"); 12889863c28aSJames Yang return 0; 12899863c28aSJames Yang } 12909863c28aSJames Yang 1291efcac658SAlexey Kardashevskiy #ifdef CONFIG_PPC64 1292efcac658SAlexey Kardashevskiy /* Emulate the mfspr rD, DSCR. 
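 *
 * The DSCR (Data Stream Control Register) can be named by two SPR
 * numbers: the privileged 0x11 and, on later cores, the problem-state
 * alias 0x03. Both encodings are matched below and emulated when the
 * CPU has CPU_FTR_DSCR but the access still traps. A minimal
 * user-space sketch of an access that may take this path (assuming
 * the assembler accepts a raw SPR number; 17 is 0x11):
 *
 *	unsigned long dscr;
 *	asm volatile("mfspr %0, 17" : "=r" (dscr));
 *
 * The matching mtspr form additionally sets dscr_inherit, recording
 * that the thread now manages its own DSCR value rather than using
 * the system default.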
*/ 129373d2fb75SAnton Blanchard if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) == 129473d2fb75SAnton Blanchard PPC_INST_MFSPR_DSCR_USER) || 129573d2fb75SAnton Blanchard ((instword & PPC_INST_MFSPR_DSCR_MASK) == 129673d2fb75SAnton Blanchard PPC_INST_MFSPR_DSCR)) && 1297efcac658SAlexey Kardashevskiy cpu_has_feature(CPU_FTR_DSCR)) { 1298efcac658SAlexey Kardashevskiy PPC_WARN_EMULATED(mfdscr, regs); 1299efcac658SAlexey Kardashevskiy rd = (instword >> 21) & 0x1f; 1300efcac658SAlexey Kardashevskiy regs->gpr[rd] = mfspr(SPRN_DSCR); 1301efcac658SAlexey Kardashevskiy return 0; 1302efcac658SAlexey Kardashevskiy } 1303efcac658SAlexey Kardashevskiy /* Emulate the mtspr DSCR, rD. */ 130473d2fb75SAnton Blanchard if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) == 130573d2fb75SAnton Blanchard PPC_INST_MTSPR_DSCR_USER) || 130673d2fb75SAnton Blanchard ((instword & PPC_INST_MTSPR_DSCR_MASK) == 130773d2fb75SAnton Blanchard PPC_INST_MTSPR_DSCR)) && 1308efcac658SAlexey Kardashevskiy cpu_has_feature(CPU_FTR_DSCR)) { 1309efcac658SAlexey Kardashevskiy PPC_WARN_EMULATED(mtdscr, regs); 1310efcac658SAlexey Kardashevskiy rd = (instword >> 21) & 0x1f; 131100ca0de0SAnton Blanchard current->thread.dscr = regs->gpr[rd]; 1312efcac658SAlexey Kardashevskiy current->thread.dscr_inherit = 1; 131300ca0de0SAnton Blanchard mtspr(SPRN_DSCR, current->thread.dscr); 1314efcac658SAlexey Kardashevskiy return 0; 1315efcac658SAlexey Kardashevskiy } 1316efcac658SAlexey Kardashevskiy #endif 1317efcac658SAlexey Kardashevskiy 131814cf11afSPaul Mackerras return -EINVAL; 131914cf11afSPaul Mackerras } 132014cf11afSPaul Mackerras 132173c9ceabSJeremy Fitzhardinge int is_valid_bugaddr(unsigned long addr) 132214cf11afSPaul Mackerras { 132373c9ceabSJeremy Fitzhardinge return is_kernel_addr(addr); 132414cf11afSPaul Mackerras } 132514cf11afSPaul Mackerras 13263a3b5aa6SKevin Hao #ifdef CONFIG_MATH_EMULATION 13273a3b5aa6SKevin Hao static int emulate_math(struct pt_regs *regs) 13283a3b5aa6SKevin Hao { 13293a3b5aa6SKevin Hao int ret; 13303a3b5aa6SKevin Hao extern int do_mathemu(struct pt_regs *regs); 13313a3b5aa6SKevin Hao 13323a3b5aa6SKevin Hao ret = do_mathemu(regs); 13333a3b5aa6SKevin Hao if (ret >= 0) 13343a3b5aa6SKevin Hao PPC_WARN_EMULATED(math, regs); 13353a3b5aa6SKevin Hao 13363a3b5aa6SKevin Hao switch (ret) { 13373a3b5aa6SKevin Hao case 0: 13383a3b5aa6SKevin Hao emulate_single_step(regs); 13393a3b5aa6SKevin Hao return 0; 13403a3b5aa6SKevin Hao case 1: { 13413a3b5aa6SKevin Hao int code = 0; 1342de79f7b9SPaul Mackerras code = __parse_fpscr(current->thread.fp_state.fpscr); 13433a3b5aa6SKevin Hao _exception(SIGFPE, regs, code, regs->nip); 13443a3b5aa6SKevin Hao return 0; 13453a3b5aa6SKevin Hao } 13463a3b5aa6SKevin Hao case -EFAULT: 13473a3b5aa6SKevin Hao _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 13483a3b5aa6SKevin Hao return 0; 13493a3b5aa6SKevin Hao } 13503a3b5aa6SKevin Hao 13513a3b5aa6SKevin Hao return -1; 13523a3b5aa6SKevin Hao } 13533a3b5aa6SKevin Hao #else 13543a3b5aa6SKevin Hao static inline int emulate_math(struct pt_regs *regs) { return -1; } 13553a3b5aa6SKevin Hao #endif 13563a3b5aa6SKevin Hao 135703465f89SNicholas Piggin void program_check_exception(struct pt_regs *regs) 135814cf11afSPaul Mackerras { 1359ba12eedeSLi Zhong enum ctx_state prev_state = exception_enter(); 136014cf11afSPaul Mackerras unsigned int reason = get_reason(regs); 136114cf11afSPaul Mackerras 1362aa42c69cSKim Phillips /* We can now get here via a FP Unavailable exception if the core 136304903a30SKumar Gala * has no FPU, in that case the reason flags will be 0 
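 *
 * When REASON_FP is set the fault is an IEEE floating-point exception:
 * parse_fpe() below inspects the FPSCR and delivers SIGFPE with a
 * matching si_code. Roughly how user space provokes that path, as a
 * minimal sketch (assuming glibc's feenableexcept() takes care of
 * switching the thread into precise FP-exception mode):
 *
 *	#define _GNU_SOURCE
 *	#include <fenv.h>
 *
 *	feenableexcept(FE_DIVBYZERO);
 *	volatile double zero = 0.0;
 *	volatile double r = 1.0 / zero;		// SIGFPE, si_code FPE_FLTDIV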
*/ 136414cf11afSPaul Mackerras 136514cf11afSPaul Mackerras if (reason & REASON_FP) { 136614cf11afSPaul Mackerras /* IEEE FP exception */ 1367dc1c1ca3SStephen Rothwell parse_fpe(regs); 1368ba12eedeSLi Zhong goto bail; 13698dad3f92SPaul Mackerras } 13708dad3f92SPaul Mackerras if (reason & REASON_TRAP) { 1371a4c3f909SBalbir Singh unsigned long bugaddr; 1372ba797b28SJason Wessel /* Debugger is first in line to stop recursive faults in 1373ba797b28SJason Wessel * rcu_lock, notify_die, or atomic_notifier_call_chain */ 1374ba797b28SJason Wessel if (debugger_bpt(regs)) 1375ba12eedeSLi Zhong goto bail; 1376ba797b28SJason Wessel 13776cc89badSNaveen N. Rao if (kprobe_handler(regs)) 13786cc89badSNaveen N. Rao goto bail; 13796cc89badSNaveen N. Rao 138014cf11afSPaul Mackerras /* trap exception */ 1381dc1c1ca3SStephen Rothwell if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP) 1382dc1c1ca3SStephen Rothwell == NOTIFY_STOP) 1383ba12eedeSLi Zhong goto bail; 138473c9ceabSJeremy Fitzhardinge 1385a4c3f909SBalbir Singh bugaddr = regs->nip; 1386a4c3f909SBalbir Singh /* 1387a4c3f909SBalbir Singh * Fixup bugaddr for BUG_ON() in real mode 1388a4c3f909SBalbir Singh */ 1389a4c3f909SBalbir Singh if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR)) 1390a4c3f909SBalbir Singh bugaddr += PAGE_OFFSET; 1391a4c3f909SBalbir Singh 139273c9ceabSJeremy Fitzhardinge if (!(regs->msr & MSR_PR) && /* not user-mode */ 1393a4c3f909SBalbir Singh report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) { 139414cf11afSPaul Mackerras regs->nip += 4; 1395ba12eedeSLi Zhong goto bail; 139614cf11afSPaul Mackerras } 13978dad3f92SPaul Mackerras _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); 1398ba12eedeSLi Zhong goto bail; 13998dad3f92SPaul Mackerras } 1400bc2a9408SMichael Neuling #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1401bc2a9408SMichael Neuling if (reason & REASON_TM) { 1402bc2a9408SMichael Neuling /* This is a TM "Bad Thing Exception" program check. 1403bc2a9408SMichael Neuling * This occurs when: 1404bc2a9408SMichael Neuling * - An rfid/hrfid/mtmsrd attempts to cause an illegal 1405bc2a9408SMichael Neuling * transition in TM states. 1406bc2a9408SMichael Neuling * - A trechkpt is attempted when transactional. 1407bc2a9408SMichael Neuling * - A treclaim is attempted when non transactional. 1408bc2a9408SMichael Neuling * - A tend is illegally attempted. 1409bc2a9408SMichael Neuling * - writing a TM SPR when transactional. 1410632f0574SMichael Ellerman * 1411632f0574SMichael Ellerman * If usermode caused this, it's done something illegal and 1412bc2a9408SMichael Neuling * gets a SIGILL slap on the wrist. We call it an illegal 1413bc2a9408SMichael Neuling * operand to distinguish from the instruction just being bad 1414bc2a9408SMichael Neuling * (e.g. executing a 'tend' on a CPU without TM!); it's an 1415bc2a9408SMichael Neuling * illegal /placement/ of a valid instruction. 
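 *
 * From user space the result is simply SIGILL with si_code ILL_ILLOPN,
 * so a handler can tell this apart from a plain unknown opcode
 * (ILL_ILLOPC). A minimal sketch of catching it (plain POSIX
 * sigaction(); ill_handler is just a placeholder name):
 *
 *	static void ill_handler(int sig, siginfo_t *si, void *ctx)
 *	{
 *		if (si->si_code == ILL_ILLOPN)
 *			;	// valid instruction, illegal placement (e.g. TM misuse)
 *	}
 *
 *	struct sigaction sa = { .sa_sigaction = ill_handler,
 *				.sa_flags = SA_SIGINFO };
 *	sigaction(SIGILL, &sa, NULL);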
1416bc2a9408SMichael Neuling */ 1417bc2a9408SMichael Neuling if (user_mode(regs)) { 1418bc2a9408SMichael Neuling _exception(SIGILL, regs, ILL_ILLOPN, regs->nip); 1419ba12eedeSLi Zhong goto bail; 1420bc2a9408SMichael Neuling } else { 1421bc2a9408SMichael Neuling printk(KERN_EMERG "Unexpected TM Bad Thing exception " 1422bc2a9408SMichael Neuling "at %lx (msr 0x%lx)\n", regs->nip, regs->msr); 1423bc2a9408SMichael Neuling die("Unrecoverable exception", regs, SIGABRT); 1424bc2a9408SMichael Neuling } 1425bc2a9408SMichael Neuling } 1426bc2a9408SMichael Neuling #endif 14278dad3f92SPaul Mackerras 1428b3f6a459SMichael Ellerman /* 1429b3f6a459SMichael Ellerman * If we took the program check in the kernel, skip down to sending a 1430b3f6a459SMichael Ellerman * SIGILL. The subsequent cases all relate to emulating instructions 1431b3f6a459SMichael Ellerman * which we should only do for userspace. We also do not want to enable 1432b3f6a459SMichael Ellerman * interrupts for kernel faults because that might lead to further 1433b3f6a459SMichael Ellerman * faults, and lose the context of the original exception. 1434b3f6a459SMichael Ellerman */ 1435b3f6a459SMichael Ellerman if (!user_mode(regs)) 1436b3f6a459SMichael Ellerman goto sigill; 1437b3f6a459SMichael Ellerman 1438a3512b2dSBenjamin Herrenschmidt /* We restore the interrupt state now */ 1439a3512b2dSBenjamin Herrenschmidt if (!arch_irq_disabled_regs(regs)) 1440cd8a5673SPaul Mackerras local_irq_enable(); 1441cd8a5673SPaul Mackerras 144204903a30SKumar Gala /* (reason & REASON_ILLEGAL) would be the obvious thing here, 144304903a30SKumar Gala * but there seems to be a hardware bug on the 405GP (RevD) 144404903a30SKumar Gala * that means ESR is sometimes set incorrectly - either to 144504903a30SKumar Gala * ESR_DST (!?) or 0. In the process of chasing this with the 144604903a30SKumar Gala * hardware people - not sure if it can happen on any illegal 144704903a30SKumar Gala * instruction or only on FP instructions, whether there is a 14484e63f8edSBenjamin Herrenschmidt * pattern to occurrences etc. -dgibson 31/Mar/2003 14494e63f8edSBenjamin Herrenschmidt */ 14503a3b5aa6SKevin Hao if (!emulate_math(regs)) 1451ba12eedeSLi Zhong goto bail; 145204903a30SKumar Gala 14538dad3f92SPaul Mackerras /* Try to emulate it if we should.
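 *
 * Note that every successful emulation below bumps regs->nip past the
 * instruction and then calls emulate_single_step(), so a debugger that
 * is single-stepping the task still gets its SIGTRAP even though the
 * instruction never ran in hardware. A tracer needs nothing special, a
 * plain step works; minimal sketch (pid and status are placeholders):
 *
 *	ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);	// stops with SIGTRAP as usual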
*/ 14548dad3f92SPaul Mackerras if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) { 145514cf11afSPaul Mackerras switch (emulate_instruction(regs)) { 145614cf11afSPaul Mackerras case 0: 145714cf11afSPaul Mackerras regs->nip += 4; 145814cf11afSPaul Mackerras emulate_single_step(regs); 1459ba12eedeSLi Zhong goto bail; 146014cf11afSPaul Mackerras case -EFAULT: 146114cf11afSPaul Mackerras _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 1462ba12eedeSLi Zhong goto bail; 14638dad3f92SPaul Mackerras } 14648dad3f92SPaul Mackerras } 14658dad3f92SPaul Mackerras 1466b3f6a459SMichael Ellerman sigill: 146714cf11afSPaul Mackerras if (reason & REASON_PRIVILEGED) 146814cf11afSPaul Mackerras _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); 146914cf11afSPaul Mackerras else 147014cf11afSPaul Mackerras _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1471ba12eedeSLi Zhong 1472ba12eedeSLi Zhong bail: 1473ba12eedeSLi Zhong exception_exit(prev_state); 147414cf11afSPaul Mackerras } 147503465f89SNicholas Piggin NOKPROBE_SYMBOL(program_check_exception); 147614cf11afSPaul Mackerras 1477bf593907SPaul Mackerras /* 1478bf593907SPaul Mackerras * This occurs when running in hypervisor mode on POWER6 or later 1479bf593907SPaul Mackerras * and an illegal instruction is encountered. 1480bf593907SPaul Mackerras */ 148103465f89SNicholas Piggin void emulation_assist_interrupt(struct pt_regs *regs) 1482bf593907SPaul Mackerras { 1483bf593907SPaul Mackerras regs->msr |= REASON_ILLEGAL; 1484bf593907SPaul Mackerras program_check_exception(regs); 1485bf593907SPaul Mackerras } 148603465f89SNicholas Piggin NOKPROBE_SYMBOL(emulation_assist_interrupt); 1487bf593907SPaul Mackerras 1488dc1c1ca3SStephen Rothwell void alignment_exception(struct pt_regs *regs) 148914cf11afSPaul Mackerras { 1490ba12eedeSLi Zhong enum ctx_state prev_state = exception_enter(); 14914393c4f6SBenjamin Herrenschmidt int sig, code, fixed = 0; 149214cf11afSPaul Mackerras 1493a3512b2dSBenjamin Herrenschmidt /* We restore the interrupt state now */ 1494a3512b2dSBenjamin Herrenschmidt if (!arch_irq_disabled_regs(regs)) 1495a3512b2dSBenjamin Herrenschmidt local_irq_enable(); 1496a3512b2dSBenjamin Herrenschmidt 14976ce6c629SMichael Neuling if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT)) 14986ce6c629SMichael Neuling goto bail; 14996ce6c629SMichael Neuling 1500e9370ae1SPaul Mackerras /* we don't implement logging of alignment exceptions */ 1501e9370ae1SPaul Mackerras if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) 150214cf11afSPaul Mackerras fixed = fix_alignment(regs); 150314cf11afSPaul Mackerras 150414cf11afSPaul Mackerras if (fixed == 1) { 150514cf11afSPaul Mackerras regs->nip += 4; /* skip over emulated instruction */ 150614cf11afSPaul Mackerras emulate_single_step(regs); 1507ba12eedeSLi Zhong goto bail; 150814cf11afSPaul Mackerras } 150914cf11afSPaul Mackerras 151014cf11afSPaul Mackerras /* Operand address was bad */ 151114cf11afSPaul Mackerras if (fixed == -EFAULT) { 15124393c4f6SBenjamin Herrenschmidt sig = SIGSEGV; 15134393c4f6SBenjamin Herrenschmidt code = SEGV_ACCERR; 15144393c4f6SBenjamin Herrenschmidt } else { 15154393c4f6SBenjamin Herrenschmidt sig = SIGBUS; 15164393c4f6SBenjamin Herrenschmidt code = BUS_ADRALN; 151714cf11afSPaul Mackerras } 15184393c4f6SBenjamin Herrenschmidt if (user_mode(regs)) 15194393c4f6SBenjamin Herrenschmidt _exception(sig, regs, code, regs->dar); 15204393c4f6SBenjamin Herrenschmidt else 15214393c4f6SBenjamin Herrenschmidt bad_page_fault(regs, regs->dar, sig); 1522ba12eedeSLi Zhong 1523ba12eedeSLi Zhong bail: 
1524ba12eedeSLi Zhong exception_exit(prev_state); 152514cf11afSPaul Mackerras } 152614cf11afSPaul Mackerras 152714cf11afSPaul Mackerras void StackOverflow(struct pt_regs *regs) 152814cf11afSPaul Mackerras { 152914cf11afSPaul Mackerras printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n", 153014cf11afSPaul Mackerras current, regs->gpr[1]); 153114cf11afSPaul Mackerras debugger(regs); 153214cf11afSPaul Mackerras show_regs(regs); 153314cf11afSPaul Mackerras panic("kernel stack overflow"); 153414cf11afSPaul Mackerras } 153514cf11afSPaul Mackerras 153614cf11afSPaul Mackerras void nonrecoverable_exception(struct pt_regs *regs) 153714cf11afSPaul Mackerras { 153814cf11afSPaul Mackerras printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n", 153914cf11afSPaul Mackerras regs->nip, regs->msr); 154014cf11afSPaul Mackerras debugger(regs); 154114cf11afSPaul Mackerras die("nonrecoverable exception", regs, SIGKILL); 154214cf11afSPaul Mackerras } 154314cf11afSPaul Mackerras 1544dc1c1ca3SStephen Rothwell void kernel_fp_unavailable_exception(struct pt_regs *regs) 1545dc1c1ca3SStephen Rothwell { 1546ba12eedeSLi Zhong enum ctx_state prev_state = exception_enter(); 1547ba12eedeSLi Zhong 1548dc1c1ca3SStephen Rothwell printk(KERN_EMERG "Unrecoverable FP Unavailable Exception " 1549dc1c1ca3SStephen Rothwell "%lx at %lx\n", regs->trap, regs->nip); 1550dc1c1ca3SStephen Rothwell die("Unrecoverable FP Unavailable Exception", regs, SIGABRT); 1551ba12eedeSLi Zhong 1552ba12eedeSLi Zhong exception_exit(prev_state); 1553dc1c1ca3SStephen Rothwell } 1554dc1c1ca3SStephen Rothwell 1555dc1c1ca3SStephen Rothwell void altivec_unavailable_exception(struct pt_regs *regs) 1556dc1c1ca3SStephen Rothwell { 1557ba12eedeSLi Zhong enum ctx_state prev_state = exception_enter(); 1558ba12eedeSLi Zhong 1559dc1c1ca3SStephen Rothwell if (user_mode(regs)) { 1560dc1c1ca3SStephen Rothwell /* A user program has executed an altivec instruction, 1561dc1c1ca3SStephen Rothwell but this kernel doesn't support altivec. */ 1562dc1c1ca3SStephen Rothwell _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1563ba12eedeSLi Zhong goto bail; 1564dc1c1ca3SStephen Rothwell } 15656c4841c2SAnton Blanchard 1566dc1c1ca3SStephen Rothwell printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception " 1567dc1c1ca3SStephen Rothwell "%lx at %lx\n", regs->trap, regs->nip); 1568dc1c1ca3SStephen Rothwell die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT); 1569ba12eedeSLi Zhong 1570ba12eedeSLi Zhong bail: 1571ba12eedeSLi Zhong exception_exit(prev_state); 1572dc1c1ca3SStephen Rothwell } 1573dc1c1ca3SStephen Rothwell 1574ce48b210SMichael Neuling void vsx_unavailable_exception(struct pt_regs *regs) 1575ce48b210SMichael Neuling { 1576ce48b210SMichael Neuling if (user_mode(regs)) { 1577ce48b210SMichael Neuling /* A user program has executed an vsx instruction, 1578ce48b210SMichael Neuling but this kernel doesn't support vsx. 
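 *
 * Well-behaved user space avoids this by testing the hardware
 * capability bits before touching VSX. A minimal sketch (assuming
 * glibc's getauxval() and the PPC_FEATURE_HAS_VSX bit from the uapi
 * headers):
 *
 *	#include <sys/auxv.h>
 *
 *	if (getauxval(AT_HWCAP) & PPC_FEATURE_HAS_VSX)
 *		;	// VSX code paths are safe to use
 *	else
 *		;	// fall back, or this handler will send SIGILL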
*/ 1579ce48b210SMichael Neuling _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1580ce48b210SMichael Neuling return; 1581ce48b210SMichael Neuling } 1582ce48b210SMichael Neuling 1583ce48b210SMichael Neuling printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception " 1584ce48b210SMichael Neuling "%lx at %lx\n", regs->trap, regs->nip); 1585ce48b210SMichael Neuling die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT); 1586ce48b210SMichael Neuling } 1587ce48b210SMichael Neuling 15882517617eSMichael Neuling #ifdef CONFIG_PPC64 1589172f7aaaSCyril Bur static void tm_unavailable(struct pt_regs *regs) 1590172f7aaaSCyril Bur { 15915d176f75SCyril Bur #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 15925d176f75SCyril Bur if (user_mode(regs)) { 15935d176f75SCyril Bur current->thread.load_tm++; 15945d176f75SCyril Bur regs->msr |= MSR_TM; 15955d176f75SCyril Bur tm_enable(); 15965d176f75SCyril Bur tm_restore_sprs(¤t->thread); 15975d176f75SCyril Bur return; 15985d176f75SCyril Bur } 15995d176f75SCyril Bur #endif 1600172f7aaaSCyril Bur pr_emerg("Unrecoverable TM Unavailable Exception " 1601172f7aaaSCyril Bur "%lx at %lx\n", regs->trap, regs->nip); 1602172f7aaaSCyril Bur die("Unrecoverable TM Unavailable Exception", regs, SIGABRT); 1603172f7aaaSCyril Bur } 1604172f7aaaSCyril Bur 1605021424a1SMichael Ellerman void facility_unavailable_exception(struct pt_regs *regs) 1606d0c0c9a1SMichael Neuling { 1607021424a1SMichael Ellerman static char *facility_strings[] = { 16082517617eSMichael Neuling [FSCR_FP_LG] = "FPU", 16092517617eSMichael Neuling [FSCR_VECVSX_LG] = "VMX/VSX", 16102517617eSMichael Neuling [FSCR_DSCR_LG] = "DSCR", 16112517617eSMichael Neuling [FSCR_PM_LG] = "PMU SPRs", 16122517617eSMichael Neuling [FSCR_BHRB_LG] = "BHRB", 16132517617eSMichael Neuling [FSCR_TM_LG] = "TM", 16142517617eSMichael Neuling [FSCR_EBB_LG] = "EBB", 16152517617eSMichael Neuling [FSCR_TAR_LG] = "TAR", 1616794464f4SNicholas Piggin [FSCR_MSGP_LG] = "MSGP", 16179b7ff0c6SNicholas Piggin [FSCR_SCV_LG] = "SCV", 1618021424a1SMichael Ellerman }; 16192517617eSMichael Neuling char *facility = "unknown"; 1620021424a1SMichael Ellerman u64 value; 1621c952c1c4SAnshuman Khandual u32 instword, rd; 16222517617eSMichael Neuling u8 status; 16232517617eSMichael Neuling bool hv; 1624021424a1SMichael Ellerman 16252271db20SBenjamin Herrenschmidt hv = (TRAP(regs) == 0xf80); 16262517617eSMichael Neuling if (hv) 1627b14b6260SMichael Ellerman value = mfspr(SPRN_HFSCR); 16282517617eSMichael Neuling else 16292517617eSMichael Neuling value = mfspr(SPRN_FSCR); 16302517617eSMichael Neuling 16312517617eSMichael Neuling status = value >> 56; 1632709b973cSAnshuman Khandual if ((hv || status >= 2) && 1633709b973cSAnshuman Khandual (status < ARRAY_SIZE(facility_strings)) && 1634709b973cSAnshuman Khandual facility_strings[status]) 1635709b973cSAnshuman Khandual facility = facility_strings[status]; 1636709b973cSAnshuman Khandual 1637709b973cSAnshuman Khandual /* We should not have taken this interrupt in kernel */ 1638709b973cSAnshuman Khandual if (!user_mode(regs)) { 1639709b973cSAnshuman Khandual pr_emerg("Facility '%s' unavailable (%d) exception in kernel mode at %lx\n", 1640709b973cSAnshuman Khandual facility, status, regs->nip); 1641709b973cSAnshuman Khandual die("Unexpected facility unavailable exception", regs, SIGABRT); 1642709b973cSAnshuman Khandual } 1643709b973cSAnshuman Khandual 1644709b973cSAnshuman Khandual /* We restore the interrupt state now */ 1645709b973cSAnshuman Khandual if (!arch_irq_disabled_regs(regs)) 1646709b973cSAnshuman Khandual 
local_irq_enable(); 1647709b973cSAnshuman Khandual 16482517617eSMichael Neuling if (status == FSCR_DSCR_LG) { 1649c952c1c4SAnshuman Khandual /* 1650c952c1c4SAnshuman Khandual * User is accessing the DSCR register using the problem 1651c952c1c4SAnshuman Khandual * state only SPR number (0x03) either through a mfspr or 1652c952c1c4SAnshuman Khandual * a mtspr instruction. If it is a write attempt through 1653c952c1c4SAnshuman Khandual * a mtspr, then we set the inherit bit. We also set the 1654c952c1c4SAnshuman Khandual * DSCR bit in the FSCR, which allows the user to read or 1655c952c1c4SAnshuman Khandual * write the register directly from then on. But in case it 1656c952c1c4SAnshuman Khandual * is a read DSCR attempt through a mfspr instruction, we 1657c952c1c4SAnshuman Khandual * just emulate the instruction instead. This code path will 1658c952c1c4SAnshuman Khandual * always emulate all the mfspr instructions till the user 1659c952c1c4SAnshuman Khandual * has attempted at least one mtspr instruction. This way it 1660c952c1c4SAnshuman Khandual * preserves the same behaviour when the user is accessing 1661c952c1c4SAnshuman Khandual * the DSCR through the privilege level only SPR number (0x11), 1662c952c1c4SAnshuman Khandual * which is emulated through the illegal instruction exception. 1663c952c1c4SAnshuman Khandual * We always leave HFSCR DSCR set. 16642517617eSMichael Neuling */ 1665c952c1c4SAnshuman Khandual if (get_user(instword, (u32 __user *)(regs->nip))) { 1666c952c1c4SAnshuman Khandual pr_err("Failed to fetch the user instruction\n"); 1667c952c1c4SAnshuman Khandual return; 1668c952c1c4SAnshuman Khandual } 1669c952c1c4SAnshuman Khandual 1670c952c1c4SAnshuman Khandual /* Write into DSCR (mtspr 0x03, RS) */ 1671c952c1c4SAnshuman Khandual if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK) 1672c952c1c4SAnshuman Khandual == PPC_INST_MTSPR_DSCR_USER) { 1673c952c1c4SAnshuman Khandual rd = (instword >> 21) & 0x1f; 1674c952c1c4SAnshuman Khandual current->thread.dscr = regs->gpr[rd]; 16752517617eSMichael Neuling current->thread.dscr_inherit = 1; 1676b57bd2deSMichael Neuling current->thread.fscr |= FSCR_DSCR; 1677b57bd2deSMichael Neuling mtspr(SPRN_FSCR, current->thread.fscr); 1678c952c1c4SAnshuman Khandual } 1679c952c1c4SAnshuman Khandual 1680c952c1c4SAnshuman Khandual /* Read from DSCR (mfspr RT, 0x03) */ 1681c952c1c4SAnshuman Khandual if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK) 1682c952c1c4SAnshuman Khandual == PPC_INST_MFSPR_DSCR_USER) { 1683c952c1c4SAnshuman Khandual if (emulate_instruction(regs)) { 1684c952c1c4SAnshuman Khandual pr_err("DSCR based mfspr emulation failed\n"); 1685c952c1c4SAnshuman Khandual return; 1686c952c1c4SAnshuman Khandual } 1687c952c1c4SAnshuman Khandual regs->nip += 4; 1688c952c1c4SAnshuman Khandual emulate_single_step(regs); 1689c952c1c4SAnshuman Khandual } 16902517617eSMichael Neuling return; 1691b14b6260SMichael Ellerman } 1692b14b6260SMichael Ellerman 1693172f7aaaSCyril Bur if (status == FSCR_TM_LG) { 1694172f7aaaSCyril Bur /* 1695172f7aaaSCyril Bur * If we're here then the hardware is TM aware because it 1696172f7aaaSCyril Bur * generated an exception with FSCR_TM set. 1697172f7aaaSCyril Bur * 1698172f7aaaSCyril Bur * If cpu_has_feature(CPU_FTR_TM) is false, then either firmware 1699172f7aaaSCyril Bur * told us not to do TM, or the kernel is not built with TM 1700172f7aaaSCyril Bur * support.
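 * (User space can avoid tripping this at all by checking the HTM bit
 * in the auxiliary vector before issuing any transaction; a minimal
 * sketch, assuming glibc's getauxval() and the PPC_FEATURE2_HTM bit
 * from the uapi headers:
 *
 *	#include <sys/auxv.h>
 *
 *	if (getauxval(AT_HWCAP2) & PPC_FEATURE2_HTM)
 *		;	// tbegin./tend. may be used
 *	else
 *		;	// no TM: a tbegin. would land in this handler
 * )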
1701172f7aaaSCyril Bur * 1702172f7aaaSCyril Bur * If both of those things are true, then userspace can spam the 1703172f7aaaSCyril Bur * console by triggering the printk() below just by continually 1704172f7aaaSCyril Bur * doing tbegin (or any TM instruction). So in that case just 1705172f7aaaSCyril Bur * send the process a SIGILL immediately. 1706172f7aaaSCyril Bur */ 1707172f7aaaSCyril Bur if (!cpu_has_feature(CPU_FTR_TM)) 1708172f7aaaSCyril Bur goto out; 1709172f7aaaSCyril Bur 1710172f7aaaSCyril Bur tm_unavailable(regs); 1711172f7aaaSCyril Bur return; 1712172f7aaaSCyril Bur } 1713172f7aaaSCyril Bur 171493c2ec0fSBalbir Singh pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n", 171593c2ec0fSBalbir Singh hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr); 1716d0c0c9a1SMichael Neuling 1717172f7aaaSCyril Bur out: 1718d0c0c9a1SMichael Neuling _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1719d0c0c9a1SMichael Neuling } 17202517617eSMichael Neuling #endif 1721d0c0c9a1SMichael Neuling 1722f54db641SMichael Neuling #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1723f54db641SMichael Neuling 1724f54db641SMichael Neuling void fp_unavailable_tm(struct pt_regs *regs) 1725f54db641SMichael Neuling { 1726f54db641SMichael Neuling /* Note: This does not handle any kind of FP laziness. */ 1727f54db641SMichael Neuling 1728f54db641SMichael Neuling TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n", 1729f54db641SMichael Neuling regs->nip, regs->msr); 1730f54db641SMichael Neuling 1731f54db641SMichael Neuling /* We can only have got here if the task started using FP after 1732f54db641SMichael Neuling * beginning the transaction. So, the transactional regs are just a 1733f54db641SMichael Neuling * copy of the checkpointed ones. But, we still need to recheckpoint 1734f54db641SMichael Neuling * as we're enabling FP for the process; it will return, abort the 1735f54db641SMichael Neuling * transaction, and probably retry but now with FP enabled. So the 1736f54db641SMichael Neuling * checkpointed FP registers need to be loaded. 1737f54db641SMichael Neuling */ 1738d31626f7SPaul Mackerras tm_reclaim_current(TM_CAUSE_FAC_UNAV); 1739f54db641SMichael Neuling /* Reclaim didn't save out any FPRs to transact_fprs. */ 1740f54db641SMichael Neuling 1741f54db641SMichael Neuling /* Enable FP for the task: */ 1742a7771176SCyril Bur current->thread.load_fp = 1; 1743f54db641SMichael Neuling 1744f54db641SMichael Neuling /* This loads and recheckpoints the FP registers from 1745f54db641SMichael Neuling * thread.fpr[]. They will remain in registers after the 1746f54db641SMichael Neuling * checkpoint so we don't need to reload them after. 17473ac8ff1cSPaul Mackerras * If VMX is in use, the VRs now hold checkpointed values, 17483ac8ff1cSPaul Mackerras * so we don't want to load the VRs from the thread_struct. 1749f54db641SMichael Neuling */ 1750eb5c3f1cSCyril Bur tm_recheckpoint(¤t->thread); 1751f54db641SMichael Neuling } 1752f54db641SMichael Neuling 1753f54db641SMichael Neuling void altivec_unavailable_tm(struct pt_regs *regs) 1754f54db641SMichael Neuling { 1755f54db641SMichael Neuling /* See the comments in fp_unavailable_tm(). This function operates 1756f54db641SMichael Neuling * the same way. 
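 *
 * Roughly how either situation arises: a transaction is started and
 * the task then touches FP (or, here, VMX) state for the first time,
 * so the facility-unavailable interrupt fires while transactional and
 * the transaction has to be reclaimed and recheckpointed with the new
 * state enabled. A minimal user-space sketch (assuming GCC's -mhtm
 * builtins __builtin_tbegin/__builtin_tend; vec_or_fp_work() is a
 * placeholder for any code that first uses the facility):
 *
 *	if (__builtin_tbegin(0)) {
 *		vec_or_fp_work();	// first FP/VMX use after tbegin.
 *		__builtin_tend(0);
 *	}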
1757f54db641SMichael Neuling */ 1758f54db641SMichael Neuling 1759f54db641SMichael Neuling TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx," 1760f54db641SMichael Neuling "MSR=%lx\n", 1761f54db641SMichael Neuling regs->nip, regs->msr); 1762d31626f7SPaul Mackerras tm_reclaim_current(TM_CAUSE_FAC_UNAV); 1763a7771176SCyril Bur current->thread.load_vec = 1; 1764eb5c3f1cSCyril Bur tm_recheckpoint(¤t->thread); 1765f54db641SMichael Neuling current->thread.used_vr = 1; 17663ac8ff1cSPaul Mackerras } 17673ac8ff1cSPaul Mackerras 1768f54db641SMichael Neuling void vsx_unavailable_tm(struct pt_regs *regs) 1769f54db641SMichael Neuling { 1770f54db641SMichael Neuling /* See the comments in fp_unavailable_tm(). This works similarly, 1771f54db641SMichael Neuling * though we're loading both FP and VEC registers in here. 1772f54db641SMichael Neuling * 1773f54db641SMichael Neuling * If FP isn't in use, load FP regs. If VEC isn't in use, load VEC 1774f54db641SMichael Neuling * regs. Either way, set MSR_VSX. 1775f54db641SMichael Neuling */ 1776f54db641SMichael Neuling 1777f54db641SMichael Neuling TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx," 1778f54db641SMichael Neuling "MSR=%lx\n", 1779f54db641SMichael Neuling regs->nip, regs->msr); 1780f54db641SMichael Neuling 17813ac8ff1cSPaul Mackerras current->thread.used_vsr = 1; 17823ac8ff1cSPaul Mackerras 1783f54db641SMichael Neuling /* This reclaims FP and/or VR regs if they're already enabled */ 1784d31626f7SPaul Mackerras tm_reclaim_current(TM_CAUSE_FAC_UNAV); 1785f54db641SMichael Neuling 1786a7771176SCyril Bur current->thread.load_vec = 1; 1787a7771176SCyril Bur current->thread.load_fp = 1; 17883ac8ff1cSPaul Mackerras 1789eb5c3f1cSCyril Bur tm_recheckpoint(¤t->thread); 1790f54db641SMichael Neuling } 1791f54db641SMichael Neuling #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 1792f54db641SMichael Neuling 1793dc1c1ca3SStephen Rothwell void performance_monitor_exception(struct pt_regs *regs) 1794dc1c1ca3SStephen Rothwell { 179569111bacSChristoph Lameter __this_cpu_inc(irq_stat.pmu_irqs); 179689713ed1SAnton Blanchard 1797dc1c1ca3SStephen Rothwell perf_irq(regs); 1798dc1c1ca3SStephen Rothwell } 1799dc1c1ca3SStephen Rothwell 1800172ae2e7SDave Kleikamp #ifdef CONFIG_PPC_ADV_DEBUG_REGS 18013bffb652SDave Kleikamp static void handle_debug(struct pt_regs *regs, unsigned long debug_status) 18023bffb652SDave Kleikamp { 18033bffb652SDave Kleikamp int changed = 0; 18043bffb652SDave Kleikamp /* 18053bffb652SDave Kleikamp * Determine the cause of the debug event, clear the 18063bffb652SDave Kleikamp * event flags and send a trap to the handler. Torez 18073bffb652SDave Kleikamp */ 18083bffb652SDave Kleikamp if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) { 18093bffb652SDave Kleikamp dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W); 18103bffb652SDave Kleikamp #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE 181151ae8d4aSBharat Bhushan current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE; 18123bffb652SDave Kleikamp #endif 181347355040SEric W. Biederman do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, 18143bffb652SDave Kleikamp 5); 18153bffb652SDave Kleikamp changed |= 0x01; 18163bffb652SDave Kleikamp } else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) { 18173bffb652SDave Kleikamp dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W); 181847355040SEric W. 
Biederman do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, 18193bffb652SDave Kleikamp 6); 18203bffb652SDave Kleikamp changed |= 0x01; 18213bffb652SDave Kleikamp } else if (debug_status & DBSR_IAC1) { 182251ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_IAC1; 18233bffb652SDave Kleikamp dbcr_iac_range(current) &= ~DBCR_IAC12MODE; 182447355040SEric W. Biederman do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, 18253bffb652SDave Kleikamp 1); 18263bffb652SDave Kleikamp changed |= 0x01; 18273bffb652SDave Kleikamp } else if (debug_status & DBSR_IAC2) { 182851ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_IAC2; 182947355040SEric W. Biederman do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, 18303bffb652SDave Kleikamp 2); 18313bffb652SDave Kleikamp changed |= 0x01; 18323bffb652SDave Kleikamp } else if (debug_status & DBSR_IAC3) { 183351ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_IAC3; 18343bffb652SDave Kleikamp dbcr_iac_range(current) &= ~DBCR_IAC34MODE; 183547355040SEric W. Biederman do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, 18363bffb652SDave Kleikamp 3); 18373bffb652SDave Kleikamp changed |= 0x01; 18383bffb652SDave Kleikamp } else if (debug_status & DBSR_IAC4) { 183951ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_IAC4; 184047355040SEric W. Biederman do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, 18413bffb652SDave Kleikamp 4); 18423bffb652SDave Kleikamp changed |= 0x01; 18433bffb652SDave Kleikamp } 18443bffb652SDave Kleikamp /* 18453bffb652SDave Kleikamp * At the point this routine was called, the MSR(DE) was turned off. 18463bffb652SDave Kleikamp * Check all other debug flags and see if that bit needs to be turned 18473bffb652SDave Kleikamp * back on or not. 18483bffb652SDave Kleikamp */ 184951ae8d4aSBharat Bhushan if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0, 185051ae8d4aSBharat Bhushan current->thread.debug.dbcr1)) 18513bffb652SDave Kleikamp regs->msr |= MSR_DE; 18523bffb652SDave Kleikamp else 18533bffb652SDave Kleikamp /* Make sure the IDM flag is off */ 185451ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_IDM; 18553bffb652SDave Kleikamp 18563bffb652SDave Kleikamp if (changed & 0x01) 185751ae8d4aSBharat Bhushan mtspr(SPRN_DBCR0, current->thread.debug.dbcr0); 18583bffb652SDave Kleikamp } 185914cf11afSPaul Mackerras 186003465f89SNicholas Piggin void DebugException(struct pt_regs *regs, unsigned long debug_status) 186114cf11afSPaul Mackerras { 186251ae8d4aSBharat Bhushan current->thread.debug.dbsr = debug_status; 18633bffb652SDave Kleikamp 1864ec097c84SRoland McGrath /* Hack alert: On BookE, Branch Taken stops on the branch itself, while 1865ec097c84SRoland McGrath * on server, it stops on the target of the branch. 
In order to simulate 1866ec097c84SRoland McGrath * the server behaviour, we thus restart right away with a single step 1867ec097c84SRoland McGrath * instead of stopping here when hitting a BT 1868ec097c84SRoland McGrath */ 1869ec097c84SRoland McGrath if (debug_status & DBSR_BT) { 1870ec097c84SRoland McGrath regs->msr &= ~MSR_DE; 1871ec097c84SRoland McGrath 1872ec097c84SRoland McGrath /* Disable BT */ 1873ec097c84SRoland McGrath mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT); 1874ec097c84SRoland McGrath /* Clear the BT event */ 1875ec097c84SRoland McGrath mtspr(SPRN_DBSR, DBSR_BT); 1876ec097c84SRoland McGrath 1877ec097c84SRoland McGrath /* Do the single step trick only when coming from userspace */ 1878ec097c84SRoland McGrath if (user_mode(regs)) { 187951ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_BT; 188051ae8d4aSBharat Bhushan current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC; 1881ec097c84SRoland McGrath regs->msr |= MSR_DE; 1882ec097c84SRoland McGrath return; 1883ec097c84SRoland McGrath } 1884ec097c84SRoland McGrath 18856cc89badSNaveen N. Rao if (kprobe_post_handler(regs)) 18866cc89badSNaveen N. Rao return; 18876cc89badSNaveen N. Rao 1888ec097c84SRoland McGrath if (notify_die(DIE_SSTEP, "block_step", regs, 5, 1889ec097c84SRoland McGrath 5, SIGTRAP) == NOTIFY_STOP) { 1890ec097c84SRoland McGrath return; 1891ec097c84SRoland McGrath } 1892ec097c84SRoland McGrath if (debugger_sstep(regs)) 1893ec097c84SRoland McGrath return; 1894ec097c84SRoland McGrath } else if (debug_status & DBSR_IC) { /* Instruction complete */ 189514cf11afSPaul Mackerras regs->msr &= ~MSR_DE; 1896f8279621SKumar Gala 189714cf11afSPaul Mackerras /* Disable instruction completion */ 189814cf11afSPaul Mackerras mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC); 189914cf11afSPaul Mackerras /* Clear the instruction completion event */ 190014cf11afSPaul Mackerras mtspr(SPRN_DBSR, DBSR_IC); 1901f8279621SKumar Gala 19026cc89badSNaveen N. Rao if (kprobe_post_handler(regs)) 19036cc89badSNaveen N. Rao return; 19046cc89badSNaveen N. 
Rao 1905f8279621SKumar Gala if (notify_die(DIE_SSTEP, "single_step", regs, 5, 1906f8279621SKumar Gala 5, SIGTRAP) == NOTIFY_STOP) { 190714cf11afSPaul Mackerras return; 190814cf11afSPaul Mackerras } 1909f8279621SKumar Gala 1910f8279621SKumar Gala if (debugger_sstep(regs)) 1911f8279621SKumar Gala return; 1912f8279621SKumar Gala 19133bffb652SDave Kleikamp if (user_mode(regs)) { 191451ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_IC; 191551ae8d4aSBharat Bhushan if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0, 191651ae8d4aSBharat Bhushan current->thread.debug.dbcr1)) 19173bffb652SDave Kleikamp regs->msr |= MSR_DE; 19183bffb652SDave Kleikamp else 19193bffb652SDave Kleikamp /* Make sure the IDM bit is off */ 192051ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_IDM; 19213bffb652SDave Kleikamp } 1922f8279621SKumar Gala 1923f8279621SKumar Gala _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); 19243bffb652SDave Kleikamp } else 19253bffb652SDave Kleikamp handle_debug(regs, debug_status); 192614cf11afSPaul Mackerras } 192703465f89SNicholas Piggin NOKPROBE_SYMBOL(DebugException); 1928172ae2e7SDave Kleikamp #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ 192914cf11afSPaul Mackerras 193014cf11afSPaul Mackerras #if !defined(CONFIG_TAU_INT) 193114cf11afSPaul Mackerras void TAUException(struct pt_regs *regs) 193214cf11afSPaul Mackerras { 193314cf11afSPaul Mackerras printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n", 193414cf11afSPaul Mackerras regs->nip, regs->msr, regs->trap, print_tainted()); 193514cf11afSPaul Mackerras } 193614cf11afSPaul Mackerras #endif /* CONFIG_INT_TAU */ 193714cf11afSPaul Mackerras 193814cf11afSPaul Mackerras #ifdef CONFIG_ALTIVEC 1939dc1c1ca3SStephen Rothwell void altivec_assist_exception(struct pt_regs *regs) 194014cf11afSPaul Mackerras { 194114cf11afSPaul Mackerras int err; 194214cf11afSPaul Mackerras 194314cf11afSPaul Mackerras if (!user_mode(regs)) { 194414cf11afSPaul Mackerras printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode" 194514cf11afSPaul Mackerras " at %lx\n", regs->nip); 19468dad3f92SPaul Mackerras die("Kernel VMX/Altivec assist exception", regs, SIGILL); 194714cf11afSPaul Mackerras } 194814cf11afSPaul Mackerras 1949dc1c1ca3SStephen Rothwell flush_altivec_to_thread(current); 1950dc1c1ca3SStephen Rothwell 1951eecff81dSAnton Blanchard PPC_WARN_EMULATED(altivec, regs); 195214cf11afSPaul Mackerras err = emulate_altivec(regs); 195314cf11afSPaul Mackerras if (err == 0) { 195414cf11afSPaul Mackerras regs->nip += 4; /* skip emulated instruction */ 195514cf11afSPaul Mackerras emulate_single_step(regs); 195614cf11afSPaul Mackerras return; 195714cf11afSPaul Mackerras } 195814cf11afSPaul Mackerras 195914cf11afSPaul Mackerras if (err == -EFAULT) { 196014cf11afSPaul Mackerras /* got an error reading the instruction */ 196114cf11afSPaul Mackerras _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); 196214cf11afSPaul Mackerras } else { 196314cf11afSPaul Mackerras /* didn't recognize the instruction */ 196414cf11afSPaul Mackerras /* XXX quick hack for now: set the non-Java bit in the VSCR */ 196576462232SChristian Dietrich printk_ratelimited(KERN_ERR "Unrecognized altivec instruction " 196614cf11afSPaul Mackerras "in %s at %lx\n", current->comm, regs->nip); 1967de79f7b9SPaul Mackerras current->thread.vr_state.vscr.u[3] |= 0x10000; 196814cf11afSPaul Mackerras } 196914cf11afSPaul Mackerras } 197014cf11afSPaul Mackerras #endif /* CONFIG_ALTIVEC */ 197114cf11afSPaul Mackerras 197214cf11afSPaul Mackerras #ifdef CONFIG_FSL_BOOKE 197314cf11afSPaul Mackerras 
void CacheLockingException(struct pt_regs *regs, unsigned long address, 197414cf11afSPaul Mackerras unsigned long error_code) 197514cf11afSPaul Mackerras { 197614cf11afSPaul Mackerras /* We treat cache locking instructions from the user 197714cf11afSPaul Mackerras * as priv ops, in the future we could try to do 197814cf11afSPaul Mackerras * something smarter 197914cf11afSPaul Mackerras */ 198014cf11afSPaul Mackerras if (error_code & (ESR_DLK|ESR_ILK)) 198114cf11afSPaul Mackerras _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); 198214cf11afSPaul Mackerras return; 198314cf11afSPaul Mackerras } 198414cf11afSPaul Mackerras #endif /* CONFIG_FSL_BOOKE */ 198514cf11afSPaul Mackerras 198614cf11afSPaul Mackerras #ifdef CONFIG_SPE 198714cf11afSPaul Mackerras void SPEFloatingPointException(struct pt_regs *regs) 198814cf11afSPaul Mackerras { 19896a800f36SLiu Yu extern int do_spe_mathemu(struct pt_regs *regs); 199014cf11afSPaul Mackerras unsigned long spefscr; 199114cf11afSPaul Mackerras int fpexc_mode; 1992aeb1c0f6SEric W. Biederman int code = FPE_FLTUNK; 19936a800f36SLiu Yu int err; 19946a800f36SLiu Yu 1995685659eeSyu liu flush_spe_to_thread(current); 199614cf11afSPaul Mackerras 199714cf11afSPaul Mackerras spefscr = current->thread.spefscr; 199814cf11afSPaul Mackerras fpexc_mode = current->thread.fpexc_mode; 199914cf11afSPaul Mackerras 200014cf11afSPaul Mackerras if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) { 200114cf11afSPaul Mackerras code = FPE_FLTOVF; 200214cf11afSPaul Mackerras } 200314cf11afSPaul Mackerras else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) { 200414cf11afSPaul Mackerras code = FPE_FLTUND; 200514cf11afSPaul Mackerras } 200614cf11afSPaul Mackerras else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV)) 200714cf11afSPaul Mackerras code = FPE_FLTDIV; 200814cf11afSPaul Mackerras else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) { 200914cf11afSPaul Mackerras code = FPE_FLTINV; 201014cf11afSPaul Mackerras } 201114cf11afSPaul Mackerras else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES)) 201214cf11afSPaul Mackerras code = FPE_FLTRES; 201314cf11afSPaul Mackerras 20146a800f36SLiu Yu err = do_spe_mathemu(regs); 20156a800f36SLiu Yu if (err == 0) { 20166a800f36SLiu Yu regs->nip += 4; /* skip emulated instruction */ 20176a800f36SLiu Yu emulate_single_step(regs); 201814cf11afSPaul Mackerras return; 201914cf11afSPaul Mackerras } 20206a800f36SLiu Yu 20216a800f36SLiu Yu if (err == -EFAULT) { 20226a800f36SLiu Yu /* got an error reading the instruction */ 20236a800f36SLiu Yu _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); 20246a800f36SLiu Yu } else if (err == -EINVAL) { 20256a800f36SLiu Yu /* didn't recognize the instruction */ 20266a800f36SLiu Yu printk(KERN_ERR "unrecognized spe instruction " 20276a800f36SLiu Yu "in %s at %lx\n", current->comm, regs->nip); 20286a800f36SLiu Yu } else { 20296a800f36SLiu Yu _exception(SIGFPE, regs, code, regs->nip); 20306a800f36SLiu Yu } 20316a800f36SLiu Yu 20326a800f36SLiu Yu return; 20336a800f36SLiu Yu } 20346a800f36SLiu Yu 20356a800f36SLiu Yu void SPEFloatingPointRoundException(struct pt_regs *regs) 20366a800f36SLiu Yu { 20376a800f36SLiu Yu extern int speround_handler(struct pt_regs *regs); 20386a800f36SLiu Yu int err; 20396a800f36SLiu Yu 20406a800f36SLiu Yu preempt_disable(); 20416a800f36SLiu Yu if (regs->msr & MSR_SPE) 20426a800f36SLiu Yu giveup_spe(current); 20436a800f36SLiu Yu preempt_enable(); 20446a800f36SLiu Yu 20456a800f36SLiu Yu regs->nip -= 4; 20466a800f36SLiu Yu 
err = speround_handler(regs); 20476a800f36SLiu Yu if (err == 0) { 20486a800f36SLiu Yu regs->nip += 4; /* skip emulated instruction */ 20496a800f36SLiu Yu emulate_single_step(regs); 20506a800f36SLiu Yu return; 20516a800f36SLiu Yu } 20526a800f36SLiu Yu 20536a800f36SLiu Yu if (err == -EFAULT) { 20546a800f36SLiu Yu /* got an error reading the instruction */ 20556a800f36SLiu Yu _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); 20566a800f36SLiu Yu } else if (err == -EINVAL) { 20576a800f36SLiu Yu /* didn't recognize the instruction */ 20586a800f36SLiu Yu printk(KERN_ERR "unrecognized spe instruction " 20596a800f36SLiu Yu "in %s at %lx\n", current->comm, regs->nip); 20606a800f36SLiu Yu } else { 2061aeb1c0f6SEric W. Biederman _exception(SIGFPE, regs, FPE_FLTUNK, regs->nip); 20626a800f36SLiu Yu return; 20636a800f36SLiu Yu } 20646a800f36SLiu Yu } 206514cf11afSPaul Mackerras #endif 206614cf11afSPaul Mackerras 2067dc1c1ca3SStephen Rothwell /* 2068dc1c1ca3SStephen Rothwell * We enter here if we get an unrecoverable exception, that is, one 2069dc1c1ca3SStephen Rothwell * that happened at a point where the RI (recoverable interrupt) bit 2070dc1c1ca3SStephen Rothwell * in the MSR is 0. This indicates that SRR0/1 are live, and that 2071dc1c1ca3SStephen Rothwell * we therefore lost state by taking this exception. 2072dc1c1ca3SStephen Rothwell */ 2073dc1c1ca3SStephen Rothwell void unrecoverable_exception(struct pt_regs *regs) 2074dc1c1ca3SStephen Rothwell { 2075dc1c1ca3SStephen Rothwell printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n", 2076dc1c1ca3SStephen Rothwell regs->trap, regs->nip); 2077dc1c1ca3SStephen Rothwell die("Unrecoverable exception", regs, SIGABRT); 2078dc1c1ca3SStephen Rothwell } 207915770a13SNaveen N. Rao NOKPROBE_SYMBOL(unrecoverable_exception); 2080dc1c1ca3SStephen Rothwell 20811e18c17aSJason Gunthorpe #if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x) 208214cf11afSPaul Mackerras /* 208314cf11afSPaul Mackerras * Default handler for a Watchdog exception, 208414cf11afSPaul Mackerras * spins until a reboot occurs 208514cf11afSPaul Mackerras */ 208614cf11afSPaul Mackerras void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs) 208714cf11afSPaul Mackerras { 208814cf11afSPaul Mackerras /* Generic WatchdogHandler, implement your own */ 208914cf11afSPaul Mackerras mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE)); 209014cf11afSPaul Mackerras return; 209114cf11afSPaul Mackerras } 209214cf11afSPaul Mackerras 209314cf11afSPaul Mackerras void WatchdogException(struct pt_regs *regs) 209414cf11afSPaul Mackerras { 209514cf11afSPaul Mackerras printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n"); 209614cf11afSPaul Mackerras WatchdogHandler(regs); 209714cf11afSPaul Mackerras } 209814cf11afSPaul Mackerras #endif 2099dc1c1ca3SStephen Rothwell 2100dc1c1ca3SStephen Rothwell /* 2101dc1c1ca3SStephen Rothwell * We enter here if we discover during exception entry that we are 2102dc1c1ca3SStephen Rothwell * running in supervisor mode with a userspace value in the stack pointer. 2103dc1c1ca3SStephen Rothwell */ 2104dc1c1ca3SStephen Rothwell void kernel_bad_stack(struct pt_regs *regs) 2105dc1c1ca3SStephen Rothwell { 2106dc1c1ca3SStephen Rothwell printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n", 2107dc1c1ca3SStephen Rothwell regs->gpr[1], regs->nip); 2108dc1c1ca3SStephen Rothwell die("Bad kernel stack pointer", regs, SIGABRT); 2109dc1c1ca3SStephen Rothwell } 211015770a13SNaveen N. 
Rao NOKPROBE_SYMBOL(kernel_bad_stack); 211114cf11afSPaul Mackerras 211214cf11afSPaul Mackerras void __init trap_init(void) 211314cf11afSPaul Mackerras { 211414cf11afSPaul Mackerras } 211580947e7cSGeert Uytterhoeven 211680947e7cSGeert Uytterhoeven 211780947e7cSGeert Uytterhoeven #ifdef CONFIG_PPC_EMULATED_STATS 211880947e7cSGeert Uytterhoeven 211980947e7cSGeert Uytterhoeven #define WARN_EMULATED_SETUP(type) .type = { .name = #type } 212080947e7cSGeert Uytterhoeven 212180947e7cSGeert Uytterhoeven struct ppc_emulated ppc_emulated = { 212280947e7cSGeert Uytterhoeven #ifdef CONFIG_ALTIVEC 212380947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(altivec), 212480947e7cSGeert Uytterhoeven #endif 212580947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(dcba), 212680947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(dcbz), 212780947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(fp_pair), 212880947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(isel), 212980947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(mcrxr), 213080947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(mfpvr), 213180947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(multiple), 213280947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(popcntb), 213380947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(spe), 213480947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(string), 2135a3821b2aSScott Wood WARN_EMULATED_SETUP(sync), 213680947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(unaligned), 213780947e7cSGeert Uytterhoeven #ifdef CONFIG_MATH_EMULATION 213880947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(math), 213980947e7cSGeert Uytterhoeven #endif 214080947e7cSGeert Uytterhoeven #ifdef CONFIG_VSX 214180947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(vsx), 214280947e7cSGeert Uytterhoeven #endif 2143efcac658SAlexey Kardashevskiy #ifdef CONFIG_PPC64 2144efcac658SAlexey Kardashevskiy WARN_EMULATED_SETUP(mfdscr), 2145efcac658SAlexey Kardashevskiy WARN_EMULATED_SETUP(mtdscr), 2146f83319d7SAnton Blanchard WARN_EMULATED_SETUP(lq_stq), 21475080332cSMichael Neuling WARN_EMULATED_SETUP(lxvw4x), 21485080332cSMichael Neuling WARN_EMULATED_SETUP(lxvh8x), 21495080332cSMichael Neuling WARN_EMULATED_SETUP(lxvd2x), 21505080332cSMichael Neuling WARN_EMULATED_SETUP(lxvb16x), 2151efcac658SAlexey Kardashevskiy #endif 215280947e7cSGeert Uytterhoeven }; 215380947e7cSGeert Uytterhoeven 215480947e7cSGeert Uytterhoeven u32 ppc_warn_emulated; 215580947e7cSGeert Uytterhoeven 215680947e7cSGeert Uytterhoeven void ppc_warn_emulated_print(const char *type) 215780947e7cSGeert Uytterhoeven { 215876462232SChristian Dietrich pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm, 215980947e7cSGeert Uytterhoeven type); 216080947e7cSGeert Uytterhoeven } 216180947e7cSGeert Uytterhoeven 216280947e7cSGeert Uytterhoeven static int __init ppc_warn_emulated_init(void) 216380947e7cSGeert Uytterhoeven { 216480947e7cSGeert Uytterhoeven struct dentry *dir, *d; 216580947e7cSGeert Uytterhoeven unsigned int i; 216680947e7cSGeert Uytterhoeven struct ppc_emulated_entry *entries = (void *)&ppc_emulated; 216780947e7cSGeert Uytterhoeven 216880947e7cSGeert Uytterhoeven if (!powerpc_debugfs_root) 216980947e7cSGeert Uytterhoeven return -ENODEV; 217080947e7cSGeert Uytterhoeven 217180947e7cSGeert Uytterhoeven dir = debugfs_create_dir("emulated_instructions", 217280947e7cSGeert Uytterhoeven powerpc_debugfs_root); 217380947e7cSGeert Uytterhoeven if (!dir) 217480947e7cSGeert Uytterhoeven return -ENOMEM; 217580947e7cSGeert Uytterhoeven 217657ad583fSRussell Currey d = debugfs_create_u32("do_warn", 0644, dir, 217780947e7cSGeert Uytterhoeven 
&ppc_warn_emulated); 217880947e7cSGeert Uytterhoeven if (!d) 217980947e7cSGeert Uytterhoeven goto fail; 218080947e7cSGeert Uytterhoeven 218180947e7cSGeert Uytterhoeven for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) { 218257ad583fSRussell Currey d = debugfs_create_u32(entries[i].name, 0644, dir, 218380947e7cSGeert Uytterhoeven (u32 *)&entries[i].val.counter); 218480947e7cSGeert Uytterhoeven if (!d) 218580947e7cSGeert Uytterhoeven goto fail; 218680947e7cSGeert Uytterhoeven } 218780947e7cSGeert Uytterhoeven 218880947e7cSGeert Uytterhoeven return 0; 218980947e7cSGeert Uytterhoeven 219080947e7cSGeert Uytterhoeven fail: 219180947e7cSGeert Uytterhoeven debugfs_remove_recursive(dir); 219280947e7cSGeert Uytterhoeven return -ENOMEM; 219380947e7cSGeert Uytterhoeven } 219480947e7cSGeert Uytterhoeven 219580947e7cSGeert Uytterhoeven device_initcall(ppc_warn_emulated_init); 219680947e7cSGeert Uytterhoeven 219780947e7cSGeert Uytterhoeven #endif /* CONFIG_PPC_EMULATED_STATS */ 2198
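/*
 * With CONFIG_PPC_EMULATED_STATS=y the counters registered above show
 * up as one debugfs file per emulated instruction class, plus a
 * "do_warn" switch for the rate-limited per-process warning printed by
 * ppc_warn_emulated_print(). A minimal user-space sketch for reading
 * one of the counters (assuming debugfs is mounted at the conventional
 * /sys/kernel/debug location):
 *
 *	FILE *f = fopen("/sys/kernel/debug/powerpc/emulated_instructions/mfpvr", "r");
 *	unsigned int count = 0;
 *
 *	if (f) {
 *		if (fscanf(f, "%u", &count) != 1)
 *			count = 0;
 *		fclose(f);
 *	}
 *	printf("mfpvr emulated %u times\n", count);
 */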