114cf11afSPaul Mackerras /* 214cf11afSPaul Mackerras * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 3fe04b112SScott Wood * Copyright 2007-2010 Freescale Semiconductor, Inc. 414cf11afSPaul Mackerras * 514cf11afSPaul Mackerras * This program is free software; you can redistribute it and/or 614cf11afSPaul Mackerras * modify it under the terms of the GNU General Public License 714cf11afSPaul Mackerras * as published by the Free Software Foundation; either version 814cf11afSPaul Mackerras * 2 of the License, or (at your option) any later version. 914cf11afSPaul Mackerras * 1014cf11afSPaul Mackerras * Modified by Cort Dougan (cort@cs.nmt.edu) 1114cf11afSPaul Mackerras * and Paul Mackerras (paulus@samba.org) 1214cf11afSPaul Mackerras */ 1314cf11afSPaul Mackerras 1414cf11afSPaul Mackerras /* 1514cf11afSPaul Mackerras * This file handles the architecture-dependent parts of hardware exceptions 1614cf11afSPaul Mackerras */ 1714cf11afSPaul Mackerras 1814cf11afSPaul Mackerras #include <linux/errno.h> 1914cf11afSPaul Mackerras #include <linux/sched.h> 20b17b0153SIngo Molnar #include <linux/sched/debug.h> 2114cf11afSPaul Mackerras #include <linux/kernel.h> 2214cf11afSPaul Mackerras #include <linux/mm.h> 2399cd1302SRam Pai #include <linux/pkeys.h> 2414cf11afSPaul Mackerras #include <linux/stddef.h> 2514cf11afSPaul Mackerras #include <linux/unistd.h> 268dad3f92SPaul Mackerras #include <linux/ptrace.h> 2714cf11afSPaul Mackerras #include <linux/user.h> 2814cf11afSPaul Mackerras #include <linux/interrupt.h> 2914cf11afSPaul Mackerras #include <linux/init.h> 308a39b05fSPaul Gortmaker #include <linux/extable.h> 318a39b05fSPaul Gortmaker #include <linux/module.h> /* print_modules */ 328dad3f92SPaul Mackerras #include <linux/prctl.h> 3314cf11afSPaul Mackerras #include <linux/delay.h> 3414cf11afSPaul Mackerras #include <linux/kprobes.h> 35cc532915SMichael Ellerman #include <linux/kexec.h> 365474c120SMichael Hanselmann #include <linux/backlight.h> 3773c9ceabSJeremy Fitzhardinge #include <linux/bug.h> 381eeb66a1SChristoph Hellwig #include <linux/kdebug.h> 3976462232SChristian Dietrich #include <linux/ratelimit.h> 40ba12eedeSLi Zhong #include <linux/context_tracking.h> 415080332cSMichael Neuling #include <linux/smp.h> 4235adacd6SNicholas Piggin #include <linux/console.h> 4335adacd6SNicholas Piggin #include <linux/kmsg_dump.h> 4414cf11afSPaul Mackerras 4580947e7cSGeert Uytterhoeven #include <asm/emulated_ops.h> 4614cf11afSPaul Mackerras #include <asm/pgtable.h> 477c0f6ba6SLinus Torvalds #include <linux/uaccess.h> 487644d581SMichael Ellerman #include <asm/debugfs.h> 4914cf11afSPaul Mackerras #include <asm/io.h> 5086417780SPaul Mackerras #include <asm/machdep.h> 5186417780SPaul Mackerras #include <asm/rtas.h> 52f7f6f4feSDavid Gibson #include <asm/pmc.h> 5314cf11afSPaul Mackerras #include <asm/reg.h> 5414cf11afSPaul Mackerras #ifdef CONFIG_PMAC_BACKLIGHT 5514cf11afSPaul Mackerras #include <asm/backlight.h> 5614cf11afSPaul Mackerras #endif 57dc1c1ca3SStephen Rothwell #ifdef CONFIG_PPC64 5886417780SPaul Mackerras #include <asm/firmware.h> 59dc1c1ca3SStephen Rothwell #include <asm/processor.h> 606ce6c629SMichael Neuling #include <asm/tm.h> 61dc1c1ca3SStephen Rothwell #endif 62c0ce7d08SDavid Wilder #include <asm/kexec.h> 6316c57b36SKumar Gala #include <asm/ppc-opcode.h> 64cce1f106SShaohui Xie #include <asm/rio.h> 65ebaeb5aeSMahesh Salgaonkar #include <asm/fadump.h> 66ae3a197eSDavid Howells #include <asm/switch_to.h> 67f54db641SMichael Neuling #include <asm/tm.h> 68ae3a197eSDavid Howells #include <asm/debug.h> 
6942f5b4caSDaniel Axtens #include <asm/asm-prototypes.h> 70fd7bacbcSMahesh Salgaonkar #include <asm/hmi.h> 714e0e3435SHongtao Jia #include <sysdev/fsl_pci.h> 726cc89badSNaveen N. Rao #include <asm/kprobes.h> 73a99b9c5eSMurilo Opsfelder Araujo #include <asm/stacktrace.h> 74dc1c1ca3SStephen Rothwell 75da665885SThiago Jung Bauermann #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE) 765be3492fSAnton Blanchard int (*__debugger)(struct pt_regs *regs) __read_mostly; 775be3492fSAnton Blanchard int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly; 785be3492fSAnton Blanchard int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly; 795be3492fSAnton Blanchard int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly; 805be3492fSAnton Blanchard int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly; 819422de3eSMichael Neuling int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly; 825be3492fSAnton Blanchard int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly; 8314cf11afSPaul Mackerras 8414cf11afSPaul Mackerras EXPORT_SYMBOL(__debugger); 8514cf11afSPaul Mackerras EXPORT_SYMBOL(__debugger_ipi); 8614cf11afSPaul Mackerras EXPORT_SYMBOL(__debugger_bpt); 8714cf11afSPaul Mackerras EXPORT_SYMBOL(__debugger_sstep); 8814cf11afSPaul Mackerras EXPORT_SYMBOL(__debugger_iabr_match); 899422de3eSMichael Neuling EXPORT_SYMBOL(__debugger_break_match); 9014cf11afSPaul Mackerras EXPORT_SYMBOL(__debugger_fault_handler); 9114cf11afSPaul Mackerras #endif 9214cf11afSPaul Mackerras 938b3c34cfSMichael Neuling /* Transactional Memory trap debug */ 948b3c34cfSMichael Neuling #ifdef TM_DEBUG_SW 958b3c34cfSMichael Neuling #define TM_DEBUG(x...) printk(KERN_INFO x) 968b3c34cfSMichael Neuling #else 978b3c34cfSMichael Neuling #define TM_DEBUG(x...) 
do { } while(0) 988b3c34cfSMichael Neuling #endif 998b3c34cfSMichael Neuling 1000f642d61SMurilo Opsfelder Araujo static const char *signame(int signr) 1010f642d61SMurilo Opsfelder Araujo { 1020f642d61SMurilo Opsfelder Araujo switch (signr) { 1030f642d61SMurilo Opsfelder Araujo case SIGBUS: return "bus error"; 1040f642d61SMurilo Opsfelder Araujo case SIGFPE: return "floating point exception"; 1050f642d61SMurilo Opsfelder Araujo case SIGILL: return "illegal instruction"; 1060f642d61SMurilo Opsfelder Araujo case SIGSEGV: return "segfault"; 1070f642d61SMurilo Opsfelder Araujo case SIGTRAP: return "unhandled trap"; 1080f642d61SMurilo Opsfelder Araujo } 1090f642d61SMurilo Opsfelder Araujo 1100f642d61SMurilo Opsfelder Araujo return "unknown signal"; 1110f642d61SMurilo Opsfelder Araujo } 1120f642d61SMurilo Opsfelder Araujo 11314cf11afSPaul Mackerras /* 11414cf11afSPaul Mackerras * Trap & Exception support 11514cf11afSPaul Mackerras */ 11614cf11afSPaul Mackerras 1176031d9d9Santon@samba.org #ifdef CONFIG_PMAC_BACKLIGHT 1186031d9d9Santon@samba.org static void pmac_backlight_unblank(void) 1196031d9d9Santon@samba.org { 1206031d9d9Santon@samba.org mutex_lock(&pmac_backlight_mutex); 1216031d9d9Santon@samba.org if (pmac_backlight) { 1226031d9d9Santon@samba.org struct backlight_properties *props; 1236031d9d9Santon@samba.org 1246031d9d9Santon@samba.org props = &pmac_backlight->props; 1256031d9d9Santon@samba.org props->brightness = props->max_brightness; 1266031d9d9Santon@samba.org props->power = FB_BLANK_UNBLANK; 1276031d9d9Santon@samba.org backlight_update_status(pmac_backlight); 1286031d9d9Santon@samba.org } 1296031d9d9Santon@samba.org mutex_unlock(&pmac_backlight_mutex); 1306031d9d9Santon@samba.org } 1316031d9d9Santon@samba.org #else 1326031d9d9Santon@samba.org static inline void pmac_backlight_unblank(void) { } 1336031d9d9Santon@samba.org #endif 1346031d9d9Santon@samba.org 1356fcd6baaSNicholas Piggin /* 1366fcd6baaSNicholas Piggin * If oops/die is expected to crash the machine, return true here. 1376fcd6baaSNicholas Piggin * 1386fcd6baaSNicholas Piggin * This should not be expected to be 100% accurate, there may be 1396fcd6baaSNicholas Piggin * notifiers registered or other unexpected conditions that may bring 1406fcd6baaSNicholas Piggin * down the kernel. Or if the current process in the kernel is holding 1416fcd6baaSNicholas Piggin * locks or has other critical state, the kernel may become effectively 1426fcd6baaSNicholas Piggin * unusable anyway. 
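 *
 * (Editorial note, inferred from the code below and from oops_end(): the
 * checks roughly mirror the conditions under which the die() path ends in
 * a crash dump or panic -- fadump, kexec-on-crash, and the panic_on_oops /
 * interrupt-context / init-task cases.)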
1436fcd6baaSNicholas Piggin */ 1446fcd6baaSNicholas Piggin bool die_will_crash(void) 1456fcd6baaSNicholas Piggin { 1466fcd6baaSNicholas Piggin if (should_fadump_crash()) 1476fcd6baaSNicholas Piggin return true; 1486fcd6baaSNicholas Piggin if (kexec_should_crash(current)) 1496fcd6baaSNicholas Piggin return true; 1506fcd6baaSNicholas Piggin if (in_interrupt() || panic_on_oops || 1516fcd6baaSNicholas Piggin !current->pid || is_global_init(current)) 1526fcd6baaSNicholas Piggin return true; 1536fcd6baaSNicholas Piggin 1546fcd6baaSNicholas Piggin return false; 1556fcd6baaSNicholas Piggin } 1566fcd6baaSNicholas Piggin 157760ca4dcSAnton Blanchard static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; 158760ca4dcSAnton Blanchard static int die_owner = -1; 159760ca4dcSAnton Blanchard static unsigned int die_nest_count; 160c0ce7d08SDavid Wilder static int die_counter; 161760ca4dcSAnton Blanchard 16235adacd6SNicholas Piggin extern void panic_flush_kmsg_start(void) 16335adacd6SNicholas Piggin { 16435adacd6SNicholas Piggin /* 16535adacd6SNicholas Piggin * These are mostly taken from kernel/panic.c, but tries to do 16635adacd6SNicholas Piggin * relatively minimal work. Don't use delay functions (TB may 16735adacd6SNicholas Piggin * be broken), don't crash dump (need to set a firmware log), 16835adacd6SNicholas Piggin * don't run notifiers. We do want to get some information to 16935adacd6SNicholas Piggin * Linux console. 17035adacd6SNicholas Piggin */ 17135adacd6SNicholas Piggin console_verbose(); 17235adacd6SNicholas Piggin bust_spinlocks(1); 17335adacd6SNicholas Piggin } 17435adacd6SNicholas Piggin 17535adacd6SNicholas Piggin extern void panic_flush_kmsg_end(void) 17635adacd6SNicholas Piggin { 17735adacd6SNicholas Piggin printk_safe_flush_on_panic(); 17835adacd6SNicholas Piggin kmsg_dump(KMSG_DUMP_PANIC); 17935adacd6SNicholas Piggin bust_spinlocks(0); 18035adacd6SNicholas Piggin debug_locks_off(); 18135adacd6SNicholas Piggin console_flush_on_panic(); 18235adacd6SNicholas Piggin } 18335adacd6SNicholas Piggin 18403465f89SNicholas Piggin static unsigned long oops_begin(struct pt_regs *regs) 185760ca4dcSAnton Blanchard { 186760ca4dcSAnton Blanchard int cpu; 18734c2a14fSanton@samba.org unsigned long flags; 18814cf11afSPaul Mackerras 189293e4688Santon@samba.org oops_enter(); 190293e4688Santon@samba.org 191760ca4dcSAnton Blanchard /* racy, but better than risking deadlock. */ 192760ca4dcSAnton Blanchard raw_local_irq_save(flags); 193760ca4dcSAnton Blanchard cpu = smp_processor_id(); 194760ca4dcSAnton Blanchard if (!arch_spin_trylock(&die_lock)) { 195760ca4dcSAnton Blanchard if (cpu == die_owner) 196760ca4dcSAnton Blanchard /* nested oops. 
should stop eventually */; 197760ca4dcSAnton Blanchard else 198760ca4dcSAnton Blanchard arch_spin_lock(&die_lock); 199760ca4dcSAnton Blanchard } 200760ca4dcSAnton Blanchard die_nest_count++; 201760ca4dcSAnton Blanchard die_owner = cpu; 20214cf11afSPaul Mackerras console_verbose(); 20314cf11afSPaul Mackerras bust_spinlocks(1); 2046031d9d9Santon@samba.org if (machine_is(powermac)) 2056031d9d9Santon@samba.org pmac_backlight_unblank(); 206760ca4dcSAnton Blanchard return flags; 20734c2a14fSanton@samba.org } 20803465f89SNicholas Piggin NOKPROBE_SYMBOL(oops_begin); 2095474c120SMichael Hanselmann 21003465f89SNicholas Piggin static void oops_end(unsigned long flags, struct pt_regs *regs, 211760ca4dcSAnton Blanchard int signr) 212760ca4dcSAnton Blanchard { 21314cf11afSPaul Mackerras bust_spinlocks(0); 214373d4d09SRusty Russell add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); 215760ca4dcSAnton Blanchard die_nest_count--; 21658154c8cSAnton Blanchard oops_exit(); 21758154c8cSAnton Blanchard printk("\n"); 2187458e8b2SNicholas Piggin if (!die_nest_count) { 219760ca4dcSAnton Blanchard /* Nest count reaches zero, release the lock. */ 2207458e8b2SNicholas Piggin die_owner = -1; 221760ca4dcSAnton Blanchard arch_spin_unlock(&die_lock); 2227458e8b2SNicholas Piggin } 223760ca4dcSAnton Blanchard raw_local_irq_restore(flags); 224cc532915SMichael Ellerman 225d40b6768SNicholas Piggin /* 226d40b6768SNicholas Piggin * system_reset_excption handles debugger, crash dump, panic, for 0x100 227d40b6768SNicholas Piggin */ 228d40b6768SNicholas Piggin if (TRAP(regs) == 0x100) 229d40b6768SNicholas Piggin return; 230d40b6768SNicholas Piggin 231ebaeb5aeSMahesh Salgaonkar crash_fadump(regs, "die oops"); 232ebaeb5aeSMahesh Salgaonkar 2334388c9b3SNicholas Piggin if (kexec_should_crash(current)) 234cc532915SMichael Ellerman crash_kexec(regs); 2359b00ac06SAnton Blanchard 236760ca4dcSAnton Blanchard if (!signr) 237760ca4dcSAnton Blanchard return; 238760ca4dcSAnton Blanchard 23958154c8cSAnton Blanchard /* 24058154c8cSAnton Blanchard * While our oops output is serialised by a spinlock, output 24158154c8cSAnton Blanchard * from panic() called below can race and corrupt it. If we 24258154c8cSAnton Blanchard * know we are going to panic, delay for 1 second so we have a 24358154c8cSAnton Blanchard * chance to get clean backtraces from all CPUs that are oopsing. 24458154c8cSAnton Blanchard */ 24558154c8cSAnton Blanchard if (in_interrupt() || panic_on_oops || !current->pid || 24658154c8cSAnton Blanchard is_global_init(current)) { 24758154c8cSAnton Blanchard mdelay(MSEC_PER_SEC); 24858154c8cSAnton Blanchard } 24958154c8cSAnton Blanchard 250cea6a4baSHorms if (panic_on_oops) 251012c437dSHorms panic("Fatal exception"); 252760ca4dcSAnton Blanchard do_exit(signr); 253760ca4dcSAnton Blanchard } 25403465f89SNicholas Piggin NOKPROBE_SYMBOL(oops_end); 255cea6a4baSHorms 25603465f89SNicholas Piggin static int __die(const char *str, struct pt_regs *regs, long err) 257760ca4dcSAnton Blanchard { 258760ca4dcSAnton Blanchard printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); 2592e82ca3cSMichael Ellerman 26016842516SMichael Ellerman printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s%s %s\n", 26178227443SMichael Ellerman IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE", 26218405139SMichael Ellerman PAGE_SIZE / 1024, 26316842516SMichael Ellerman early_radix_enabled() ? " MMU=Radix" : "", 26416842516SMichael Ellerman early_mmu_has_feature(MMU_FTR_HPTE_TABLE) ? " MMU=Hash" : "", 26578227443SMichael Ellerman IS_ENABLED(CONFIG_PREEMPT) ? 
" PREEMPT" : "", 26678227443SMichael Ellerman IS_ENABLED(CONFIG_SMP) ? " SMP" : "", 26778227443SMichael Ellerman IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "", 26878227443SMichael Ellerman debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "", 26978227443SMichael Ellerman IS_ENABLED(CONFIG_NUMA) ? " NUMA" : "", 27078227443SMichael Ellerman ppc_md.name ? ppc_md.name : ""); 271760ca4dcSAnton Blanchard 272760ca4dcSAnton Blanchard if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP) 273760ca4dcSAnton Blanchard return 1; 274760ca4dcSAnton Blanchard 275760ca4dcSAnton Blanchard print_modules(); 276760ca4dcSAnton Blanchard show_regs(regs); 27714cf11afSPaul Mackerras 27814cf11afSPaul Mackerras return 0; 27914cf11afSPaul Mackerras } 28003465f89SNicholas Piggin NOKPROBE_SYMBOL(__die); 28114cf11afSPaul Mackerras 282760ca4dcSAnton Blanchard void die(const char *str, struct pt_regs *regs, long err) 283760ca4dcSAnton Blanchard { 2846f44b20eSNicholas Piggin unsigned long flags; 285760ca4dcSAnton Blanchard 286d40b6768SNicholas Piggin /* 287d40b6768SNicholas Piggin * system_reset_excption handles debugger, crash dump, panic, for 0x100 288d40b6768SNicholas Piggin */ 289d40b6768SNicholas Piggin if (TRAP(regs) != 0x100) { 2906f44b20eSNicholas Piggin if (debugger(regs)) 2916f44b20eSNicholas Piggin return; 292d40b6768SNicholas Piggin } 2936f44b20eSNicholas Piggin 2946f44b20eSNicholas Piggin flags = oops_begin(regs); 295760ca4dcSAnton Blanchard if (__die(str, regs, err)) 296760ca4dcSAnton Blanchard err = 0; 297760ca4dcSAnton Blanchard oops_end(flags, regs, err); 298760ca4dcSAnton Blanchard } 29915770a13SNaveen N. Rao NOKPROBE_SYMBOL(die); 300760ca4dcSAnton Blanchard 301efc463adSEric W. Biederman void user_single_step_report(struct pt_regs *regs) 30225baa35bSOleg Nesterov { 303efc463adSEric W. Biederman force_sig_fault(SIGTRAP, TRAP_TRACE, (void __user *)regs->nip, current); 30425baa35bSOleg Nesterov } 30525baa35bSOleg Nesterov 306658b0f92SMurilo Opsfelder Araujo static void show_signal_msg(int signr, struct pt_regs *regs, int code, 307658b0f92SMurilo Opsfelder Araujo unsigned long addr) 30814cf11afSPaul Mackerras { 309997dd26cSMichael Ellerman static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, 310997dd26cSMichael Ellerman DEFAULT_RATELIMIT_BURST); 311997dd26cSMichael Ellerman 312997dd26cSMichael Ellerman if (!show_unhandled_signals) 31335a52a10SMurilo Opsfelder Araujo return; 31435a52a10SMurilo Opsfelder Araujo 31535a52a10SMurilo Opsfelder Araujo if (!unhandled_signal(current, signr)) 31635a52a10SMurilo Opsfelder Araujo return; 31735a52a10SMurilo Opsfelder Araujo 318997dd26cSMichael Ellerman if (!__ratelimit(&rs)) 319997dd26cSMichael Ellerman return; 320997dd26cSMichael Ellerman 3210f642d61SMurilo Opsfelder Araujo pr_info("%s[%d]: %s (%d) at %lx nip %lx lr %lx code %x", 3220f642d61SMurilo Opsfelder Araujo current->comm, current->pid, signame(signr), signr, 323d0c3d534SOlof Johansson addr, regs->nip, regs->link, code); 3240f642d61SMurilo Opsfelder Araujo 3250f642d61SMurilo Opsfelder Araujo print_vma_addr(KERN_CONT " in ", regs->nip); 3260f642d61SMurilo Opsfelder Araujo 3270f642d61SMurilo Opsfelder Araujo pr_cont("\n"); 328a99b9c5eSMurilo Opsfelder Araujo 329a99b9c5eSMurilo Opsfelder Araujo show_user_instructions(regs); 33014cf11afSPaul Mackerras } 331658b0f92SMurilo Opsfelder Araujo 3322c44ce28SEric W. Biederman static bool exception_common(int signr, struct pt_regs *regs, int code, 3332c44ce28SEric W. 
Biederman unsigned long addr) 334658b0f92SMurilo Opsfelder Araujo { 335658b0f92SMurilo Opsfelder Araujo if (!user_mode(regs)) { 336658b0f92SMurilo Opsfelder Araujo die("Exception in kernel mode", regs, signr); 3372c44ce28SEric W. Biederman return false; 338658b0f92SMurilo Opsfelder Araujo } 339658b0f92SMurilo Opsfelder Araujo 340658b0f92SMurilo Opsfelder Araujo show_signal_msg(signr, regs, code, addr); 34114cf11afSPaul Mackerras 342a3512b2dSBenjamin Herrenschmidt if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs)) 3439f2f79e3SBenjamin Herrenschmidt local_irq_enable(); 3449f2f79e3SBenjamin Herrenschmidt 34541ab5266SAnanth N Mavinakayanahalli current->thread.trap_nr = code; 346c5cc1f4dSThiago Jung Bauermann 347c5cc1f4dSThiago Jung Bauermann /* 348c5cc1f4dSThiago Jung Bauermann * Save all the pkey registers AMR/IAMR/UAMOR. Eg: Core dumps need 349c5cc1f4dSThiago Jung Bauermann * to capture the content, if the task gets killed. 350c5cc1f4dSThiago Jung Bauermann */ 351c5cc1f4dSThiago Jung Bauermann thread_pkey_regs_save(¤t->thread); 352c5cc1f4dSThiago Jung Bauermann 3532c44ce28SEric W. Biederman return true; 3542c44ce28SEric W. Biederman } 3552c44ce28SEric W. Biederman 3565d8fb8a5SEric W. Biederman void _exception_pkey(struct pt_regs *regs, unsigned long addr, int key) 3572c44ce28SEric W. Biederman { 3585d8fb8a5SEric W. Biederman if (!exception_common(SIGSEGV, regs, SEGV_PKUERR, addr)) 3592c44ce28SEric W. Biederman return; 3602c44ce28SEric W. Biederman 36177c70728SEric W. Biederman force_sig_pkuerr((void __user *) addr, key); 36214cf11afSPaul Mackerras } 36314cf11afSPaul Mackerras 36499cd1302SRam Pai void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) 36599cd1302SRam Pai { 366c1c7c85cSEric W. Biederman if (!exception_common(signr, regs, code, addr)) 367c1c7c85cSEric W. Biederman return; 368c1c7c85cSEric W. Biederman 369c1c7c85cSEric W. Biederman force_sig_fault(signr, code, (void __user *)addr, current); 37099cd1302SRam Pai } 37199cd1302SRam Pai 372ccd47702SNicholas Piggin /* 373ccd47702SNicholas Piggin * The interrupt architecture has a quirk in that the HV interrupts excluding 374ccd47702SNicholas Piggin * the NMIs (0x100 and 0x200) do not clear MSR[RI] at entry. The first thing 375ccd47702SNicholas Piggin * that an interrupt handler must do is save off a GPR into a scratch register, 376ccd47702SNicholas Piggin * and all interrupts on POWERNV (HV=1) use the HSPRG1 register as scratch. 377ccd47702SNicholas Piggin * Therefore an NMI can clobber an HV interrupt's live HSPRG1 without noticing 378ccd47702SNicholas Piggin * that it is non-reentrant, which leads to random data corruption. 379ccd47702SNicholas Piggin * 380ccd47702SNicholas Piggin * The solution is for NMI interrupts in HV mode to check if they originated 381ccd47702SNicholas Piggin * from these critical HV interrupt regions. If so, then mark them not 382ccd47702SNicholas Piggin * recoverable. 383ccd47702SNicholas Piggin * 384ccd47702SNicholas Piggin * An alternative would be for HV NMIs to use SPRG for scratch to avoid the 385ccd47702SNicholas Piggin * HSPRG1 clobber, however this would cause guest SPRG to be clobbered. Linux 386ccd47702SNicholas Piggin * guests should always have MSR[RI]=0 when its scratch SPRG is in use, so 387ccd47702SNicholas Piggin * that would work. However any other guest OS that may have the SPRG live 388ccd47702SNicholas Piggin * and MSR[RI]=1 could encounter silent corruption. 
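 *
 * The code below takes the first option: an NMI whose NIP falls within one
 * of the known HSRR interrupt entry ranges (or the real/virt trampoline
 * ranges) has MSR[RI] cleared so it is treated as non-recoverable.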
389ccd47702SNicholas Piggin * 390ccd47702SNicholas Piggin * Builds that do not support KVM could take this second option to increase 391ccd47702SNicholas Piggin * the recoverability of NMIs. 392ccd47702SNicholas Piggin */ 393ccd47702SNicholas Piggin void hv_nmi_check_nonrecoverable(struct pt_regs *regs) 394ccd47702SNicholas Piggin { 395ccd47702SNicholas Piggin #ifdef CONFIG_PPC_POWERNV 396ccd47702SNicholas Piggin unsigned long kbase = (unsigned long)_stext; 397ccd47702SNicholas Piggin unsigned long nip = regs->nip; 398ccd47702SNicholas Piggin 399ccd47702SNicholas Piggin if (!(regs->msr & MSR_RI)) 400ccd47702SNicholas Piggin return; 401ccd47702SNicholas Piggin if (!(regs->msr & MSR_HV)) 402ccd47702SNicholas Piggin return; 403ccd47702SNicholas Piggin if (regs->msr & MSR_PR) 404ccd47702SNicholas Piggin return; 405ccd47702SNicholas Piggin 406ccd47702SNicholas Piggin /* 407ccd47702SNicholas Piggin * Now test if the interrupt has hit a range that may be using 408ccd47702SNicholas Piggin * HSPRG1 without having RI=0 (i.e., an HSRR interrupt). The 409ccd47702SNicholas Piggin * problem ranges all run un-relocated. Test real and virt modes 410ccd47702SNicholas Piggin * at the same time by dropping the high bit of the nip (virt mode 411ccd47702SNicholas Piggin * entry points still have the +0x4000 offset). 412ccd47702SNicholas Piggin */ 413ccd47702SNicholas Piggin nip &= ~0xc000000000000000ULL; 414ccd47702SNicholas Piggin if ((nip >= 0x500 && nip < 0x600) || (nip >= 0x4500 && nip < 0x4600)) 415ccd47702SNicholas Piggin goto nonrecoverable; 416ccd47702SNicholas Piggin if ((nip >= 0x980 && nip < 0xa00) || (nip >= 0x4980 && nip < 0x4a00)) 417ccd47702SNicholas Piggin goto nonrecoverable; 418ccd47702SNicholas Piggin if ((nip >= 0xe00 && nip < 0xec0) || (nip >= 0x4e00 && nip < 0x4ec0)) 419ccd47702SNicholas Piggin goto nonrecoverable; 420ccd47702SNicholas Piggin if ((nip >= 0xf80 && nip < 0xfa0) || (nip >= 0x4f80 && nip < 0x4fa0)) 421ccd47702SNicholas Piggin goto nonrecoverable; 422*bd3524feSNicholas Piggin 423ccd47702SNicholas Piggin /* Trampoline code runs un-relocated so subtract kbase. */ 424*bd3524feSNicholas Piggin if (nip >= (unsigned long)(start_real_trampolines - kbase) && 425*bd3524feSNicholas Piggin nip < (unsigned long)(end_real_trampolines - kbase)) 426ccd47702SNicholas Piggin goto nonrecoverable; 427*bd3524feSNicholas Piggin if (nip >= (unsigned long)(start_virt_trampolines - kbase) && 428*bd3524feSNicholas Piggin nip < (unsigned long)(end_virt_trampolines - kbase)) 429ccd47702SNicholas Piggin goto nonrecoverable; 430ccd47702SNicholas Piggin return; 431ccd47702SNicholas Piggin 432ccd47702SNicholas Piggin nonrecoverable: 433ccd47702SNicholas Piggin regs->msr &= ~MSR_RI; 434ccd47702SNicholas Piggin #endif 435ccd47702SNicholas Piggin } 436ccd47702SNicholas Piggin 43714cf11afSPaul Mackerras void system_reset_exception(struct pt_regs *regs) 43814cf11afSPaul Mackerras { 439cbf2ba95SNicholas Piggin unsigned long hsrr0, hsrr1; 440cbf2ba95SNicholas Piggin bool nested = in_nmi(); 441cbf2ba95SNicholas Piggin bool saved_hsrrs = false; 442cbf2ba95SNicholas Piggin 4432b4f3ac5SNicholas Piggin /* 4442b4f3ac5SNicholas Piggin * Avoid crashes in case of nested NMI exceptions.
Recoverability 4452b4f3ac5SNicholas Piggin * is determined by RI and in_nmi 4462b4f3ac5SNicholas Piggin */ 4472b4f3ac5SNicholas Piggin if (!nested) 4482b4f3ac5SNicholas Piggin nmi_enter(); 4492b4f3ac5SNicholas Piggin 450cbf2ba95SNicholas Piggin /* 451cbf2ba95SNicholas Piggin * System reset can interrupt code where HSRRs are live and MSR[RI]=1. 452cbf2ba95SNicholas Piggin * The system reset interrupt itself may clobber HSRRs (e.g., to call 453cbf2ba95SNicholas Piggin * OPAL), so save them here and restore them before returning. 454cbf2ba95SNicholas Piggin * 455cbf2ba95SNicholas Piggin * Machine checks don't need to save HSRRs, as the real mode handler 456cbf2ba95SNicholas Piggin * is careful to avoid them, and the regular handler is not delivered 457cbf2ba95SNicholas Piggin * as an NMI. 458cbf2ba95SNicholas Piggin */ 459cbf2ba95SNicholas Piggin if (cpu_has_feature(CPU_FTR_HVMODE)) { 460cbf2ba95SNicholas Piggin hsrr0 = mfspr(SPRN_HSRR0); 461cbf2ba95SNicholas Piggin hsrr1 = mfspr(SPRN_HSRR1); 462cbf2ba95SNicholas Piggin saved_hsrrs = true; 463cbf2ba95SNicholas Piggin } 464cbf2ba95SNicholas Piggin 465ccd47702SNicholas Piggin hv_nmi_check_nonrecoverable(regs); 466ccd47702SNicholas Piggin 467ca41ad43SNicholas Piggin __this_cpu_inc(irq_stat.sreset_irqs); 468ca41ad43SNicholas Piggin 46914cf11afSPaul Mackerras /* See if any machine dependent calls */ 470c902be71SArnd Bergmann if (ppc_md.system_reset_exception) { 471c902be71SArnd Bergmann if (ppc_md.system_reset_exception(regs)) 472c4f3b52cSNicholas Piggin goto out; 473c902be71SArnd Bergmann } 47414cf11afSPaul Mackerras 4754388c9b3SNicholas Piggin if (debugger(regs)) 4764388c9b3SNicholas Piggin goto out; 4774388c9b3SNicholas Piggin 4784388c9b3SNicholas Piggin /* 4794388c9b3SNicholas Piggin * A system reset is a request to dump, so we always send 4804388c9b3SNicholas Piggin * it through the crashdump code (if fadump or kdump are 4814388c9b3SNicholas Piggin * registered). 4824388c9b3SNicholas Piggin */ 4834388c9b3SNicholas Piggin crash_fadump(regs, "System Reset"); 4844388c9b3SNicholas Piggin 4854388c9b3SNicholas Piggin crash_kexec(regs); 4864388c9b3SNicholas Piggin 4874388c9b3SNicholas Piggin /* 4884388c9b3SNicholas Piggin * We aren't the primary crash CPU. We need to send it 4894388c9b3SNicholas Piggin * to a holding pattern to avoid it ending up in the panic 4904388c9b3SNicholas Piggin * code. 4914388c9b3SNicholas Piggin */ 4924388c9b3SNicholas Piggin crash_kexec_secondary(regs); 4934388c9b3SNicholas Piggin 4944388c9b3SNicholas Piggin /* 4954388c9b3SNicholas Piggin * No debugger or crash dump registered, print logs then 4964388c9b3SNicholas Piggin * panic. 
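 *
 * die() prints the register state, the mdelay() that follows gives other
 * CPUs a chance to finish printing, and nmi_panic() then takes the
 * system down.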
4974388c9b3SNicholas Piggin */ 4984552d128SNicholas Piggin die("System Reset", regs, SIGABRT); 4994388c9b3SNicholas Piggin 5004388c9b3SNicholas Piggin mdelay(2*MSEC_PER_SEC); /* Wait a little while for others to print */ 5014388c9b3SNicholas Piggin add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); 5024388c9b3SNicholas Piggin nmi_panic(regs, "System Reset"); 50314cf11afSPaul Mackerras 504c4f3b52cSNicholas Piggin out: 505c4f3b52cSNicholas Piggin #ifdef CONFIG_PPC_BOOK3S_64 506c4f3b52cSNicholas Piggin BUG_ON(get_paca()->in_nmi == 0); 507c4f3b52cSNicholas Piggin if (get_paca()->in_nmi > 1) 5084388c9b3SNicholas Piggin nmi_panic(regs, "Unrecoverable nested System Reset"); 509c4f3b52cSNicholas Piggin #endif 51014cf11afSPaul Mackerras /* Must die if the interrupt is not recoverable */ 51114cf11afSPaul Mackerras if (!(regs->msr & MSR_RI)) 5124388c9b3SNicholas Piggin nmi_panic(regs, "Unrecoverable System Reset"); 51314cf11afSPaul Mackerras 514cbf2ba95SNicholas Piggin if (saved_hsrrs) { 515cbf2ba95SNicholas Piggin mtspr(SPRN_HSRR0, hsrr0); 516cbf2ba95SNicholas Piggin mtspr(SPRN_HSRR1, hsrr1); 517cbf2ba95SNicholas Piggin } 518cbf2ba95SNicholas Piggin 5192b4f3ac5SNicholas Piggin if (!nested) 5202b4f3ac5SNicholas Piggin nmi_exit(); 5212b4f3ac5SNicholas Piggin 52214cf11afSPaul Mackerras /* What should we do here? We could issue a shutdown or hard reset. */ 52314cf11afSPaul Mackerras } 5241e9b4507SMahesh Salgaonkar 52514cf11afSPaul Mackerras /* 52614cf11afSPaul Mackerras * I/O accesses can cause machine checks on powermacs. 52714cf11afSPaul Mackerras * Check if the NIP corresponds to the address of a sync 52814cf11afSPaul Mackerras * instruction for which there is an entry in the exception 52914cf11afSPaul Mackerras * table. 53014cf11afSPaul Mackerras * Note that the 601 only takes a machine check on TEA 53114cf11afSPaul Mackerras * (transfer error ack) signal assertion, and does not 53214cf11afSPaul Mackerras * set any of the top 16 bits of SRR1. 53314cf11afSPaul Mackerras * -- paulus. 53414cf11afSPaul Mackerras */ 53514cf11afSPaul Mackerras static inline int check_io_access(struct pt_regs *regs) 53614cf11afSPaul Mackerras { 53768a64357SBenjamin Herrenschmidt #ifdef CONFIG_PPC32 53814cf11afSPaul Mackerras unsigned long msr = regs->msr; 53914cf11afSPaul Mackerras const struct exception_table_entry *entry; 54014cf11afSPaul Mackerras unsigned int *nip = (unsigned int *)regs->nip; 54114cf11afSPaul Mackerras 54214cf11afSPaul Mackerras if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000))) 54314cf11afSPaul Mackerras && (entry = search_exception_tables(regs->nip)) != NULL) { 54414cf11afSPaul Mackerras /* 54514cf11afSPaul Mackerras * Check that it's a sync instruction, or somewhere 54614cf11afSPaul Mackerras * in the twi; isync; nop sequence that inb/inw/inl uses. 54714cf11afSPaul Mackerras * As the address is in the exception table 54814cf11afSPaul Mackerras * we should be able to read the instr there. 54914cf11afSPaul Mackerras * For the debug message, we look at the preceding 55014cf11afSPaul Mackerras * load or store. 
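 *
 * The faulting NIP may point at the nop, the isync, or the trap/sync
 * itself, so the code below steps nip backwards until it reaches the
 * access and then decodes its RB field to print the offending port.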
55114cf11afSPaul Mackerras */ 552ddc6cd0dSChristophe Leroy if (*nip == PPC_INST_NOP) 55314cf11afSPaul Mackerras nip -= 2; 554ddc6cd0dSChristophe Leroy else if (*nip == PPC_INST_ISYNC) 55514cf11afSPaul Mackerras --nip; 556ddc6cd0dSChristophe Leroy if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) { 55714cf11afSPaul Mackerras unsigned int rb; 55814cf11afSPaul Mackerras 55914cf11afSPaul Mackerras --nip; 56014cf11afSPaul Mackerras rb = (*nip >> 11) & 0x1f; 56114cf11afSPaul Mackerras printk(KERN_DEBUG "%s bad port %lx at %p\n", 56214cf11afSPaul Mackerras (*nip & 0x100)? "OUT to": "IN from", 56314cf11afSPaul Mackerras regs->gpr[rb] - _IO_BASE, nip); 56414cf11afSPaul Mackerras regs->msr |= MSR_RI; 56561a92f70SNicholas Piggin regs->nip = extable_fixup(entry); 56614cf11afSPaul Mackerras return 1; 56714cf11afSPaul Mackerras } 56814cf11afSPaul Mackerras } 56968a64357SBenjamin Herrenschmidt #endif /* CONFIG_PPC32 */ 57014cf11afSPaul Mackerras return 0; 57114cf11afSPaul Mackerras } 57214cf11afSPaul Mackerras 573172ae2e7SDave Kleikamp #ifdef CONFIG_PPC_ADV_DEBUG_REGS 57414cf11afSPaul Mackerras /* On 4xx, the reason for the machine check or program exception 57514cf11afSPaul Mackerras is in the ESR. */ 57614cf11afSPaul Mackerras #define get_reason(regs) ((regs)->dsisr) 57714cf11afSPaul Mackerras #define REASON_FP ESR_FP 57814cf11afSPaul Mackerras #define REASON_ILLEGAL (ESR_PIL | ESR_PUO) 57914cf11afSPaul Mackerras #define REASON_PRIVILEGED ESR_PPR 58014cf11afSPaul Mackerras #define REASON_TRAP ESR_PTR 58114cf11afSPaul Mackerras 58214cf11afSPaul Mackerras /* single-step stuff */ 58351ae8d4aSBharat Bhushan #define single_stepping(regs) (current->thread.debug.dbcr0 & DBCR0_IC) 58451ae8d4aSBharat Bhushan #define clear_single_step(regs) (current->thread.debug.dbcr0 &= ~DBCR0_IC) 5850e524e76SMatt Evans #define clear_br_trace(regs) do {} while(0) 58614cf11afSPaul Mackerras #else 58714cf11afSPaul Mackerras /* On non-4xx, the reason for the machine check or program 58814cf11afSPaul Mackerras exception is in the MSR. 
*/ 58914cf11afSPaul Mackerras #define get_reason(regs) ((regs)->msr) 590d30a5a52SMichael Ellerman #define REASON_TM SRR1_PROGTM 591d30a5a52SMichael Ellerman #define REASON_FP SRR1_PROGFPE 592d30a5a52SMichael Ellerman #define REASON_ILLEGAL SRR1_PROGILL 593d30a5a52SMichael Ellerman #define REASON_PRIVILEGED SRR1_PROGPRIV 594d30a5a52SMichael Ellerman #define REASON_TRAP SRR1_PROGTRAP 59514cf11afSPaul Mackerras 59614cf11afSPaul Mackerras #define single_stepping(regs) ((regs)->msr & MSR_SE) 59714cf11afSPaul Mackerras #define clear_single_step(regs) ((regs)->msr &= ~MSR_SE) 5980e524e76SMatt Evans #define clear_br_trace(regs) ((regs)->msr &= ~MSR_BE) 59914cf11afSPaul Mackerras #endif 60014cf11afSPaul Mackerras 6010d0935b3SMichael Ellerman #if defined(CONFIG_E500) 602fe04b112SScott Wood int machine_check_e500mc(struct pt_regs *regs) 603fe04b112SScott Wood { 604fe04b112SScott Wood unsigned long mcsr = mfspr(SPRN_MCSR); 605a4e89ffbSMatt Weber unsigned long pvr = mfspr(SPRN_PVR); 606fe04b112SScott Wood unsigned long reason = mcsr; 607fe04b112SScott Wood int recoverable = 1; 608fe04b112SScott Wood 60982a9a480SScott Wood if (reason & MCSR_LD) { 610cce1f106SShaohui Xie recoverable = fsl_rio_mcheck_exception(regs); 611cce1f106SShaohui Xie if (recoverable == 1) 612cce1f106SShaohui Xie goto silent_out; 613cce1f106SShaohui Xie } 614cce1f106SShaohui Xie 615fe04b112SScott Wood printk("Machine check in kernel mode.\n"); 616fe04b112SScott Wood printk("Caused by (from MCSR=%lx): ", reason); 617fe04b112SScott Wood 618fe04b112SScott Wood if (reason & MCSR_MCP) 619422123ccSChristophe Leroy pr_cont("Machine Check Signal\n"); 620fe04b112SScott Wood 621fe04b112SScott Wood if (reason & MCSR_ICPERR) { 622422123ccSChristophe Leroy pr_cont("Instruction Cache Parity Error\n"); 623fe04b112SScott Wood 624fe04b112SScott Wood /* 625fe04b112SScott Wood * This is recoverable by invalidating the i-cache. 626fe04b112SScott Wood */ 627fe04b112SScott Wood mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI); 628fe04b112SScott Wood while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI) 629fe04b112SScott Wood ; 630fe04b112SScott Wood 631fe04b112SScott Wood /* 632fe04b112SScott Wood * This will generally be accompanied by an instruction 633fe04b112SScott Wood * fetch error report -- only treat MCSR_IF as fatal 634fe04b112SScott Wood * if it wasn't due to an L1 parity error. 635fe04b112SScott Wood */ 636fe04b112SScott Wood reason &= ~MCSR_IF; 637fe04b112SScott Wood } 638fe04b112SScott Wood 639fe04b112SScott Wood if (reason & MCSR_DCPERR_MC) { 640422123ccSChristophe Leroy pr_cont("Data Cache Parity Error\n"); 64137caf9f2SKumar Gala 64237caf9f2SKumar Gala /* 64337caf9f2SKumar Gala * In write shadow mode we auto-recover from the error, but it 64437caf9f2SKumar Gala * may still get logged and cause a machine check. We should 64537caf9f2SKumar Gala * only treat the non-write shadow case as non-recoverable. 64637caf9f2SKumar Gala */ 647a4e89ffbSMatt Weber /* On e6500 core, L1 DCWS (Data cache write shadow mode) bit 648a4e89ffbSMatt Weber * is not implemented but L1 data cache always runs in write 649a4e89ffbSMatt Weber * shadow mode. Hence on data cache parity errors HW will 650a4e89ffbSMatt Weber * automatically invalidate the L1 Data Cache. 
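 * The PVR check below therefore skips the L1CSR2[DCWS] test on e6500
 * and leaves 'recoverable' untouched.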
651a4e89ffbSMatt Weber */ 652a4e89ffbSMatt Weber if (PVR_VER(pvr) != PVR_VER_E6500) { 65337caf9f2SKumar Gala if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS)) 654fe04b112SScott Wood recoverable = 0; 655fe04b112SScott Wood } 656a4e89ffbSMatt Weber } 657fe04b112SScott Wood 658fe04b112SScott Wood if (reason & MCSR_L2MMU_MHIT) { 659422123ccSChristophe Leroy pr_cont("Hit on multiple TLB entries\n"); 660fe04b112SScott Wood recoverable = 0; 661fe04b112SScott Wood } 662fe04b112SScott Wood 663fe04b112SScott Wood if (reason & MCSR_NMI) 664422123ccSChristophe Leroy pr_cont("Non-maskable interrupt\n"); 665fe04b112SScott Wood 666fe04b112SScott Wood if (reason & MCSR_IF) { 667422123ccSChristophe Leroy pr_cont("Instruction Fetch Error Report\n"); 668fe04b112SScott Wood recoverable = 0; 669fe04b112SScott Wood } 670fe04b112SScott Wood 671fe04b112SScott Wood if (reason & MCSR_LD) { 672422123ccSChristophe Leroy pr_cont("Load Error Report\n"); 673fe04b112SScott Wood recoverable = 0; 674fe04b112SScott Wood } 675fe04b112SScott Wood 676fe04b112SScott Wood if (reason & MCSR_ST) { 677422123ccSChristophe Leroy pr_cont("Store Error Report\n"); 678fe04b112SScott Wood recoverable = 0; 679fe04b112SScott Wood } 680fe04b112SScott Wood 681fe04b112SScott Wood if (reason & MCSR_LDG) { 682422123ccSChristophe Leroy pr_cont("Guarded Load Error Report\n"); 683fe04b112SScott Wood recoverable = 0; 684fe04b112SScott Wood } 685fe04b112SScott Wood 686fe04b112SScott Wood if (reason & MCSR_TLBSYNC) 687422123ccSChristophe Leroy pr_cont("Simultaneous tlbsync operations\n"); 688fe04b112SScott Wood 689fe04b112SScott Wood if (reason & MCSR_BSL2_ERR) { 690422123ccSChristophe Leroy pr_cont("Level 2 Cache Error\n"); 691fe04b112SScott Wood recoverable = 0; 692fe04b112SScott Wood } 693fe04b112SScott Wood 694fe04b112SScott Wood if (reason & MCSR_MAV) { 695fe04b112SScott Wood u64 addr; 696fe04b112SScott Wood 697fe04b112SScott Wood addr = mfspr(SPRN_MCAR); 698fe04b112SScott Wood addr |= (u64)mfspr(SPRN_MCARU) << 32; 699fe04b112SScott Wood 700422123ccSChristophe Leroy pr_cont("Machine Check %s Address: %#llx\n", 701fe04b112SScott Wood reason & MCSR_MEA ? 
"Effective" : "Physical", addr); 702fe04b112SScott Wood } 703fe04b112SScott Wood 704cce1f106SShaohui Xie silent_out: 705fe04b112SScott Wood mtspr(SPRN_MCSR, mcsr); 706fe04b112SScott Wood return mfspr(SPRN_MCSR) == 0 && recoverable; 707fe04b112SScott Wood } 708fe04b112SScott Wood 70947c0bd1aSBenjamin Herrenschmidt int machine_check_e500(struct pt_regs *regs) 71047c0bd1aSBenjamin Herrenschmidt { 71142bff234SMichael Ellerman unsigned long reason = mfspr(SPRN_MCSR); 71247c0bd1aSBenjamin Herrenschmidt 713cce1f106SShaohui Xie if (reason & MCSR_BUS_RBERR) { 714cce1f106SShaohui Xie if (fsl_rio_mcheck_exception(regs)) 715cce1f106SShaohui Xie return 1; 7164e0e3435SHongtao Jia if (fsl_pci_mcheck_exception(regs)) 7174e0e3435SHongtao Jia return 1; 718cce1f106SShaohui Xie } 719cce1f106SShaohui Xie 72014cf11afSPaul Mackerras printk("Machine check in kernel mode.\n"); 72114cf11afSPaul Mackerras printk("Caused by (from MCSR=%lx): ", reason); 72214cf11afSPaul Mackerras 72314cf11afSPaul Mackerras if (reason & MCSR_MCP) 724422123ccSChristophe Leroy pr_cont("Machine Check Signal\n"); 72514cf11afSPaul Mackerras if (reason & MCSR_ICPERR) 726422123ccSChristophe Leroy pr_cont("Instruction Cache Parity Error\n"); 72714cf11afSPaul Mackerras if (reason & MCSR_DCP_PERR) 728422123ccSChristophe Leroy pr_cont("Data Cache Push Parity Error\n"); 72914cf11afSPaul Mackerras if (reason & MCSR_DCPERR) 730422123ccSChristophe Leroy pr_cont("Data Cache Parity Error\n"); 73114cf11afSPaul Mackerras if (reason & MCSR_BUS_IAERR) 732422123ccSChristophe Leroy pr_cont("Bus - Instruction Address Error\n"); 73314cf11afSPaul Mackerras if (reason & MCSR_BUS_RAERR) 734422123ccSChristophe Leroy pr_cont("Bus - Read Address Error\n"); 73514cf11afSPaul Mackerras if (reason & MCSR_BUS_WAERR) 736422123ccSChristophe Leroy pr_cont("Bus - Write Address Error\n"); 73714cf11afSPaul Mackerras if (reason & MCSR_BUS_IBERR) 738422123ccSChristophe Leroy pr_cont("Bus - Instruction Data Error\n"); 73914cf11afSPaul Mackerras if (reason & MCSR_BUS_RBERR) 740422123ccSChristophe Leroy pr_cont("Bus - Read Data Bus Error\n"); 74114cf11afSPaul Mackerras if (reason & MCSR_BUS_WBERR) 742422123ccSChristophe Leroy pr_cont("Bus - Write Data Bus Error\n"); 74314cf11afSPaul Mackerras if (reason & MCSR_BUS_IPERR) 744422123ccSChristophe Leroy pr_cont("Bus - Instruction Parity Error\n"); 74514cf11afSPaul Mackerras if (reason & MCSR_BUS_RPERR) 746422123ccSChristophe Leroy pr_cont("Bus - Read Parity Error\n"); 74747c0bd1aSBenjamin Herrenschmidt 74847c0bd1aSBenjamin Herrenschmidt return 0; 74947c0bd1aSBenjamin Herrenschmidt } 7504490c06bSKumar Gala 7514490c06bSKumar Gala int machine_check_generic(struct pt_regs *regs) 7524490c06bSKumar Gala { 7534490c06bSKumar Gala return 0; 7544490c06bSKumar Gala } 75514cf11afSPaul Mackerras #elif defined(CONFIG_E200) 75647c0bd1aSBenjamin Herrenschmidt int machine_check_e200(struct pt_regs *regs) 75747c0bd1aSBenjamin Herrenschmidt { 75842bff234SMichael Ellerman unsigned long reason = mfspr(SPRN_MCSR); 75947c0bd1aSBenjamin Herrenschmidt 76014cf11afSPaul Mackerras printk("Machine check in kernel mode.\n"); 76114cf11afSPaul Mackerras printk("Caused by (from MCSR=%lx): ", reason); 76214cf11afSPaul Mackerras 76314cf11afSPaul Mackerras if (reason & MCSR_MCP) 764422123ccSChristophe Leroy pr_cont("Machine Check Signal\n"); 76514cf11afSPaul Mackerras if (reason & MCSR_CP_PERR) 766422123ccSChristophe Leroy pr_cont("Cache Push Parity Error\n"); 76714cf11afSPaul Mackerras if (reason & MCSR_CPERR) 768422123ccSChristophe Leroy pr_cont("Cache Parity 
Error\n"); 76914cf11afSPaul Mackerras if (reason & MCSR_EXCP_ERR) 770422123ccSChristophe Leroy pr_cont("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n"); 77114cf11afSPaul Mackerras if (reason & MCSR_BUS_IRERR) 772422123ccSChristophe Leroy pr_cont("Bus - Read Bus Error on instruction fetch\n"); 77314cf11afSPaul Mackerras if (reason & MCSR_BUS_DRERR) 774422123ccSChristophe Leroy pr_cont("Bus - Read Bus Error on data load\n"); 77514cf11afSPaul Mackerras if (reason & MCSR_BUS_WRERR) 776422123ccSChristophe Leroy pr_cont("Bus - Write Bus Error on buffered store or cache line push\n"); 77747c0bd1aSBenjamin Herrenschmidt 77847c0bd1aSBenjamin Herrenschmidt return 0; 77947c0bd1aSBenjamin Herrenschmidt } 7807f3f819eSMichael Ellerman #elif defined(CONFIG_PPC32) 78147c0bd1aSBenjamin Herrenschmidt int machine_check_generic(struct pt_regs *regs) 78247c0bd1aSBenjamin Herrenschmidt { 78342bff234SMichael Ellerman unsigned long reason = regs->msr; 78447c0bd1aSBenjamin Herrenschmidt 78514cf11afSPaul Mackerras printk("Machine check in kernel mode.\n"); 78614cf11afSPaul Mackerras printk("Caused by (from SRR1=%lx): ", reason); 78714cf11afSPaul Mackerras switch (reason & 0x601F0000) { 78814cf11afSPaul Mackerras case 0x80000: 789422123ccSChristophe Leroy pr_cont("Machine check signal\n"); 79014cf11afSPaul Mackerras break; 79114cf11afSPaul Mackerras case 0: /* for 601 */ 79214cf11afSPaul Mackerras case 0x40000: 79314cf11afSPaul Mackerras case 0x140000: /* 7450 MSS error and TEA */ 794422123ccSChristophe Leroy pr_cont("Transfer error ack signal\n"); 79514cf11afSPaul Mackerras break; 79614cf11afSPaul Mackerras case 0x20000: 797422123ccSChristophe Leroy pr_cont("Data parity error signal\n"); 79814cf11afSPaul Mackerras break; 79914cf11afSPaul Mackerras case 0x10000: 800422123ccSChristophe Leroy pr_cont("Address parity error signal\n"); 80114cf11afSPaul Mackerras break; 80214cf11afSPaul Mackerras case 0x20000000: 803422123ccSChristophe Leroy pr_cont("L1 Data Cache error\n"); 80414cf11afSPaul Mackerras break; 80514cf11afSPaul Mackerras case 0x40000000: 806422123ccSChristophe Leroy pr_cont("L1 Instruction Cache error\n"); 80714cf11afSPaul Mackerras break; 80814cf11afSPaul Mackerras case 0x00100000: 809422123ccSChristophe Leroy pr_cont("L2 data cache parity error\n"); 81014cf11afSPaul Mackerras break; 81114cf11afSPaul Mackerras default: 812422123ccSChristophe Leroy pr_cont("Unknown values in msr\n"); 81314cf11afSPaul Mackerras } 81475918a4bSOlof Johansson return 0; 81575918a4bSOlof Johansson } 81647c0bd1aSBenjamin Herrenschmidt #endif /* everything else */ 81775918a4bSOlof Johansson 81875918a4bSOlof Johansson void machine_check_exception(struct pt_regs *regs) 81975918a4bSOlof Johansson { 82075918a4bSOlof Johansson int recover = 0; 821b96672ddSNicholas Piggin bool nested = in_nmi(); 822b96672ddSNicholas Piggin if (!nested) 823b96672ddSNicholas Piggin nmi_enter(); 82475918a4bSOlof Johansson 82569111bacSChristoph Lameter __this_cpu_inc(irq_stat.mce_exceptions); 82689713ed1SAnton Blanchard 827d93b0ac0SMahesh Salgaonkar add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); 828d93b0ac0SMahesh Salgaonkar 82947c0bd1aSBenjamin Herrenschmidt /* See if any machine dependent calls. In theory, we would want 83047c0bd1aSBenjamin Herrenschmidt * to call the CPU first, and call the ppc_md. one if the CPU 83147c0bd1aSBenjamin Herrenschmidt * one returns a positive number. 
However there is existing code 83247c0bd1aSBenjamin Herrenschmidt * that assumes the board gets a first chance, so let's keep it 83347c0bd1aSBenjamin Herrenschmidt * that way for now and fix things later. --BenH. 83447c0bd1aSBenjamin Herrenschmidt */ 83575918a4bSOlof Johansson if (ppc_md.machine_check_exception) 83675918a4bSOlof Johansson recover = ppc_md.machine_check_exception(regs); 83747c0bd1aSBenjamin Herrenschmidt else if (cur_cpu_spec->machine_check) 83847c0bd1aSBenjamin Herrenschmidt recover = cur_cpu_spec->machine_check(regs); 83975918a4bSOlof Johansson 84047c0bd1aSBenjamin Herrenschmidt if (recover > 0) 841ba12eedeSLi Zhong goto bail; 84275918a4bSOlof Johansson 843a443506bSAnton Blanchard if (debugger_fault_handler(regs)) 844ba12eedeSLi Zhong goto bail; 84575918a4bSOlof Johansson 84675918a4bSOlof Johansson if (check_io_access(regs)) 847ba12eedeSLi Zhong goto bail; 84875918a4bSOlof Johansson 849daf00ae7SChristophe Leroy if (!nested) 850daf00ae7SChristophe Leroy nmi_exit(); 851daf00ae7SChristophe Leroy 852daf00ae7SChristophe Leroy die("Machine check", regs, SIGBUS); 853daf00ae7SChristophe Leroy 8540bbea75cSChristophe Leroy /* Must die if the interrupt is not recoverable */ 8550bbea75cSChristophe Leroy if (!(regs->msr & MSR_RI)) 8560bbea75cSChristophe Leroy nmi_panic(regs, "Unrecoverable Machine check"); 8570bbea75cSChristophe Leroy 858daf00ae7SChristophe Leroy return; 859daf00ae7SChristophe Leroy 860ba12eedeSLi Zhong bail: 861b96672ddSNicholas Piggin if (!nested) 862b96672ddSNicholas Piggin nmi_exit(); 86314cf11afSPaul Mackerras } 86414cf11afSPaul Mackerras 86514cf11afSPaul Mackerras void SMIException(struct pt_regs *regs) 86614cf11afSPaul Mackerras { 86714cf11afSPaul Mackerras die("System Management Interrupt", regs, SIGABRT); 86814cf11afSPaul Mackerras } 86914cf11afSPaul Mackerras 8705080332cSMichael Neuling #ifdef CONFIG_VSX 8715080332cSMichael Neuling static void p9_hmi_special_emu(struct pt_regs *regs) 8725080332cSMichael Neuling { 8735080332cSMichael Neuling unsigned int ra, rb, t, i, sel, instr, rc; 8745080332cSMichael Neuling const void __user *addr; 8755080332cSMichael Neuling u8 vbuf[16], *vdst; 8765080332cSMichael Neuling unsigned long ea, msr, msr_mask; 8775080332cSMichael Neuling bool swap; 8785080332cSMichael Neuling 8795080332cSMichael Neuling if (__get_user_inatomic(instr, (unsigned int __user *)regs->nip)) 8805080332cSMichael Neuling return; 8815080332cSMichael Neuling 8825080332cSMichael Neuling /* 8835080332cSMichael Neuling * lxvb16x opcode: 0x7c0006d8 8845080332cSMichael Neuling * lxvd2x opcode: 0x7c000698 8855080332cSMichael Neuling * lxvh8x opcode: 0x7c000658 8865080332cSMichael Neuling * lxvw4x opcode: 0x7c000618 8875080332cSMichael Neuling */ 8885080332cSMichael Neuling if ((instr & 0xfc00073e) != 0x7c000618) { 8895080332cSMichael Neuling pr_devel("HMI vec emu: not vector CI %i:%s[%d] nip=%016lx" 8905080332cSMichael Neuling " instr=%08x\n", 8915080332cSMichael Neuling smp_processor_id(), current->comm, current->pid, 8925080332cSMichael Neuling regs->nip, instr); 8935080332cSMichael Neuling return; 8945080332cSMichael Neuling } 8955080332cSMichael Neuling 8965080332cSMichael Neuling /* Grab vector registers into the task struct */ 8975080332cSMichael Neuling msr = regs->msr; /* Grab msr before we flush the bits */ 8985080332cSMichael Neuling flush_vsx_to_thread(current); 8995080332cSMichael Neuling enable_kernel_altivec(); 9005080332cSMichael Neuling 9015080332cSMichael Neuling /* 9025080332cSMichael Neuling * Is userspace running with a different endian 
(this is rare but 9035080332cSMichael Neuling * not impossible) 9045080332cSMichael Neuling */ 9055080332cSMichael Neuling swap = (msr & MSR_LE) != (MSR_KERNEL & MSR_LE); 9065080332cSMichael Neuling 9075080332cSMichael Neuling /* Decode the instruction */ 9085080332cSMichael Neuling ra = (instr >> 16) & 0x1f; 9095080332cSMichael Neuling rb = (instr >> 11) & 0x1f; 9105080332cSMichael Neuling t = (instr >> 21) & 0x1f; 9115080332cSMichael Neuling if (instr & 1) 9125080332cSMichael Neuling vdst = (u8 *)¤t->thread.vr_state.vr[t]; 9135080332cSMichael Neuling else 9145080332cSMichael Neuling vdst = (u8 *)¤t->thread.fp_state.fpr[t][0]; 9155080332cSMichael Neuling 9165080332cSMichael Neuling /* Grab the vector address */ 9175080332cSMichael Neuling ea = regs->gpr[rb] + (ra ? regs->gpr[ra] : 0); 9185080332cSMichael Neuling if (is_32bit_task()) 9195080332cSMichael Neuling ea &= 0xfffffffful; 9205080332cSMichael Neuling addr = (__force const void __user *)ea; 9215080332cSMichael Neuling 9225080332cSMichael Neuling /* Check it */ 92396d4f267SLinus Torvalds if (!access_ok(addr, 16)) { 9245080332cSMichael Neuling pr_devel("HMI vec emu: bad access %i:%s[%d] nip=%016lx" 9255080332cSMichael Neuling " instr=%08x addr=%016lx\n", 9265080332cSMichael Neuling smp_processor_id(), current->comm, current->pid, 9275080332cSMichael Neuling regs->nip, instr, (unsigned long)addr); 9285080332cSMichael Neuling return; 9295080332cSMichael Neuling } 9305080332cSMichael Neuling 9315080332cSMichael Neuling /* Read the vector */ 9325080332cSMichael Neuling rc = 0; 9335080332cSMichael Neuling if ((unsigned long)addr & 0xfUL) 9345080332cSMichael Neuling /* unaligned case */ 9355080332cSMichael Neuling rc = __copy_from_user_inatomic(vbuf, addr, 16); 9365080332cSMichael Neuling else 9375080332cSMichael Neuling __get_user_atomic_128_aligned(vbuf, addr, rc); 9385080332cSMichael Neuling if (rc) { 9395080332cSMichael Neuling pr_devel("HMI vec emu: page fault %i:%s[%d] nip=%016lx" 9405080332cSMichael Neuling " instr=%08x addr=%016lx\n", 9415080332cSMichael Neuling smp_processor_id(), current->comm, current->pid, 9425080332cSMichael Neuling regs->nip, instr, (unsigned long)addr); 9435080332cSMichael Neuling return; 9445080332cSMichael Neuling } 9455080332cSMichael Neuling 9465080332cSMichael Neuling pr_devel("HMI vec emu: emulated vector CI %i:%s[%d] nip=%016lx" 9475080332cSMichael Neuling " instr=%08x addr=%016lx\n", 9485080332cSMichael Neuling smp_processor_id(), current->comm, current->pid, regs->nip, 9495080332cSMichael Neuling instr, (unsigned long) addr); 9505080332cSMichael Neuling 9515080332cSMichael Neuling /* Grab instruction "selector" */ 9525080332cSMichael Neuling sel = (instr >> 6) & 3; 9535080332cSMichael Neuling 9545080332cSMichael Neuling /* 9555080332cSMichael Neuling * Check to make sure the facility is actually enabled. This 9565080332cSMichael Neuling * could happen if we get a false positive hit. 
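 * In that case we return without advancing NIP, so the interrupted load
 * is replayed rather than emulated here.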
9575080332cSMichael Neuling * 9585080332cSMichael Neuling * lxvd2x/lxvw4x always check MSR VSX sel = 0,2 9595080332cSMichael Neuling * lxvh8x/lxvb16x check MSR VSX or VEC depending on VSR used sel = 1,3 9605080332cSMichael Neuling */ 9615080332cSMichael Neuling msr_mask = MSR_VSX; 9625080332cSMichael Neuling if ((sel & 1) && (instr & 1)) /* lxvh8x & lxvb16x + VSR >= 32 */ 9635080332cSMichael Neuling msr_mask = MSR_VEC; 9645080332cSMichael Neuling if (!(msr & msr_mask)) { 9655080332cSMichael Neuling pr_devel("HMI vec emu: MSR fac clear %i:%s[%d] nip=%016lx" 9665080332cSMichael Neuling " instr=%08x msr:%016lx\n", 9675080332cSMichael Neuling smp_processor_id(), current->comm, current->pid, 9685080332cSMichael Neuling regs->nip, instr, msr); 9695080332cSMichael Neuling return; 9705080332cSMichael Neuling } 9715080332cSMichael Neuling 9725080332cSMichael Neuling /* Do logging here before we modify sel based on endian */ 9735080332cSMichael Neuling switch (sel) { 9745080332cSMichael Neuling case 0: /* lxvw4x */ 9755080332cSMichael Neuling PPC_WARN_EMULATED(lxvw4x, regs); 9765080332cSMichael Neuling break; 9775080332cSMichael Neuling case 1: /* lxvh8x */ 9785080332cSMichael Neuling PPC_WARN_EMULATED(lxvh8x, regs); 9795080332cSMichael Neuling break; 9805080332cSMichael Neuling case 2: /* lxvd2x */ 9815080332cSMichael Neuling PPC_WARN_EMULATED(lxvd2x, regs); 9825080332cSMichael Neuling break; 9835080332cSMichael Neuling case 3: /* lxvb16x */ 9845080332cSMichael Neuling PPC_WARN_EMULATED(lxvb16x, regs); 9855080332cSMichael Neuling break; 9865080332cSMichael Neuling } 9875080332cSMichael Neuling 9885080332cSMichael Neuling #ifdef __LITTLE_ENDIAN__ 9895080332cSMichael Neuling /* 9905080332cSMichael Neuling * An LE kernel stores the vector in the task struct as an LE 9915080332cSMichael Neuling * byte array (effectively swapping both the components and 9925080332cSMichael Neuling * the content of the components). Those instructions expect 9935080332cSMichael Neuling * the components to remain in ascending address order, so we 9945080332cSMichael Neuling * swap them back. 9955080332cSMichael Neuling * 9965080332cSMichael Neuling * If we are running a BE user space, the expectation is that 9975080332cSMichael Neuling * of a simple memcpy, so forcing the emulation to look like 9985080332cSMichael Neuling * a lxvb16x should do the trick. 
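 *
 * e.g. for lxvw4x the four 32-bit words end up reversed below:
 * ((u32 *)vdst)[i] = ((u32 *)vbuf)[3 - i].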
9995080332cSMichael Neuling */ 10005080332cSMichael Neuling if (swap) 10015080332cSMichael Neuling sel = 3; 10025080332cSMichael Neuling 10035080332cSMichael Neuling switch (sel) { 10045080332cSMichael Neuling case 0: /* lxvw4x */ 10055080332cSMichael Neuling for (i = 0; i < 4; i++) 10065080332cSMichael Neuling ((u32 *)vdst)[i] = ((u32 *)vbuf)[3-i]; 10075080332cSMichael Neuling break; 10085080332cSMichael Neuling case 1: /* lxvh8x */ 10095080332cSMichael Neuling for (i = 0; i < 8; i++) 10105080332cSMichael Neuling ((u16 *)vdst)[i] = ((u16 *)vbuf)[7-i]; 10115080332cSMichael Neuling break; 10125080332cSMichael Neuling case 2: /* lxvd2x */ 10135080332cSMichael Neuling for (i = 0; i < 2; i++) 10145080332cSMichael Neuling ((u64 *)vdst)[i] = ((u64 *)vbuf)[1-i]; 10155080332cSMichael Neuling break; 10165080332cSMichael Neuling case 3: /* lxvb16x */ 10175080332cSMichael Neuling for (i = 0; i < 16; i++) 10185080332cSMichael Neuling vdst[i] = vbuf[15-i]; 10195080332cSMichael Neuling break; 10205080332cSMichael Neuling } 10215080332cSMichael Neuling #else /* __LITTLE_ENDIAN__ */ 10225080332cSMichael Neuling /* On a big endian kernel, a BE userspace only needs a memcpy */ 10235080332cSMichael Neuling if (!swap) 10245080332cSMichael Neuling sel = 3; 10255080332cSMichael Neuling 10265080332cSMichael Neuling /* Otherwise, we need to swap the content of the components */ 10275080332cSMichael Neuling switch (sel) { 10285080332cSMichael Neuling case 0: /* lxvw4x */ 10295080332cSMichael Neuling for (i = 0; i < 4; i++) 10305080332cSMichael Neuling ((u32 *)vdst)[i] = cpu_to_le32(((u32 *)vbuf)[i]); 10315080332cSMichael Neuling break; 10325080332cSMichael Neuling case 1: /* lxvh8x */ 10335080332cSMichael Neuling for (i = 0; i < 8; i++) 10345080332cSMichael Neuling ((u16 *)vdst)[i] = cpu_to_le16(((u16 *)vbuf)[i]); 10355080332cSMichael Neuling break; 10365080332cSMichael Neuling case 2: /* lxvd2x */ 10375080332cSMichael Neuling for (i = 0; i < 2; i++) 10385080332cSMichael Neuling ((u64 *)vdst)[i] = cpu_to_le64(((u64 *)vbuf)[i]); 10395080332cSMichael Neuling break; 10405080332cSMichael Neuling case 3: /* lxvb16x */ 10415080332cSMichael Neuling memcpy(vdst, vbuf, 16); 10425080332cSMichael Neuling break; 10435080332cSMichael Neuling } 10445080332cSMichael Neuling #endif /* !__LITTLE_ENDIAN__ */ 10455080332cSMichael Neuling 10465080332cSMichael Neuling /* Go to next instruction */ 10475080332cSMichael Neuling regs->nip += 4; 10485080332cSMichael Neuling } 10495080332cSMichael Neuling #endif /* CONFIG_VSX */ 10505080332cSMichael Neuling 10510869b6fdSMahesh Salgaonkar void handle_hmi_exception(struct pt_regs *regs) 10520869b6fdSMahesh Salgaonkar { 10530869b6fdSMahesh Salgaonkar struct pt_regs *old_regs; 10540869b6fdSMahesh Salgaonkar 10550869b6fdSMahesh Salgaonkar old_regs = set_irq_regs(regs); 10560869b6fdSMahesh Salgaonkar irq_enter(); 10570869b6fdSMahesh Salgaonkar 10585080332cSMichael Neuling #ifdef CONFIG_VSX 10595080332cSMichael Neuling /* Real mode flagged P9 special emu is needed */ 10605080332cSMichael Neuling if (local_paca->hmi_p9_special_emu) { 10615080332cSMichael Neuling local_paca->hmi_p9_special_emu = 0; 10625080332cSMichael Neuling 10635080332cSMichael Neuling /* 10645080332cSMichael Neuling * We don't want to take page faults while doing the 10655080332cSMichael Neuling * emulation, we just replay the instruction if necessary. 
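 * pagefault_disable() makes the user accesses in p9_hmi_special_emu()
 * fail instead of faulting, so the emulation bails out without advancing
 * NIP and the instruction is simply retried.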
10665080332cSMichael Neuling */ 10675080332cSMichael Neuling pagefault_disable(); 10685080332cSMichael Neuling p9_hmi_special_emu(regs); 10695080332cSMichael Neuling pagefault_enable(); 10705080332cSMichael Neuling } 10715080332cSMichael Neuling #endif /* CONFIG_VSX */ 10725080332cSMichael Neuling 10730869b6fdSMahesh Salgaonkar if (ppc_md.handle_hmi_exception) 10740869b6fdSMahesh Salgaonkar ppc_md.handle_hmi_exception(regs); 10750869b6fdSMahesh Salgaonkar 10760869b6fdSMahesh Salgaonkar irq_exit(); 10770869b6fdSMahesh Salgaonkar set_irq_regs(old_regs); 10780869b6fdSMahesh Salgaonkar } 10790869b6fdSMahesh Salgaonkar 1080dc1c1ca3SStephen Rothwell void unknown_exception(struct pt_regs *regs) 108114cf11afSPaul Mackerras { 1082ba12eedeSLi Zhong enum ctx_state prev_state = exception_enter(); 1083ba12eedeSLi Zhong 108414cf11afSPaul Mackerras printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", 108514cf11afSPaul Mackerras regs->nip, regs->msr, regs->trap); 108614cf11afSPaul Mackerras 1087e821fa42SEric W. Biederman _exception(SIGTRAP, regs, TRAP_UNK, 0); 1088ba12eedeSLi Zhong 1089ba12eedeSLi Zhong exception_exit(prev_state); 109014cf11afSPaul Mackerras } 109114cf11afSPaul Mackerras 1092dc1c1ca3SStephen Rothwell void instruction_breakpoint_exception(struct pt_regs *regs) 109314cf11afSPaul Mackerras { 1094ba12eedeSLi Zhong enum ctx_state prev_state = exception_enter(); 1095ba12eedeSLi Zhong 109614cf11afSPaul Mackerras if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5, 109714cf11afSPaul Mackerras 5, SIGTRAP) == NOTIFY_STOP) 1098ba12eedeSLi Zhong goto bail; 109914cf11afSPaul Mackerras if (debugger_iabr_match(regs)) 1100ba12eedeSLi Zhong goto bail; 110114cf11afSPaul Mackerras _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); 1102ba12eedeSLi Zhong 1103ba12eedeSLi Zhong bail: 1104ba12eedeSLi Zhong exception_exit(prev_state); 110514cf11afSPaul Mackerras } 110614cf11afSPaul Mackerras 110714cf11afSPaul Mackerras void RunModeException(struct pt_regs *regs) 110814cf11afSPaul Mackerras { 1109e821fa42SEric W. Biederman _exception(SIGTRAP, regs, TRAP_UNK, 0); 111014cf11afSPaul Mackerras } 111114cf11afSPaul Mackerras 111203465f89SNicholas Piggin void single_step_exception(struct pt_regs *regs) 111314cf11afSPaul Mackerras { 1114ba12eedeSLi Zhong enum ctx_state prev_state = exception_enter(); 1115ba12eedeSLi Zhong 11162538c2d0SK.Prasad clear_single_step(regs); 11170e524e76SMatt Evans clear_br_trace(regs); 111814cf11afSPaul Mackerras 11196cc89badSNaveen N. Rao if (kprobe_post_handler(regs)) 11206cc89badSNaveen N. Rao return; 11216cc89badSNaveen N. Rao 112214cf11afSPaul Mackerras if (notify_die(DIE_SSTEP, "single_step", regs, 5, 112314cf11afSPaul Mackerras 5, SIGTRAP) == NOTIFY_STOP) 1124ba12eedeSLi Zhong goto bail; 112514cf11afSPaul Mackerras if (debugger_sstep(regs)) 1126ba12eedeSLi Zhong goto bail; 112714cf11afSPaul Mackerras 112814cf11afSPaul Mackerras _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); 1129ba12eedeSLi Zhong 1130ba12eedeSLi Zhong bail: 1131ba12eedeSLi Zhong exception_exit(prev_state); 113214cf11afSPaul Mackerras } 113303465f89SNicholas Piggin NOKPROBE_SYMBOL(single_step_exception); 113414cf11afSPaul Mackerras 113514cf11afSPaul Mackerras /* 113614cf11afSPaul Mackerras * After we have successfully emulated an instruction, we have to 113714cf11afSPaul Mackerras * check if the instruction was being single-stepped, and if so, 113814cf11afSPaul Mackerras * pretend we got a single-step exception. This was pointed out 113914cf11afSPaul Mackerras * by Kumar Gala. 
-- paulus 114014cf11afSPaul Mackerras */ 11418dad3f92SPaul Mackerras static void emulate_single_step(struct pt_regs *regs) 114214cf11afSPaul Mackerras { 11432538c2d0SK.Prasad if (single_stepping(regs)) 11442538c2d0SK.Prasad single_step_exception(regs); 114514cf11afSPaul Mackerras } 114614cf11afSPaul Mackerras 11475fad293bSKumar Gala static inline int __parse_fpscr(unsigned long fpscr) 1148dc1c1ca3SStephen Rothwell { 1149aeb1c0f6SEric W. Biederman int ret = FPE_FLTUNK; 1150dc1c1ca3SStephen Rothwell 1151dc1c1ca3SStephen Rothwell /* Invalid operation */ 1152dc1c1ca3SStephen Rothwell if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX)) 11535fad293bSKumar Gala ret = FPE_FLTINV; 1154dc1c1ca3SStephen Rothwell 1155dc1c1ca3SStephen Rothwell /* Overflow */ 1156dc1c1ca3SStephen Rothwell else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX)) 11575fad293bSKumar Gala ret = FPE_FLTOVF; 1158dc1c1ca3SStephen Rothwell 1159dc1c1ca3SStephen Rothwell /* Underflow */ 1160dc1c1ca3SStephen Rothwell else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX)) 11615fad293bSKumar Gala ret = FPE_FLTUND; 1162dc1c1ca3SStephen Rothwell 1163dc1c1ca3SStephen Rothwell /* Divide by zero */ 1164dc1c1ca3SStephen Rothwell else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX)) 11655fad293bSKumar Gala ret = FPE_FLTDIV; 1166dc1c1ca3SStephen Rothwell 1167dc1c1ca3SStephen Rothwell /* Inexact result */ 1168dc1c1ca3SStephen Rothwell else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX)) 11695fad293bSKumar Gala ret = FPE_FLTRES; 11705fad293bSKumar Gala 11715fad293bSKumar Gala return ret; 11725fad293bSKumar Gala } 11735fad293bSKumar Gala 11745fad293bSKumar Gala static void parse_fpe(struct pt_regs *regs) 11755fad293bSKumar Gala { 11765fad293bSKumar Gala int code = 0; 11775fad293bSKumar Gala 11785fad293bSKumar Gala flush_fp_to_thread(current); 11795fad293bSKumar Gala 1180de79f7b9SPaul Mackerras code = __parse_fpscr(current->thread.fp_state.fpscr); 1181dc1c1ca3SStephen Rothwell 1182dc1c1ca3SStephen Rothwell _exception(SIGFPE, regs, code, regs->nip); 1183dc1c1ca3SStephen Rothwell } 1184dc1c1ca3SStephen Rothwell 1185dc1c1ca3SStephen Rothwell /* 1186dc1c1ca3SStephen Rothwell * Illegal instruction emulation support. Originally written to 118714cf11afSPaul Mackerras * provide the PVR to user applications using the mfspr rd, PVR. 118814cf11afSPaul Mackerras * Return non-zero if we can't emulate, or -EFAULT if the associated 118914cf11afSPaul Mackerras * memory access caused an access fault. Return zero on success. 119014cf11afSPaul Mackerras * 119114cf11afSPaul Mackerras * There are a couple of ways to do this, either "decode" the instruction 119214cf11afSPaul Mackerras * or directly match lots of bits. In this case, matching lots of 119314cf11afSPaul Mackerras * bits is faster and easier. 
119486417780SPaul Mackerras * 119514cf11afSPaul Mackerras */ 119614cf11afSPaul Mackerras static int emulate_string_inst(struct pt_regs *regs, u32 instword) 119714cf11afSPaul Mackerras { 119814cf11afSPaul Mackerras u8 rT = (instword >> 21) & 0x1f; 119914cf11afSPaul Mackerras u8 rA = (instword >> 16) & 0x1f; 120014cf11afSPaul Mackerras u8 NB_RB = (instword >> 11) & 0x1f; 120114cf11afSPaul Mackerras u32 num_bytes; 120214cf11afSPaul Mackerras unsigned long EA; 120314cf11afSPaul Mackerras int pos = 0; 120414cf11afSPaul Mackerras 120514cf11afSPaul Mackerras /* Early out if we are an invalid form of lswx */ 120616c57b36SKumar Gala if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX) 120714cf11afSPaul Mackerras if ((rT == rA) || (rT == NB_RB)) 120814cf11afSPaul Mackerras return -EINVAL; 120914cf11afSPaul Mackerras 121014cf11afSPaul Mackerras EA = (rA == 0) ? 0 : regs->gpr[rA]; 121114cf11afSPaul Mackerras 121216c57b36SKumar Gala switch (instword & PPC_INST_STRING_MASK) { 121316c57b36SKumar Gala case PPC_INST_LSWX: 121416c57b36SKumar Gala case PPC_INST_STSWX: 121514cf11afSPaul Mackerras EA += NB_RB; 121614cf11afSPaul Mackerras num_bytes = regs->xer & 0x7f; 121714cf11afSPaul Mackerras break; 121816c57b36SKumar Gala case PPC_INST_LSWI: 121916c57b36SKumar Gala case PPC_INST_STSWI: 122014cf11afSPaul Mackerras num_bytes = (NB_RB == 0) ? 32 : NB_RB; 122114cf11afSPaul Mackerras break; 122214cf11afSPaul Mackerras default: 122314cf11afSPaul Mackerras return -EINVAL; 122414cf11afSPaul Mackerras } 122514cf11afSPaul Mackerras 122614cf11afSPaul Mackerras while (num_bytes != 0) 122714cf11afSPaul Mackerras { 122814cf11afSPaul Mackerras u8 val; 122914cf11afSPaul Mackerras u32 shift = 8 * (3 - (pos & 0x3)); 123014cf11afSPaul Mackerras 123180aa0fb4SJames Yang /* if process is 32-bit, clear upper 32 bits of EA */ 123280aa0fb4SJames Yang if ((regs->msr & MSR_64BIT) == 0) 123380aa0fb4SJames Yang EA &= 0xFFFFFFFF; 123480aa0fb4SJames Yang 123516c57b36SKumar Gala switch ((instword & PPC_INST_STRING_MASK)) { 123616c57b36SKumar Gala case PPC_INST_LSWX: 123716c57b36SKumar Gala case PPC_INST_LSWI: 123814cf11afSPaul Mackerras if (get_user(val, (u8 __user *)EA)) 123914cf11afSPaul Mackerras return -EFAULT; 124014cf11afSPaul Mackerras /* first time updating this reg, 124114cf11afSPaul Mackerras * zero it out */ 124214cf11afSPaul Mackerras if (pos == 0) 124314cf11afSPaul Mackerras regs->gpr[rT] = 0; 124414cf11afSPaul Mackerras regs->gpr[rT] |= val << shift; 124514cf11afSPaul Mackerras break; 124616c57b36SKumar Gala case PPC_INST_STSWI: 124716c57b36SKumar Gala case PPC_INST_STSWX: 124814cf11afSPaul Mackerras val = regs->gpr[rT] >> shift; 124914cf11afSPaul Mackerras if (put_user(val, (u8 __user *)EA)) 125014cf11afSPaul Mackerras return -EFAULT; 125114cf11afSPaul Mackerras break; 125214cf11afSPaul Mackerras } 125314cf11afSPaul Mackerras /* move EA to next address */ 125414cf11afSPaul Mackerras EA += 1; 125514cf11afSPaul Mackerras num_bytes--; 125614cf11afSPaul Mackerras 125714cf11afSPaul Mackerras /* manage our position within the register */ 125814cf11afSPaul Mackerras if (++pos == 4) { 125914cf11afSPaul Mackerras pos = 0; 126014cf11afSPaul Mackerras if (++rT == 32) 126114cf11afSPaul Mackerras rT = 0; 126214cf11afSPaul Mackerras } 126314cf11afSPaul Mackerras } 126414cf11afSPaul Mackerras 126514cf11afSPaul Mackerras return 0; 126614cf11afSPaul Mackerras } 126714cf11afSPaul Mackerras 1268c3412dcbSWill Schmidt static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword) 1269c3412dcbSWill Schmidt { 1270c3412dcbSWill Schmidt u32 
ra,rs; 1271c3412dcbSWill Schmidt unsigned long tmp; 1272c3412dcbSWill Schmidt 1273c3412dcbSWill Schmidt ra = (instword >> 16) & 0x1f; 1274c3412dcbSWill Schmidt rs = (instword >> 21) & 0x1f; 1275c3412dcbSWill Schmidt 1276c3412dcbSWill Schmidt tmp = regs->gpr[rs]; 1277c3412dcbSWill Schmidt tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL); 1278c3412dcbSWill Schmidt tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL); 1279c3412dcbSWill Schmidt tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL; 1280c3412dcbSWill Schmidt regs->gpr[ra] = tmp; 1281c3412dcbSWill Schmidt 1282c3412dcbSWill Schmidt return 0; 1283c3412dcbSWill Schmidt } 1284c3412dcbSWill Schmidt 1285c1469f13SKumar Gala static int emulate_isel(struct pt_regs *regs, u32 instword) 1286c1469f13SKumar Gala { 1287c1469f13SKumar Gala u8 rT = (instword >> 21) & 0x1f; 1288c1469f13SKumar Gala u8 rA = (instword >> 16) & 0x1f; 1289c1469f13SKumar Gala u8 rB = (instword >> 11) & 0x1f; 1290c1469f13SKumar Gala u8 BC = (instword >> 6) & 0x1f; 1291c1469f13SKumar Gala u8 bit; 1292c1469f13SKumar Gala unsigned long tmp; 1293c1469f13SKumar Gala 1294c1469f13SKumar Gala tmp = (rA == 0) ? 0 : regs->gpr[rA]; 1295c1469f13SKumar Gala bit = (regs->ccr >> (31 - BC)) & 0x1; 1296c1469f13SKumar Gala 1297c1469f13SKumar Gala regs->gpr[rT] = bit ? tmp : regs->gpr[rB]; 1298c1469f13SKumar Gala 1299c1469f13SKumar Gala return 0; 1300c1469f13SKumar Gala } 1301c1469f13SKumar Gala 13026ce6c629SMichael Neuling #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 13036ce6c629SMichael Neuling static inline bool tm_abort_check(struct pt_regs *regs, int cause) 13046ce6c629SMichael Neuling { 13056ce6c629SMichael Neuling /* If we're emulating a load/store in an active transaction, we cannot 13066ce6c629SMichael Neuling * emulate it as the kernel operates in transaction suspended context. 13076ce6c629SMichael Neuling * We need to abort the transaction. This creates a persistent TM 13086ce6c629SMichael Neuling * abort so tell the user what caused it with a new code. 13096ce6c629SMichael Neuling */ 13106ce6c629SMichael Neuling if (MSR_TM_TRANSACTIONAL(regs->msr)) { 13116ce6c629SMichael Neuling tm_enable(); 13126ce6c629SMichael Neuling tm_abort(cause); 13136ce6c629SMichael Neuling return true; 13146ce6c629SMichael Neuling } 13156ce6c629SMichael Neuling return false; 13166ce6c629SMichael Neuling } 13176ce6c629SMichael Neuling #else 13186ce6c629SMichael Neuling static inline bool tm_abort_check(struct pt_regs *regs, int reason) 13196ce6c629SMichael Neuling { 13206ce6c629SMichael Neuling return false; 13216ce6c629SMichael Neuling } 13226ce6c629SMichael Neuling #endif 13236ce6c629SMichael Neuling 132414cf11afSPaul Mackerras static int emulate_instruction(struct pt_regs *regs) 132514cf11afSPaul Mackerras { 132614cf11afSPaul Mackerras u32 instword; 132714cf11afSPaul Mackerras u32 rd; 132814cf11afSPaul Mackerras 13294288e343SAnton Blanchard if (!user_mode(regs)) 133014cf11afSPaul Mackerras return -EINVAL; 133114cf11afSPaul Mackerras CHECK_FULL_REGS(regs); 133214cf11afSPaul Mackerras 133314cf11afSPaul Mackerras if (get_user(instword, (u32 __user *)(regs->nip))) 133414cf11afSPaul Mackerras return -EFAULT; 133514cf11afSPaul Mackerras 133614cf11afSPaul Mackerras /* Emulate the mfspr rD, PVR. 
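 * The PVR is a supervisor-only SPR, so a user-mode mfspr of it takes a
 * privileged-instruction program check and we supply the value here.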
*/ 133716c57b36SKumar Gala if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) { 1338eecff81dSAnton Blanchard PPC_WARN_EMULATED(mfpvr, regs); 133914cf11afSPaul Mackerras rd = (instword >> 21) & 0x1f; 134014cf11afSPaul Mackerras regs->gpr[rd] = mfspr(SPRN_PVR); 134114cf11afSPaul Mackerras return 0; 134214cf11afSPaul Mackerras } 134314cf11afSPaul Mackerras 134414cf11afSPaul Mackerras /* Emulating the dcba insn is just a no-op. */ 134580947e7cSGeert Uytterhoeven if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) { 1346eecff81dSAnton Blanchard PPC_WARN_EMULATED(dcba, regs); 134714cf11afSPaul Mackerras return 0; 134880947e7cSGeert Uytterhoeven } 134914cf11afSPaul Mackerras 135014cf11afSPaul Mackerras /* Emulate the mcrxr insn. */ 135116c57b36SKumar Gala if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) { 135286417780SPaul Mackerras int shift = (instword >> 21) & 0x1c; 135314cf11afSPaul Mackerras unsigned long msk = 0xf0000000UL >> shift; 135414cf11afSPaul Mackerras 1355eecff81dSAnton Blanchard PPC_WARN_EMULATED(mcrxr, regs); 135614cf11afSPaul Mackerras regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk); 135714cf11afSPaul Mackerras regs->xer &= ~0xf0000000UL; 135814cf11afSPaul Mackerras return 0; 135914cf11afSPaul Mackerras } 136014cf11afSPaul Mackerras 136114cf11afSPaul Mackerras /* Emulate load/store string insn. */ 136280947e7cSGeert Uytterhoeven if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) { 13636ce6c629SMichael Neuling if (tm_abort_check(regs, 13646ce6c629SMichael Neuling TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT)) 13656ce6c629SMichael Neuling return -EINVAL; 1366eecff81dSAnton Blanchard PPC_WARN_EMULATED(string, regs); 136714cf11afSPaul Mackerras return emulate_string_inst(regs, instword); 136880947e7cSGeert Uytterhoeven } 136914cf11afSPaul Mackerras 1370c3412dcbSWill Schmidt /* Emulate the popcntb (Population Count Bytes) instruction. */ 137116c57b36SKumar Gala if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) { 1372eecff81dSAnton Blanchard PPC_WARN_EMULATED(popcntb, regs); 1373c3412dcbSWill Schmidt return emulate_popcntb_inst(regs, instword); 1374c3412dcbSWill Schmidt } 1375c3412dcbSWill Schmidt 1376c1469f13SKumar Gala /* Emulate isel (Integer Select) instruction */ 137716c57b36SKumar Gala if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) { 1378eecff81dSAnton Blanchard PPC_WARN_EMULATED(isel, regs); 1379c1469f13SKumar Gala return emulate_isel(regs, instword); 1380c1469f13SKumar Gala } 1381c1469f13SKumar Gala 13829863c28aSJames Yang /* Emulate sync instruction variants */ 13839863c28aSJames Yang if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) { 13849863c28aSJames Yang PPC_WARN_EMULATED(sync, regs); 13859863c28aSJames Yang asm volatile("sync"); 13869863c28aSJames Yang return 0; 13879863c28aSJames Yang } 13889863c28aSJames Yang 1389efcac658SAlexey Kardashevskiy #ifdef CONFIG_PPC64 1390efcac658SAlexey Kardashevskiy /* Emulate the mfspr rD, DSCR. 
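 * DSCR is the Data Stream Control Register, which tunes hardware
 * prefetch; the emulation below is only offered when the CPU has
 * CPU_FTR_DSCR.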
*/ 139173d2fb75SAnton Blanchard if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) == 139273d2fb75SAnton Blanchard PPC_INST_MFSPR_DSCR_USER) || 139373d2fb75SAnton Blanchard ((instword & PPC_INST_MFSPR_DSCR_MASK) == 139473d2fb75SAnton Blanchard PPC_INST_MFSPR_DSCR)) && 1395efcac658SAlexey Kardashevskiy cpu_has_feature(CPU_FTR_DSCR)) { 1396efcac658SAlexey Kardashevskiy PPC_WARN_EMULATED(mfdscr, regs); 1397efcac658SAlexey Kardashevskiy rd = (instword >> 21) & 0x1f; 1398efcac658SAlexey Kardashevskiy regs->gpr[rd] = mfspr(SPRN_DSCR); 1399efcac658SAlexey Kardashevskiy return 0; 1400efcac658SAlexey Kardashevskiy } 1401efcac658SAlexey Kardashevskiy /* Emulate the mtspr DSCR, rD. */ 140273d2fb75SAnton Blanchard if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) == 140373d2fb75SAnton Blanchard PPC_INST_MTSPR_DSCR_USER) || 140473d2fb75SAnton Blanchard ((instword & PPC_INST_MTSPR_DSCR_MASK) == 140573d2fb75SAnton Blanchard PPC_INST_MTSPR_DSCR)) && 1406efcac658SAlexey Kardashevskiy cpu_has_feature(CPU_FTR_DSCR)) { 1407efcac658SAlexey Kardashevskiy PPC_WARN_EMULATED(mtdscr, regs); 1408efcac658SAlexey Kardashevskiy rd = (instword >> 21) & 0x1f; 140900ca0de0SAnton Blanchard current->thread.dscr = regs->gpr[rd]; 1410efcac658SAlexey Kardashevskiy current->thread.dscr_inherit = 1; 141100ca0de0SAnton Blanchard mtspr(SPRN_DSCR, current->thread.dscr); 1412efcac658SAlexey Kardashevskiy return 0; 1413efcac658SAlexey Kardashevskiy } 1414efcac658SAlexey Kardashevskiy #endif 1415efcac658SAlexey Kardashevskiy 141614cf11afSPaul Mackerras return -EINVAL; 141714cf11afSPaul Mackerras } 141814cf11afSPaul Mackerras 141973c9ceabSJeremy Fitzhardinge int is_valid_bugaddr(unsigned long addr) 142014cf11afSPaul Mackerras { 142173c9ceabSJeremy Fitzhardinge return is_kernel_addr(addr); 142214cf11afSPaul Mackerras } 142314cf11afSPaul Mackerras 14243a3b5aa6SKevin Hao #ifdef CONFIG_MATH_EMULATION 14253a3b5aa6SKevin Hao static int emulate_math(struct pt_regs *regs) 14263a3b5aa6SKevin Hao { 14273a3b5aa6SKevin Hao int ret; 14283a3b5aa6SKevin Hao extern int do_mathemu(struct pt_regs *regs); 14293a3b5aa6SKevin Hao 14303a3b5aa6SKevin Hao ret = do_mathemu(regs); 14313a3b5aa6SKevin Hao if (ret >= 0) 14323a3b5aa6SKevin Hao PPC_WARN_EMULATED(math, regs); 14333a3b5aa6SKevin Hao 14343a3b5aa6SKevin Hao switch (ret) { 14353a3b5aa6SKevin Hao case 0: 14363a3b5aa6SKevin Hao emulate_single_step(regs); 14373a3b5aa6SKevin Hao return 0; 14383a3b5aa6SKevin Hao case 1: { 14393a3b5aa6SKevin Hao int code = 0; 1440de79f7b9SPaul Mackerras code = __parse_fpscr(current->thread.fp_state.fpscr); 14413a3b5aa6SKevin Hao _exception(SIGFPE, regs, code, regs->nip); 14423a3b5aa6SKevin Hao return 0; 14433a3b5aa6SKevin Hao } 14443a3b5aa6SKevin Hao case -EFAULT: 14453a3b5aa6SKevin Hao _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 14463a3b5aa6SKevin Hao return 0; 14473a3b5aa6SKevin Hao } 14483a3b5aa6SKevin Hao 14493a3b5aa6SKevin Hao return -1; 14503a3b5aa6SKevin Hao } 14513a3b5aa6SKevin Hao #else 14523a3b5aa6SKevin Hao static inline int emulate_math(struct pt_regs *regs) { return -1; } 14533a3b5aa6SKevin Hao #endif 14543a3b5aa6SKevin Hao 145503465f89SNicholas Piggin void program_check_exception(struct pt_regs *regs) 145614cf11afSPaul Mackerras { 1457ba12eedeSLi Zhong enum ctx_state prev_state = exception_enter(); 145814cf11afSPaul Mackerras unsigned int reason = get_reason(regs); 145914cf11afSPaul Mackerras 1460aa42c69cSKim Phillips /* We can now get here via a FP Unavailable exception if the core 146104903a30SKumar Gala * has no FPU, in that case the reason flags will be 0 
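 * and none of the REASON_* checks below will match; such an instruction
 * ends up in the math emulation further down.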
*/ 146214cf11afSPaul Mackerras 146314cf11afSPaul Mackerras if (reason & REASON_FP) { 146414cf11afSPaul Mackerras /* IEEE FP exception */ 1465dc1c1ca3SStephen Rothwell parse_fpe(regs); 1466ba12eedeSLi Zhong goto bail; 14678dad3f92SPaul Mackerras } 14688dad3f92SPaul Mackerras if (reason & REASON_TRAP) { 1469a4c3f909SBalbir Singh unsigned long bugaddr; 1470ba797b28SJason Wessel /* Debugger is first in line to stop recursive faults in 1471ba797b28SJason Wessel * rcu_lock, notify_die, or atomic_notifier_call_chain */ 1472ba797b28SJason Wessel if (debugger_bpt(regs)) 1473ba12eedeSLi Zhong goto bail; 1474ba797b28SJason Wessel 14756cc89badSNaveen N. Rao if (kprobe_handler(regs)) 14766cc89badSNaveen N. Rao goto bail; 14776cc89badSNaveen N. Rao 147814cf11afSPaul Mackerras /* trap exception */ 1479dc1c1ca3SStephen Rothwell if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP) 1480dc1c1ca3SStephen Rothwell == NOTIFY_STOP) 1481ba12eedeSLi Zhong goto bail; 148273c9ceabSJeremy Fitzhardinge 1483a4c3f909SBalbir Singh bugaddr = regs->nip; 1484a4c3f909SBalbir Singh /* 1485a4c3f909SBalbir Singh * Fixup bugaddr for BUG_ON() in real mode 1486a4c3f909SBalbir Singh */ 1487a4c3f909SBalbir Singh if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR)) 1488a4c3f909SBalbir Singh bugaddr += PAGE_OFFSET; 1489a4c3f909SBalbir Singh 149073c9ceabSJeremy Fitzhardinge if (!(regs->msr & MSR_PR) && /* not user-mode */ 1491a4c3f909SBalbir Singh report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) { 149214cf11afSPaul Mackerras regs->nip += 4; 1493ba12eedeSLi Zhong goto bail; 149414cf11afSPaul Mackerras } 14958dad3f92SPaul Mackerras _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); 1496ba12eedeSLi Zhong goto bail; 14978dad3f92SPaul Mackerras } 1498bc2a9408SMichael Neuling #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1499bc2a9408SMichael Neuling if (reason & REASON_TM) { 1500bc2a9408SMichael Neuling /* This is a TM "Bad Thing Exception" program check. 1501bc2a9408SMichael Neuling * This occurs when: 1502bc2a9408SMichael Neuling * - An rfid/hrfid/mtmsrd attempts to cause an illegal 1503bc2a9408SMichael Neuling * transition in TM states. 1504bc2a9408SMichael Neuling * - A trechkpt is attempted when transactional. 1505bc2a9408SMichael Neuling * - A treclaim is attempted when non transactional. 1506bc2a9408SMichael Neuling * - A tend is illegally attempted. 1507bc2a9408SMichael Neuling * - writing a TM SPR when transactional. 1508632f0574SMichael Ellerman * 1509632f0574SMichael Ellerman * If usermode caused this, it's done something illegal and 1510bc2a9408SMichael Neuling * gets a SIGILL slap on the wrist. We call it an illegal 1511bc2a9408SMichael Neuling * operand to distinguish from the instruction just being bad 1512bc2a9408SMichael Neuling * (e.g. executing a 'tend' on a CPU without TM!); it's an 1513bc2a9408SMichael Neuling * illegal /placement/ of a valid instruction. 
1514bc2a9408SMichael Neuling */ 1515bc2a9408SMichael Neuling if (user_mode(regs)) { 1516bc2a9408SMichael Neuling _exception(SIGILL, regs, ILL_ILLOPN, regs->nip); 1517ba12eedeSLi Zhong goto bail; 1518bc2a9408SMichael Neuling } else { 1519bc2a9408SMichael Neuling printk(KERN_EMERG "Unexpected TM Bad Thing exception " 152011be3958SBreno Leitao "at %lx (msr 0x%lx) tm_scratch=%llx\n", 152111be3958SBreno Leitao regs->nip, regs->msr, get_paca()->tm_scratch); 1522bc2a9408SMichael Neuling die("Unrecoverable exception", regs, SIGABRT); 1523bc2a9408SMichael Neuling } 1524bc2a9408SMichael Neuling } 1525bc2a9408SMichael Neuling #endif 15268dad3f92SPaul Mackerras 1527b3f6a459SMichael Ellerman /* 1528b3f6a459SMichael Ellerman * If we took the program check in the kernel skip down to sending a 1529b3f6a459SMichael Ellerman * SIGILL. The subsequent cases all relate to emulating instructions 1530b3f6a459SMichael Ellerman * which we should only do for userspace. We also do not want to enable 1531b3f6a459SMichael Ellerman * interrupts for kernel faults because that might lead to further 1532b3f6a459SMichael Ellerman * faults, and lose the context of the original exception. 1533b3f6a459SMichael Ellerman */ 1534b3f6a459SMichael Ellerman if (!user_mode(regs)) 1535b3f6a459SMichael Ellerman goto sigill; 1536b3f6a459SMichael Ellerman 1537a3512b2dSBenjamin Herrenschmidt /* We restore the interrupt state now */ 1538a3512b2dSBenjamin Herrenschmidt if (!arch_irq_disabled_regs(regs)) 1539cd8a5673SPaul Mackerras local_irq_enable(); 1540cd8a5673SPaul Mackerras 154104903a30SKumar Gala /* (reason & REASON_ILLEGAL) would be the obvious thing here, 154204903a30SKumar Gala * but there seems to be a hardware bug on the 405GP (RevD) 154304903a30SKumar Gala * that means ESR is sometimes set incorrectly - either to 154404903a30SKumar Gala * ESR_DST (!?) or 0. In the process of chasing this with the 154504903a30SKumar Gala * hardware people - not sure if it can happen on any illegal 154604903a30SKumar Gala * instruction or only on FP instructions, whether there is a 15474e63f8edSBenjamin Herrenschmidt * pattern to occurrences etc. -dgibson 31/Mar/2003 15484e63f8edSBenjamin Herrenschmidt */ 15493a3b5aa6SKevin Hao if (!emulate_math(regs)) 1550ba12eedeSLi Zhong goto bail; 155104903a30SKumar Gala 15528dad3f92SPaul Mackerras /* Try to emulate it if we should.
*/ 15538dad3f92SPaul Mackerras if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) { 155414cf11afSPaul Mackerras switch (emulate_instruction(regs)) { 155514cf11afSPaul Mackerras case 0: 155614cf11afSPaul Mackerras regs->nip += 4; 155714cf11afSPaul Mackerras emulate_single_step(regs); 1558ba12eedeSLi Zhong goto bail; 155914cf11afSPaul Mackerras case -EFAULT: 156014cf11afSPaul Mackerras _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 1561ba12eedeSLi Zhong goto bail; 15628dad3f92SPaul Mackerras } 15638dad3f92SPaul Mackerras } 15648dad3f92SPaul Mackerras 1565b3f6a459SMichael Ellerman sigill: 156614cf11afSPaul Mackerras if (reason & REASON_PRIVILEGED) 156714cf11afSPaul Mackerras _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); 156814cf11afSPaul Mackerras else 156914cf11afSPaul Mackerras _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1570ba12eedeSLi Zhong 1571ba12eedeSLi Zhong bail: 1572ba12eedeSLi Zhong exception_exit(prev_state); 157314cf11afSPaul Mackerras } 157403465f89SNicholas Piggin NOKPROBE_SYMBOL(program_check_exception); 157514cf11afSPaul Mackerras 1576bf593907SPaul Mackerras /* 1577bf593907SPaul Mackerras * This occurs when running in hypervisor mode on POWER6 or later 1578bf593907SPaul Mackerras * and an illegal instruction is encountered. 1579bf593907SPaul Mackerras */ 158003465f89SNicholas Piggin void emulation_assist_interrupt(struct pt_regs *regs) 1581bf593907SPaul Mackerras { 1582bf593907SPaul Mackerras regs->msr |= REASON_ILLEGAL; 1583bf593907SPaul Mackerras program_check_exception(regs); 1584bf593907SPaul Mackerras } 158503465f89SNicholas Piggin NOKPROBE_SYMBOL(emulation_assist_interrupt); 1586bf593907SPaul Mackerras 1587dc1c1ca3SStephen Rothwell void alignment_exception(struct pt_regs *regs) 158814cf11afSPaul Mackerras { 1589ba12eedeSLi Zhong enum ctx_state prev_state = exception_enter(); 15904393c4f6SBenjamin Herrenschmidt int sig, code, fixed = 0; 159114cf11afSPaul Mackerras 1592a3512b2dSBenjamin Herrenschmidt /* We restore the interrupt state now */ 1593a3512b2dSBenjamin Herrenschmidt if (!arch_irq_disabled_regs(regs)) 1594a3512b2dSBenjamin Herrenschmidt local_irq_enable(); 1595a3512b2dSBenjamin Herrenschmidt 15966ce6c629SMichael Neuling if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT)) 15976ce6c629SMichael Neuling goto bail; 15986ce6c629SMichael Neuling 1599e9370ae1SPaul Mackerras /* we don't implement logging of alignment exceptions */ 1600e9370ae1SPaul Mackerras if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) 160114cf11afSPaul Mackerras fixed = fix_alignment(regs); 160214cf11afSPaul Mackerras 160314cf11afSPaul Mackerras if (fixed == 1) { 160414cf11afSPaul Mackerras regs->nip += 4; /* skip over emulated instruction */ 160514cf11afSPaul Mackerras emulate_single_step(regs); 1606ba12eedeSLi Zhong goto bail; 160714cf11afSPaul Mackerras } 160814cf11afSPaul Mackerras 160914cf11afSPaul Mackerras /* Operand address was bad */ 161014cf11afSPaul Mackerras if (fixed == -EFAULT) { 16114393c4f6SBenjamin Herrenschmidt sig = SIGSEGV; 16124393c4f6SBenjamin Herrenschmidt code = SEGV_ACCERR; 16134393c4f6SBenjamin Herrenschmidt } else { 16144393c4f6SBenjamin Herrenschmidt sig = SIGBUS; 16154393c4f6SBenjamin Herrenschmidt code = BUS_ADRALN; 161614cf11afSPaul Mackerras } 16174393c4f6SBenjamin Herrenschmidt if (user_mode(regs)) 16184393c4f6SBenjamin Herrenschmidt _exception(sig, regs, code, regs->dar); 16194393c4f6SBenjamin Herrenschmidt else 16204393c4f6SBenjamin Herrenschmidt bad_page_fault(regs, regs->dar, sig); 1621ba12eedeSLi Zhong 1622ba12eedeSLi Zhong bail: 
1623ba12eedeSLi Zhong exception_exit(prev_state); 162414cf11afSPaul Mackerras } 162514cf11afSPaul Mackerras 162614cf11afSPaul Mackerras void StackOverflow(struct pt_regs *regs) 162714cf11afSPaul Mackerras { 16289bf3d3c4SChristophe Leroy pr_crit("Kernel stack overflow in process %s[%d], r1=%lx\n", 16299bf3d3c4SChristophe Leroy current->comm, task_pid_nr(current), regs->gpr[1]); 163014cf11afSPaul Mackerras debugger(regs); 163114cf11afSPaul Mackerras show_regs(regs); 163214cf11afSPaul Mackerras panic("kernel stack overflow"); 163314cf11afSPaul Mackerras } 163414cf11afSPaul Mackerras 1635dc1c1ca3SStephen Rothwell void kernel_fp_unavailable_exception(struct pt_regs *regs) 1636dc1c1ca3SStephen Rothwell { 1637ba12eedeSLi Zhong enum ctx_state prev_state = exception_enter(); 1638ba12eedeSLi Zhong 1639dc1c1ca3SStephen Rothwell printk(KERN_EMERG "Unrecoverable FP Unavailable Exception " 1640dc1c1ca3SStephen Rothwell "%lx at %lx\n", regs->trap, regs->nip); 1641dc1c1ca3SStephen Rothwell die("Unrecoverable FP Unavailable Exception", regs, SIGABRT); 1642ba12eedeSLi Zhong 1643ba12eedeSLi Zhong exception_exit(prev_state); 1644dc1c1ca3SStephen Rothwell } 1645dc1c1ca3SStephen Rothwell 1646dc1c1ca3SStephen Rothwell void altivec_unavailable_exception(struct pt_regs *regs) 1647dc1c1ca3SStephen Rothwell { 1648ba12eedeSLi Zhong enum ctx_state prev_state = exception_enter(); 1649ba12eedeSLi Zhong 1650dc1c1ca3SStephen Rothwell if (user_mode(regs)) { 1651dc1c1ca3SStephen Rothwell /* A user program has executed an altivec instruction, 1652dc1c1ca3SStephen Rothwell but this kernel doesn't support altivec. */ 1653dc1c1ca3SStephen Rothwell _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1654ba12eedeSLi Zhong goto bail; 1655dc1c1ca3SStephen Rothwell } 16566c4841c2SAnton Blanchard 1657dc1c1ca3SStephen Rothwell printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception " 1658dc1c1ca3SStephen Rothwell "%lx at %lx\n", regs->trap, regs->nip); 1659dc1c1ca3SStephen Rothwell die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT); 1660ba12eedeSLi Zhong 1661ba12eedeSLi Zhong bail: 1662ba12eedeSLi Zhong exception_exit(prev_state); 1663dc1c1ca3SStephen Rothwell } 1664dc1c1ca3SStephen Rothwell 1665ce48b210SMichael Neuling void vsx_unavailable_exception(struct pt_regs *regs) 1666ce48b210SMichael Neuling { 1667ce48b210SMichael Neuling if (user_mode(regs)) { 1668ce48b210SMichael Neuling /* A user program has executed a VSX instruction, 1669ce48b210SMichael Neuling but this kernel doesn't support VSX.
*/ 1670ce48b210SMichael Neuling _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1671ce48b210SMichael Neuling return; 1672ce48b210SMichael Neuling } 1673ce48b210SMichael Neuling 1674ce48b210SMichael Neuling printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception " 1675ce48b210SMichael Neuling "%lx at %lx\n", regs->trap, regs->nip); 1676ce48b210SMichael Neuling die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT); 1677ce48b210SMichael Neuling } 1678ce48b210SMichael Neuling 16792517617eSMichael Neuling #ifdef CONFIG_PPC64 1680172f7aaaSCyril Bur static void tm_unavailable(struct pt_regs *regs) 1681172f7aaaSCyril Bur { 16825d176f75SCyril Bur #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 16835d176f75SCyril Bur if (user_mode(regs)) { 16845d176f75SCyril Bur current->thread.load_tm++; 16855d176f75SCyril Bur regs->msr |= MSR_TM; 16865d176f75SCyril Bur tm_enable(); 16875d176f75SCyril Bur tm_restore_sprs(&current->thread); 16885d176f75SCyril Bur return; 16895d176f75SCyril Bur } 16905d176f75SCyril Bur #endif 1691172f7aaaSCyril Bur pr_emerg("Unrecoverable TM Unavailable Exception " 1692172f7aaaSCyril Bur "%lx at %lx\n", regs->trap, regs->nip); 1693172f7aaaSCyril Bur die("Unrecoverable TM Unavailable Exception", regs, SIGABRT); 1694172f7aaaSCyril Bur } 1695172f7aaaSCyril Bur 1696021424a1SMichael Ellerman void facility_unavailable_exception(struct pt_regs *regs) 1697d0c0c9a1SMichael Neuling { 1698021424a1SMichael Ellerman static char *facility_strings[] = { 16992517617eSMichael Neuling [FSCR_FP_LG] = "FPU", 17002517617eSMichael Neuling [FSCR_VECVSX_LG] = "VMX/VSX", 17012517617eSMichael Neuling [FSCR_DSCR_LG] = "DSCR", 17022517617eSMichael Neuling [FSCR_PM_LG] = "PMU SPRs", 17032517617eSMichael Neuling [FSCR_BHRB_LG] = "BHRB", 17042517617eSMichael Neuling [FSCR_TM_LG] = "TM", 17052517617eSMichael Neuling [FSCR_EBB_LG] = "EBB", 17062517617eSMichael Neuling [FSCR_TAR_LG] = "TAR", 1707794464f4SNicholas Piggin [FSCR_MSGP_LG] = "MSGP", 17089b7ff0c6SNicholas Piggin [FSCR_SCV_LG] = "SCV", 1709021424a1SMichael Ellerman }; 17102517617eSMichael Neuling char *facility = "unknown"; 1711021424a1SMichael Ellerman u64 value; 1712c952c1c4SAnshuman Khandual u32 instword, rd; 17132517617eSMichael Neuling u8 status; 17142517617eSMichael Neuling bool hv; 1715021424a1SMichael Ellerman 17162271db20SBenjamin Herrenschmidt hv = (TRAP(regs) == 0xf80); 17172517617eSMichael Neuling if (hv) 1718b14b6260SMichael Ellerman value = mfspr(SPRN_HFSCR); 17192517617eSMichael Neuling else 17202517617eSMichael Neuling value = mfspr(SPRN_FSCR); 17212517617eSMichael Neuling 17222517617eSMichael Neuling status = value >> 56; 1723709b973cSAnshuman Khandual if ((hv || status >= 2) && 1724709b973cSAnshuman Khandual (status < ARRAY_SIZE(facility_strings)) && 1725709b973cSAnshuman Khandual facility_strings[status]) 1726709b973cSAnshuman Khandual facility = facility_strings[status]; 1727709b973cSAnshuman Khandual 1728709b973cSAnshuman Khandual /* We should not have taken this interrupt in kernel */ 1729709b973cSAnshuman Khandual if (!user_mode(regs)) { 1730709b973cSAnshuman Khandual pr_emerg("Facility '%s' unavailable (%d) exception in kernel mode at %lx\n", 1731709b973cSAnshuman Khandual facility, status, regs->nip); 1732709b973cSAnshuman Khandual die("Unexpected facility unavailable exception", regs, SIGABRT); 1733709b973cSAnshuman Khandual } 1734709b973cSAnshuman Khandual 1735709b973cSAnshuman Khandual /* We restore the interrupt state now */ 1736709b973cSAnshuman Khandual if (!arch_irq_disabled_regs(regs)) 1737709b973cSAnshuman Khandual
local_irq_enable(); 1738709b973cSAnshuman Khandual 17392517617eSMichael Neuling if (status == FSCR_DSCR_LG) { 1740c952c1c4SAnshuman Khandual /* 1741c952c1c4SAnshuman Khandual * User is accessing the DSCR register using the problem 1742c952c1c4SAnshuman Khandual * state only SPR number (0x03) either through a mfspr or 1743c952c1c4SAnshuman Khandual * a mtspr instruction. If it is a write attempt through 1744c952c1c4SAnshuman Khandual * a mtspr, then we set the inherit bit. This also allows 1745c952c1c4SAnshuman Khandual * the user to write or read the register directly in the 1746c952c1c4SAnshuman Khandual * future by setting via the FSCR DSCR bit. But in case it 1747c952c1c4SAnshuman Khandual * is a read DSCR attempt through a mfspr instruction, we 1748c952c1c4SAnshuman Khandual * just emulate the instruction instead. This code path will 1749c952c1c4SAnshuman Khandual * always emulate all the mfspr instructions till the user 1750c952c1c4SAnshuman Khandual * has attempted at least one mtspr instruction. This way it 1751c952c1c4SAnshuman Khandual * preserves the same behaviour when the user is accessing 1752c952c1c4SAnshuman Khandual * the DSCR through privilege level only SPR number (0x11) 1753c952c1c4SAnshuman Khandual * which is emulated through illegal instruction exception. 1754c952c1c4SAnshuman Khandual * We always leave HFSCR DSCR set. 17552517617eSMichael Neuling */ 1756c952c1c4SAnshuman Khandual if (get_user(instword, (u32 __user *)(regs->nip))) { 1757c952c1c4SAnshuman Khandual pr_err("Failed to fetch the user instruction\n"); 1758c952c1c4SAnshuman Khandual return; 1759c952c1c4SAnshuman Khandual } 1760c952c1c4SAnshuman Khandual 1761c952c1c4SAnshuman Khandual /* Write into DSCR (mtspr 0x03, RS) */ 1762c952c1c4SAnshuman Khandual if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK) 1763c952c1c4SAnshuman Khandual == PPC_INST_MTSPR_DSCR_USER) { 1764c952c1c4SAnshuman Khandual rd = (instword >> 21) & 0x1f; 1765c952c1c4SAnshuman Khandual current->thread.dscr = regs->gpr[rd]; 17662517617eSMichael Neuling current->thread.dscr_inherit = 1; 1767b57bd2deSMichael Neuling current->thread.fscr |= FSCR_DSCR; 1768b57bd2deSMichael Neuling mtspr(SPRN_FSCR, current->thread.fscr); 1769c952c1c4SAnshuman Khandual } 1770c952c1c4SAnshuman Khandual 1771c952c1c4SAnshuman Khandual /* Read from DSCR (mfspr RT, 0x03) */ 1772c952c1c4SAnshuman Khandual if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK) 1773c952c1c4SAnshuman Khandual == PPC_INST_MFSPR_DSCR_USER) { 1774c952c1c4SAnshuman Khandual if (emulate_instruction(regs)) { 1775c952c1c4SAnshuman Khandual pr_err("DSCR based mfspr emulation failed\n"); 1776c952c1c4SAnshuman Khandual return; 1777c952c1c4SAnshuman Khandual } 1778c952c1c4SAnshuman Khandual regs->nip += 4; 1779c952c1c4SAnshuman Khandual emulate_single_step(regs); 1780c952c1c4SAnshuman Khandual } 17812517617eSMichael Neuling return; 1782b14b6260SMichael Ellerman } 1783b14b6260SMichael Ellerman 1784172f7aaaSCyril Bur if (status == FSCR_TM_LG) { 1785172f7aaaSCyril Bur /* 1786172f7aaaSCyril Bur * If we're here then the hardware is TM aware because it 1787172f7aaaSCyril Bur * generated an exception with FSCR_TM set. 1788172f7aaaSCyril Bur * 1789172f7aaaSCyril Bur * If cpu_has_feature(CPU_FTR_TM) is false, then either firmware 1790172f7aaaSCyril Bur * told us not to do TM, or the kernel is not built with TM 1791172f7aaaSCyril Bur * support.
1792172f7aaaSCyril Bur * 1793172f7aaaSCyril Bur * If both of those things are true, then userspace can spam the 1794172f7aaaSCyril Bur * console by triggering the printk() below just by continually 1795172f7aaaSCyril Bur * doing tbegin (or any TM instruction). So in that case just 1796172f7aaaSCyril Bur * send the process a SIGILL immediately. 1797172f7aaaSCyril Bur */ 1798172f7aaaSCyril Bur if (!cpu_has_feature(CPU_FTR_TM)) 1799172f7aaaSCyril Bur goto out; 1800172f7aaaSCyril Bur 1801172f7aaaSCyril Bur tm_unavailable(regs); 1802172f7aaaSCyril Bur return; 1803172f7aaaSCyril Bur } 1804172f7aaaSCyril Bur 180593c2ec0fSBalbir Singh pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n", 180693c2ec0fSBalbir Singh hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr); 1807d0c0c9a1SMichael Neuling 1808172f7aaaSCyril Bur out: 1809d0c0c9a1SMichael Neuling _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1810d0c0c9a1SMichael Neuling } 18112517617eSMichael Neuling #endif 1812d0c0c9a1SMichael Neuling 1813f54db641SMichael Neuling #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1814f54db641SMichael Neuling 1815f54db641SMichael Neuling void fp_unavailable_tm(struct pt_regs *regs) 1816f54db641SMichael Neuling { 1817f54db641SMichael Neuling /* Note: This does not handle any kind of FP laziness. */ 1818f54db641SMichael Neuling 1819f54db641SMichael Neuling TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n", 1820f54db641SMichael Neuling regs->nip, regs->msr); 1821f54db641SMichael Neuling 1822f54db641SMichael Neuling /* We can only have got here if the task started using FP after 1823f54db641SMichael Neuling * beginning the transaction. So, the transactional regs are just a 1824f54db641SMichael Neuling * copy of the checkpointed ones. But, we still need to recheckpoint 1825f54db641SMichael Neuling * as we're enabling FP for the process; it will return, abort the 1826f54db641SMichael Neuling * transaction, and probably retry but now with FP enabled. So the 1827f54db641SMichael Neuling * checkpointed FP registers need to be loaded. 1828f54db641SMichael Neuling */ 1829d31626f7SPaul Mackerras tm_reclaim_current(TM_CAUSE_FAC_UNAV); 183096695563SBreno Leitao 183196695563SBreno Leitao /* 183296695563SBreno Leitao * Reclaim initially saved out bogus (lazy) FPRs to ckfp_state, and 183396695563SBreno Leitao * then it was overwritten by the thr->fp_state by tm_reclaim_thread(). 183496695563SBreno Leitao * 183596695563SBreno Leitao * At this point, ck{fp,vr}_state contains the exact values we want to 183696695563SBreno Leitao * recheckpoint. 183796695563SBreno Leitao */ 1838f54db641SMichael Neuling 1839f54db641SMichael Neuling /* Enable FP for the task: */ 1840a7771176SCyril Bur current->thread.load_fp = 1; 1841f54db641SMichael Neuling 184296695563SBreno Leitao /* 184396695563SBreno Leitao * Recheckpoint all the checkpointed ckpt, ck{fp, vr}_state registers. 1844f54db641SMichael Neuling */ 1845eb5c3f1cSCyril Bur tm_recheckpoint(&current->thread); 1846f54db641SMichael Neuling } 1847f54db641SMichael Neuling 1848f54db641SMichael Neuling void altivec_unavailable_tm(struct pt_regs *regs) 1849f54db641SMichael Neuling { 1850f54db641SMichael Neuling /* See the comments in fp_unavailable_tm(). This function operates 1851f54db641SMichael Neuling * the same way.
1852f54db641SMichael Neuling */ 1853f54db641SMichael Neuling 1854f54db641SMichael Neuling TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx," 1855f54db641SMichael Neuling "MSR=%lx\n", 1856f54db641SMichael Neuling regs->nip, regs->msr); 1857d31626f7SPaul Mackerras tm_reclaim_current(TM_CAUSE_FAC_UNAV); 1858a7771176SCyril Bur current->thread.load_vec = 1; 1859eb5c3f1cSCyril Bur tm_recheckpoint(&current->thread); 1860f54db641SMichael Neuling current->thread.used_vr = 1; 18613ac8ff1cSPaul Mackerras } 18623ac8ff1cSPaul Mackerras 1863f54db641SMichael Neuling void vsx_unavailable_tm(struct pt_regs *regs) 1864f54db641SMichael Neuling { 1865f54db641SMichael Neuling /* See the comments in fp_unavailable_tm(). This works similarly, 1866f54db641SMichael Neuling * though we're loading both FP and VEC registers in here. 1867f54db641SMichael Neuling * 1868f54db641SMichael Neuling * If FP isn't in use, load FP regs. If VEC isn't in use, load VEC 1869f54db641SMichael Neuling * regs. Either way, set MSR_VSX. 1870f54db641SMichael Neuling */ 1871f54db641SMichael Neuling 1872f54db641SMichael Neuling TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx," 1873f54db641SMichael Neuling "MSR=%lx\n", 1874f54db641SMichael Neuling regs->nip, regs->msr); 1875f54db641SMichael Neuling 18763ac8ff1cSPaul Mackerras current->thread.used_vsr = 1; 18773ac8ff1cSPaul Mackerras 1878f54db641SMichael Neuling /* This reclaims FP and/or VR regs if they're already enabled */ 1879d31626f7SPaul Mackerras tm_reclaim_current(TM_CAUSE_FAC_UNAV); 1880f54db641SMichael Neuling 1881a7771176SCyril Bur current->thread.load_vec = 1; 1882a7771176SCyril Bur current->thread.load_fp = 1; 18833ac8ff1cSPaul Mackerras 1884eb5c3f1cSCyril Bur tm_recheckpoint(&current->thread); 1885f54db641SMichael Neuling } 1886f54db641SMichael Neuling #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 1887f54db641SMichael Neuling 1888dc1c1ca3SStephen Rothwell void performance_monitor_exception(struct pt_regs *regs) 1889dc1c1ca3SStephen Rothwell { 189069111bacSChristoph Lameter __this_cpu_inc(irq_stat.pmu_irqs); 189189713ed1SAnton Blanchard 1892dc1c1ca3SStephen Rothwell perf_irq(regs); 1893dc1c1ca3SStephen Rothwell } 1894dc1c1ca3SStephen Rothwell 1895172ae2e7SDave Kleikamp #ifdef CONFIG_PPC_ADV_DEBUG_REGS 18963bffb652SDave Kleikamp static void handle_debug(struct pt_regs *regs, unsigned long debug_status) 18973bffb652SDave Kleikamp { 18983bffb652SDave Kleikamp int changed = 0; 18993bffb652SDave Kleikamp /* 19003bffb652SDave Kleikamp * Determine the cause of the debug event, clear the 19013bffb652SDave Kleikamp * event flags and send a trap to the handler. Torez 19023bffb652SDave Kleikamp */ 19033bffb652SDave Kleikamp if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) { 19043bffb652SDave Kleikamp dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W); 19053bffb652SDave Kleikamp #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE 190651ae8d4aSBharat Bhushan current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE; 19073bffb652SDave Kleikamp #endif 190847355040SEric W. Biederman do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, 19093bffb652SDave Kleikamp 5); 19103bffb652SDave Kleikamp changed |= 0x01; 19113bffb652SDave Kleikamp } else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) { 19123bffb652SDave Kleikamp dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W); 191347355040SEric W.
Biederman do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, 19143bffb652SDave Kleikamp 6); 19153bffb652SDave Kleikamp changed |= 0x01; 19163bffb652SDave Kleikamp } else if (debug_status & DBSR_IAC1) { 191751ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_IAC1; 19183bffb652SDave Kleikamp dbcr_iac_range(current) &= ~DBCR_IAC12MODE; 191947355040SEric W. Biederman do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, 19203bffb652SDave Kleikamp 1); 19213bffb652SDave Kleikamp changed |= 0x01; 19223bffb652SDave Kleikamp } else if (debug_status & DBSR_IAC2) { 192351ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_IAC2; 192447355040SEric W. Biederman do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, 19253bffb652SDave Kleikamp 2); 19263bffb652SDave Kleikamp changed |= 0x01; 19273bffb652SDave Kleikamp } else if (debug_status & DBSR_IAC3) { 192851ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_IAC3; 19293bffb652SDave Kleikamp dbcr_iac_range(current) &= ~DBCR_IAC34MODE; 193047355040SEric W. Biederman do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, 19313bffb652SDave Kleikamp 3); 19323bffb652SDave Kleikamp changed |= 0x01; 19333bffb652SDave Kleikamp } else if (debug_status & DBSR_IAC4) { 193451ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_IAC4; 193547355040SEric W. Biederman do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, 19363bffb652SDave Kleikamp 4); 19373bffb652SDave Kleikamp changed |= 0x01; 19383bffb652SDave Kleikamp } 19393bffb652SDave Kleikamp /* 19403bffb652SDave Kleikamp * At the point this routine was called, the MSR(DE) was turned off. 19413bffb652SDave Kleikamp * Check all other debug flags and see if that bit needs to be turned 19423bffb652SDave Kleikamp * back on or not. 19433bffb652SDave Kleikamp */ 194451ae8d4aSBharat Bhushan if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0, 194551ae8d4aSBharat Bhushan current->thread.debug.dbcr1)) 19463bffb652SDave Kleikamp regs->msr |= MSR_DE; 19473bffb652SDave Kleikamp else 19483bffb652SDave Kleikamp /* Make sure the IDM flag is off */ 194951ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_IDM; 19503bffb652SDave Kleikamp 19513bffb652SDave Kleikamp if (changed & 0x01) 195251ae8d4aSBharat Bhushan mtspr(SPRN_DBCR0, current->thread.debug.dbcr0); 19533bffb652SDave Kleikamp } 195414cf11afSPaul Mackerras 195503465f89SNicholas Piggin void DebugException(struct pt_regs *regs, unsigned long debug_status) 195614cf11afSPaul Mackerras { 195751ae8d4aSBharat Bhushan current->thread.debug.dbsr = debug_status; 19583bffb652SDave Kleikamp 1959ec097c84SRoland McGrath /* Hack alert: On BookE, Branch Taken stops on the branch itself, while 1960ec097c84SRoland McGrath * on server, it stops on the target of the branch. 
In order to simulate 1961ec097c84SRoland McGrath * the server behaviour, we thus restart right away with a single step 1962ec097c84SRoland McGrath * instead of stopping here when hitting a BT 1963ec097c84SRoland McGrath */ 1964ec097c84SRoland McGrath if (debug_status & DBSR_BT) { 1965ec097c84SRoland McGrath regs->msr &= ~MSR_DE; 1966ec097c84SRoland McGrath 1967ec097c84SRoland McGrath /* Disable BT */ 1968ec097c84SRoland McGrath mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT); 1969ec097c84SRoland McGrath /* Clear the BT event */ 1970ec097c84SRoland McGrath mtspr(SPRN_DBSR, DBSR_BT); 1971ec097c84SRoland McGrath 1972ec097c84SRoland McGrath /* Do the single step trick only when coming from userspace */ 1973ec097c84SRoland McGrath if (user_mode(regs)) { 197451ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_BT; 197551ae8d4aSBharat Bhushan current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC; 1976ec097c84SRoland McGrath regs->msr |= MSR_DE; 1977ec097c84SRoland McGrath return; 1978ec097c84SRoland McGrath } 1979ec097c84SRoland McGrath 19806cc89badSNaveen N. Rao if (kprobe_post_handler(regs)) 19816cc89badSNaveen N. Rao return; 19826cc89badSNaveen N. Rao 1983ec097c84SRoland McGrath if (notify_die(DIE_SSTEP, "block_step", regs, 5, 1984ec097c84SRoland McGrath 5, SIGTRAP) == NOTIFY_STOP) { 1985ec097c84SRoland McGrath return; 1986ec097c84SRoland McGrath } 1987ec097c84SRoland McGrath if (debugger_sstep(regs)) 1988ec097c84SRoland McGrath return; 1989ec097c84SRoland McGrath } else if (debug_status & DBSR_IC) { /* Instruction complete */ 199014cf11afSPaul Mackerras regs->msr &= ~MSR_DE; 1991f8279621SKumar Gala 199214cf11afSPaul Mackerras /* Disable instruction completion */ 199314cf11afSPaul Mackerras mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC); 199414cf11afSPaul Mackerras /* Clear the instruction completion event */ 199514cf11afSPaul Mackerras mtspr(SPRN_DBSR, DBSR_IC); 1996f8279621SKumar Gala 19976cc89badSNaveen N. Rao if (kprobe_post_handler(regs)) 19986cc89badSNaveen N. Rao return; 19996cc89badSNaveen N. 
Rao 2000f8279621SKumar Gala if (notify_die(DIE_SSTEP, "single_step", regs, 5, 2001f8279621SKumar Gala 5, SIGTRAP) == NOTIFY_STOP) { 200214cf11afSPaul Mackerras return; 200314cf11afSPaul Mackerras } 2004f8279621SKumar Gala 2005f8279621SKumar Gala if (debugger_sstep(regs)) 2006f8279621SKumar Gala return; 2007f8279621SKumar Gala 20083bffb652SDave Kleikamp if (user_mode(regs)) { 200951ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_IC; 201051ae8d4aSBharat Bhushan if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0, 201151ae8d4aSBharat Bhushan current->thread.debug.dbcr1)) 20123bffb652SDave Kleikamp regs->msr |= MSR_DE; 20133bffb652SDave Kleikamp else 20143bffb652SDave Kleikamp /* Make sure the IDM bit is off */ 201551ae8d4aSBharat Bhushan current->thread.debug.dbcr0 &= ~DBCR0_IDM; 20163bffb652SDave Kleikamp } 2017f8279621SKumar Gala 2018f8279621SKumar Gala _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); 20193bffb652SDave Kleikamp } else 20203bffb652SDave Kleikamp handle_debug(regs, debug_status); 202114cf11afSPaul Mackerras } 202203465f89SNicholas Piggin NOKPROBE_SYMBOL(DebugException); 2023172ae2e7SDave Kleikamp #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ 202414cf11afSPaul Mackerras 202514cf11afSPaul Mackerras #if !defined(CONFIG_TAU_INT) 202614cf11afSPaul Mackerras void TAUException(struct pt_regs *regs) 202714cf11afSPaul Mackerras { 202814cf11afSPaul Mackerras printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n", 202914cf11afSPaul Mackerras regs->nip, regs->msr, regs->trap, print_tainted()); 203014cf11afSPaul Mackerras } 203114cf11afSPaul Mackerras #endif /* CONFIG_INT_TAU */ 203214cf11afSPaul Mackerras 203314cf11afSPaul Mackerras #ifdef CONFIG_ALTIVEC 2034dc1c1ca3SStephen Rothwell void altivec_assist_exception(struct pt_regs *regs) 203514cf11afSPaul Mackerras { 203614cf11afSPaul Mackerras int err; 203714cf11afSPaul Mackerras 203814cf11afSPaul Mackerras if (!user_mode(regs)) { 203914cf11afSPaul Mackerras printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode" 204014cf11afSPaul Mackerras " at %lx\n", regs->nip); 20418dad3f92SPaul Mackerras die("Kernel VMX/Altivec assist exception", regs, SIGILL); 204214cf11afSPaul Mackerras } 204314cf11afSPaul Mackerras 2044dc1c1ca3SStephen Rothwell flush_altivec_to_thread(current); 2045dc1c1ca3SStephen Rothwell 2046eecff81dSAnton Blanchard PPC_WARN_EMULATED(altivec, regs); 204714cf11afSPaul Mackerras err = emulate_altivec(regs); 204814cf11afSPaul Mackerras if (err == 0) { 204914cf11afSPaul Mackerras regs->nip += 4; /* skip emulated instruction */ 205014cf11afSPaul Mackerras emulate_single_step(regs); 205114cf11afSPaul Mackerras return; 205214cf11afSPaul Mackerras } 205314cf11afSPaul Mackerras 205414cf11afSPaul Mackerras if (err == -EFAULT) { 205514cf11afSPaul Mackerras /* got an error reading the instruction */ 205614cf11afSPaul Mackerras _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); 205714cf11afSPaul Mackerras } else { 205814cf11afSPaul Mackerras /* didn't recognize the instruction */ 205914cf11afSPaul Mackerras /* XXX quick hack for now: set the non-Java bit in the VSCR */ 206076462232SChristian Dietrich printk_ratelimited(KERN_ERR "Unrecognized altivec instruction " 206114cf11afSPaul Mackerras "in %s at %lx\n", current->comm, regs->nip); 2062de79f7b9SPaul Mackerras current->thread.vr_state.vscr.u[3] |= 0x10000; 206314cf11afSPaul Mackerras } 206414cf11afSPaul Mackerras } 206514cf11afSPaul Mackerras #endif /* CONFIG_ALTIVEC */ 206614cf11afSPaul Mackerras 206714cf11afSPaul Mackerras #ifdef CONFIG_FSL_BOOKE 206814cf11afSPaul Mackerras 
void CacheLockingException(struct pt_regs *regs, unsigned long address, 206914cf11afSPaul Mackerras unsigned long error_code) 207014cf11afSPaul Mackerras { 207114cf11afSPaul Mackerras /* We treat cache locking instructions from the user 207214cf11afSPaul Mackerras * as priv ops, in the future we could try to do 207314cf11afSPaul Mackerras * something smarter 207414cf11afSPaul Mackerras */ 207514cf11afSPaul Mackerras if (error_code & (ESR_DLK|ESR_ILK)) 207614cf11afSPaul Mackerras _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); 207714cf11afSPaul Mackerras return; 207814cf11afSPaul Mackerras } 207914cf11afSPaul Mackerras #endif /* CONFIG_FSL_BOOKE */ 208014cf11afSPaul Mackerras 208114cf11afSPaul Mackerras #ifdef CONFIG_SPE 208214cf11afSPaul Mackerras void SPEFloatingPointException(struct pt_regs *regs) 208314cf11afSPaul Mackerras { 20846a800f36SLiu Yu extern int do_spe_mathemu(struct pt_regs *regs); 208514cf11afSPaul Mackerras unsigned long spefscr; 208614cf11afSPaul Mackerras int fpexc_mode; 2087aeb1c0f6SEric W. Biederman int code = FPE_FLTUNK; 20886a800f36SLiu Yu int err; 20896a800f36SLiu Yu 2090685659eeSyu liu flush_spe_to_thread(current); 209114cf11afSPaul Mackerras 209214cf11afSPaul Mackerras spefscr = current->thread.spefscr; 209314cf11afSPaul Mackerras fpexc_mode = current->thread.fpexc_mode; 209414cf11afSPaul Mackerras 209514cf11afSPaul Mackerras if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) { 209614cf11afSPaul Mackerras code = FPE_FLTOVF; 209714cf11afSPaul Mackerras } 209814cf11afSPaul Mackerras else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) { 209914cf11afSPaul Mackerras code = FPE_FLTUND; 210014cf11afSPaul Mackerras } 210114cf11afSPaul Mackerras else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV)) 210214cf11afSPaul Mackerras code = FPE_FLTDIV; 210314cf11afSPaul Mackerras else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) { 210414cf11afSPaul Mackerras code = FPE_FLTINV; 210514cf11afSPaul Mackerras } 210614cf11afSPaul Mackerras else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES)) 210714cf11afSPaul Mackerras code = FPE_FLTRES; 210814cf11afSPaul Mackerras 21096a800f36SLiu Yu err = do_spe_mathemu(regs); 21106a800f36SLiu Yu if (err == 0) { 21116a800f36SLiu Yu regs->nip += 4; /* skip emulated instruction */ 21126a800f36SLiu Yu emulate_single_step(regs); 211314cf11afSPaul Mackerras return; 211414cf11afSPaul Mackerras } 21156a800f36SLiu Yu 21166a800f36SLiu Yu if (err == -EFAULT) { 21176a800f36SLiu Yu /* got an error reading the instruction */ 21186a800f36SLiu Yu _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); 21196a800f36SLiu Yu } else if (err == -EINVAL) { 21206a800f36SLiu Yu /* didn't recognize the instruction */ 21216a800f36SLiu Yu printk(KERN_ERR "unrecognized spe instruction " 21226a800f36SLiu Yu "in %s at %lx\n", current->comm, regs->nip); 21236a800f36SLiu Yu } else { 21246a800f36SLiu Yu _exception(SIGFPE, regs, code, regs->nip); 21256a800f36SLiu Yu } 21266a800f36SLiu Yu 21276a800f36SLiu Yu return; 21286a800f36SLiu Yu } 21296a800f36SLiu Yu 21306a800f36SLiu Yu void SPEFloatingPointRoundException(struct pt_regs *regs) 21316a800f36SLiu Yu { 21326a800f36SLiu Yu extern int speround_handler(struct pt_regs *regs); 21336a800f36SLiu Yu int err; 21346a800f36SLiu Yu 21356a800f36SLiu Yu preempt_disable(); 21366a800f36SLiu Yu if (regs->msr & MSR_SPE) 21376a800f36SLiu Yu giveup_spe(current); 21386a800f36SLiu Yu preempt_enable(); 21396a800f36SLiu Yu 21406a800f36SLiu Yu regs->nip -= 4; 21416a800f36SLiu Yu 
err = speround_handler(regs); 21426a800f36SLiu Yu if (err == 0) { 21436a800f36SLiu Yu regs->nip += 4; /* skip emulated instruction */ 21446a800f36SLiu Yu emulate_single_step(regs); 21456a800f36SLiu Yu return; 21466a800f36SLiu Yu } 21476a800f36SLiu Yu 21486a800f36SLiu Yu if (err == -EFAULT) { 21496a800f36SLiu Yu /* got an error reading the instruction */ 21506a800f36SLiu Yu _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); 21516a800f36SLiu Yu } else if (err == -EINVAL) { 21526a800f36SLiu Yu /* didn't recognize the instruction */ 21536a800f36SLiu Yu printk(KERN_ERR "unrecognized spe instruction " 21546a800f36SLiu Yu "in %s at %lx\n", current->comm, regs->nip); 21556a800f36SLiu Yu } else { 2156aeb1c0f6SEric W. Biederman _exception(SIGFPE, regs, FPE_FLTUNK, regs->nip); 21576a800f36SLiu Yu return; 21586a800f36SLiu Yu } 21596a800f36SLiu Yu } 216014cf11afSPaul Mackerras #endif 216114cf11afSPaul Mackerras 2162dc1c1ca3SStephen Rothwell /* 2163dc1c1ca3SStephen Rothwell * We enter here if we get an unrecoverable exception, that is, one 2164dc1c1ca3SStephen Rothwell * that happened at a point where the RI (recoverable interrupt) bit 2165dc1c1ca3SStephen Rothwell * in the MSR is 0. This indicates that SRR0/1 are live, and that 2166dc1c1ca3SStephen Rothwell * we therefore lost state by taking this exception. 2167dc1c1ca3SStephen Rothwell */ 2168dc1c1ca3SStephen Rothwell void unrecoverable_exception(struct pt_regs *regs) 2169dc1c1ca3SStephen Rothwell { 217051423a9cSChristophe Leroy pr_emerg("Unrecoverable exception %lx at %lx (msr=%lx)\n", 217151423a9cSChristophe Leroy regs->trap, regs->nip, regs->msr); 2172dc1c1ca3SStephen Rothwell die("Unrecoverable exception", regs, SIGABRT); 2173dc1c1ca3SStephen Rothwell } 217415770a13SNaveen N. Rao NOKPROBE_SYMBOL(unrecoverable_exception); 2175dc1c1ca3SStephen Rothwell 21761e18c17aSJason Gunthorpe #if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x) 217714cf11afSPaul Mackerras /* 217814cf11afSPaul Mackerras * Default handler for a Watchdog exception, 217914cf11afSPaul Mackerras * spins until a reboot occurs 218014cf11afSPaul Mackerras */ 218114cf11afSPaul Mackerras void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs) 218214cf11afSPaul Mackerras { 218314cf11afSPaul Mackerras /* Generic WatchdogHandler, implement your own */ 218414cf11afSPaul Mackerras mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE)); 218514cf11afSPaul Mackerras return; 218614cf11afSPaul Mackerras } 218714cf11afSPaul Mackerras 218814cf11afSPaul Mackerras void WatchdogException(struct pt_regs *regs) 218914cf11afSPaul Mackerras { 219014cf11afSPaul Mackerras printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n"); 219114cf11afSPaul Mackerras WatchdogHandler(regs); 219214cf11afSPaul Mackerras } 219314cf11afSPaul Mackerras #endif 2194dc1c1ca3SStephen Rothwell 2195dc1c1ca3SStephen Rothwell /* 2196dc1c1ca3SStephen Rothwell * We enter here if we discover during exception entry that we are 2197dc1c1ca3SStephen Rothwell * running in supervisor mode with a userspace value in the stack pointer. 2198dc1c1ca3SStephen Rothwell */ 2199dc1c1ca3SStephen Rothwell void kernel_bad_stack(struct pt_regs *regs) 2200dc1c1ca3SStephen Rothwell { 2201dc1c1ca3SStephen Rothwell printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n", 2202dc1c1ca3SStephen Rothwell regs->gpr[1], regs->nip); 2203dc1c1ca3SStephen Rothwell die("Bad kernel stack pointer", regs, SIGABRT); 2204dc1c1ca3SStephen Rothwell } 220515770a13SNaveen N. 
Rao NOKPROBE_SYMBOL(kernel_bad_stack); 220614cf11afSPaul Mackerras 220714cf11afSPaul Mackerras void __init trap_init(void) 220814cf11afSPaul Mackerras { 220914cf11afSPaul Mackerras } 221080947e7cSGeert Uytterhoeven 221180947e7cSGeert Uytterhoeven 221280947e7cSGeert Uytterhoeven #ifdef CONFIG_PPC_EMULATED_STATS 221380947e7cSGeert Uytterhoeven 221480947e7cSGeert Uytterhoeven #define WARN_EMULATED_SETUP(type) .type = { .name = #type } 221580947e7cSGeert Uytterhoeven 221680947e7cSGeert Uytterhoeven struct ppc_emulated ppc_emulated = { 221780947e7cSGeert Uytterhoeven #ifdef CONFIG_ALTIVEC 221880947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(altivec), 221980947e7cSGeert Uytterhoeven #endif 222080947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(dcba), 222180947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(dcbz), 222280947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(fp_pair), 222380947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(isel), 222480947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(mcrxr), 222580947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(mfpvr), 222680947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(multiple), 222780947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(popcntb), 222880947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(spe), 222980947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(string), 2230a3821b2aSScott Wood WARN_EMULATED_SETUP(sync), 223180947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(unaligned), 223280947e7cSGeert Uytterhoeven #ifdef CONFIG_MATH_EMULATION 223380947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(math), 223480947e7cSGeert Uytterhoeven #endif 223580947e7cSGeert Uytterhoeven #ifdef CONFIG_VSX 223680947e7cSGeert Uytterhoeven WARN_EMULATED_SETUP(vsx), 223780947e7cSGeert Uytterhoeven #endif 2238efcac658SAlexey Kardashevskiy #ifdef CONFIG_PPC64 2239efcac658SAlexey Kardashevskiy WARN_EMULATED_SETUP(mfdscr), 2240efcac658SAlexey Kardashevskiy WARN_EMULATED_SETUP(mtdscr), 2241f83319d7SAnton Blanchard WARN_EMULATED_SETUP(lq_stq), 22425080332cSMichael Neuling WARN_EMULATED_SETUP(lxvw4x), 22435080332cSMichael Neuling WARN_EMULATED_SETUP(lxvh8x), 22445080332cSMichael Neuling WARN_EMULATED_SETUP(lxvd2x), 22455080332cSMichael Neuling WARN_EMULATED_SETUP(lxvb16x), 2246efcac658SAlexey Kardashevskiy #endif 224780947e7cSGeert Uytterhoeven }; 224880947e7cSGeert Uytterhoeven 224980947e7cSGeert Uytterhoeven u32 ppc_warn_emulated; 225080947e7cSGeert Uytterhoeven 225180947e7cSGeert Uytterhoeven void ppc_warn_emulated_print(const char *type) 225280947e7cSGeert Uytterhoeven { 225376462232SChristian Dietrich pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm, 225480947e7cSGeert Uytterhoeven type); 225580947e7cSGeert Uytterhoeven } 225680947e7cSGeert Uytterhoeven 225780947e7cSGeert Uytterhoeven static int __init ppc_warn_emulated_init(void) 225880947e7cSGeert Uytterhoeven { 225980947e7cSGeert Uytterhoeven struct dentry *dir, *d; 226080947e7cSGeert Uytterhoeven unsigned int i; 226180947e7cSGeert Uytterhoeven struct ppc_emulated_entry *entries = (void *)&ppc_emulated; 226280947e7cSGeert Uytterhoeven 226380947e7cSGeert Uytterhoeven if (!powerpc_debugfs_root) 226480947e7cSGeert Uytterhoeven return -ENODEV; 226580947e7cSGeert Uytterhoeven 226680947e7cSGeert Uytterhoeven dir = debugfs_create_dir("emulated_instructions", 226780947e7cSGeert Uytterhoeven powerpc_debugfs_root); 226880947e7cSGeert Uytterhoeven if (!dir) 226980947e7cSGeert Uytterhoeven return -ENOMEM; 227080947e7cSGeert Uytterhoeven 227157ad583fSRussell Currey d = debugfs_create_u32("do_warn", 0644, dir, 227280947e7cSGeert Uytterhoeven 
&ppc_warn_emulated); 227380947e7cSGeert Uytterhoeven if (!d) 227480947e7cSGeert Uytterhoeven goto fail; 227580947e7cSGeert Uytterhoeven 227680947e7cSGeert Uytterhoeven for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) { 227757ad583fSRussell Currey d = debugfs_create_u32(entries[i].name, 0644, dir, 227880947e7cSGeert Uytterhoeven (u32 *)&entries[i].val.counter); 227980947e7cSGeert Uytterhoeven if (!d) 228080947e7cSGeert Uytterhoeven goto fail; 228180947e7cSGeert Uytterhoeven } 228280947e7cSGeert Uytterhoeven 228380947e7cSGeert Uytterhoeven return 0; 228480947e7cSGeert Uytterhoeven 228580947e7cSGeert Uytterhoeven fail: 228680947e7cSGeert Uytterhoeven debugfs_remove_recursive(dir); 228780947e7cSGeert Uytterhoeven return -ENOMEM; 228880947e7cSGeert Uytterhoeven } 228980947e7cSGeert Uytterhoeven 229080947e7cSGeert Uytterhoeven device_initcall(ppc_warn_emulated_init); 229180947e7cSGeert Uytterhoeven 229280947e7cSGeert Uytterhoeven #endif /* CONFIG_PPC_EMULATED_STATS */ 2293
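/*
 * Illustrative sketch, kept out of the build with #if 0: a hypothetical,
 * standalone userspace program walking through the same SWAR bit-twiddling
 * that emulate_popcntb_inst() uses above to count the set bits within each
 * byte of a 64-bit register, as the popcntb instruction defines. The helper
 * name and the test value are assumptions made for demonstration only;
 * nothing here is a kernel interface.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint64_t popcntb_sw(uint64_t v)
{
	/* fold adjacent bit pairs into 2-bit partial counts */
	v = v - ((v >> 1) & 0x5555555555555555ULL);
	/* fold the 2-bit counts into a 4-bit count per nibble */
	v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
	/* fold the nibble counts into one count per byte (max 8 fits in 4 bits) */
	v = (v + (v >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
	return v;
}

int main(void)
{
	/* byte 1 is 0xff (8 bits set), byte 0 is 0x03 (2 bits set) */
	printf("%016llx\n", (unsigned long long)popcntb_sw(0x000000000000ff03ULL));
	/* prints 0000000000000802 */
	return 0;
}
#endif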