/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright 2007-2010 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Modified by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras (paulus@samba.org)
 */

/*
 * This file handles the architecture-dependent parts of hardware exceptions
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/extable.h>
#include <linux/module.h>	/* print_modules */
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/debugfs.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>

#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#include <asm/reg.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
#include <asm/tm.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/rio.h>
#include <asm/fadump.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#include <asm/asm-prototypes.h>
#include <asm/hmi.h>
#include <sysdev/fsl_pci.h>
#include <asm/kprobes.h>

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_break_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif
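/*
 * The __debugger* hooks above are filled in by a debugger (e.g. xmon)
 * when it attaches; each one returns non-zero to indicate that it has
 * handled the exception.
 */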
/* Transactional Memory trap debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while(0)
#endif

/*
 * Trap & Exception support
 */

#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
	mutex_lock(&pmac_backlight_mutex);
	if (pmac_backlight) {
		struct backlight_properties *props;

		props = &pmac_backlight->props;
		props->brightness = props->max_brightness;
		props->power = FB_BLANK_UNBLANK;
		backlight_update_status(pmac_backlight);
	}
	mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;

static unsigned long oops_begin(struct pt_regs *regs)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	if (machine_is(powermac))
		pmac_backlight_unblank();
	return flags;
}
NOKPROBE_SYMBOL(oops_begin);

static void oops_end(unsigned long flags, struct pt_regs *regs,
		     int signr)
{
	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	oops_exit();
	printk("\n");
	if (!die_nest_count) {
		/* Nest count reaches zero, release the lock. */
		die_owner = -1;
		arch_spin_unlock(&die_lock);
	}
	raw_local_irq_restore(flags);

	crash_fadump(regs, "die oops");

	/*
	 * A system reset (0x100) is a request to dump, so we always send
	 * it through the crashdump code.
	 */
	if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) {
		crash_kexec(regs);

		/*
		 * We aren't the primary crash CPU. We need to send it
		 * to a holding pattern to avoid it ending up in the panic
		 * code.
		 */
		crash_kexec_secondary(regs);
	}

	if (!signr)
		return;

	/*
	 * While our oops output is serialised by a spinlock, output
	 * from panic() called below can race and corrupt it. If we
	 * know we are going to panic, delay for 1 second so we have a
	 * chance to get clean backtraces from all CPUs that are oopsing.
	 */
	if (in_interrupt() || panic_on_oops || !current->pid ||
	    is_global_init(current)) {
		mdelay(MSEC_PER_SEC);
	}

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);

static int __die(const char *str, struct pt_regs *regs, long err)
{
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP NR_CPUS=%d ", NR_CPUS);
#endif
	if (debug_pagealloc_enabled())
		printk("DEBUG_PAGEALLOC ");
#ifdef CONFIG_NUMA
	printk("NUMA ");
#endif
	printk("%s\n", ppc_md.name ? ppc_md.name : "");

	if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
		return 1;

	print_modules();
	show_regs(regs);

	return 0;
}
NOKPROBE_SYMBOL(__die);
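/*
 * die() is the common fatal-trap reporting path: an attached debugger
 * gets first refusal, then the oops output is serialised by
 * oops_begin()/oops_end().  If __die() reports that a DIE_OOPS
 * notifier consumed the event, the signal is suppressed by passing
 * signr == 0 down to oops_end().
 */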
void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags;

	if (debugger(regs))
		return;

	flags = oops_begin(regs);
	if (__die(str, regs, err))
		err = 0;
	oops_end(flags, regs, err);
}

void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs, siginfo_t *info)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGTRAP;
	info->si_code = TRAP_TRACE;
	info->si_addr = (void __user *)regs->nip;
}

void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	siginfo_t info;
	const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
		"at %08lx nip %08lx lr %08lx code %x\n";
	const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
		"at %016lx nip %016lx lr %016lx code %x\n";

	if (!user_mode(regs)) {
		die("Exception in kernel mode", regs, signr);
		return;
	}

	if (show_unhandled_signals && unhandled_signal(current, signr)) {
		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
				   current->comm, current->pid, signr,
				   addr, regs->nip, regs->link, code);
	}

	if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
		local_irq_enable();

	current->thread.trap_nr = code;
	memset(&info, 0, sizeof(info));
	info.si_signo = signr;
	info.si_code = code;
	info.si_addr = (void __user *) addr;
	force_sig_info(signr, &info, current);
}

void system_reset_exception(struct pt_regs *regs)
{
	/* See if any machine dependent calls */
	if (ppc_md.system_reset_exception) {
		if (ppc_md.system_reset_exception(regs))
			return;
	}

	die("System Reset", regs, SIGABRT);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable System Reset");

	/* What should we do here? We could issue a shutdown or hard reset. */
}

#ifdef CONFIG_PPC64
/*
 * This function is called in real mode. Strictly no printk's please.
 *
 * regs->nip and regs->msr contain SRR0 and SRR1.
 */
long machine_check_early(struct pt_regs *regs)
{
	long handled = 0;

	__this_cpu_inc(irq_stat.mce_exceptions);

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
		handled = cur_cpu_spec->machine_check_early(regs);
	return handled;
}

long hmi_exception_realmode(struct pt_regs *regs)
{
	__this_cpu_inc(irq_stat.hmi_exceptions);

	wait_for_subcore_guest_exit();

	if (ppc_md.hmi_exception_early)
		ppc_md.hmi_exception_early(regs);

	wait_for_tb_resync();

	return 0;
}

#endif

/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == PPC_INST_NOP)
			nip -= 2;
		else if (*nip == PPC_INST_ISYNC)
			--nip;
		if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) {
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100) ? "OUT to" : "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			regs->msr |= MSR_RI;
			regs->nip = extable_fixup(entry);
			return 1;
		}
	}
#endif /* CONFIG_PPC32 */
	return 0;
}

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)	((regs)->dsisr)
#ifndef CONFIG_FSL_BOOKE
#define get_mc_reason(regs)	((regs)->dsisr)
#else
#define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
#endif
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR

/* single-step stuff */
#define single_stepping(regs)	(current->thread.debug.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.debug.dbcr0 &= ~DBCR0_IC)

#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define get_mc_reason(regs)	((regs)->msr)
#define REASON_TM		0x200000
#define REASON_FP		0x100000
#define REASON_ILLEGAL		0x80000
#define REASON_PRIVILEGED	0x40000
#define REASON_TRAP		0x20000

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#endif

/*
 * Family-specific machine check handlers.  Each one decodes the
 * relevant status register (ESR, MCSR or SRR1), prints a
 * human-readable cause, and returns non-zero only when the event has
 * been fully handled.
 */
#if defined(CONFIG_4xx)
int machine_check_4xx(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	if (reason & ESR_IMCP) {
		printk("Instruction");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	} else
		printk("Data");
	printk(" machine check in kernel mode.\n");

	return 0;
}

int machine_check_440A(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	if (reason & ESR_IMCP) {
		printk("Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	} else {
		u32 mcsr = mfspr(SPRN_MCSR);

		if (mcsr & MCSR_IB)
			printk("Instruction Read PLB Error\n");
		if (mcsr & MCSR_DRB)
			printk("Data Read PLB Error\n");
		if (mcsr & MCSR_DWB)
			printk("Data Write PLB Error\n");
		if (mcsr & MCSR_TLBP)
			printk("TLB Parity Error\n");
		if (mcsr & MCSR_ICP) {
			flush_instruction_cache();
			printk("I-Cache Parity Error\n");
		}
		if (mcsr & MCSR_DCSP)
			printk("D-Cache Search Parity Error\n");
		if (mcsr & MCSR_DCFP)
			printk("D-Cache Flush Parity Error\n");
		if (mcsr & MCSR_IMPE)
			printk("Machine Check exception is imprecise\n");

		/* Clear MCSR */
		mtspr(SPRN_MCSR, mcsr);
	}
	return 0;
}

int machine_check_47x(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);
	u32 mcsr;

	printk(KERN_ERR "Machine check in kernel mode.\n");
	if (reason & ESR_IMCP) {
		printk(KERN_ERR
		       "Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
		return 0;
	}
	mcsr = mfspr(SPRN_MCSR);
	if (mcsr & MCSR_IB)
		printk(KERN_ERR "Instruction Read PLB Error\n");
	if (mcsr & MCSR_DRB)
		printk(KERN_ERR "Data Read PLB Error\n");
	if (mcsr & MCSR_DWB)
		printk(KERN_ERR "Data Write PLB Error\n");
	if (mcsr & MCSR_TLBP)
		printk(KERN_ERR "TLB Parity Error\n");
	if (mcsr & MCSR_ICP) {
		flush_instruction_cache();
		printk(KERN_ERR "I-Cache Parity Error\n");
	}
	if (mcsr & MCSR_DCSP)
		printk(KERN_ERR "D-Cache Search Parity Error\n");
	if (mcsr & PPC47x_MCSR_GPR)
		printk(KERN_ERR "GPR Parity Error\n");
	if (mcsr & PPC47x_MCSR_FPR)
		printk(KERN_ERR "FPR Parity Error\n");
	if (mcsr & PPC47x_MCSR_IPR)
		printk(KERN_ERR "Machine Check exception is imprecise\n");

	/* Clear MCSR */
	mtspr(SPRN_MCSR, mcsr);

	return 0;
}
#elif defined(CONFIG_E500)
int machine_check_e500mc(struct pt_regs *regs)
{
	unsigned long mcsr = mfspr(SPRN_MCSR);
	unsigned long reason = mcsr;
	int recoverable = 1;

	if (reason & MCSR_LD) {
		recoverable = fsl_rio_mcheck_exception(regs);
		if (recoverable == 1)
			goto silent_out;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");

	if (reason & MCSR_ICPERR) {
		printk("Instruction Cache Parity Error\n");

		/*
		 * This is recoverable by invalidating the i-cache.
		 */
		mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
		while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
			;

		/*
		 * This will generally be accompanied by an instruction
		 * fetch error report -- only treat MCSR_IF as fatal
		 * if it wasn't due to an L1 parity error.
		 */
		reason &= ~MCSR_IF;
	}

	if (reason & MCSR_DCPERR_MC) {
		printk("Data Cache Parity Error\n");

		/*
		 * In write shadow mode we auto-recover from the error, but it
		 * may still get logged and cause a machine check.  We should
		 * only treat the non-write shadow case as non-recoverable.
		 */
		if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
			recoverable = 0;
	}

	if (reason & MCSR_L2MMU_MHIT) {
		printk("Hit on multiple TLB entries\n");
		recoverable = 0;
	}

	if (reason & MCSR_NMI)
		printk("Non-maskable interrupt\n");

	if (reason & MCSR_IF) {
		printk("Instruction Fetch Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LD) {
		printk("Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_ST) {
		printk("Store Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LDG) {
		printk("Guarded Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_TLBSYNC)
		printk("Simultaneous tlbsync operations\n");

	if (reason & MCSR_BSL2_ERR) {
		printk("Level 2 Cache Error\n");
		recoverable = 0;
	}

	if (reason & MCSR_MAV) {
		u64 addr;

		addr = mfspr(SPRN_MCAR);
		addr |= (u64)mfspr(SPRN_MCARU) << 32;

		printk("Machine Check %s Address: %#llx\n",
		       reason & MCSR_MEA ? "Effective" : "Physical", addr);
	}

silent_out:
	mtspr(SPRN_MCSR, mcsr);
	return mfspr(SPRN_MCSR) == 0 && recoverable;
}

int machine_check_e500(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	if (reason & MCSR_BUS_RBERR) {
		if (fsl_rio_mcheck_exception(regs))
			return 1;
		if (fsl_pci_mcheck_exception(regs))
			return 1;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_ICPERR)
		printk("Instruction Cache Parity Error\n");
	if (reason & MCSR_DCP_PERR)
		printk("Data Cache Push Parity Error\n");
	if (reason & MCSR_DCPERR)
		printk("Data Cache Parity Error\n");
	if (reason & MCSR_BUS_IAERR)
		printk("Bus - Instruction Address Error\n");
	if (reason & MCSR_BUS_RAERR)
		printk("Bus - Read Address Error\n");
	if (reason & MCSR_BUS_WAERR)
		printk("Bus - Write Address Error\n");
	if (reason & MCSR_BUS_IBERR)
		printk("Bus - Instruction Data Error\n");
	if (reason & MCSR_BUS_RBERR)
		printk("Bus - Read Data Bus Error\n");
	if (reason & MCSR_BUS_WBERR)
		printk("Bus - Write Data Bus Error\n");
	if (reason & MCSR_BUS_IPERR)
		printk("Bus - Instruction Parity Error\n");
	if (reason & MCSR_BUS_RPERR)
		printk("Bus - Read Parity Error\n");

	return 0;
}

int machine_check_generic(struct pt_regs *regs)
{
	return 0;
}
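/*
 * The remaining flavours (e200, 8xx and the classic 6xx/7xx/7xxx
 * fallback) follow the same shape: decode the machine check status,
 * print the cause, and return non-zero only if the event was handled.
 */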
#elif defined(CONFIG_E200)
int machine_check_e200(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_CP_PERR)
		printk("Cache Push Parity Error\n");
	if (reason & MCSR_CPERR)
		printk("Cache Parity Error\n");
	if (reason & MCSR_EXCP_ERR)
		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
	if (reason & MCSR_BUS_IRERR)
		printk("Bus - Read Bus Error on instruction fetch\n");
	if (reason & MCSR_BUS_DRERR)
		printk("Bus - Read Bus Error on data load\n");
	if (reason & MCSR_BUS_WRERR)
		printk("Bus - Write Bus Error on buffered store or cache line push\n");

	return 0;
}
#elif defined(CONFIG_PPC_8xx)
int machine_check_8xx(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	pr_err("Machine check in kernel mode.\n");
	pr_err("Caused by (from SRR1=%lx): ", reason);
	if (reason & 0x40000000)
		pr_err("Fetch error at address %lx\n", regs->nip);
	else
		pr_err("Data access error at address %lx\n", regs->dar);

#ifdef CONFIG_PCI
	/* the qspan pci read routines can cause machine checks -- Cort
	 *
	 * yuck !!! that totally needs to go away ! There are better ways
	 * to deal with that than having a wart in the mcheck handler.
	 * -- BenH
	 */
	bad_page_fault(regs, regs->dar, SIGBUS);
	return 1;
#else
	return 0;
#endif
}
#else
int machine_check_generic(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from SRR1=%lx): ", reason);
	switch (reason & 0x601F0000) {
	case 0x80000:
		printk("Machine check signal\n");
		break;
	case 0:		/* for 601 */
	case 0x40000:
	case 0x140000:	/* 7450 MSS error and TEA */
		printk("Transfer error ack signal\n");
		break;
	case 0x20000:
		printk("Data parity error signal\n");
		break;
	case 0x10000:
		printk("Address parity error signal\n");
		break;
	case 0x20000000:
		printk("L1 Data Cache error\n");
		break;
	case 0x40000000:
		printk("L1 Instruction Cache error\n");
		break;
	case 0x00100000:
		printk("L2 data cache parity error\n");
		break;
	default:
		printk("Unknown values in msr\n");
	}
	return 0;
}
#endif /* everything else */

void machine_check_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	int recover = 0;

	__this_cpu_inc(irq_stat.mce_exceptions);

	/* See if any machine dependent calls. In theory, we would want
	 * to call the CPU first, and call the ppc_md. one if the CPU
	 * one returns a positive number. However there is existing code
	 * that assumes the board gets a first chance, so let's keep it
	 * that way for now and fix things later. --BenH.
	 */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);
	else if (cur_cpu_spec->machine_check)
		recover = cur_cpu_spec->machine_check(regs);

	if (recover > 0)
		goto bail;

	if (debugger_fault_handler(regs))
		goto bail;

	if (check_io_access(regs))
		goto bail;

	die("Machine check", regs, SIGBUS);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable Machine check");

bail:
	exception_exit(prev_state);
}

void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}

void handle_hmi_exception(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	irq_enter();

	if (ppc_md.handle_hmi_exception)
		ppc_md.handle_hmi_exception(regs);

	irq_exit();
	set_irq_regs(old_regs);
}

void unknown_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, 0, 0);

	exception_exit(prev_state);
}

void instruction_breakpoint_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_iabr_match(regs))
		goto bail;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);

bail:
	exception_exit(prev_state);
}

void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, 0, 0);
}

void single_step_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	clear_single_step(regs);

	if (kprobe_post_handler(regs))
		return;

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_sstep(regs))
		goto bail;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);

bail:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(single_step_exception);

/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception.  This was pointed out
 * by Kumar Gala.  -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs))
		single_step_exception(regs);
}

static inline int __parse_fpscr(unsigned long fpscr)
{
	int ret = 0;

	/* Invalid operation */
	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
		ret = FPE_FLTINV;

	/* Overflow */
	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
		ret = FPE_FLTOVF;

	/* Underflow */
	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
		ret = FPE_FLTUND;

	/* Divide by zero */
	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
		ret = FPE_FLTDIV;

	/* Inexact result */
	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
		ret = FPE_FLTRES;

	return ret;
}

static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;

	flush_fp_to_thread(current);

	code = __parse_fpscr(current->thread.fp_state.fpscr);

	_exception(SIGFPE, regs, code, regs->nip);
}
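/*
 * Note that __parse_fpscr() above reports at most one cause, checked
 * in the order invalid operation, overflow, underflow, divide-by-zero,
 * then inexact, and only when the corresponding enable bit is also
 * set in the FPSCR.
 */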
/*
 * Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 *
 */
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & PPC_INST_STRING_MASK) {
	case PPC_INST_LSWX:
	case PPC_INST_STSWX:
		EA += NB_RB;
		num_bytes = regs->xer & 0x7f;
		break;
	case PPC_INST_LSWI:
	case PPC_INST_STSWI:
		num_bytes = (NB_RB == 0) ? 32 : NB_RB;
		break;
	default:
		return -EINVAL;
	}

	while (num_bytes != 0) {
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));

		/* if process is 32-bit, clear upper 32 bits of EA */
		if ((regs->msr & MSR_64BIT) == 0)
			EA &= 0xFFFFFFFF;

		switch ((instword & PPC_INST_STRING_MASK)) {
		case PPC_INST_LSWX:
		case PPC_INST_LSWI:
			if (get_user(val, (u8 __user *)EA))
				return -EFAULT;
			/* first time updating this reg,
			 * zero it out */
			if (pos == 0)
				regs->gpr[rT] = 0;
			regs->gpr[rT] |= val << shift;
			break;
		case PPC_INST_STSWI:
		case PPC_INST_STSWX:
			val = regs->gpr[rT] >> shift;
			if (put_user(val, (u8 __user *)EA))
				return -EFAULT;
			break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}

static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
	u32 ra, rs;
	unsigned long tmp;

	ra = (instword >> 16) & 0x1f;
	rs = (instword >> 21) & 0x1f;

	/*
	 * Classic SWAR population count: the first step leaves in each
	 * 2-bit field the number of bits that were set in it, the second
	 * accumulates those into 4-bit fields, and the third folds
	 * adjacent nibbles so that each byte ends up holding its own bit
	 * count -- which is what popcntb is architected to return.
	 */
	tmp = regs->gpr[rs];
	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
	regs->gpr[ra] = tmp;

	return 0;
}

static int emulate_isel(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 rB = (instword >> 11) & 0x1f;
	u8 BC = (instword >> 6) & 0x1f;
	u8 bit;
	unsigned long tmp;

	tmp = (rA == 0) ? 0 : regs->gpr[rA];
	bit = (regs->ccr >> (31 - BC)) & 0x1;

	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_abort_check(struct pt_regs *regs, int cause)
{
	/* If we're emulating a load/store in an active transaction, we cannot
	 * emulate it as the kernel operates in transaction suspended context.
	 * We need to abort the transaction.  This creates a persistent TM
	 * abort so tell the user what caused it with a new code.
	 */
	if (MSR_TM_TRANSACTIONAL(regs->msr)) {
		tm_enable();
		tm_abort(cause);
		return true;
	}
	return false;
}
#else
static inline bool tm_abort_check(struct pt_regs *regs, int reason)
{
	return false;
}
#endif

static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	if (!user_mode(regs))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
		PPC_WARN_EMULATED(mfpvr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op.  */
	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
		PPC_WARN_EMULATED(dcba, regs);
		return 0;
	}

	/* Emulate the mcrxr insn.  */
	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		PPC_WARN_EMULATED(mcrxr, regs);
		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
		if (tm_abort_check(regs,
				   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
			return -EINVAL;
		PPC_WARN_EMULATED(string, regs);
		return emulate_string_inst(regs, instword);
	}

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
		PPC_WARN_EMULATED(popcntb, regs);
		return emulate_popcntb_inst(regs, instword);
	}

	/* Emulate isel (Integer Select) instruction */
	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
		PPC_WARN_EMULATED(isel, regs);
		return emulate_isel(regs, instword);
	}

	/* Emulate sync instruction variants */
	if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
		PPC_WARN_EMULATED(sync, regs);
		asm volatile("sync");
		return 0;
	}

#ifdef CONFIG_PPC64
	/* Emulate the mfspr rD, DSCR. */
	if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
		PPC_INST_MFSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
		PPC_INST_MFSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mfdscr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_DSCR);
		return 0;
	}
	/* Emulate the mtspr DSCR, rD. */
	if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
		PPC_INST_MTSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
		PPC_INST_MTSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mtdscr, regs);
		rd = (instword >> 21) & 0x1f;
		current->thread.dscr = regs->gpr[rd];
		current->thread.dscr_inherit = 1;
		mtspr(SPRN_DSCR, current->thread.dscr);
		return 0;
	}
#endif

	return -EINVAL;
}

int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}

#ifdef CONFIG_MATH_EMULATION
static int emulate_math(struct pt_regs *regs)
{
	int ret;
	extern int do_mathemu(struct pt_regs *regs);

	ret = do_mathemu(regs);
	if (ret >= 0)
		PPC_WARN_EMULATED(math, regs);

	switch (ret) {
	case 0:
		emulate_single_step(regs);
		return 0;
	case 1: {
		int code = 0;

		code = __parse_fpscr(current->thread.fp_state.fpscr);
		_exception(SIGFPE, regs, code, regs->nip);
		return 0;
	}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return 0;
	}

	return -1;
}
#else
static inline int emulate_math(struct pt_regs *regs) { return -1; }
#endif

void program_check_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	unsigned int reason = get_reason(regs);

	/* We can now get here via a FP Unavailable exception if the core
	 * has no FPU, in that case the reason flags will be 0 */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		goto bail;
	}
	if (reason & REASON_TRAP) {
		unsigned long bugaddr;
		/* Debugger is first in line to stop recursive faults in
		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
		if (debugger_bpt(regs))
			goto bail;

		if (kprobe_handler(regs))
			goto bail;

		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
				== NOTIFY_STOP)
			goto bail;

		bugaddr = regs->nip;
		/*
		 * Fixup bugaddr for BUG_ON() in real mode
		 */
		if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR))
			bugaddr += PAGE_OFFSET;

		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
		    report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		goto bail;
	}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (reason & REASON_TM) {
		/* This is a TM "Bad Thing Exception" program check.
		 * This occurs when:
		 * -  An rfid/hrfid/mtmsrd attempts to cause an illegal
		 *    transition in TM states.
		 * -  A trechkpt is attempted when transactional.
		 * -  A treclaim is attempted when non transactional.
		 * -  A tend is illegally attempted.
		 * -  writing a TM SPR when transactional.
		 */
		if (!user_mode(regs) &&
		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		/* If usermode caused this, it's done something illegal and
		 * gets a SIGILL slap on the wrist.  We call it an illegal
		 * operand to distinguish from the instruction just being bad
		 * (e.g. executing a 'tend' on a CPU without TM!); it's an
		 * illegal /placement/ of a valid instruction.
		 */
		if (user_mode(regs)) {
			_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
			goto bail;
		} else {
			printk(KERN_EMERG "Unexpected TM Bad Thing exception "
			       "at %lx (msr 0x%x)\n", regs->nip, reason);
			die("Unrecoverable exception", regs, SIGABRT);
		}
	}
#endif

	/*
	 * If we took the program check in the kernel skip down to sending a
	 * SIGILL. The subsequent cases all relate to emulating instructions
	 * which we should only do for userspace. We also do not want to enable
	 * interrupts for kernel faults because that might lead to further
	 * faults, and lose the context of the original exception.
	 */
	if (!user_mode(regs))
		goto sigill;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0.  In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurrences etc. -dgibson 31/Mar/2003
	 */
	if (!emulate_math(regs))
		goto bail;

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs->nip += 4;
			emulate_single_step(regs);
			goto bail;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			goto bail;
		}
	}

sigill:
	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);

bail:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(program_check_exception);

/*
 * This occurs when running in hypervisor mode on POWER6 or later
 * and an illegal instruction is encountered.
 */
void emulation_assist_interrupt(struct pt_regs *regs)
{
	regs->msr |= REASON_ILLEGAL;
	program_check_exception(regs);
}
NOKPROBE_SYMBOL(emulation_assist_interrupt);

void alignment_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	int sig, code, fixed = 0;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
		goto bail;

	/* we don't implement logging of alignment exceptions */
	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		fixed = fix_alignment(regs);

	if (fixed == 1) {
		regs->nip += 4;	/* skip over emulated instruction */
		emulate_single_step(regs);
		goto bail;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		sig = SIGSEGV;
		code = SEGV_ACCERR;
	} else {
		sig = SIGBUS;
		code = BUS_ADRALN;
	}
	if (user_mode(regs))
		_exception(sig, regs, code, regs->dar);
	else
		bad_page_fault(regs, regs->dar, sig);

bail:
	exception_exit(prev_state);
}

void slb_miss_bad_addr(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (user_mode(regs))
		_exception(SIGSEGV, regs, SEGV_BNDERR, regs->dar);
	else
		bad_page_fault(regs, regs->dar, SIGSEGV);

	exception_exit(prev_state);
}

void StackOverflow(struct pt_regs *regs)
{
	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
	       current, regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}

void nonrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
	       regs->nip, regs->msr);
	debugger(regs);
	die("nonrecoverable exception", regs, SIGKILL);
}

void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);

	exception_exit(prev_state);
}

void altivec_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (user_mode(regs)) {
		/* A user program has executed an altivec instruction,
		   but this kernel doesn't support altivec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		goto bail;
	}

	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);

bail:
	exception_exit(prev_state);
}
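/*
 * The handlers below cover the remaining "facility unavailable" style
 * interrupts: plain VSX on kernels built without VSX support, and on
 * PPC64 the FSCR/HFSCR-mediated facilities (DSCR, TM, EBB, TAR, ...),
 * where the top byte of the (H)FSCR identifies the facility that
 * triggered the interrupt.
 */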
void vsx_unavailable_exception(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/* A user program has executed a vsx instruction,
		   but this kernel doesn't support vsx. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}

#ifdef CONFIG_PPC64
static void tm_unavailable(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (user_mode(regs)) {
		current->thread.load_tm++;
		regs->msr |= MSR_TM;
		tm_enable();
		tm_restore_sprs(&current->thread);
		return;
	}
#endif
	pr_emerg("Unrecoverable TM Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
}

void facility_unavailable_exception(struct pt_regs *regs)
{
	static char *facility_strings[] = {
		[FSCR_FP_LG] = "FPU",
		[FSCR_VECVSX_LG] = "VMX/VSX",
		[FSCR_DSCR_LG] = "DSCR",
		[FSCR_PM_LG] = "PMU SPRs",
		[FSCR_BHRB_LG] = "BHRB",
		[FSCR_TM_LG] = "TM",
		[FSCR_EBB_LG] = "EBB",
		[FSCR_TAR_LG] = "TAR",
	};
	char *facility = "unknown";
	u64 value;
	u32 instword, rd;
	u8 status;
	bool hv;

	hv = (regs->trap == 0xf80);
	if (hv)
		value = mfspr(SPRN_HFSCR);
	else
		value = mfspr(SPRN_FSCR);

	status = value >> 56;
	if (status == FSCR_DSCR_LG) {
		/*
		 * User is accessing the DSCR register using the problem
		 * state only SPR number (0x03) either through a mfspr or
		 * a mtspr instruction. If it is a write attempt through
		 * a mtspr, then we set the inherit bit. This also allows
		 * the user to write or read the register directly in the
		 * future by setting via the FSCR DSCR bit. But in case it
		 * is a read DSCR attempt through a mfspr instruction, we
		 * just emulate the instruction instead. This code path will
		 * always emulate all the mfspr instructions till the user
		 * has attempted at least one mtspr instruction. This way it
		 * preserves the same behaviour when the user is accessing
		 * the DSCR through privilege level only SPR number (0x11)
		 * which is emulated through illegal instruction exception.
		 * We always leave HFSCR DSCR set.
		 */
		if (get_user(instword, (u32 __user *)(regs->nip))) {
			pr_err("Failed to fetch the user instruction\n");
			return;
		}

		/* Write into DSCR (mtspr 0x03, RS) */
		if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
				== PPC_INST_MTSPR_DSCR_USER) {
			rd = (instword >> 21) & 0x1f;
			current->thread.dscr = regs->gpr[rd];
			current->thread.dscr_inherit = 1;
			current->thread.fscr |= FSCR_DSCR;
			mtspr(SPRN_FSCR, current->thread.fscr);
		}

		/* Read from DSCR (mfspr RT, 0x03) */
		if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
				== PPC_INST_MFSPR_DSCR_USER) {
			if (emulate_instruction(regs)) {
				pr_err("DSCR based mfspr emulation failed\n");
				return;
			}
			regs->nip += 4;
			emulate_single_step(regs);
		}
		return;
	}

	if (status == FSCR_TM_LG) {
		/*
		 * If we're here then the hardware is TM aware because it
		 * generated an exception with FSCR_TM set.
		 *
		 * If cpu_has_feature(CPU_FTR_TM) is false, then either firmware
		 * told us not to do TM, or the kernel is not built with TM
		 * support.
		 *
		 * If both of those things are true, then userspace can spam the
		 * console by triggering the printk() below just by continually
		 * doing tbegin (or any TM instruction). So in that case just
		 * send the process a SIGILL immediately.
		 */
		if (!cpu_has_feature(CPU_FTR_TM))
			goto out;

		tm_unavailable(regs);
		return;
	}

	if ((hv || status >= 2) &&
	    (status < ARRAY_SIZE(facility_strings)) &&
	    facility_strings[status])
		facility = facility_strings[status];

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n",
		hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr);

out:
	if (user_mode(regs)) {
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	die("Unexpected facility unavailable exception", regs, SIGABRT);
}
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM

void fp_unavailable_tm(struct pt_regs *regs)
{
	/* Note:  This does not handle any kind of FP laziness. */

	TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
		 regs->nip, regs->msr);

	/* We can only have got here if the task started using FP after
	 * beginning the transaction.  So, the transactional regs are just a
	 * copy of the checkpointed ones.  But, we still need to recheckpoint
	 * as we're enabling FP for the process; it will return, abort the
	 * transaction, and probably retry but now with FP enabled.  So the
	 * checkpointed FP registers need to be loaded.
	 */
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
	/* Reclaim didn't save out any FPRs to transact_fprs. */

	/* Enable FP for the task: */
	regs->msr |= (MSR_FP | current->thread.fpexc_mode);

	/* This loads and recheckpoints the FP registers from
	 * thread.fpr[].  They will remain in registers after the
	 * checkpoint so we don't need to reload them after.
	 * If VMX is in use, the VRs now hold checkpointed values,
	 * so we don't want to load the VRs from the thread_struct.
	 */
	tm_recheckpoint(&current->thread, MSR_FP);

	/* If VMX is in use, get the transactional values back */
	if (regs->msr & MSR_VEC) {
		msr_check_and_set(MSR_VEC);
		load_vr_state(&current->thread.vr_state);
		/* At this point all the VSX state is loaded, so enable it */
		regs->msr |= MSR_VSX;
	}
}

void altivec_unavailable_tm(struct pt_regs *regs)
{
	/* See the comments in fp_unavailable_tm().  This function operates
	 * the same way.
	 */

	TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
		 "MSR=%lx\n",
		 regs->nip, regs->msr);
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
	regs->msr |= MSR_VEC;
	tm_recheckpoint(&current->thread, MSR_VEC);
	current->thread.used_vr = 1;

	if (regs->msr & MSR_FP) {
		msr_check_and_set(MSR_FP);
		load_fp_state(&current->thread.fp_state);
		regs->msr |= MSR_VSX;
	}
}

void vsx_unavailable_tm(struct pt_regs *regs)
{
	unsigned long orig_msr = regs->msr;

	/* See the comments in fp_unavailable_tm().  This works similarly,
	 * though we're loading both FP and VEC registers in here.
	 *
	 * If FP isn't in use, load FP regs.  If VEC isn't in use, load VEC
	 * regs.  Either way, set MSR_VSX.
	 */

	TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx,"
		 "MSR=%lx\n",
		 regs->nip, regs->msr);

	current->thread.used_vsr = 1;

	/* If FP and VMX are already loaded, we have all the state we need */
	if ((orig_msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) {
		regs->msr |= MSR_VSX;
		return;
	}

	/* This reclaims FP and/or VR regs if they're already enabled */
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);

	regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode |
		MSR_VSX;

	/* This loads & recheckpoints FP and VRs; but we have
	 * to be sure not to overwrite previously-valid state.
	 */
	tm_recheckpoint(&current->thread, regs->msr & ~orig_msr);

	msr_check_and_set(orig_msr & (MSR_FP | MSR_VEC));

	if (orig_msr & MSR_FP)
		load_fp_state(&current->thread.fp_state);
	if (orig_msr & MSR_VEC)
		load_vr_state(&current->thread.vr_state);
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

void performance_monitor_exception(struct pt_regs *regs)
{
	__this_cpu_inc(irq_stat.pmu_irqs);

	perf_irq(regs);
}

#ifdef CONFIG_8xx
void SoftwareEmulation(struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);

	if (!user_mode(regs)) {
		debugger(regs);
		die("Kernel Mode Unimplemented Instruction or SW FPU Emulation",
			regs, SIGFPE);
	}

	if (!emulate_math(regs))
		return;

	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
#endif /* CONFIG_8xx */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
{
	int changed = 0;
	/*
	 * Determine the cause of the debug event, clear the
	 * event flags and send a trap to the handler. Torez
	 */
	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
			     5);
		changed |= 0x01;
	} else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
		dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
		do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT,
			     6);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC1) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
			     1);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC2) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
			     2);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC3) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
			     3);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC4) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
			     4);
		changed |= 0x01;
	}
	/*
	 * At the point this routine was called, the MSR(DE) was turned off.
	 * Check all other debug flags and see if that bit needs to be turned
	 * back on or not.
	 */
	if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
			       current->thread.debug.dbcr1))
		regs->msr |= MSR_DE;
	else
		/* Make sure the IDM flag is off */
		current->thread.debug.dbcr0 &= ~DBCR0_IDM;

	if (changed & 0x01)
		mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
}

void DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	current->thread.debug.dbsr = debug_status;

	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
	 * on server, it stops on the target of the branch. In order to simulate
	 * the server behaviour, we thus restart right away with a single step
	 * instead of stopping here when hitting a BT
	 */
	if (debug_status & DBSR_BT) {
		regs->msr &= ~MSR_DE;

		/* Disable BT */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
		/* Clear the BT event */
		mtspr(SPRN_DBSR, DBSR_BT);

		/* Do the single step trick only when coming from userspace */
		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_BT;
			current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
			regs->msr |= MSR_DE;
			return;
		}

		if (kprobe_post_handler(regs))
			return;

		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}
		if (debugger_sstep(regs))
			return;
	} else if (debug_status & DBSR_IC) { 	/* Instruction complete */
		regs->msr &= ~MSR_DE;

		/* Disable instruction completion */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
		/* Clear the instruction completion event */
		mtspr(SPRN_DBSR, DBSR_IC);

		if (kprobe_post_handler(regs))
			return;

		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}

		if (debugger_sstep(regs))
			return;

		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_IC;
			if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
					       current->thread.debug.dbcr1))
				regs->msr |= MSR_DE;
			else
				/* Make sure the IDM bit is off */
				current->thread.debug.dbcr0 &= ~DBCR0_IDM;
		}

		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
	} else
		handle_debug(regs, debug_status);
}
NOKPROBE_SYMBOL(DebugException);
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */

#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	flush_altivec_to_thread(current);

	PPC_WARN_EMULATED(altivec, regs);
	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
				   "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vr_state.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	/* We treat cache locking instructions from the user
	 * as priv ops, in the future we could try to do
	 * something smarter
	 */
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	return;
}
#endif /* CONFIG_FSL_BOOKE */

#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
	extern int do_spe_mathemu(struct pt_regs *regs);
	unsigned long spefscr;
	int fpexc_mode;
	int code = 0;
	int err;

	flush_spe_to_thread(current);

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
		code = FPE_FLTOVF;
	} else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
		code = FPE_FLTUND;
	} else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV)) {
		code = FPE_FLTDIV;
	} else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
		code = FPE_FLTINV;
	} else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES)) {
		code = FPE_FLTRES;
	}

	err = do_spe_mathemu(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, code, regs->nip);
	}

	return;
}

void SPEFloatingPointRoundException(struct pt_regs *regs)
{
	extern int speround_handler(struct pt_regs *regs);
	int err;

	preempt_disable();
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	preempt_enable();

	regs->nip -= 4;
	err = speround_handler(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, 0, regs->nip);
		return;
	}
}
#endif

/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
	       regs->trap, regs->nip);
	die("Unrecoverable exception", regs, SIGABRT);
}

#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR) & (~TCR_WIE));
	return;
}

void WatchdogException(struct pt_regs *regs)
{
	printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif

/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
	       regs->gpr[1], regs->nip);
	die("Bad kernel stack pointer", regs, SIGABRT);
}

void __init trap_init(void)
{
}


#ifdef CONFIG_PPC_EMULATED_STATS

#define WARN_EMULATED_SETUP(type)	.type = { .name = #type }

struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
	WARN_EMULATED_SETUP(altivec),
#endif
	WARN_EMULATED_SETUP(dcba),
	WARN_EMULATED_SETUP(dcbz),
	WARN_EMULATED_SETUP(fp_pair),
	WARN_EMULATED_SETUP(isel),
	WARN_EMULATED_SETUP(mcrxr),
	WARN_EMULATED_SETUP(mfpvr),
	WARN_EMULATED_SETUP(multiple),
	WARN_EMULATED_SETUP(popcntb),
	WARN_EMULATED_SETUP(spe),
	WARN_EMULATED_SETUP(string),
	WARN_EMULATED_SETUP(sync),
	WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
	WARN_EMULATED_SETUP(math),
#endif
#ifdef CONFIG_VSX
	WARN_EMULATED_SETUP(vsx),
#endif
#ifdef CONFIG_PPC64
	WARN_EMULATED_SETUP(mfdscr),
	WARN_EMULATED_SETUP(mtdscr),
	WARN_EMULATED_SETUP(lq_stq),
#endif
};

u32 ppc_warn_emulated;

void ppc_warn_emulated_print(const char *type)
{
	pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
			    type);
}

static int __init ppc_warn_emulated_init(void)
{
	struct dentry *dir, *d;
	unsigned int i;
	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;

	if (!powerpc_debugfs_root)
		return -ENODEV;

	dir = debugfs_create_dir("emulated_instructions",
				 powerpc_debugfs_root);
	if (!dir)
		return -ENOMEM;

	d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
			       &ppc_warn_emulated);
	if (!d)
		goto fail;

	for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
		d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
				       (u32 *)&entries[i].val.counter);
		if (!d)
			goto fail;
	}

	return 0;

fail:
	debugfs_remove_recursive(dir);
	return -ENOMEM;
}

device_initcall(ppc_warn_emulated_init);

#endif /* CONFIG_PPC_EMULATED_STATS */