/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright 2007-2010 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Modified by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras (paulus@samba.org)
 */

/*
 * This file handles the architecture-dependent parts of hardware exceptions
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/debugfs.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>

#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#include <asm/reg.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
#include <asm/tm.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/rio.h>
#include <asm/fadump.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#include <sysdev/fsl_pci.h>

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_break_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

/* Transactional Memory trap debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...)	printk(KERN_INFO x)
#else
#define TM_DEBUG(x...)	do { } while (0)
#endif
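/*
 * Usage note (illustrative): a call such as
 *	TM_DEBUG("tm: reclaiming at %lx\n", regs->nip);
 * only produces output when TM_DEBUG_SW is defined at build time;
 * otherwise the macro expands to an empty statement.
 */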

/*
 * Trap & Exception support
 */

#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
	mutex_lock(&pmac_backlight_mutex);
	if (pmac_backlight) {
		struct backlight_properties *props;

		props = &pmac_backlight->props;
		props->brightness = props->max_brightness;
		props->power = FB_BLANK_UNBLANK;
		backlight_update_status(pmac_backlight);
	}
	mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;

static unsigned __kprobes long oops_begin(struct pt_regs *regs)
{
	int cpu;
	unsigned long flags;

	if (debugger(regs))
		return 1;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	if (machine_is(powermac))
		pmac_backlight_unblank();
	return flags;
}

static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
			       int signr)
{
	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	oops_exit();
	printk("\n");
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);

	crash_fadump(regs, "die oops");

	/*
	 * A system reset (0x100) is a request to dump, so we always send
	 * it through the crashdump code.
	 */
	if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) {
		crash_kexec(regs);

		/*
		 * We aren't the primary crash CPU. We need to send it
		 * to a holding pattern to avoid it ending up in the panic
		 * code.
		 */
		crash_kexec_secondary(regs);
	}

	if (!signr)
		return;

	/*
	 * While our oops output is serialised by a spinlock, output
	 * from panic() called below can race and corrupt it. If we
	 * know we are going to panic, delay for 1 second so we have a
	 * chance to get clean backtraces from all CPUs that are oopsing.
	 */
	if (in_interrupt() || panic_on_oops || !current->pid ||
	    is_global_init(current)) {
		mdelay(MSEC_PER_SEC);
	}

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	do_exit(signr);
}

static int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP NR_CPUS=%d ", NR_CPUS);
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC ");
#endif
#ifdef CONFIG_NUMA
	printk("NUMA ");
#endif
	printk("%s\n", ppc_md.name ? ppc_md.name : "");
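	/*
	 * Give registered die notifiers (a kernel debugger such as kgdb,
	 * for example) a chance to claim the oops before we dump state.
	 */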
	if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
		return 1;

	print_modules();
	show_regs(regs);

	return 0;
}

void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin(regs);

	if (__die(str, regs, err))
		err = 0;
	oops_end(flags, regs, err);
}

void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs, siginfo_t *info)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGTRAP;
	info->si_code = TRAP_TRACE;
	info->si_addr = (void __user *)regs->nip;
}

void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	siginfo_t info;
	const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
			"at %08lx nip %08lx lr %08lx code %x\n";
	const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
			"at %016lx nip %016lx lr %016lx code %x\n";

	if (!user_mode(regs)) {
		die("Exception in kernel mode", regs, signr);
		return;
	}

	if (show_unhandled_signals && unhandled_signal(current, signr)) {
		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
				   current->comm, current->pid, signr,
				   addr, regs->nip, regs->link, code);
	}

	if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
		local_irq_enable();

	current->thread.trap_nr = code;
	memset(&info, 0, sizeof(info));
	info.si_signo = signr;
	info.si_code = code;
	info.si_addr = (void __user *) addr;
	force_sig_info(signr, &info, current);
}

#ifdef CONFIG_PPC64
void system_reset_exception(struct pt_regs *regs)
{
	/* See if any machine dependent calls */
	if (ppc_md.system_reset_exception) {
		if (ppc_md.system_reset_exception(regs))
			return;
	}

	die("System Reset", regs, SIGABRT);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable System Reset");

	/* What should we do here? We could issue a shutdown or hard reset. */
}

/*
 * This function is called in real mode. Strictly no printk's please.
 *
 * regs->nip and regs->msr contain SRR0 and SRR1 respectively.
 */
long machine_check_early(struct pt_regs *regs)
{
	long handled = 0;

	__this_cpu_inc(irq_stat.mce_exceptions);

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
		handled = cur_cpu_spec->machine_check_early(regs);
	return handled;
}

long hmi_exception_realmode(struct pt_regs *regs)
{
	__this_cpu_inc(irq_stat.hmi_exceptions);

	if (ppc_md.hmi_exception_early)
		ppc_md.hmi_exception_early(regs);

	return 0;
}

#endif

/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 */
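/*
 * Instruction encodings matched below, for reference:
 *	0x60000000	nop
 *	0x4c00012c	isync
 *	0x7c0004ac	sync
 *	opcode 3	twi
 * The I/O accessors place the faulting load/store immediately before
 * the sync/twi, which is why the code walks backwards from the NIP.
 */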
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == 0x60000000)		/* nop */
			nip -= 2;
		else if (*nip == 0x4c00012c)	/* isync */
			--nip;
		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
			/* sync or twi */
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100) ? "OUT to" : "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			regs->msr |= MSR_RI;
			regs->nip = entry->fixup;
			return 1;
		}
	}
#endif /* CONFIG_PPC32 */
	return 0;
}

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)	((regs)->dsisr)
#ifndef CONFIG_FSL_BOOKE
#define get_mc_reason(regs)	((regs)->dsisr)
#else
#define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
#endif
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR

/* single-step stuff */
#define single_stepping(regs)	(current->thread.debug.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.debug.dbcr0 &= ~DBCR0_IC)

#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define get_mc_reason(regs)	((regs)->msr)
#define REASON_TM		0x200000
#define REASON_FP		0x100000
#define REASON_ILLEGAL		0x80000
#define REASON_PRIVILEGED	0x40000
#define REASON_TRAP		0x20000

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#endif

#if defined(CONFIG_4xx)
int machine_check_4xx(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	if (reason & ESR_IMCP) {
		printk("Instruction");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	} else
		printk("Data");
	printk(" machine check in kernel mode.\n");

	return 0;
}

int machine_check_440A(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	if (reason & ESR_IMCP) {
		printk("Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	} else {
		u32 mcsr = mfspr(SPRN_MCSR);
		if (mcsr & MCSR_IB)
			printk("Instruction Read PLB Error\n");
		if (mcsr & MCSR_DRB)
			printk("Data Read PLB Error\n");
		if (mcsr & MCSR_DWB)
			printk("Data Write PLB Error\n");
		if (mcsr & MCSR_TLBP)
			printk("TLB Parity Error\n");
		if (mcsr & MCSR_ICP) {
			flush_instruction_cache();
			printk("I-Cache Parity Error\n");
		}
		if (mcsr & MCSR_DCSP)
			printk("D-Cache Search Parity Error\n");
		if (mcsr & MCSR_DCFP)
			printk("D-Cache Flush Parity Error\n");
		if (mcsr & MCSR_IMPE)
			printk("Machine Check exception is imprecise\n");

		/* Clear MCSR */
		mtspr(SPRN_MCSR, mcsr);
	}
	return 0;
}

int machine_check_47x(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);
	u32 mcsr;

	printk(KERN_ERR "Machine check in kernel mode.\n");
	if (reason & ESR_IMCP) {
		printk(KERN_ERR
		       "Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
		return 0;
	}
	mcsr = mfspr(SPRN_MCSR);
	if (mcsr & MCSR_IB)
		printk(KERN_ERR "Instruction Read PLB Error\n");
	if (mcsr & MCSR_DRB)
		printk(KERN_ERR "Data Read PLB Error\n");
	if (mcsr & MCSR_DWB)
		printk(KERN_ERR "Data Write PLB Error\n");
	if (mcsr & MCSR_TLBP)
		printk(KERN_ERR "TLB Parity Error\n");
	if (mcsr & MCSR_ICP) {
		flush_instruction_cache();
		printk(KERN_ERR "I-Cache Parity Error\n");
	}
	if (mcsr & MCSR_DCSP)
		printk(KERN_ERR "D-Cache Search Parity Error\n");
	if (mcsr & PPC47x_MCSR_GPR)
		printk(KERN_ERR "GPR Parity Error\n");
	if (mcsr & PPC47x_MCSR_FPR)
		printk(KERN_ERR "FPR Parity Error\n");
	if (mcsr & PPC47x_MCSR_IPR)
		printk(KERN_ERR "Machine Check exception is imprecise\n");

	/* Clear MCSR */
	mtspr(SPRN_MCSR, mcsr);

	return 0;
}
#elif defined(CONFIG_E500)
int machine_check_e500mc(struct pt_regs *regs)
{
	unsigned long mcsr = mfspr(SPRN_MCSR);
	unsigned long reason = mcsr;
	int recoverable = 1;

	if (reason & MCSR_LD) {
		recoverable = fsl_rio_mcheck_exception(regs);
		if (recoverable == 1)
			goto silent_out;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");

	if (reason & MCSR_ICPERR) {
		printk("Instruction Cache Parity Error\n");

		/*
		 * This is recoverable by invalidating the i-cache.
		 */
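		/*
		 * Setting ICFI triggers an i-cache flash invalidate; the
		 * loop below waits for the bit to drop, i.e. for the
		 * invalidate to complete.
		 */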
515 */ 516 mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI); 517 while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI) 518 ; 519 520 /* 521 * This will generally be accompanied by an instruction 522 * fetch error report -- only treat MCSR_IF as fatal 523 * if it wasn't due to an L1 parity error. 524 */ 525 reason &= ~MCSR_IF; 526 } 527 528 if (reason & MCSR_DCPERR_MC) { 529 printk("Data Cache Parity Error\n"); 530 531 /* 532 * In write shadow mode we auto-recover from the error, but it 533 * may still get logged and cause a machine check. We should 534 * only treat the non-write shadow case as non-recoverable. 535 */ 536 if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS)) 537 recoverable = 0; 538 } 539 540 if (reason & MCSR_L2MMU_MHIT) { 541 printk("Hit on multiple TLB entries\n"); 542 recoverable = 0; 543 } 544 545 if (reason & MCSR_NMI) 546 printk("Non-maskable interrupt\n"); 547 548 if (reason & MCSR_IF) { 549 printk("Instruction Fetch Error Report\n"); 550 recoverable = 0; 551 } 552 553 if (reason & MCSR_LD) { 554 printk("Load Error Report\n"); 555 recoverable = 0; 556 } 557 558 if (reason & MCSR_ST) { 559 printk("Store Error Report\n"); 560 recoverable = 0; 561 } 562 563 if (reason & MCSR_LDG) { 564 printk("Guarded Load Error Report\n"); 565 recoverable = 0; 566 } 567 568 if (reason & MCSR_TLBSYNC) 569 printk("Simultaneous tlbsync operations\n"); 570 571 if (reason & MCSR_BSL2_ERR) { 572 printk("Level 2 Cache Error\n"); 573 recoverable = 0; 574 } 575 576 if (reason & MCSR_MAV) { 577 u64 addr; 578 579 addr = mfspr(SPRN_MCAR); 580 addr |= (u64)mfspr(SPRN_MCARU) << 32; 581 582 printk("Machine Check %s Address: %#llx\n", 583 reason & MCSR_MEA ? "Effective" : "Physical", addr); 584 } 585 586 silent_out: 587 mtspr(SPRN_MCSR, mcsr); 588 return mfspr(SPRN_MCSR) == 0 && recoverable; 589 } 590 591 int machine_check_e500(struct pt_regs *regs) 592 { 593 unsigned long reason = get_mc_reason(regs); 594 595 if (reason & MCSR_BUS_RBERR) { 596 if (fsl_rio_mcheck_exception(regs)) 597 return 1; 598 if (fsl_pci_mcheck_exception(regs)) 599 return 1; 600 } 601 602 printk("Machine check in kernel mode.\n"); 603 printk("Caused by (from MCSR=%lx): ", reason); 604 605 if (reason & MCSR_MCP) 606 printk("Machine Check Signal\n"); 607 if (reason & MCSR_ICPERR) 608 printk("Instruction Cache Parity Error\n"); 609 if (reason & MCSR_DCP_PERR) 610 printk("Data Cache Push Parity Error\n"); 611 if (reason & MCSR_DCPERR) 612 printk("Data Cache Parity Error\n"); 613 if (reason & MCSR_BUS_IAERR) 614 printk("Bus - Instruction Address Error\n"); 615 if (reason & MCSR_BUS_RAERR) 616 printk("Bus - Read Address Error\n"); 617 if (reason & MCSR_BUS_WAERR) 618 printk("Bus - Write Address Error\n"); 619 if (reason & MCSR_BUS_IBERR) 620 printk("Bus - Instruction Data Error\n"); 621 if (reason & MCSR_BUS_RBERR) 622 printk("Bus - Read Data Bus Error\n"); 623 if (reason & MCSR_BUS_WBERR) 624 printk("Bus - Write Data Bus Error\n"); 625 if (reason & MCSR_BUS_IPERR) 626 printk("Bus - Instruction Parity Error\n"); 627 if (reason & MCSR_BUS_RPERR) 628 printk("Bus - Read Parity Error\n"); 629 630 return 0; 631 } 632 633 int machine_check_generic(struct pt_regs *regs) 634 { 635 return 0; 636 } 637 #elif defined(CONFIG_E200) 638 int machine_check_e200(struct pt_regs *regs) 639 { 640 unsigned long reason = get_mc_reason(regs); 641 642 printk("Machine check in kernel mode.\n"); 643 printk("Caused by (from MCSR=%lx): ", reason); 644 645 if (reason & MCSR_MCP) 646 printk("Machine Check Signal\n"); 647 if (reason & MCSR_CP_PERR) 648 printk("Cache Push Parity 
	if (reason & MCSR_CPERR)
		printk("Cache Parity Error\n");
	if (reason & MCSR_EXCP_ERR)
		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
	if (reason & MCSR_BUS_IRERR)
		printk("Bus - Read Bus Error on instruction fetch\n");
	if (reason & MCSR_BUS_DRERR)
		printk("Bus - Read Bus Error on data load\n");
	if (reason & MCSR_BUS_WRERR)
		printk("Bus - Write Bus Error on buffered store or cache line push\n");

	return 0;
}
#else
int machine_check_generic(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from SRR1=%lx): ", reason);
	switch (reason & 0x601F0000) {
	case 0x80000:
		printk("Machine check signal\n");
		break;
	case 0:		/* for 601 */
	case 0x40000:
	case 0x140000:	/* 7450 MSS error and TEA */
		printk("Transfer error ack signal\n");
		break;
	case 0x20000:
		printk("Data parity error signal\n");
		break;
	case 0x10000:
		printk("Address parity error signal\n");
		break;
	case 0x20000000:
		printk("L1 Data Cache error\n");
		break;
	case 0x40000000:
		printk("L1 Instruction Cache error\n");
		break;
	case 0x00100000:
		printk("L2 data cache parity error\n");
		break;
	default:
		printk("Unknown values in msr\n");
	}
	return 0;
}
#endif /* everything else */

void machine_check_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	int recover = 0;

	__this_cpu_inc(irq_stat.mce_exceptions);

	/* See if any machine dependent calls. In theory, we would want
	 * to call the CPU first, and call the ppc_md. one if the CPU
	 * one returns a positive number. However there is existing code
	 * that assumes the board gets a first chance, so let's keep it
	 * that way for now and fix things later. --BenH.
	 */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);
	else if (cur_cpu_spec->machine_check)
		recover = cur_cpu_spec->machine_check(regs);

	if (recover > 0)
		goto bail;

#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
	/* the qspan pci read routines can cause machine checks -- Cort
	 *
	 * yuck !!! that totally needs to go away ! There are better ways
	 * to deal with that than having a wart in the mcheck handler.
	 * -- BenH
	 */
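	/* Report it like a bad kernel-mode data access on the faulting
	 * address (DAR); bad_page_fault() applies an exception-table
	 * fixup if one exists. */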
	bad_page_fault(regs, regs->dar, SIGBUS);
	goto bail;
#endif

	if (debugger_fault_handler(regs))
		goto bail;

	if (check_io_access(regs))
		goto bail;

	die("Machine check", regs, SIGBUS);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable Machine check");

bail:
	exception_exit(prev_state);
}

void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}

void handle_hmi_exception(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	irq_enter();

	if (ppc_md.handle_hmi_exception)
		ppc_md.handle_hmi_exception(regs);

	irq_exit();
	set_irq_regs(old_regs);
}

void unknown_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, 0, 0);

	exception_exit(prev_state);
}

void instruction_breakpoint_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
		       5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_iabr_match(regs))
		goto bail;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);

bail:
	exception_exit(prev_state);
}

void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, 0, 0);
}

void __kprobes single_step_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	clear_single_step(regs);

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
		       5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_sstep(regs))
		goto bail;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);

bail:
	exception_exit(prev_state);
}

/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception. This was pointed out
 * by Kumar Gala. -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs))
		single_step_exception(regs);
}

static inline int __parse_fpscr(unsigned long fpscr)
{
	int ret = 0;

	/* Invalid operation */
	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
		ret = FPE_FLTINV;

	/* Overflow */
	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
		ret = FPE_FLTOVF;

	/* Underflow */
	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
		ret = FPE_FLTUND;

	/* Divide by zero */
	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
		ret = FPE_FLTDIV;

	/* Inexact result */
	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
		ret = FPE_FLTRES;

	return ret;
}

static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;

	flush_fp_to_thread(current);

	code = __parse_fpscr(current->thread.fp_state.fpscr);

	_exception(SIGFPE, regs, code, regs->nip);
}

/*
 * Illegal instruction emulation support. Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault. Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits. In this case, matching lots of
 * bits is faster and easier.
 *
 */
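/*
 * Worked example (illustrative): "lswi r5,r4,12" loads 12 bytes starting
 * at the address in r4 into r5, r6 and r7, packing 4 bytes per register
 * from the most significant byte downwards; registers wrap from r31 back
 * to r0. That is exactly the shift/wrap arithmetic in the loop below.
 */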
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & PPC_INST_STRING_MASK) {
	case PPC_INST_LSWX:
	case PPC_INST_STSWX:
		EA += NB_RB;
		num_bytes = regs->xer & 0x7f;
		break;
	case PPC_INST_LSWI:
	case PPC_INST_STSWI:
		num_bytes = (NB_RB == 0) ? 32 : NB_RB;
		break;
	default:
		return -EINVAL;
	}

	while (num_bytes != 0) {
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));

		/* if process is 32-bit, clear upper 32 bits of EA */
		if ((regs->msr & MSR_64BIT) == 0)
			EA &= 0xFFFFFFFF;

		switch ((instword & PPC_INST_STRING_MASK)) {
		case PPC_INST_LSWX:
		case PPC_INST_LSWI:
			if (get_user(val, (u8 __user *)EA))
				return -EFAULT;
			/* first time updating this reg,
			 * zero it out */
			if (pos == 0)
				regs->gpr[rT] = 0;
			regs->gpr[rT] |= val << shift;
			break;
		case PPC_INST_STSWI:
		case PPC_INST_STSWX:
			val = regs->gpr[rT] >> shift;
			if (put_user(val, (u8 __user *)EA))
				return -EFAULT;
			break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}

static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
	u32 ra, rs;
	unsigned long tmp;

	ra = (instword >> 16) & 0x1f;
	rs = (instword >> 21) & 0x1f;

	/* Byte-wise population count via bit-parallel (SWAR) arithmetic:
	 * fold to 2-bit sums, then 4-bit sums, then per-byte totals. */
	tmp = regs->gpr[rs];
	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
	regs->gpr[ra] = tmp;

	return 0;
}

static int emulate_isel(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 rB = (instword >> 11) & 0x1f;
	u8 BC = (instword >> 6) & 0x1f;
	u8 bit;
	unsigned long tmp;

	/* isel: rT = CR bit BC set ? (rA ? GPR[rA] : 0) : GPR[rB] */
	tmp = (rA == 0) ? 0 : regs->gpr[rA];
	bit = (regs->ccr >> (31 - BC)) & 0x1;

	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_abort_check(struct pt_regs *regs, int cause)
{
	/* If we're emulating a load/store in an active transaction, we cannot
	 * emulate it as the kernel operates in transaction suspended context.
	 * We need to abort the transaction. This creates a persistent TM
	 * abort so tell the user what caused it with a new code.
	 */
991 */ 992 if (MSR_TM_TRANSACTIONAL(regs->msr)) { 993 tm_enable(); 994 tm_abort(cause); 995 return true; 996 } 997 return false; 998 } 999 #else 1000 static inline bool tm_abort_check(struct pt_regs *regs, int reason) 1001 { 1002 return false; 1003 } 1004 #endif 1005 1006 static int emulate_instruction(struct pt_regs *regs) 1007 { 1008 u32 instword; 1009 u32 rd; 1010 1011 if (!user_mode(regs)) 1012 return -EINVAL; 1013 CHECK_FULL_REGS(regs); 1014 1015 if (get_user(instword, (u32 __user *)(regs->nip))) 1016 return -EFAULT; 1017 1018 /* Emulate the mfspr rD, PVR. */ 1019 if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) { 1020 PPC_WARN_EMULATED(mfpvr, regs); 1021 rd = (instword >> 21) & 0x1f; 1022 regs->gpr[rd] = mfspr(SPRN_PVR); 1023 return 0; 1024 } 1025 1026 /* Emulating the dcba insn is just a no-op. */ 1027 if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) { 1028 PPC_WARN_EMULATED(dcba, regs); 1029 return 0; 1030 } 1031 1032 /* Emulate the mcrxr insn. */ 1033 if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) { 1034 int shift = (instword >> 21) & 0x1c; 1035 unsigned long msk = 0xf0000000UL >> shift; 1036 1037 PPC_WARN_EMULATED(mcrxr, regs); 1038 regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk); 1039 regs->xer &= ~0xf0000000UL; 1040 return 0; 1041 } 1042 1043 /* Emulate load/store string insn. */ 1044 if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) { 1045 if (tm_abort_check(regs, 1046 TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT)) 1047 return -EINVAL; 1048 PPC_WARN_EMULATED(string, regs); 1049 return emulate_string_inst(regs, instword); 1050 } 1051 1052 /* Emulate the popcntb (Population Count Bytes) instruction. */ 1053 if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) { 1054 PPC_WARN_EMULATED(popcntb, regs); 1055 return emulate_popcntb_inst(regs, instword); 1056 } 1057 1058 /* Emulate isel (Integer Select) instruction */ 1059 if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) { 1060 PPC_WARN_EMULATED(isel, regs); 1061 return emulate_isel(regs, instword); 1062 } 1063 1064 /* Emulate sync instruction variants */ 1065 if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) { 1066 PPC_WARN_EMULATED(sync, regs); 1067 asm volatile("sync"); 1068 return 0; 1069 } 1070 1071 #ifdef CONFIG_PPC64 1072 /* Emulate the mfspr rD, DSCR. */ 1073 if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) == 1074 PPC_INST_MFSPR_DSCR_USER) || 1075 ((instword & PPC_INST_MFSPR_DSCR_MASK) == 1076 PPC_INST_MFSPR_DSCR)) && 1077 cpu_has_feature(CPU_FTR_DSCR)) { 1078 PPC_WARN_EMULATED(mfdscr, regs); 1079 rd = (instword >> 21) & 0x1f; 1080 regs->gpr[rd] = mfspr(SPRN_DSCR); 1081 return 0; 1082 } 1083 /* Emulate the mtspr DSCR, rD. 
	/* Emulate the mtspr DSCR, rD. */
	if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
		PPC_INST_MTSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
		PPC_INST_MTSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mtdscr, regs);
		rd = (instword >> 21) & 0x1f;
		current->thread.dscr = regs->gpr[rd];
		current->thread.dscr_inherit = 1;
		mtspr(SPRN_DSCR, current->thread.dscr);
		return 0;
	}
#endif

	return -EINVAL;
}

int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}

#ifdef CONFIG_MATH_EMULATION
static int emulate_math(struct pt_regs *regs)
{
	int ret;
	extern int do_mathemu(struct pt_regs *regs);

	ret = do_mathemu(regs);
	if (ret >= 0)
		PPC_WARN_EMULATED(math, regs);

	switch (ret) {
	case 0:
		emulate_single_step(regs);
		return 0;
	case 1: {
		int code = 0;
		code = __parse_fpscr(current->thread.fp_state.fpscr);
		_exception(SIGFPE, regs, code, regs->nip);
		return 0;
	}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return 0;
	}

	return -1;
}
#else
static inline int emulate_math(struct pt_regs *regs) { return -1; }
#endif

void __kprobes program_check_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	unsigned int reason = get_reason(regs);

	/* We can now get here via a FP Unavailable exception if the core
	 * has no FPU, in that case the reason flags will be 0 */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		goto bail;
	}
	if (reason & REASON_TRAP) {
		/* Debugger is first in line to stop recursive faults in
		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
		if (debugger_bpt(regs))
			goto bail;

		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
				== NOTIFY_STOP)
			goto bail;

		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		goto bail;
	}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (reason & REASON_TM) {
		/* This is a TM "Bad Thing Exception" program check.
		 * This occurs when:
		 * -  An rfid/hrfid/mtmsrd attempts to cause an illegal
		 *    transition in TM states.
		 * -  A trechkpt is attempted when transactional.
		 * -  A treclaim is attempted when non transactional.
		 * -  A tend is illegally attempted.
		 * -  writing a TM SPR when transactional.
		 */
		if (!user_mode(regs) &&
		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		/* If usermode caused this, it's done something illegal and
		 * gets a SIGILL slap on the wrist.  We call it an illegal
		 * operand to distinguish from the instruction just being bad
		 * (e.g. executing a 'tend' on a CPU without TM!); it's an
		 * illegal /placement/ of a valid instruction.
		 */
1190 */ 1191 if (user_mode(regs)) { 1192 _exception(SIGILL, regs, ILL_ILLOPN, regs->nip); 1193 goto bail; 1194 } else { 1195 printk(KERN_EMERG "Unexpected TM Bad Thing exception " 1196 "at %lx (msr 0x%x)\n", regs->nip, reason); 1197 die("Unrecoverable exception", regs, SIGABRT); 1198 } 1199 } 1200 #endif 1201 1202 /* 1203 * If we took the program check in the kernel skip down to sending a 1204 * SIGILL. The subsequent cases all relate to emulating instructions 1205 * which we should only do for userspace. We also do not want to enable 1206 * interrupts for kernel faults because that might lead to further 1207 * faults, and loose the context of the original exception. 1208 */ 1209 if (!user_mode(regs)) 1210 goto sigill; 1211 1212 /* We restore the interrupt state now */ 1213 if (!arch_irq_disabled_regs(regs)) 1214 local_irq_enable(); 1215 1216 /* (reason & REASON_ILLEGAL) would be the obvious thing here, 1217 * but there seems to be a hardware bug on the 405GP (RevD) 1218 * that means ESR is sometimes set incorrectly - either to 1219 * ESR_DST (!?) or 0. In the process of chasing this with the 1220 * hardware people - not sure if it can happen on any illegal 1221 * instruction or only on FP instructions, whether there is a 1222 * pattern to occurrences etc. -dgibson 31/Mar/2003 1223 */ 1224 if (!emulate_math(regs)) 1225 goto bail; 1226 1227 /* Try to emulate it if we should. */ 1228 if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) { 1229 switch (emulate_instruction(regs)) { 1230 case 0: 1231 regs->nip += 4; 1232 emulate_single_step(regs); 1233 goto bail; 1234 case -EFAULT: 1235 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 1236 goto bail; 1237 } 1238 } 1239 1240 sigill: 1241 if (reason & REASON_PRIVILEGED) 1242 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); 1243 else 1244 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1245 1246 bail: 1247 exception_exit(prev_state); 1248 } 1249 1250 /* 1251 * This occurs when running in hypervisor mode on POWER6 or later 1252 * and an illegal instruction is encountered. 
1253 */ 1254 void __kprobes emulation_assist_interrupt(struct pt_regs *regs) 1255 { 1256 regs->msr |= REASON_ILLEGAL; 1257 program_check_exception(regs); 1258 } 1259 1260 void alignment_exception(struct pt_regs *regs) 1261 { 1262 enum ctx_state prev_state = exception_enter(); 1263 int sig, code, fixed = 0; 1264 1265 /* We restore the interrupt state now */ 1266 if (!arch_irq_disabled_regs(regs)) 1267 local_irq_enable(); 1268 1269 if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT)) 1270 goto bail; 1271 1272 /* we don't implement logging of alignment exceptions */ 1273 if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) 1274 fixed = fix_alignment(regs); 1275 1276 if (fixed == 1) { 1277 regs->nip += 4; /* skip over emulated instruction */ 1278 emulate_single_step(regs); 1279 goto bail; 1280 } 1281 1282 /* Operand address was bad */ 1283 if (fixed == -EFAULT) { 1284 sig = SIGSEGV; 1285 code = SEGV_ACCERR; 1286 } else { 1287 sig = SIGBUS; 1288 code = BUS_ADRALN; 1289 } 1290 if (user_mode(regs)) 1291 _exception(sig, regs, code, regs->dar); 1292 else 1293 bad_page_fault(regs, regs->dar, sig); 1294 1295 bail: 1296 exception_exit(prev_state); 1297 } 1298 1299 void StackOverflow(struct pt_regs *regs) 1300 { 1301 printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n", 1302 current, regs->gpr[1]); 1303 debugger(regs); 1304 show_regs(regs); 1305 panic("kernel stack overflow"); 1306 } 1307 1308 void nonrecoverable_exception(struct pt_regs *regs) 1309 { 1310 printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n", 1311 regs->nip, regs->msr); 1312 debugger(regs); 1313 die("nonrecoverable exception", regs, SIGKILL); 1314 } 1315 1316 void kernel_fp_unavailable_exception(struct pt_regs *regs) 1317 { 1318 enum ctx_state prev_state = exception_enter(); 1319 1320 printk(KERN_EMERG "Unrecoverable FP Unavailable Exception " 1321 "%lx at %lx\n", regs->trap, regs->nip); 1322 die("Unrecoverable FP Unavailable Exception", regs, SIGABRT); 1323 1324 exception_exit(prev_state); 1325 } 1326 1327 void altivec_unavailable_exception(struct pt_regs *regs) 1328 { 1329 enum ctx_state prev_state = exception_enter(); 1330 1331 if (user_mode(regs)) { 1332 /* A user program has executed an altivec instruction, 1333 but this kernel doesn't support altivec. */ 1334 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1335 goto bail; 1336 } 1337 1338 printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception " 1339 "%lx at %lx\n", regs->trap, regs->nip); 1340 die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT); 1341 1342 bail: 1343 exception_exit(prev_state); 1344 } 1345 1346 void vsx_unavailable_exception(struct pt_regs *regs) 1347 { 1348 if (user_mode(regs)) { 1349 /* A user program has executed an vsx instruction, 1350 but this kernel doesn't support vsx. 
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}

#ifdef CONFIG_PPC64
void facility_unavailable_exception(struct pt_regs *regs)
{
	static char *facility_strings[] = {
		[FSCR_FP_LG] = "FPU",
		[FSCR_VECVSX_LG] = "VMX/VSX",
		[FSCR_DSCR_LG] = "DSCR",
		[FSCR_PM_LG] = "PMU SPRs",
		[FSCR_BHRB_LG] = "BHRB",
		[FSCR_TM_LG] = "TM",
		[FSCR_EBB_LG] = "EBB",
		[FSCR_TAR_LG] = "TAR",
	};
	char *facility = "unknown";
	u64 value;
	u32 instword, rd;
	u8 status;
	bool hv;

	hv = (regs->trap == 0xf80);
	if (hv)
		value = mfspr(SPRN_HFSCR);
	else
		value = mfspr(SPRN_FSCR);

	status = value >> 56;
	if (status == FSCR_DSCR_LG) {
		/*
		 * User is accessing the DSCR register using the problem
		 * state only SPR number (0x03) either through a mfspr or
		 * a mtspr instruction. If it is a write attempt through
		 * a mtspr, then we set the inherit bit. This also allows
		 * the user to write or read the register directly in the
		 * future by setting via the FSCR DSCR bit. But in case it
		 * is a read DSCR attempt through a mfspr instruction, we
		 * just emulate the instruction instead. This code path will
		 * always emulate all the mfspr instructions till the user
		 * has attempted at least one mtspr instruction. This way it
		 * preserves the same behaviour when the user is accessing
		 * the DSCR through privilege level only SPR number (0x11)
		 * which is emulated through illegal instruction exception.
		 * We always leave HFSCR DSCR set.
		 */
		if (get_user(instword, (u32 __user *)(regs->nip))) {
			pr_err("Failed to fetch the user instruction\n");
			return;
		}

		/* Write into DSCR (mtspr 0x03, RS) */
		if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
				== PPC_INST_MTSPR_DSCR_USER) {
			rd = (instword >> 21) & 0x1f;
			current->thread.dscr = regs->gpr[rd];
			current->thread.dscr_inherit = 1;
			mtspr(SPRN_FSCR, value | FSCR_DSCR);
		}

		/* Read from DSCR (mfspr RT, 0x03) */
		if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
				== PPC_INST_MFSPR_DSCR_USER) {
			if (emulate_instruction(regs)) {
				pr_err("DSCR based mfspr emulation failed\n");
				return;
			}
			regs->nip += 4;
			emulate_single_step(regs);
		}
		return;
	}

	if ((status < ARRAY_SIZE(facility_strings)) &&
	    facility_strings[status])
		facility = facility_strings[status];

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	pr_err_ratelimited(
		"%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
		hv ? "Hypervisor " : "", facility, regs->nip, regs->msr);

	if (user_mode(regs)) {
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	die("Unexpected facility unavailable exception", regs, SIGABRT);
}
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
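
/*
 * The three *_unavailable_tm() handlers below share one pattern: reclaim
 * the active transaction, enable the requested facility in the saved MSR,
 * recheckpoint, and then reload any transactional state that was already
 * live for the other facilities.
 */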
void fp_unavailable_tm(struct pt_regs *regs)
{
	/* Note:  This does not handle any kind of FP laziness. */

	TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
		 regs->nip, regs->msr);

	/* We can only have got here if the task started using FP after
	 * beginning the transaction.  So, the transactional regs are just a
	 * copy of the checkpointed ones.  But, we still need to recheckpoint
	 * as we're enabling FP for the process; it will return, abort the
	 * transaction, and probably retry but now with FP enabled.  So the
	 * checkpointed FP registers need to be loaded.
	 */
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
	/* Reclaim didn't save out any FPRs to transact_fprs. */

	/* Enable FP for the task: */
	regs->msr |= (MSR_FP | current->thread.fpexc_mode);

	/* This loads and recheckpoints the FP registers from
	 * thread.fpr[].  They will remain in registers after the
	 * checkpoint so we don't need to reload them after.
	 * If VMX is in use, the VRs now hold checkpointed values,
	 * so we don't want to load the VRs from the thread_struct.
	 */
	tm_recheckpoint(&current->thread, MSR_FP);

	/* If VMX is in use, get the transactional values back */
	if (regs->msr & MSR_VEC) {
		do_load_up_transact_altivec(&current->thread);
		/* At this point all the VSX state is loaded, so enable it */
		regs->msr |= MSR_VSX;
	}
}

void altivec_unavailable_tm(struct pt_regs *regs)
{
	/* See the comments in fp_unavailable_tm().  This function operates
	 * the same way.
	 */

	TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
		 regs->nip, regs->msr);
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
	regs->msr |= MSR_VEC;
	tm_recheckpoint(&current->thread, MSR_VEC);
	current->thread.used_vr = 1;

	if (regs->msr & MSR_FP) {
		do_load_up_transact_fpu(&current->thread);
		regs->msr |= MSR_VSX;
	}
}

void vsx_unavailable_tm(struct pt_regs *regs)
{
	unsigned long orig_msr = regs->msr;

	/* See the comments in fp_unavailable_tm().  This works similarly,
	 * though we're loading both FP and VEC registers in here.
	 *
	 * If FP isn't in use, load FP regs.  If VEC isn't in use, load VEC
	 * regs.  Either way, set MSR_VSX.
	 */

	TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
		 regs->nip, regs->msr);

	current->thread.used_vsr = 1;

	/* If FP and VMX are already loaded, we have all the state we need */
	if ((orig_msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) {
		regs->msr |= MSR_VSX;
		return;
	}

	/* This reclaims FP and/or VR regs if they're already enabled */
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);

	regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode |
		MSR_VSX;

	/* This loads & recheckpoints FP and VRs; but we have
	 * to be sure not to overwrite previously-valid state.
	 */
1540 */ 1541 tm_recheckpoint(¤t->thread, regs->msr & ~orig_msr); 1542 1543 if (orig_msr & MSR_FP) 1544 do_load_up_transact_fpu(¤t->thread); 1545 if (orig_msr & MSR_VEC) 1546 do_load_up_transact_altivec(¤t->thread); 1547 } 1548 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 1549 1550 void performance_monitor_exception(struct pt_regs *regs) 1551 { 1552 __this_cpu_inc(irq_stat.pmu_irqs); 1553 1554 perf_irq(regs); 1555 } 1556 1557 #ifdef CONFIG_8xx 1558 void SoftwareEmulation(struct pt_regs *regs) 1559 { 1560 CHECK_FULL_REGS(regs); 1561 1562 if (!user_mode(regs)) { 1563 debugger(regs); 1564 die("Kernel Mode Unimplemented Instruction or SW FPU Emulation", 1565 regs, SIGFPE); 1566 } 1567 1568 if (!emulate_math(regs)) 1569 return; 1570 1571 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1572 } 1573 #endif /* CONFIG_8xx */ 1574 1575 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 1576 static void handle_debug(struct pt_regs *regs, unsigned long debug_status) 1577 { 1578 int changed = 0; 1579 /* 1580 * Determine the cause of the debug event, clear the 1581 * event flags and send a trap to the handler. Torez 1582 */ 1583 if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) { 1584 dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W); 1585 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE 1586 current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE; 1587 #endif 1588 do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT, 1589 5); 1590 changed |= 0x01; 1591 } else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) { 1592 dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W); 1593 do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT, 1594 6); 1595 changed |= 0x01; 1596 } else if (debug_status & DBSR_IAC1) { 1597 current->thread.debug.dbcr0 &= ~DBCR0_IAC1; 1598 dbcr_iac_range(current) &= ~DBCR_IAC12MODE; 1599 do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT, 1600 1); 1601 changed |= 0x01; 1602 } else if (debug_status & DBSR_IAC2) { 1603 current->thread.debug.dbcr0 &= ~DBCR0_IAC2; 1604 do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT, 1605 2); 1606 changed |= 0x01; 1607 } else if (debug_status & DBSR_IAC3) { 1608 current->thread.debug.dbcr0 &= ~DBCR0_IAC3; 1609 dbcr_iac_range(current) &= ~DBCR_IAC34MODE; 1610 do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT, 1611 3); 1612 changed |= 0x01; 1613 } else if (debug_status & DBSR_IAC4) { 1614 current->thread.debug.dbcr0 &= ~DBCR0_IAC4; 1615 do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT, 1616 4); 1617 changed |= 0x01; 1618 } 1619 /* 1620 * At the point this routine was called, the MSR(DE) was turned off. 1621 * Check all other debug flags and see if that bit needs to be turned 1622 * back on or not. 1623 */ 1624 if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0, 1625 current->thread.debug.dbcr1)) 1626 regs->msr |= MSR_DE; 1627 else 1628 /* Make sure the IDM flag is off */ 1629 current->thread.debug.dbcr0 &= ~DBCR0_IDM; 1630 1631 if (changed & 0x01) 1632 mtspr(SPRN_DBCR0, current->thread.debug.dbcr0); 1633 } 1634 1635 void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status) 1636 { 1637 current->thread.debug.dbsr = debug_status; 1638 1639 /* Hack alert: On BookE, Branch Taken stops on the branch itself, while 1640 * on server, it stops on the target of the branch. 
	 * In order to simulate the server behaviour, we thus restart right
	 * away with a single step instead of stopping here when hitting a BT
	 */
	if (debug_status & DBSR_BT) {
		regs->msr &= ~MSR_DE;

		/* Disable BT */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
		/* Clear the BT event */
		mtspr(SPRN_DBSR, DBSR_BT);

		/* Do the single step trick only when coming from userspace */
		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_BT;
			current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
			regs->msr |= MSR_DE;
			return;
		}

		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}
		if (debugger_sstep(regs))
			return;
	} else if (debug_status & DBSR_IC) {	/* Instruction complete */
		regs->msr &= ~MSR_DE;

		/* Disable instruction completion */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
		/* Clear the instruction completion event */
		mtspr(SPRN_DBSR, DBSR_IC);

		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}

		if (debugger_sstep(regs))
			return;

		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_IC;
			if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
					       current->thread.debug.dbcr1))
				regs->msr |= MSR_DE;
			else
				/* Make sure the IDM bit is off */
				current->thread.debug.dbcr0 &= ~DBCR0_IDM;
		}

		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
	} else
		handle_debug(regs, debug_status);
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */

#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	flush_altivec_to_thread(current);

	PPC_WARN_EMULATED(altivec, regs);
	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
				   "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vr_state.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	/* We treat cache locking instructions from the user
	 * as priv ops, in the future we could try to do
	 * something smarter
	 */
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	return;
}
#endif /* CONFIG_FSL_BOOKE */

#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
	extern int do_spe_mathemu(struct pt_regs *regs);
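	/*
	 * The SPEFSCR status bits are mapped to POSIX FPE_* si_codes
	 * below; a code is only chosen when the corresponding
	 * PR_FP_EXC_* bit in the task's fpexc_mode asks for that
	 * exception to be reported.
	 */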
	unsigned long spefscr;
	int fpexc_mode;
	int code = 0;
	int err;

	flush_spe_to_thread(current);

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF))
		code = FPE_FLTOVF;
	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND))
		code = FPE_FLTUND;
	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
		code = FPE_FLTDIV;
	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV))
		code = FPE_FLTINV;
	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
		code = FPE_FLTRES;

	err = do_spe_mathemu(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, code, regs->nip);
	}

	return;
}

void SPEFloatingPointRoundException(struct pt_regs *regs)
{
	extern int speround_handler(struct pt_regs *regs);
	int err;

	preempt_disable();
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	preempt_enable();

	regs->nip -= 4;
	err = speround_handler(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, 0, regs->nip);
		return;
	}
}
#endif

/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
	       regs->trap, regs->nip);
	die("Unrecoverable exception", regs, SIGABRT);
}

#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR) & ~TCR_WIE);
	return;
}

void WatchdogException(struct pt_regs *regs)
{
	printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif

/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
1870 */ 1871 void kernel_bad_stack(struct pt_regs *regs) 1872 { 1873 printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n", 1874 regs->gpr[1], regs->nip); 1875 die("Bad kernel stack pointer", regs, SIGABRT); 1876 } 1877 1878 void __init trap_init(void) 1879 { 1880 } 1881 1882 1883 #ifdef CONFIG_PPC_EMULATED_STATS 1884 1885 #define WARN_EMULATED_SETUP(type) .type = { .name = #type } 1886 1887 struct ppc_emulated ppc_emulated = { 1888 #ifdef CONFIG_ALTIVEC 1889 WARN_EMULATED_SETUP(altivec), 1890 #endif 1891 WARN_EMULATED_SETUP(dcba), 1892 WARN_EMULATED_SETUP(dcbz), 1893 WARN_EMULATED_SETUP(fp_pair), 1894 WARN_EMULATED_SETUP(isel), 1895 WARN_EMULATED_SETUP(mcrxr), 1896 WARN_EMULATED_SETUP(mfpvr), 1897 WARN_EMULATED_SETUP(multiple), 1898 WARN_EMULATED_SETUP(popcntb), 1899 WARN_EMULATED_SETUP(spe), 1900 WARN_EMULATED_SETUP(string), 1901 WARN_EMULATED_SETUP(sync), 1902 WARN_EMULATED_SETUP(unaligned), 1903 #ifdef CONFIG_MATH_EMULATION 1904 WARN_EMULATED_SETUP(math), 1905 #endif 1906 #ifdef CONFIG_VSX 1907 WARN_EMULATED_SETUP(vsx), 1908 #endif 1909 #ifdef CONFIG_PPC64 1910 WARN_EMULATED_SETUP(mfdscr), 1911 WARN_EMULATED_SETUP(mtdscr), 1912 WARN_EMULATED_SETUP(lq_stq), 1913 #endif 1914 }; 1915 1916 u32 ppc_warn_emulated; 1917 1918 void ppc_warn_emulated_print(const char *type) 1919 { 1920 pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm, 1921 type); 1922 } 1923 1924 static int __init ppc_warn_emulated_init(void) 1925 { 1926 struct dentry *dir, *d; 1927 unsigned int i; 1928 struct ppc_emulated_entry *entries = (void *)&ppc_emulated; 1929 1930 if (!powerpc_debugfs_root) 1931 return -ENODEV; 1932 1933 dir = debugfs_create_dir("emulated_instructions", 1934 powerpc_debugfs_root); 1935 if (!dir) 1936 return -ENOMEM; 1937 1938 d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir, 1939 &ppc_warn_emulated); 1940 if (!d) 1941 goto fail; 1942 1943 for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) { 1944 d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir, 1945 (u32 *)&entries[i].val.counter); 1946 if (!d) 1947 goto fail; 1948 } 1949 1950 return 0; 1951 1952 fail: 1953 debugfs_remove_recursive(dir); 1954 return -ENOMEM; 1955 } 1956 1957 device_initcall(ppc_warn_emulated_init); 1958 1959 #endif /* CONFIG_PPC_EMULATED_STATS */ 1960