1 /* 2 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 3 * Copyright 2007-2010 Freescale Semiconductor, Inc. 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License 7 * as published by the Free Software Foundation; either version 8 * 2 of the License, or (at your option) any later version. 9 * 10 * Modified by Cort Dougan (cort@cs.nmt.edu) 11 * and Paul Mackerras (paulus@samba.org) 12 */ 13 14 /* 15 * This file handles the architecture-dependent parts of hardware exceptions 16 */ 17 18 #include <linux/errno.h> 19 #include <linux/sched.h> 20 #include <linux/kernel.h> 21 #include <linux/mm.h> 22 #include <linux/stddef.h> 23 #include <linux/unistd.h> 24 #include <linux/ptrace.h> 25 #include <linux/user.h> 26 #include <linux/interrupt.h> 27 #include <linux/init.h> 28 #include <linux/module.h> 29 #include <linux/prctl.h> 30 #include <linux/delay.h> 31 #include <linux/kprobes.h> 32 #include <linux/kexec.h> 33 #include <linux/backlight.h> 34 #include <linux/bug.h> 35 #include <linux/kdebug.h> 36 #include <linux/debugfs.h> 37 #include <linux/ratelimit.h> 38 #include <linux/context_tracking.h> 39 40 #include <asm/emulated_ops.h> 41 #include <asm/pgtable.h> 42 #include <asm/uaccess.h> 43 #include <asm/io.h> 44 #include <asm/machdep.h> 45 #include <asm/rtas.h> 46 #include <asm/pmc.h> 47 #include <asm/reg.h> 48 #ifdef CONFIG_PMAC_BACKLIGHT 49 #include <asm/backlight.h> 50 #endif 51 #ifdef CONFIG_PPC64 52 #include <asm/firmware.h> 53 #include <asm/processor.h> 54 #include <asm/tm.h> 55 #endif 56 #include <asm/kexec.h> 57 #include <asm/ppc-opcode.h> 58 #include <asm/rio.h> 59 #include <asm/fadump.h> 60 #include <asm/switch_to.h> 61 #include <asm/tm.h> 62 #include <asm/debug.h> 63 #include <sysdev/fsl_pci.h> 64 65 #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) 66 int (*__debugger)(struct pt_regs *regs) __read_mostly; 67 int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly; 68 int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly; 69 int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly; 70 int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly; 71 int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly; 72 int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly; 73 74 EXPORT_SYMBOL(__debugger); 75 EXPORT_SYMBOL(__debugger_ipi); 76 EXPORT_SYMBOL(__debugger_bpt); 77 EXPORT_SYMBOL(__debugger_sstep); 78 EXPORT_SYMBOL(__debugger_iabr_match); 79 EXPORT_SYMBOL(__debugger_break_match); 80 EXPORT_SYMBOL(__debugger_fault_handler); 81 #endif 82 83 /* Transactional Memory trap debug */ 84 #ifdef TM_DEBUG_SW 85 #define TM_DEBUG(x...) printk(KERN_INFO x) 86 #else 87 #define TM_DEBUG(x...) do { } while(0) 88 #endif 89 90 /* 91 * Trap & Exception support 92 */ 93 94 #ifdef CONFIG_PMAC_BACKLIGHT 95 static void pmac_backlight_unblank(void) 96 { 97 mutex_lock(&pmac_backlight_mutex); 98 if (pmac_backlight) { 99 struct backlight_properties *props; 100 101 props = &pmac_backlight->props; 102 props->brightness = props->max_brightness; 103 props->power = FB_BLANK_UNBLANK; 104 backlight_update_status(pmac_backlight); 105 } 106 mutex_unlock(&pmac_backlight_mutex); 107 } 108 #else 109 static inline void pmac_backlight_unblank(void) { } 110 #endif 111 112 static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; 113 static int die_owner = -1; 114 static unsigned int die_nest_count; 115 static int die_counter; 116 117 static unsigned __kprobes long oops_begin(struct pt_regs *regs) 118 { 119 int cpu; 120 unsigned long flags; 121 122 if (debugger(regs)) 123 return 1; 124 125 oops_enter(); 126 127 /* racy, but better than risking deadlock. */ 128 raw_local_irq_save(flags); 129 cpu = smp_processor_id(); 130 if (!arch_spin_trylock(&die_lock)) { 131 if (cpu == die_owner) 132 /* nested oops. should stop eventually */; 133 else 134 arch_spin_lock(&die_lock); 135 } 136 die_nest_count++; 137 die_owner = cpu; 138 console_verbose(); 139 bust_spinlocks(1); 140 if (machine_is(powermac)) 141 pmac_backlight_unblank(); 142 return flags; 143 } 144 145 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, 146 int signr) 147 { 148 bust_spinlocks(0); 149 die_owner = -1; 150 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); 151 die_nest_count--; 152 oops_exit(); 153 printk("\n"); 154 if (!die_nest_count) 155 /* Nest count reaches zero, release the lock. */ 156 arch_spin_unlock(&die_lock); 157 raw_local_irq_restore(flags); 158 159 crash_fadump(regs, "die oops"); 160 161 /* 162 * A system reset (0x100) is a request to dump, so we always send 163 * it through the crashdump code. 164 */ 165 if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) { 166 crash_kexec(regs); 167 168 /* 169 * We aren't the primary crash CPU. We need to send it 170 * to a holding pattern to avoid it ending up in the panic 171 * code. 172 */ 173 crash_kexec_secondary(regs); 174 } 175 176 if (!signr) 177 return; 178 179 /* 180 * While our oops output is serialised by a spinlock, output 181 * from panic() called below can race and corrupt it. If we 182 * know we are going to panic, delay for 1 second so we have a 183 * chance to get clean backtraces from all CPUs that are oopsing. 184 */ 185 if (in_interrupt() || panic_on_oops || !current->pid || 186 is_global_init(current)) { 187 mdelay(MSEC_PER_SEC); 188 } 189 190 if (in_interrupt()) 191 panic("Fatal exception in interrupt"); 192 if (panic_on_oops) 193 panic("Fatal exception"); 194 do_exit(signr); 195 } 196 197 static int __kprobes __die(const char *str, struct pt_regs *regs, long err) 198 { 199 printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); 200 #ifdef CONFIG_PREEMPT 201 printk("PREEMPT "); 202 #endif 203 #ifdef CONFIG_SMP 204 printk("SMP NR_CPUS=%d ", NR_CPUS); 205 #endif 206 #ifdef CONFIG_DEBUG_PAGEALLOC 207 printk("DEBUG_PAGEALLOC "); 208 #endif 209 #ifdef CONFIG_NUMA 210 printk("NUMA "); 211 #endif 212 printk("%s\n", ppc_md.name ? ppc_md.name : ""); 213 214 if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP) 215 return 1; 216 217 print_modules(); 218 show_regs(regs); 219 220 return 0; 221 } 222 223 void die(const char *str, struct pt_regs *regs, long err) 224 { 225 unsigned long flags = oops_begin(regs); 226 227 if (__die(str, regs, err)) 228 err = 0; 229 oops_end(flags, regs, err); 230 } 231 232 void user_single_step_siginfo(struct task_struct *tsk, 233 struct pt_regs *regs, siginfo_t *info) 234 { 235 memset(info, 0, sizeof(*info)); 236 info->si_signo = SIGTRAP; 237 info->si_code = TRAP_TRACE; 238 info->si_addr = (void __user *)regs->nip; 239 } 240 241 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) 242 { 243 siginfo_t info; 244 const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \ 245 "at %08lx nip %08lx lr %08lx code %x\n"; 246 const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \ 247 "at %016lx nip %016lx lr %016lx code %x\n"; 248 249 if (!user_mode(regs)) { 250 die("Exception in kernel mode", regs, signr); 251 return; 252 } 253 254 if (show_unhandled_signals && unhandled_signal(current, signr)) { 255 printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32, 256 current->comm, current->pid, signr, 257 addr, regs->nip, regs->link, code); 258 } 259 260 if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs)) 261 local_irq_enable(); 262 263 current->thread.trap_nr = code; 264 memset(&info, 0, sizeof(info)); 265 info.si_signo = signr; 266 info.si_code = code; 267 info.si_addr = (void __user *) addr; 268 force_sig_info(signr, &info, current); 269 } 270 271 #ifdef CONFIG_PPC64 272 void system_reset_exception(struct pt_regs *regs) 273 { 274 /* See if any machine dependent calls */ 275 if (ppc_md.system_reset_exception) { 276 if (ppc_md.system_reset_exception(regs)) 277 return; 278 } 279 280 die("System Reset", regs, SIGABRT); 281 282 /* Must die if the interrupt is not recoverable */ 283 if (!(regs->msr & MSR_RI)) 284 panic("Unrecoverable System Reset"); 285 286 /* What should we do here? We could issue a shutdown or hard reset. */ 287 } 288 289 /* 290 * This function is called in real mode. Strictly no printk's please. 291 * 292 * regs->nip and regs->msr contains srr0 and ssr1. 293 */ 294 long machine_check_early(struct pt_regs *regs) 295 { 296 long handled = 0; 297 298 __this_cpu_inc(irq_stat.mce_exceptions); 299 300 if (cur_cpu_spec && cur_cpu_spec->machine_check_early) 301 handled = cur_cpu_spec->machine_check_early(regs); 302 return handled; 303 } 304 305 long hmi_exception_realmode(struct pt_regs *regs) 306 { 307 __this_cpu_inc(irq_stat.hmi_exceptions); 308 309 if (ppc_md.hmi_exception_early) 310 ppc_md.hmi_exception_early(regs); 311 312 return 0; 313 } 314 315 #endif 316 317 /* 318 * I/O accesses can cause machine checks on powermacs. 319 * Check if the NIP corresponds to the address of a sync 320 * instruction for which there is an entry in the exception 321 * table. 322 * Note that the 601 only takes a machine check on TEA 323 * (transfer error ack) signal assertion, and does not 324 * set any of the top 16 bits of SRR1. 325 * -- paulus. 326 */ 327 static inline int check_io_access(struct pt_regs *regs) 328 { 329 #ifdef CONFIG_PPC32 330 unsigned long msr = regs->msr; 331 const struct exception_table_entry *entry; 332 unsigned int *nip = (unsigned int *)regs->nip; 333 334 if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000))) 335 && (entry = search_exception_tables(regs->nip)) != NULL) { 336 /* 337 * Check that it's a sync instruction, or somewhere 338 * in the twi; isync; nop sequence that inb/inw/inl uses. 339 * As the address is in the exception table 340 * we should be able to read the instr there. 341 * For the debug message, we look at the preceding 342 * load or store. 343 */ 344 if (*nip == 0x60000000) /* nop */ 345 nip -= 2; 346 else if (*nip == 0x4c00012c) /* isync */ 347 --nip; 348 if (*nip == 0x7c0004ac || (*nip >> 26) == 3) { 349 /* sync or twi */ 350 unsigned int rb; 351 352 --nip; 353 rb = (*nip >> 11) & 0x1f; 354 printk(KERN_DEBUG "%s bad port %lx at %p\n", 355 (*nip & 0x100)? "OUT to": "IN from", 356 regs->gpr[rb] - _IO_BASE, nip); 357 regs->msr |= MSR_RI; 358 regs->nip = entry->fixup; 359 return 1; 360 } 361 } 362 #endif /* CONFIG_PPC32 */ 363 return 0; 364 } 365 366 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 367 /* On 4xx, the reason for the machine check or program exception 368 is in the ESR. */ 369 #define get_reason(regs) ((regs)->dsisr) 370 #ifndef CONFIG_FSL_BOOKE 371 #define get_mc_reason(regs) ((regs)->dsisr) 372 #else 373 #define get_mc_reason(regs) (mfspr(SPRN_MCSR)) 374 #endif 375 #define REASON_FP ESR_FP 376 #define REASON_ILLEGAL (ESR_PIL | ESR_PUO) 377 #define REASON_PRIVILEGED ESR_PPR 378 #define REASON_TRAP ESR_PTR 379 380 /* single-step stuff */ 381 #define single_stepping(regs) (current->thread.debug.dbcr0 & DBCR0_IC) 382 #define clear_single_step(regs) (current->thread.debug.dbcr0 &= ~DBCR0_IC) 383 384 #else 385 /* On non-4xx, the reason for the machine check or program 386 exception is in the MSR. */ 387 #define get_reason(regs) ((regs)->msr) 388 #define get_mc_reason(regs) ((regs)->msr) 389 #define REASON_TM 0x200000 390 #define REASON_FP 0x100000 391 #define REASON_ILLEGAL 0x80000 392 #define REASON_PRIVILEGED 0x40000 393 #define REASON_TRAP 0x20000 394 395 #define single_stepping(regs) ((regs)->msr & MSR_SE) 396 #define clear_single_step(regs) ((regs)->msr &= ~MSR_SE) 397 #endif 398 399 #if defined(CONFIG_4xx) 400 int machine_check_4xx(struct pt_regs *regs) 401 { 402 unsigned long reason = get_mc_reason(regs); 403 404 if (reason & ESR_IMCP) { 405 printk("Instruction"); 406 mtspr(SPRN_ESR, reason & ~ESR_IMCP); 407 } else 408 printk("Data"); 409 printk(" machine check in kernel mode.\n"); 410 411 return 0; 412 } 413 414 int machine_check_440A(struct pt_regs *regs) 415 { 416 unsigned long reason = get_mc_reason(regs); 417 418 printk("Machine check in kernel mode.\n"); 419 if (reason & ESR_IMCP){ 420 printk("Instruction Synchronous Machine Check exception\n"); 421 mtspr(SPRN_ESR, reason & ~ESR_IMCP); 422 } 423 else { 424 u32 mcsr = mfspr(SPRN_MCSR); 425 if (mcsr & MCSR_IB) 426 printk("Instruction Read PLB Error\n"); 427 if (mcsr & MCSR_DRB) 428 printk("Data Read PLB Error\n"); 429 if (mcsr & MCSR_DWB) 430 printk("Data Write PLB Error\n"); 431 if (mcsr & MCSR_TLBP) 432 printk("TLB Parity Error\n"); 433 if (mcsr & MCSR_ICP){ 434 flush_instruction_cache(); 435 printk("I-Cache Parity Error\n"); 436 } 437 if (mcsr & MCSR_DCSP) 438 printk("D-Cache Search Parity Error\n"); 439 if (mcsr & MCSR_DCFP) 440 printk("D-Cache Flush Parity Error\n"); 441 if (mcsr & MCSR_IMPE) 442 printk("Machine Check exception is imprecise\n"); 443 444 /* Clear MCSR */ 445 mtspr(SPRN_MCSR, mcsr); 446 } 447 return 0; 448 } 449 450 int machine_check_47x(struct pt_regs *regs) 451 { 452 unsigned long reason = get_mc_reason(regs); 453 u32 mcsr; 454 455 printk(KERN_ERR "Machine check in kernel mode.\n"); 456 if (reason & ESR_IMCP) { 457 printk(KERN_ERR 458 "Instruction Synchronous Machine Check exception\n"); 459 mtspr(SPRN_ESR, reason & ~ESR_IMCP); 460 return 0; 461 } 462 mcsr = mfspr(SPRN_MCSR); 463 if (mcsr & MCSR_IB) 464 printk(KERN_ERR "Instruction Read PLB Error\n"); 465 if (mcsr & MCSR_DRB) 466 printk(KERN_ERR "Data Read PLB Error\n"); 467 if (mcsr & MCSR_DWB) 468 printk(KERN_ERR "Data Write PLB Error\n"); 469 if (mcsr & MCSR_TLBP) 470 printk(KERN_ERR "TLB Parity Error\n"); 471 if (mcsr & MCSR_ICP) { 472 flush_instruction_cache(); 473 printk(KERN_ERR "I-Cache Parity Error\n"); 474 } 475 if (mcsr & MCSR_DCSP) 476 printk(KERN_ERR "D-Cache Search Parity Error\n"); 477 if (mcsr & PPC47x_MCSR_GPR) 478 printk(KERN_ERR "GPR Parity Error\n"); 479 if (mcsr & PPC47x_MCSR_FPR) 480 printk(KERN_ERR "FPR Parity Error\n"); 481 if (mcsr & PPC47x_MCSR_IPR) 482 printk(KERN_ERR "Machine Check exception is imprecise\n"); 483 484 /* Clear MCSR */ 485 mtspr(SPRN_MCSR, mcsr); 486 487 return 0; 488 } 489 #elif defined(CONFIG_E500) 490 int machine_check_e500mc(struct pt_regs *regs) 491 { 492 unsigned long mcsr = mfspr(SPRN_MCSR); 493 unsigned long reason = mcsr; 494 int recoverable = 1; 495 496 if (reason & MCSR_LD) { 497 recoverable = fsl_rio_mcheck_exception(regs); 498 if (recoverable == 1) 499 goto silent_out; 500 } 501 502 printk("Machine check in kernel mode.\n"); 503 printk("Caused by (from MCSR=%lx): ", reason); 504 505 if (reason & MCSR_MCP) 506 printk("Machine Check Signal\n"); 507 508 if (reason & MCSR_ICPERR) { 509 printk("Instruction Cache Parity Error\n"); 510 511 /* 512 * This is recoverable by invalidating the i-cache. 513 */ 514 mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI); 515 while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI) 516 ; 517 518 /* 519 * This will generally be accompanied by an instruction 520 * fetch error report -- only treat MCSR_IF as fatal 521 * if it wasn't due to an L1 parity error. 522 */ 523 reason &= ~MCSR_IF; 524 } 525 526 if (reason & MCSR_DCPERR_MC) { 527 printk("Data Cache Parity Error\n"); 528 529 /* 530 * In write shadow mode we auto-recover from the error, but it 531 * may still get logged and cause a machine check. We should 532 * only treat the non-write shadow case as non-recoverable. 533 */ 534 if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS)) 535 recoverable = 0; 536 } 537 538 if (reason & MCSR_L2MMU_MHIT) { 539 printk("Hit on multiple TLB entries\n"); 540 recoverable = 0; 541 } 542 543 if (reason & MCSR_NMI) 544 printk("Non-maskable interrupt\n"); 545 546 if (reason & MCSR_IF) { 547 printk("Instruction Fetch Error Report\n"); 548 recoverable = 0; 549 } 550 551 if (reason & MCSR_LD) { 552 printk("Load Error Report\n"); 553 recoverable = 0; 554 } 555 556 if (reason & MCSR_ST) { 557 printk("Store Error Report\n"); 558 recoverable = 0; 559 } 560 561 if (reason & MCSR_LDG) { 562 printk("Guarded Load Error Report\n"); 563 recoverable = 0; 564 } 565 566 if (reason & MCSR_TLBSYNC) 567 printk("Simultaneous tlbsync operations\n"); 568 569 if (reason & MCSR_BSL2_ERR) { 570 printk("Level 2 Cache Error\n"); 571 recoverable = 0; 572 } 573 574 if (reason & MCSR_MAV) { 575 u64 addr; 576 577 addr = mfspr(SPRN_MCAR); 578 addr |= (u64)mfspr(SPRN_MCARU) << 32; 579 580 printk("Machine Check %s Address: %#llx\n", 581 reason & MCSR_MEA ? "Effective" : "Physical", addr); 582 } 583 584 silent_out: 585 mtspr(SPRN_MCSR, mcsr); 586 return mfspr(SPRN_MCSR) == 0 && recoverable; 587 } 588 589 int machine_check_e500(struct pt_regs *regs) 590 { 591 unsigned long reason = get_mc_reason(regs); 592 593 if (reason & MCSR_BUS_RBERR) { 594 if (fsl_rio_mcheck_exception(regs)) 595 return 1; 596 if (fsl_pci_mcheck_exception(regs)) 597 return 1; 598 } 599 600 printk("Machine check in kernel mode.\n"); 601 printk("Caused by (from MCSR=%lx): ", reason); 602 603 if (reason & MCSR_MCP) 604 printk("Machine Check Signal\n"); 605 if (reason & MCSR_ICPERR) 606 printk("Instruction Cache Parity Error\n"); 607 if (reason & MCSR_DCP_PERR) 608 printk("Data Cache Push Parity Error\n"); 609 if (reason & MCSR_DCPERR) 610 printk("Data Cache Parity Error\n"); 611 if (reason & MCSR_BUS_IAERR) 612 printk("Bus - Instruction Address Error\n"); 613 if (reason & MCSR_BUS_RAERR) 614 printk("Bus - Read Address Error\n"); 615 if (reason & MCSR_BUS_WAERR) 616 printk("Bus - Write Address Error\n"); 617 if (reason & MCSR_BUS_IBERR) 618 printk("Bus - Instruction Data Error\n"); 619 if (reason & MCSR_BUS_RBERR) 620 printk("Bus - Read Data Bus Error\n"); 621 if (reason & MCSR_BUS_WBERR) 622 printk("Bus - Write Data Bus Error\n"); 623 if (reason & MCSR_BUS_IPERR) 624 printk("Bus - Instruction Parity Error\n"); 625 if (reason & MCSR_BUS_RPERR) 626 printk("Bus - Read Parity Error\n"); 627 628 return 0; 629 } 630 631 int machine_check_generic(struct pt_regs *regs) 632 { 633 return 0; 634 } 635 #elif defined(CONFIG_E200) 636 int machine_check_e200(struct pt_regs *regs) 637 { 638 unsigned long reason = get_mc_reason(regs); 639 640 printk("Machine check in kernel mode.\n"); 641 printk("Caused by (from MCSR=%lx): ", reason); 642 643 if (reason & MCSR_MCP) 644 printk("Machine Check Signal\n"); 645 if (reason & MCSR_CP_PERR) 646 printk("Cache Push Parity Error\n"); 647 if (reason & MCSR_CPERR) 648 printk("Cache Parity Error\n"); 649 if (reason & MCSR_EXCP_ERR) 650 printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n"); 651 if (reason & MCSR_BUS_IRERR) 652 printk("Bus - Read Bus Error on instruction fetch\n"); 653 if (reason & MCSR_BUS_DRERR) 654 printk("Bus - Read Bus Error on data load\n"); 655 if (reason & MCSR_BUS_WRERR) 656 printk("Bus - Write Bus Error on buffered store or cache line push\n"); 657 658 return 0; 659 } 660 #else 661 int machine_check_generic(struct pt_regs *regs) 662 { 663 unsigned long reason = get_mc_reason(regs); 664 665 printk("Machine check in kernel mode.\n"); 666 printk("Caused by (from SRR1=%lx): ", reason); 667 switch (reason & 0x601F0000) { 668 case 0x80000: 669 printk("Machine check signal\n"); 670 break; 671 case 0: /* for 601 */ 672 case 0x40000: 673 case 0x140000: /* 7450 MSS error and TEA */ 674 printk("Transfer error ack signal\n"); 675 break; 676 case 0x20000: 677 printk("Data parity error signal\n"); 678 break; 679 case 0x10000: 680 printk("Address parity error signal\n"); 681 break; 682 case 0x20000000: 683 printk("L1 Data Cache error\n"); 684 break; 685 case 0x40000000: 686 printk("L1 Instruction Cache error\n"); 687 break; 688 case 0x00100000: 689 printk("L2 data cache parity error\n"); 690 break; 691 default: 692 printk("Unknown values in msr\n"); 693 } 694 return 0; 695 } 696 #endif /* everything else */ 697 698 void machine_check_exception(struct pt_regs *regs) 699 { 700 enum ctx_state prev_state = exception_enter(); 701 int recover = 0; 702 703 __this_cpu_inc(irq_stat.mce_exceptions); 704 705 /* See if any machine dependent calls. In theory, we would want 706 * to call the CPU first, and call the ppc_md. one if the CPU 707 * one returns a positive number. However there is existing code 708 * that assumes the board gets a first chance, so let's keep it 709 * that way for now and fix things later. --BenH. 710 */ 711 if (ppc_md.machine_check_exception) 712 recover = ppc_md.machine_check_exception(regs); 713 else if (cur_cpu_spec->machine_check) 714 recover = cur_cpu_spec->machine_check(regs); 715 716 if (recover > 0) 717 goto bail; 718 719 #if defined(CONFIG_8xx) && defined(CONFIG_PCI) 720 /* the qspan pci read routines can cause machine checks -- Cort 721 * 722 * yuck !!! that totally needs to go away ! There are better ways 723 * to deal with that than having a wart in the mcheck handler. 724 * -- BenH 725 */ 726 bad_page_fault(regs, regs->dar, SIGBUS); 727 goto bail; 728 #endif 729 730 if (debugger_fault_handler(regs)) 731 goto bail; 732 733 if (check_io_access(regs)) 734 goto bail; 735 736 die("Machine check", regs, SIGBUS); 737 738 /* Must die if the interrupt is not recoverable */ 739 if (!(regs->msr & MSR_RI)) 740 panic("Unrecoverable Machine check"); 741 742 bail: 743 exception_exit(prev_state); 744 } 745 746 void SMIException(struct pt_regs *regs) 747 { 748 die("System Management Interrupt", regs, SIGABRT); 749 } 750 751 void handle_hmi_exception(struct pt_regs *regs) 752 { 753 struct pt_regs *old_regs; 754 755 old_regs = set_irq_regs(regs); 756 irq_enter(); 757 758 if (ppc_md.handle_hmi_exception) 759 ppc_md.handle_hmi_exception(regs); 760 761 irq_exit(); 762 set_irq_regs(old_regs); 763 } 764 765 void unknown_exception(struct pt_regs *regs) 766 { 767 enum ctx_state prev_state = exception_enter(); 768 769 printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", 770 regs->nip, regs->msr, regs->trap); 771 772 _exception(SIGTRAP, regs, 0, 0); 773 774 exception_exit(prev_state); 775 } 776 777 void instruction_breakpoint_exception(struct pt_regs *regs) 778 { 779 enum ctx_state prev_state = exception_enter(); 780 781 if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5, 782 5, SIGTRAP) == NOTIFY_STOP) 783 goto bail; 784 if (debugger_iabr_match(regs)) 785 goto bail; 786 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); 787 788 bail: 789 exception_exit(prev_state); 790 } 791 792 void RunModeException(struct pt_regs *regs) 793 { 794 _exception(SIGTRAP, regs, 0, 0); 795 } 796 797 void __kprobes single_step_exception(struct pt_regs *regs) 798 { 799 enum ctx_state prev_state = exception_enter(); 800 801 clear_single_step(regs); 802 803 if (notify_die(DIE_SSTEP, "single_step", regs, 5, 804 5, SIGTRAP) == NOTIFY_STOP) 805 goto bail; 806 if (debugger_sstep(regs)) 807 goto bail; 808 809 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); 810 811 bail: 812 exception_exit(prev_state); 813 } 814 815 /* 816 * After we have successfully emulated an instruction, we have to 817 * check if the instruction was being single-stepped, and if so, 818 * pretend we got a single-step exception. This was pointed out 819 * by Kumar Gala. -- paulus 820 */ 821 static void emulate_single_step(struct pt_regs *regs) 822 { 823 if (single_stepping(regs)) 824 single_step_exception(regs); 825 } 826 827 static inline int __parse_fpscr(unsigned long fpscr) 828 { 829 int ret = 0; 830 831 /* Invalid operation */ 832 if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX)) 833 ret = FPE_FLTINV; 834 835 /* Overflow */ 836 else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX)) 837 ret = FPE_FLTOVF; 838 839 /* Underflow */ 840 else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX)) 841 ret = FPE_FLTUND; 842 843 /* Divide by zero */ 844 else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX)) 845 ret = FPE_FLTDIV; 846 847 /* Inexact result */ 848 else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX)) 849 ret = FPE_FLTRES; 850 851 return ret; 852 } 853 854 static void parse_fpe(struct pt_regs *regs) 855 { 856 int code = 0; 857 858 flush_fp_to_thread(current); 859 860 code = __parse_fpscr(current->thread.fp_state.fpscr); 861 862 _exception(SIGFPE, regs, code, regs->nip); 863 } 864 865 /* 866 * Illegal instruction emulation support. Originally written to 867 * provide the PVR to user applications using the mfspr rd, PVR. 868 * Return non-zero if we can't emulate, or -EFAULT if the associated 869 * memory access caused an access fault. Return zero on success. 870 * 871 * There are a couple of ways to do this, either "decode" the instruction 872 * or directly match lots of bits. In this case, matching lots of 873 * bits is faster and easier. 874 * 875 */ 876 static int emulate_string_inst(struct pt_regs *regs, u32 instword) 877 { 878 u8 rT = (instword >> 21) & 0x1f; 879 u8 rA = (instword >> 16) & 0x1f; 880 u8 NB_RB = (instword >> 11) & 0x1f; 881 u32 num_bytes; 882 unsigned long EA; 883 int pos = 0; 884 885 /* Early out if we are an invalid form of lswx */ 886 if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX) 887 if ((rT == rA) || (rT == NB_RB)) 888 return -EINVAL; 889 890 EA = (rA == 0) ? 0 : regs->gpr[rA]; 891 892 switch (instword & PPC_INST_STRING_MASK) { 893 case PPC_INST_LSWX: 894 case PPC_INST_STSWX: 895 EA += NB_RB; 896 num_bytes = regs->xer & 0x7f; 897 break; 898 case PPC_INST_LSWI: 899 case PPC_INST_STSWI: 900 num_bytes = (NB_RB == 0) ? 32 : NB_RB; 901 break; 902 default: 903 return -EINVAL; 904 } 905 906 while (num_bytes != 0) 907 { 908 u8 val; 909 u32 shift = 8 * (3 - (pos & 0x3)); 910 911 /* if process is 32-bit, clear upper 32 bits of EA */ 912 if ((regs->msr & MSR_64BIT) == 0) 913 EA &= 0xFFFFFFFF; 914 915 switch ((instword & PPC_INST_STRING_MASK)) { 916 case PPC_INST_LSWX: 917 case PPC_INST_LSWI: 918 if (get_user(val, (u8 __user *)EA)) 919 return -EFAULT; 920 /* first time updating this reg, 921 * zero it out */ 922 if (pos == 0) 923 regs->gpr[rT] = 0; 924 regs->gpr[rT] |= val << shift; 925 break; 926 case PPC_INST_STSWI: 927 case PPC_INST_STSWX: 928 val = regs->gpr[rT] >> shift; 929 if (put_user(val, (u8 __user *)EA)) 930 return -EFAULT; 931 break; 932 } 933 /* move EA to next address */ 934 EA += 1; 935 num_bytes--; 936 937 /* manage our position within the register */ 938 if (++pos == 4) { 939 pos = 0; 940 if (++rT == 32) 941 rT = 0; 942 } 943 } 944 945 return 0; 946 } 947 948 static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword) 949 { 950 u32 ra,rs; 951 unsigned long tmp; 952 953 ra = (instword >> 16) & 0x1f; 954 rs = (instword >> 21) & 0x1f; 955 956 tmp = regs->gpr[rs]; 957 tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL); 958 tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL); 959 tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL; 960 regs->gpr[ra] = tmp; 961 962 return 0; 963 } 964 965 static int emulate_isel(struct pt_regs *regs, u32 instword) 966 { 967 u8 rT = (instword >> 21) & 0x1f; 968 u8 rA = (instword >> 16) & 0x1f; 969 u8 rB = (instword >> 11) & 0x1f; 970 u8 BC = (instword >> 6) & 0x1f; 971 u8 bit; 972 unsigned long tmp; 973 974 tmp = (rA == 0) ? 0 : regs->gpr[rA]; 975 bit = (regs->ccr >> (31 - BC)) & 0x1; 976 977 regs->gpr[rT] = bit ? tmp : regs->gpr[rB]; 978 979 return 0; 980 } 981 982 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 983 static inline bool tm_abort_check(struct pt_regs *regs, int cause) 984 { 985 /* If we're emulating a load/store in an active transaction, we cannot 986 * emulate it as the kernel operates in transaction suspended context. 987 * We need to abort the transaction. This creates a persistent TM 988 * abort so tell the user what caused it with a new code. 989 */ 990 if (MSR_TM_TRANSACTIONAL(regs->msr)) { 991 tm_enable(); 992 tm_abort(cause); 993 return true; 994 } 995 return false; 996 } 997 #else 998 static inline bool tm_abort_check(struct pt_regs *regs, int reason) 999 { 1000 return false; 1001 } 1002 #endif 1003 1004 static int emulate_instruction(struct pt_regs *regs) 1005 { 1006 u32 instword; 1007 u32 rd; 1008 1009 if (!user_mode(regs)) 1010 return -EINVAL; 1011 CHECK_FULL_REGS(regs); 1012 1013 if (get_user(instword, (u32 __user *)(regs->nip))) 1014 return -EFAULT; 1015 1016 /* Emulate the mfspr rD, PVR. */ 1017 if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) { 1018 PPC_WARN_EMULATED(mfpvr, regs); 1019 rd = (instword >> 21) & 0x1f; 1020 regs->gpr[rd] = mfspr(SPRN_PVR); 1021 return 0; 1022 } 1023 1024 /* Emulating the dcba insn is just a no-op. */ 1025 if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) { 1026 PPC_WARN_EMULATED(dcba, regs); 1027 return 0; 1028 } 1029 1030 /* Emulate the mcrxr insn. */ 1031 if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) { 1032 int shift = (instword >> 21) & 0x1c; 1033 unsigned long msk = 0xf0000000UL >> shift; 1034 1035 PPC_WARN_EMULATED(mcrxr, regs); 1036 regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk); 1037 regs->xer &= ~0xf0000000UL; 1038 return 0; 1039 } 1040 1041 /* Emulate load/store string insn. */ 1042 if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) { 1043 if (tm_abort_check(regs, 1044 TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT)) 1045 return -EINVAL; 1046 PPC_WARN_EMULATED(string, regs); 1047 return emulate_string_inst(regs, instword); 1048 } 1049 1050 /* Emulate the popcntb (Population Count Bytes) instruction. */ 1051 if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) { 1052 PPC_WARN_EMULATED(popcntb, regs); 1053 return emulate_popcntb_inst(regs, instword); 1054 } 1055 1056 /* Emulate isel (Integer Select) instruction */ 1057 if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) { 1058 PPC_WARN_EMULATED(isel, regs); 1059 return emulate_isel(regs, instword); 1060 } 1061 1062 /* Emulate sync instruction variants */ 1063 if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) { 1064 PPC_WARN_EMULATED(sync, regs); 1065 asm volatile("sync"); 1066 return 0; 1067 } 1068 1069 #ifdef CONFIG_PPC64 1070 /* Emulate the mfspr rD, DSCR. */ 1071 if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) == 1072 PPC_INST_MFSPR_DSCR_USER) || 1073 ((instword & PPC_INST_MFSPR_DSCR_MASK) == 1074 PPC_INST_MFSPR_DSCR)) && 1075 cpu_has_feature(CPU_FTR_DSCR)) { 1076 PPC_WARN_EMULATED(mfdscr, regs); 1077 rd = (instword >> 21) & 0x1f; 1078 regs->gpr[rd] = mfspr(SPRN_DSCR); 1079 return 0; 1080 } 1081 /* Emulate the mtspr DSCR, rD. */ 1082 if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) == 1083 PPC_INST_MTSPR_DSCR_USER) || 1084 ((instword & PPC_INST_MTSPR_DSCR_MASK) == 1085 PPC_INST_MTSPR_DSCR)) && 1086 cpu_has_feature(CPU_FTR_DSCR)) { 1087 PPC_WARN_EMULATED(mtdscr, regs); 1088 rd = (instword >> 21) & 0x1f; 1089 current->thread.dscr = regs->gpr[rd]; 1090 current->thread.dscr_inherit = 1; 1091 mtspr(SPRN_DSCR, current->thread.dscr); 1092 return 0; 1093 } 1094 #endif 1095 1096 return -EINVAL; 1097 } 1098 1099 int is_valid_bugaddr(unsigned long addr) 1100 { 1101 return is_kernel_addr(addr); 1102 } 1103 1104 #ifdef CONFIG_MATH_EMULATION 1105 static int emulate_math(struct pt_regs *regs) 1106 { 1107 int ret; 1108 extern int do_mathemu(struct pt_regs *regs); 1109 1110 ret = do_mathemu(regs); 1111 if (ret >= 0) 1112 PPC_WARN_EMULATED(math, regs); 1113 1114 switch (ret) { 1115 case 0: 1116 emulate_single_step(regs); 1117 return 0; 1118 case 1: { 1119 int code = 0; 1120 code = __parse_fpscr(current->thread.fp_state.fpscr); 1121 _exception(SIGFPE, regs, code, regs->nip); 1122 return 0; 1123 } 1124 case -EFAULT: 1125 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 1126 return 0; 1127 } 1128 1129 return -1; 1130 } 1131 #else 1132 static inline int emulate_math(struct pt_regs *regs) { return -1; } 1133 #endif 1134 1135 void __kprobes program_check_exception(struct pt_regs *regs) 1136 { 1137 enum ctx_state prev_state = exception_enter(); 1138 unsigned int reason = get_reason(regs); 1139 1140 /* We can now get here via a FP Unavailable exception if the core 1141 * has no FPU, in that case the reason flags will be 0 */ 1142 1143 if (reason & REASON_FP) { 1144 /* IEEE FP exception */ 1145 parse_fpe(regs); 1146 goto bail; 1147 } 1148 if (reason & REASON_TRAP) { 1149 /* Debugger is first in line to stop recursive faults in 1150 * rcu_lock, notify_die, or atomic_notifier_call_chain */ 1151 if (debugger_bpt(regs)) 1152 goto bail; 1153 1154 /* trap exception */ 1155 if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP) 1156 == NOTIFY_STOP) 1157 goto bail; 1158 1159 if (!(regs->msr & MSR_PR) && /* not user-mode */ 1160 report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { 1161 regs->nip += 4; 1162 goto bail; 1163 } 1164 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); 1165 goto bail; 1166 } 1167 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1168 if (reason & REASON_TM) { 1169 /* This is a TM "Bad Thing Exception" program check. 1170 * This occurs when: 1171 * - An rfid/hrfid/mtmsrd attempts to cause an illegal 1172 * transition in TM states. 1173 * - A trechkpt is attempted when transactional. 1174 * - A treclaim is attempted when non transactional. 1175 * - A tend is illegally attempted. 1176 * - writing a TM SPR when transactional. 1177 */ 1178 if (!user_mode(regs) && 1179 report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { 1180 regs->nip += 4; 1181 goto bail; 1182 } 1183 /* If usermode caused this, it's done something illegal and 1184 * gets a SIGILL slap on the wrist. We call it an illegal 1185 * operand to distinguish from the instruction just being bad 1186 * (e.g. executing a 'tend' on a CPU without TM!); it's an 1187 * illegal /placement/ of a valid instruction. 1188 */ 1189 if (user_mode(regs)) { 1190 _exception(SIGILL, regs, ILL_ILLOPN, regs->nip); 1191 goto bail; 1192 } else { 1193 printk(KERN_EMERG "Unexpected TM Bad Thing exception " 1194 "at %lx (msr 0x%x)\n", regs->nip, reason); 1195 die("Unrecoverable exception", regs, SIGABRT); 1196 } 1197 } 1198 #endif 1199 1200 /* 1201 * If we took the program check in the kernel skip down to sending a 1202 * SIGILL. The subsequent cases all relate to emulating instructions 1203 * which we should only do for userspace. We also do not want to enable 1204 * interrupts for kernel faults because that might lead to further 1205 * faults, and loose the context of the original exception. 1206 */ 1207 if (!user_mode(regs)) 1208 goto sigill; 1209 1210 /* We restore the interrupt state now */ 1211 if (!arch_irq_disabled_regs(regs)) 1212 local_irq_enable(); 1213 1214 /* (reason & REASON_ILLEGAL) would be the obvious thing here, 1215 * but there seems to be a hardware bug on the 405GP (RevD) 1216 * that means ESR is sometimes set incorrectly - either to 1217 * ESR_DST (!?) or 0. In the process of chasing this with the 1218 * hardware people - not sure if it can happen on any illegal 1219 * instruction or only on FP instructions, whether there is a 1220 * pattern to occurrences etc. -dgibson 31/Mar/2003 1221 */ 1222 if (!emulate_math(regs)) 1223 goto bail; 1224 1225 /* Try to emulate it if we should. */ 1226 if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) { 1227 switch (emulate_instruction(regs)) { 1228 case 0: 1229 regs->nip += 4; 1230 emulate_single_step(regs); 1231 goto bail; 1232 case -EFAULT: 1233 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 1234 goto bail; 1235 } 1236 } 1237 1238 sigill: 1239 if (reason & REASON_PRIVILEGED) 1240 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); 1241 else 1242 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1243 1244 bail: 1245 exception_exit(prev_state); 1246 } 1247 1248 /* 1249 * This occurs when running in hypervisor mode on POWER6 or later 1250 * and an illegal instruction is encountered. 1251 */ 1252 void __kprobes emulation_assist_interrupt(struct pt_regs *regs) 1253 { 1254 regs->msr |= REASON_ILLEGAL; 1255 program_check_exception(regs); 1256 } 1257 1258 void alignment_exception(struct pt_regs *regs) 1259 { 1260 enum ctx_state prev_state = exception_enter(); 1261 int sig, code, fixed = 0; 1262 1263 /* We restore the interrupt state now */ 1264 if (!arch_irq_disabled_regs(regs)) 1265 local_irq_enable(); 1266 1267 if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT)) 1268 goto bail; 1269 1270 /* we don't implement logging of alignment exceptions */ 1271 if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) 1272 fixed = fix_alignment(regs); 1273 1274 if (fixed == 1) { 1275 regs->nip += 4; /* skip over emulated instruction */ 1276 emulate_single_step(regs); 1277 goto bail; 1278 } 1279 1280 /* Operand address was bad */ 1281 if (fixed == -EFAULT) { 1282 sig = SIGSEGV; 1283 code = SEGV_ACCERR; 1284 } else { 1285 sig = SIGBUS; 1286 code = BUS_ADRALN; 1287 } 1288 if (user_mode(regs)) 1289 _exception(sig, regs, code, regs->dar); 1290 else 1291 bad_page_fault(regs, regs->dar, sig); 1292 1293 bail: 1294 exception_exit(prev_state); 1295 } 1296 1297 void StackOverflow(struct pt_regs *regs) 1298 { 1299 printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n", 1300 current, regs->gpr[1]); 1301 debugger(regs); 1302 show_regs(regs); 1303 panic("kernel stack overflow"); 1304 } 1305 1306 void nonrecoverable_exception(struct pt_regs *regs) 1307 { 1308 printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n", 1309 regs->nip, regs->msr); 1310 debugger(regs); 1311 die("nonrecoverable exception", regs, SIGKILL); 1312 } 1313 1314 void trace_syscall(struct pt_regs *regs) 1315 { 1316 printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n", 1317 current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0], 1318 regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted()); 1319 } 1320 1321 void kernel_fp_unavailable_exception(struct pt_regs *regs) 1322 { 1323 enum ctx_state prev_state = exception_enter(); 1324 1325 printk(KERN_EMERG "Unrecoverable FP Unavailable Exception " 1326 "%lx at %lx\n", regs->trap, regs->nip); 1327 die("Unrecoverable FP Unavailable Exception", regs, SIGABRT); 1328 1329 exception_exit(prev_state); 1330 } 1331 1332 void altivec_unavailable_exception(struct pt_regs *regs) 1333 { 1334 enum ctx_state prev_state = exception_enter(); 1335 1336 if (user_mode(regs)) { 1337 /* A user program has executed an altivec instruction, 1338 but this kernel doesn't support altivec. */ 1339 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1340 goto bail; 1341 } 1342 1343 printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception " 1344 "%lx at %lx\n", regs->trap, regs->nip); 1345 die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT); 1346 1347 bail: 1348 exception_exit(prev_state); 1349 } 1350 1351 void vsx_unavailable_exception(struct pt_regs *regs) 1352 { 1353 if (user_mode(regs)) { 1354 /* A user program has executed an vsx instruction, 1355 but this kernel doesn't support vsx. */ 1356 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1357 return; 1358 } 1359 1360 printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception " 1361 "%lx at %lx\n", regs->trap, regs->nip); 1362 die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT); 1363 } 1364 1365 #ifdef CONFIG_PPC64 1366 void facility_unavailable_exception(struct pt_regs *regs) 1367 { 1368 static char *facility_strings[] = { 1369 [FSCR_FP_LG] = "FPU", 1370 [FSCR_VECVSX_LG] = "VMX/VSX", 1371 [FSCR_DSCR_LG] = "DSCR", 1372 [FSCR_PM_LG] = "PMU SPRs", 1373 [FSCR_BHRB_LG] = "BHRB", 1374 [FSCR_TM_LG] = "TM", 1375 [FSCR_EBB_LG] = "EBB", 1376 [FSCR_TAR_LG] = "TAR", 1377 }; 1378 char *facility = "unknown"; 1379 u64 value; 1380 u8 status; 1381 bool hv; 1382 1383 hv = (regs->trap == 0xf80); 1384 if (hv) 1385 value = mfspr(SPRN_HFSCR); 1386 else 1387 value = mfspr(SPRN_FSCR); 1388 1389 status = value >> 56; 1390 if (status == FSCR_DSCR_LG) { 1391 /* User is acessing the DSCR. Set the inherit bit and allow 1392 * the user to set it directly in future by setting via the 1393 * FSCR DSCR bit. We always leave HFSCR DSCR set. 1394 */ 1395 current->thread.dscr_inherit = 1; 1396 mtspr(SPRN_FSCR, value | FSCR_DSCR); 1397 return; 1398 } 1399 1400 if ((status < ARRAY_SIZE(facility_strings)) && 1401 facility_strings[status]) 1402 facility = facility_strings[status]; 1403 1404 /* We restore the interrupt state now */ 1405 if (!arch_irq_disabled_regs(regs)) 1406 local_irq_enable(); 1407 1408 pr_err_ratelimited( 1409 "%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n", 1410 hv ? "Hypervisor " : "", facility, regs->nip, regs->msr); 1411 1412 if (user_mode(regs)) { 1413 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1414 return; 1415 } 1416 1417 die("Unexpected facility unavailable exception", regs, SIGABRT); 1418 } 1419 #endif 1420 1421 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1422 1423 void fp_unavailable_tm(struct pt_regs *regs) 1424 { 1425 /* Note: This does not handle any kind of FP laziness. */ 1426 1427 TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n", 1428 regs->nip, regs->msr); 1429 1430 /* We can only have got here if the task started using FP after 1431 * beginning the transaction. So, the transactional regs are just a 1432 * copy of the checkpointed ones. But, we still need to recheckpoint 1433 * as we're enabling FP for the process; it will return, abort the 1434 * transaction, and probably retry but now with FP enabled. So the 1435 * checkpointed FP registers need to be loaded. 1436 */ 1437 tm_reclaim_current(TM_CAUSE_FAC_UNAV); 1438 /* Reclaim didn't save out any FPRs to transact_fprs. */ 1439 1440 /* Enable FP for the task: */ 1441 regs->msr |= (MSR_FP | current->thread.fpexc_mode); 1442 1443 /* This loads and recheckpoints the FP registers from 1444 * thread.fpr[]. They will remain in registers after the 1445 * checkpoint so we don't need to reload them after. 1446 * If VMX is in use, the VRs now hold checkpointed values, 1447 * so we don't want to load the VRs from the thread_struct. 1448 */ 1449 tm_recheckpoint(¤t->thread, MSR_FP); 1450 1451 /* If VMX is in use, get the transactional values back */ 1452 if (regs->msr & MSR_VEC) { 1453 do_load_up_transact_altivec(¤t->thread); 1454 /* At this point all the VSX state is loaded, so enable it */ 1455 regs->msr |= MSR_VSX; 1456 } 1457 } 1458 1459 void altivec_unavailable_tm(struct pt_regs *regs) 1460 { 1461 /* See the comments in fp_unavailable_tm(). This function operates 1462 * the same way. 1463 */ 1464 1465 TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx," 1466 "MSR=%lx\n", 1467 regs->nip, regs->msr); 1468 tm_reclaim_current(TM_CAUSE_FAC_UNAV); 1469 regs->msr |= MSR_VEC; 1470 tm_recheckpoint(¤t->thread, MSR_VEC); 1471 current->thread.used_vr = 1; 1472 1473 if (regs->msr & MSR_FP) { 1474 do_load_up_transact_fpu(¤t->thread); 1475 regs->msr |= MSR_VSX; 1476 } 1477 } 1478 1479 void vsx_unavailable_tm(struct pt_regs *regs) 1480 { 1481 unsigned long orig_msr = regs->msr; 1482 1483 /* See the comments in fp_unavailable_tm(). This works similarly, 1484 * though we're loading both FP and VEC registers in here. 1485 * 1486 * If FP isn't in use, load FP regs. If VEC isn't in use, load VEC 1487 * regs. Either way, set MSR_VSX. 1488 */ 1489 1490 TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx," 1491 "MSR=%lx\n", 1492 regs->nip, regs->msr); 1493 1494 current->thread.used_vsr = 1; 1495 1496 /* If FP and VMX are already loaded, we have all the state we need */ 1497 if ((orig_msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) { 1498 regs->msr |= MSR_VSX; 1499 return; 1500 } 1501 1502 /* This reclaims FP and/or VR regs if they're already enabled */ 1503 tm_reclaim_current(TM_CAUSE_FAC_UNAV); 1504 1505 regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode | 1506 MSR_VSX; 1507 1508 /* This loads & recheckpoints FP and VRs; but we have 1509 * to be sure not to overwrite previously-valid state. 1510 */ 1511 tm_recheckpoint(¤t->thread, regs->msr & ~orig_msr); 1512 1513 if (orig_msr & MSR_FP) 1514 do_load_up_transact_fpu(¤t->thread); 1515 if (orig_msr & MSR_VEC) 1516 do_load_up_transact_altivec(¤t->thread); 1517 } 1518 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 1519 1520 void performance_monitor_exception(struct pt_regs *regs) 1521 { 1522 __this_cpu_inc(irq_stat.pmu_irqs); 1523 1524 perf_irq(regs); 1525 } 1526 1527 #ifdef CONFIG_8xx 1528 void SoftwareEmulation(struct pt_regs *regs) 1529 { 1530 CHECK_FULL_REGS(regs); 1531 1532 if (!user_mode(regs)) { 1533 debugger(regs); 1534 die("Kernel Mode Unimplemented Instruction or SW FPU Emulation", 1535 regs, SIGFPE); 1536 } 1537 1538 if (!emulate_math(regs)) 1539 return; 1540 1541 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1542 } 1543 #endif /* CONFIG_8xx */ 1544 1545 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 1546 static void handle_debug(struct pt_regs *regs, unsigned long debug_status) 1547 { 1548 int changed = 0; 1549 /* 1550 * Determine the cause of the debug event, clear the 1551 * event flags and send a trap to the handler. Torez 1552 */ 1553 if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) { 1554 dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W); 1555 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE 1556 current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE; 1557 #endif 1558 do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT, 1559 5); 1560 changed |= 0x01; 1561 } else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) { 1562 dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W); 1563 do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT, 1564 6); 1565 changed |= 0x01; 1566 } else if (debug_status & DBSR_IAC1) { 1567 current->thread.debug.dbcr0 &= ~DBCR0_IAC1; 1568 dbcr_iac_range(current) &= ~DBCR_IAC12MODE; 1569 do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT, 1570 1); 1571 changed |= 0x01; 1572 } else if (debug_status & DBSR_IAC2) { 1573 current->thread.debug.dbcr0 &= ~DBCR0_IAC2; 1574 do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT, 1575 2); 1576 changed |= 0x01; 1577 } else if (debug_status & DBSR_IAC3) { 1578 current->thread.debug.dbcr0 &= ~DBCR0_IAC3; 1579 dbcr_iac_range(current) &= ~DBCR_IAC34MODE; 1580 do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT, 1581 3); 1582 changed |= 0x01; 1583 } else if (debug_status & DBSR_IAC4) { 1584 current->thread.debug.dbcr0 &= ~DBCR0_IAC4; 1585 do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT, 1586 4); 1587 changed |= 0x01; 1588 } 1589 /* 1590 * At the point this routine was called, the MSR(DE) was turned off. 1591 * Check all other debug flags and see if that bit needs to be turned 1592 * back on or not. 1593 */ 1594 if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0, 1595 current->thread.debug.dbcr1)) 1596 regs->msr |= MSR_DE; 1597 else 1598 /* Make sure the IDM flag is off */ 1599 current->thread.debug.dbcr0 &= ~DBCR0_IDM; 1600 1601 if (changed & 0x01) 1602 mtspr(SPRN_DBCR0, current->thread.debug.dbcr0); 1603 } 1604 1605 void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status) 1606 { 1607 current->thread.debug.dbsr = debug_status; 1608 1609 /* Hack alert: On BookE, Branch Taken stops on the branch itself, while 1610 * on server, it stops on the target of the branch. In order to simulate 1611 * the server behaviour, we thus restart right away with a single step 1612 * instead of stopping here when hitting a BT 1613 */ 1614 if (debug_status & DBSR_BT) { 1615 regs->msr &= ~MSR_DE; 1616 1617 /* Disable BT */ 1618 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT); 1619 /* Clear the BT event */ 1620 mtspr(SPRN_DBSR, DBSR_BT); 1621 1622 /* Do the single step trick only when coming from userspace */ 1623 if (user_mode(regs)) { 1624 current->thread.debug.dbcr0 &= ~DBCR0_BT; 1625 current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC; 1626 regs->msr |= MSR_DE; 1627 return; 1628 } 1629 1630 if (notify_die(DIE_SSTEP, "block_step", regs, 5, 1631 5, SIGTRAP) == NOTIFY_STOP) { 1632 return; 1633 } 1634 if (debugger_sstep(regs)) 1635 return; 1636 } else if (debug_status & DBSR_IC) { /* Instruction complete */ 1637 regs->msr &= ~MSR_DE; 1638 1639 /* Disable instruction completion */ 1640 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC); 1641 /* Clear the instruction completion event */ 1642 mtspr(SPRN_DBSR, DBSR_IC); 1643 1644 if (notify_die(DIE_SSTEP, "single_step", regs, 5, 1645 5, SIGTRAP) == NOTIFY_STOP) { 1646 return; 1647 } 1648 1649 if (debugger_sstep(regs)) 1650 return; 1651 1652 if (user_mode(regs)) { 1653 current->thread.debug.dbcr0 &= ~DBCR0_IC; 1654 if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0, 1655 current->thread.debug.dbcr1)) 1656 regs->msr |= MSR_DE; 1657 else 1658 /* Make sure the IDM bit is off */ 1659 current->thread.debug.dbcr0 &= ~DBCR0_IDM; 1660 } 1661 1662 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); 1663 } else 1664 handle_debug(regs, debug_status); 1665 } 1666 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ 1667 1668 #if !defined(CONFIG_TAU_INT) 1669 void TAUException(struct pt_regs *regs) 1670 { 1671 printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n", 1672 regs->nip, regs->msr, regs->trap, print_tainted()); 1673 } 1674 #endif /* CONFIG_INT_TAU */ 1675 1676 #ifdef CONFIG_ALTIVEC 1677 void altivec_assist_exception(struct pt_regs *regs) 1678 { 1679 int err; 1680 1681 if (!user_mode(regs)) { 1682 printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode" 1683 " at %lx\n", regs->nip); 1684 die("Kernel VMX/Altivec assist exception", regs, SIGILL); 1685 } 1686 1687 flush_altivec_to_thread(current); 1688 1689 PPC_WARN_EMULATED(altivec, regs); 1690 err = emulate_altivec(regs); 1691 if (err == 0) { 1692 regs->nip += 4; /* skip emulated instruction */ 1693 emulate_single_step(regs); 1694 return; 1695 } 1696 1697 if (err == -EFAULT) { 1698 /* got an error reading the instruction */ 1699 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); 1700 } else { 1701 /* didn't recognize the instruction */ 1702 /* XXX quick hack for now: set the non-Java bit in the VSCR */ 1703 printk_ratelimited(KERN_ERR "Unrecognized altivec instruction " 1704 "in %s at %lx\n", current->comm, regs->nip); 1705 current->thread.vr_state.vscr.u[3] |= 0x10000; 1706 } 1707 } 1708 #endif /* CONFIG_ALTIVEC */ 1709 1710 #ifdef CONFIG_FSL_BOOKE 1711 void CacheLockingException(struct pt_regs *regs, unsigned long address, 1712 unsigned long error_code) 1713 { 1714 /* We treat cache locking instructions from the user 1715 * as priv ops, in the future we could try to do 1716 * something smarter 1717 */ 1718 if (error_code & (ESR_DLK|ESR_ILK)) 1719 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); 1720 return; 1721 } 1722 #endif /* CONFIG_FSL_BOOKE */ 1723 1724 #ifdef CONFIG_SPE 1725 void SPEFloatingPointException(struct pt_regs *regs) 1726 { 1727 extern int do_spe_mathemu(struct pt_regs *regs); 1728 unsigned long spefscr; 1729 int fpexc_mode; 1730 int code = 0; 1731 int err; 1732 1733 flush_spe_to_thread(current); 1734 1735 spefscr = current->thread.spefscr; 1736 fpexc_mode = current->thread.fpexc_mode; 1737 1738 if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) { 1739 code = FPE_FLTOVF; 1740 } 1741 else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) { 1742 code = FPE_FLTUND; 1743 } 1744 else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV)) 1745 code = FPE_FLTDIV; 1746 else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) { 1747 code = FPE_FLTINV; 1748 } 1749 else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES)) 1750 code = FPE_FLTRES; 1751 1752 err = do_spe_mathemu(regs); 1753 if (err == 0) { 1754 regs->nip += 4; /* skip emulated instruction */ 1755 emulate_single_step(regs); 1756 return; 1757 } 1758 1759 if (err == -EFAULT) { 1760 /* got an error reading the instruction */ 1761 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); 1762 } else if (err == -EINVAL) { 1763 /* didn't recognize the instruction */ 1764 printk(KERN_ERR "unrecognized spe instruction " 1765 "in %s at %lx\n", current->comm, regs->nip); 1766 } else { 1767 _exception(SIGFPE, regs, code, regs->nip); 1768 } 1769 1770 return; 1771 } 1772 1773 void SPEFloatingPointRoundException(struct pt_regs *regs) 1774 { 1775 extern int speround_handler(struct pt_regs *regs); 1776 int err; 1777 1778 preempt_disable(); 1779 if (regs->msr & MSR_SPE) 1780 giveup_spe(current); 1781 preempt_enable(); 1782 1783 regs->nip -= 4; 1784 err = speround_handler(regs); 1785 if (err == 0) { 1786 regs->nip += 4; /* skip emulated instruction */ 1787 emulate_single_step(regs); 1788 return; 1789 } 1790 1791 if (err == -EFAULT) { 1792 /* got an error reading the instruction */ 1793 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); 1794 } else if (err == -EINVAL) { 1795 /* didn't recognize the instruction */ 1796 printk(KERN_ERR "unrecognized spe instruction " 1797 "in %s at %lx\n", current->comm, regs->nip); 1798 } else { 1799 _exception(SIGFPE, regs, 0, regs->nip); 1800 return; 1801 } 1802 } 1803 #endif 1804 1805 /* 1806 * We enter here if we get an unrecoverable exception, that is, one 1807 * that happened at a point where the RI (recoverable interrupt) bit 1808 * in the MSR is 0. This indicates that SRR0/1 are live, and that 1809 * we therefore lost state by taking this exception. 1810 */ 1811 void unrecoverable_exception(struct pt_regs *regs) 1812 { 1813 printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n", 1814 regs->trap, regs->nip); 1815 die("Unrecoverable exception", regs, SIGABRT); 1816 } 1817 1818 #if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x) 1819 /* 1820 * Default handler for a Watchdog exception, 1821 * spins until a reboot occurs 1822 */ 1823 void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs) 1824 { 1825 /* Generic WatchdogHandler, implement your own */ 1826 mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE)); 1827 return; 1828 } 1829 1830 void WatchdogException(struct pt_regs *regs) 1831 { 1832 printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n"); 1833 WatchdogHandler(regs); 1834 } 1835 #endif 1836 1837 /* 1838 * We enter here if we discover during exception entry that we are 1839 * running in supervisor mode with a userspace value in the stack pointer. 1840 */ 1841 void kernel_bad_stack(struct pt_regs *regs) 1842 { 1843 printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n", 1844 regs->gpr[1], regs->nip); 1845 die("Bad kernel stack pointer", regs, SIGABRT); 1846 } 1847 1848 void __init trap_init(void) 1849 { 1850 } 1851 1852 1853 #ifdef CONFIG_PPC_EMULATED_STATS 1854 1855 #define WARN_EMULATED_SETUP(type) .type = { .name = #type } 1856 1857 struct ppc_emulated ppc_emulated = { 1858 #ifdef CONFIG_ALTIVEC 1859 WARN_EMULATED_SETUP(altivec), 1860 #endif 1861 WARN_EMULATED_SETUP(dcba), 1862 WARN_EMULATED_SETUP(dcbz), 1863 WARN_EMULATED_SETUP(fp_pair), 1864 WARN_EMULATED_SETUP(isel), 1865 WARN_EMULATED_SETUP(mcrxr), 1866 WARN_EMULATED_SETUP(mfpvr), 1867 WARN_EMULATED_SETUP(multiple), 1868 WARN_EMULATED_SETUP(popcntb), 1869 WARN_EMULATED_SETUP(spe), 1870 WARN_EMULATED_SETUP(string), 1871 WARN_EMULATED_SETUP(sync), 1872 WARN_EMULATED_SETUP(unaligned), 1873 #ifdef CONFIG_MATH_EMULATION 1874 WARN_EMULATED_SETUP(math), 1875 #endif 1876 #ifdef CONFIG_VSX 1877 WARN_EMULATED_SETUP(vsx), 1878 #endif 1879 #ifdef CONFIG_PPC64 1880 WARN_EMULATED_SETUP(mfdscr), 1881 WARN_EMULATED_SETUP(mtdscr), 1882 WARN_EMULATED_SETUP(lq_stq), 1883 #endif 1884 }; 1885 1886 u32 ppc_warn_emulated; 1887 1888 void ppc_warn_emulated_print(const char *type) 1889 { 1890 pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm, 1891 type); 1892 } 1893 1894 static int __init ppc_warn_emulated_init(void) 1895 { 1896 struct dentry *dir, *d; 1897 unsigned int i; 1898 struct ppc_emulated_entry *entries = (void *)&ppc_emulated; 1899 1900 if (!powerpc_debugfs_root) 1901 return -ENODEV; 1902 1903 dir = debugfs_create_dir("emulated_instructions", 1904 powerpc_debugfs_root); 1905 if (!dir) 1906 return -ENOMEM; 1907 1908 d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir, 1909 &ppc_warn_emulated); 1910 if (!d) 1911 goto fail; 1912 1913 for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) { 1914 d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir, 1915 (u32 *)&entries[i].val.counter); 1916 if (!d) 1917 goto fail; 1918 } 1919 1920 return 0; 1921 1922 fail: 1923 debugfs_remove_recursive(dir); 1924 return -ENOMEM; 1925 } 1926 1927 device_initcall(ppc_warn_emulated_init); 1928 1929 #endif /* CONFIG_PPC_EMULATED_STATS */ 1930