/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright 2007-2010 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Modified by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras (paulus@samba.org)
 */

/*
 * This file handles the architecture-dependent parts of hardware exceptions
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/debugfs.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>

#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#ifdef CONFIG_PPC32
#include <asm/reg.h>
#endif
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
#include <asm/tm.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/rio.h>
#include <asm/fadump.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#include <sysdev/fsl_pci.h>

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_break_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

/* Transactional Memory trap debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...)	printk(KERN_INFO x)
#else
#define TM_DEBUG(x...)	do { } while (0)
#endif
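/*
 * Note: the empty do { } while (0) body keeps TM_DEBUG() safe to use as a
 * single statement (for example as the sole body of an if/else) when
 * TM_DEBUG_SW is not defined, since the trailing semicolon still parses
 * as expected.
 */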

/*
 * Trap & Exception support
 */

#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
        mutex_lock(&pmac_backlight_mutex);
        if (pmac_backlight) {
                struct backlight_properties *props;

                props = &pmac_backlight->props;
                props->brightness = props->max_brightness;
                props->power = FB_BLANK_UNBLANK;
                backlight_update_status(pmac_backlight);
        }
        mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;

static unsigned __kprobes long oops_begin(struct pt_regs *regs)
{
        int cpu;
        unsigned long flags;

        if (debugger(regs))
                return 1;

        oops_enter();

        /* racy, but better than risking deadlock. */
        raw_local_irq_save(flags);
        cpu = smp_processor_id();
        if (!arch_spin_trylock(&die_lock)) {
                if (cpu == die_owner)
                        /* nested oops. should stop eventually */;
                else
                        arch_spin_lock(&die_lock);
        }
        die_nest_count++;
        die_owner = cpu;
        console_verbose();
        bust_spinlocks(1);
        if (machine_is(powermac))
                pmac_backlight_unblank();
        return flags;
}

static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
                               int signr)
{
        bust_spinlocks(0);
        die_owner = -1;
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        die_nest_count--;
        oops_exit();
        printk("\n");
        if (!die_nest_count)
                /* Nest count reaches zero, release the lock. */
                arch_spin_unlock(&die_lock);
        raw_local_irq_restore(flags);

        crash_fadump(regs, "die oops");

        /*
         * A system reset (0x100) is a request to dump, so we always send
         * it through the crashdump code.
         */
        if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) {
                crash_kexec(regs);

                /*
                 * We aren't the primary crash CPU. We need to send it
                 * to a holding pattern to avoid it ending up in the panic
                 * code.
                 */
                crash_kexec_secondary(regs);
        }

        if (!signr)
                return;

        /*
         * While our oops output is serialised by a spinlock, output
         * from panic() called below can race and corrupt it. If we
         * know we are going to panic, delay for 1 second so we have a
         * chance to get clean backtraces from all CPUs that are oopsing.
         */
        if (in_interrupt() || panic_on_oops || !current->pid ||
            is_global_init(current)) {
                mdelay(MSEC_PER_SEC);
        }

        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception");
        do_exit(signr);
}
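/*
 * Locking sketch for the pair above: oops_begin() takes die_lock with a
 * trylock so that a CPU which oopses again while already holding the lock
 * (cpu == die_owner) falls through instead of self-deadlocking; every
 * entry bumps die_nest_count, and only the matching oops_end() that drops
 * the count back to zero actually releases the lock.
 */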

static int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
        printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
        printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
        printk("SMP NR_CPUS=%d ", NR_CPUS);
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
        printk("DEBUG_PAGEALLOC ");
#endif
#ifdef CONFIG_NUMA
        printk("NUMA ");
#endif
        printk("%s\n", ppc_md.name ? ppc_md.name : "");

        if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
                return 1;

        print_modules();
        show_regs(regs);

        return 0;
}

void die(const char *str, struct pt_regs *regs, long err)
{
        unsigned long flags = oops_begin(regs);

        if (__die(str, regs, err))
                err = 0;
        oops_end(flags, regs, err);
}

void user_single_step_siginfo(struct task_struct *tsk,
                              struct pt_regs *regs, siginfo_t *info)
{
        memset(info, 0, sizeof(*info));
        info->si_signo = SIGTRAP;
        info->si_code = TRAP_TRACE;
        info->si_addr = (void __user *)regs->nip;
}

void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
        siginfo_t info;
        const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
                "at %08lx nip %08lx lr %08lx code %x\n";
        const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
                "at %016lx nip %016lx lr %016lx code %x\n";

        if (!user_mode(regs)) {
                die("Exception in kernel mode", regs, signr);
                return;
        }

        if (show_unhandled_signals && unhandled_signal(current, signr)) {
                printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
                                   current->comm, current->pid, signr,
                                   addr, regs->nip, regs->link, code);
        }

        if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
                local_irq_enable();

        current->thread.trap_nr = code;
        memset(&info, 0, sizeof(info));
        info.si_signo = signr;
        info.si_code = code;
        info.si_addr = (void __user *) addr;
        force_sig_info(signr, &info, current);
}

#ifdef CONFIG_PPC64
void system_reset_exception(struct pt_regs *regs)
{
        /* See if any machine dependent calls */
        if (ppc_md.system_reset_exception) {
                if (ppc_md.system_reset_exception(regs))
                        return;
        }

        die("System Reset", regs, SIGABRT);

        /* Must die if the interrupt is not recoverable */
        if (!(regs->msr & MSR_RI))
                panic("Unrecoverable System Reset");

        /* What should we do here? We could issue a shutdown or hard reset. */
}
#endif

/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
        unsigned long msr = regs->msr;
        const struct exception_table_entry *entry;
        unsigned int *nip = (unsigned int *)regs->nip;

        if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
            && (entry = search_exception_tables(regs->nip)) != NULL) {
                /*
                 * Check that it's a sync instruction, or somewhere
                 * in the twi; isync; nop sequence that inb/inw/inl uses.
                 * As the address is in the exception table
                 * we should be able to read the instr there.
                 * For the debug message, we look at the preceding
                 * load or store.
                 */
                if (*nip == 0x60000000)         /* nop */
                        nip -= 2;
                else if (*nip == 0x4c00012c)    /* isync */
                        --nip;
                if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
                        /* sync or twi */
                        unsigned int rb;

                        --nip;
                        rb = (*nip >> 11) & 0x1f;
                        printk(KERN_DEBUG "%s bad port %lx at %p\n",
                               (*nip & 0x100) ? "OUT to" : "IN from",
                               regs->gpr[rb] - _IO_BASE, nip);
                        regs->msr |= MSR_RI;
                        regs->nip = entry->fixup;
                        return 1;
                }
        }
#endif /* CONFIG_PPC32 */
        return 0;
}
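/*
 * For reference, the 32-bit inb/inw/inl accessors that check_io_access()
 * matches against end with a sequence along the lines of (a sketch, not
 * the exact macro expansion):
 *
 *      lbz     rD,0(rA)        # the actual I/O load (a store for out*)
 *      twi     0,rD,0          # trap-word-immediate that never traps
 *      isync                   # force completion before continuing
 *      nop
 *
 * which is why the code above walks backwards from the nop/isync to the
 * twi (or sync), and one instruction further to find the faulting load
 * or store and the register holding the port address.
 */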
"OUT to": "IN from", 331 regs->gpr[rb] - _IO_BASE, nip); 332 regs->msr |= MSR_RI; 333 regs->nip = entry->fixup; 334 return 1; 335 } 336 } 337 #endif /* CONFIG_PPC32 */ 338 return 0; 339 } 340 341 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 342 /* On 4xx, the reason for the machine check or program exception 343 is in the ESR. */ 344 #define get_reason(regs) ((regs)->dsisr) 345 #ifndef CONFIG_FSL_BOOKE 346 #define get_mc_reason(regs) ((regs)->dsisr) 347 #else 348 #define get_mc_reason(regs) (mfspr(SPRN_MCSR)) 349 #endif 350 #define REASON_FP ESR_FP 351 #define REASON_ILLEGAL (ESR_PIL | ESR_PUO) 352 #define REASON_PRIVILEGED ESR_PPR 353 #define REASON_TRAP ESR_PTR 354 355 /* single-step stuff */ 356 #define single_stepping(regs) (current->thread.dbcr0 & DBCR0_IC) 357 #define clear_single_step(regs) (current->thread.dbcr0 &= ~DBCR0_IC) 358 359 #else 360 /* On non-4xx, the reason for the machine check or program 361 exception is in the MSR. */ 362 #define get_reason(regs) ((regs)->msr) 363 #define get_mc_reason(regs) ((regs)->msr) 364 #define REASON_TM 0x200000 365 #define REASON_FP 0x100000 366 #define REASON_ILLEGAL 0x80000 367 #define REASON_PRIVILEGED 0x40000 368 #define REASON_TRAP 0x20000 369 370 #define single_stepping(regs) ((regs)->msr & MSR_SE) 371 #define clear_single_step(regs) ((regs)->msr &= ~MSR_SE) 372 #endif 373 374 #if defined(CONFIG_4xx) 375 int machine_check_4xx(struct pt_regs *regs) 376 { 377 unsigned long reason = get_mc_reason(regs); 378 379 if (reason & ESR_IMCP) { 380 printk("Instruction"); 381 mtspr(SPRN_ESR, reason & ~ESR_IMCP); 382 } else 383 printk("Data"); 384 printk(" machine check in kernel mode.\n"); 385 386 return 0; 387 } 388 389 int machine_check_440A(struct pt_regs *regs) 390 { 391 unsigned long reason = get_mc_reason(regs); 392 393 printk("Machine check in kernel mode.\n"); 394 if (reason & ESR_IMCP){ 395 printk("Instruction Synchronous Machine Check exception\n"); 396 mtspr(SPRN_ESR, reason & ~ESR_IMCP); 397 } 398 else { 399 u32 mcsr = mfspr(SPRN_MCSR); 400 if (mcsr & MCSR_IB) 401 printk("Instruction Read PLB Error\n"); 402 if (mcsr & MCSR_DRB) 403 printk("Data Read PLB Error\n"); 404 if (mcsr & MCSR_DWB) 405 printk("Data Write PLB Error\n"); 406 if (mcsr & MCSR_TLBP) 407 printk("TLB Parity Error\n"); 408 if (mcsr & MCSR_ICP){ 409 flush_instruction_cache(); 410 printk("I-Cache Parity Error\n"); 411 } 412 if (mcsr & MCSR_DCSP) 413 printk("D-Cache Search Parity Error\n"); 414 if (mcsr & MCSR_DCFP) 415 printk("D-Cache Flush Parity Error\n"); 416 if (mcsr & MCSR_IMPE) 417 printk("Machine Check exception is imprecise\n"); 418 419 /* Clear MCSR */ 420 mtspr(SPRN_MCSR, mcsr); 421 } 422 return 0; 423 } 424 425 int machine_check_47x(struct pt_regs *regs) 426 { 427 unsigned long reason = get_mc_reason(regs); 428 u32 mcsr; 429 430 printk(KERN_ERR "Machine check in kernel mode.\n"); 431 if (reason & ESR_IMCP) { 432 printk(KERN_ERR 433 "Instruction Synchronous Machine Check exception\n"); 434 mtspr(SPRN_ESR, reason & ~ESR_IMCP); 435 return 0; 436 } 437 mcsr = mfspr(SPRN_MCSR); 438 if (mcsr & MCSR_IB) 439 printk(KERN_ERR "Instruction Read PLB Error\n"); 440 if (mcsr & MCSR_DRB) 441 printk(KERN_ERR "Data Read PLB Error\n"); 442 if (mcsr & MCSR_DWB) 443 printk(KERN_ERR "Data Write PLB Error\n"); 444 if (mcsr & MCSR_TLBP) 445 printk(KERN_ERR "TLB Parity Error\n"); 446 if (mcsr & MCSR_ICP) { 447 flush_instruction_cache(); 448 printk(KERN_ERR "I-Cache Parity Error\n"); 449 } 450 if (mcsr & MCSR_DCSP) 451 printk(KERN_ERR "D-Cache Search Parity Error\n"); 452 if (mcsr & PPC47x_MCSR_GPR) 

int machine_check_47x(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);
        u32 mcsr;

        printk(KERN_ERR "Machine check in kernel mode.\n");
        if (reason & ESR_IMCP) {
                printk(KERN_ERR
                       "Instruction Synchronous Machine Check exception\n");
                mtspr(SPRN_ESR, reason & ~ESR_IMCP);
                return 0;
        }
        mcsr = mfspr(SPRN_MCSR);
        if (mcsr & MCSR_IB)
                printk(KERN_ERR "Instruction Read PLB Error\n");
        if (mcsr & MCSR_DRB)
                printk(KERN_ERR "Data Read PLB Error\n");
        if (mcsr & MCSR_DWB)
                printk(KERN_ERR "Data Write PLB Error\n");
        if (mcsr & MCSR_TLBP)
                printk(KERN_ERR "TLB Parity Error\n");
        if (mcsr & MCSR_ICP) {
                flush_instruction_cache();
                printk(KERN_ERR "I-Cache Parity Error\n");
        }
        if (mcsr & MCSR_DCSP)
                printk(KERN_ERR "D-Cache Search Parity Error\n");
        if (mcsr & PPC47x_MCSR_GPR)
                printk(KERN_ERR "GPR Parity Error\n");
        if (mcsr & PPC47x_MCSR_FPR)
                printk(KERN_ERR "FPR Parity Error\n");
        if (mcsr & PPC47x_MCSR_IPR)
                printk(KERN_ERR "Machine Check exception is imprecise\n");

        /* Clear MCSR */
        mtspr(SPRN_MCSR, mcsr);

        return 0;
}
#elif defined(CONFIG_E500)
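/*
 * Recoverability rules implemented below, in brief: an i-cache parity
 * error is recovered by flash-invalidating the i-cache (L1CSR1[ICFI]);
 * a d-cache parity error is tolerated only when the cache runs in write
 * shadow mode (L1CSR2[DCWS]); most bus, TLB and fetch error reports force
 * recoverable = 0, in which case machine_check_exception() will die().
 */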
"Effective" : "Physical", addr); 557 } 558 559 silent_out: 560 mtspr(SPRN_MCSR, mcsr); 561 return mfspr(SPRN_MCSR) == 0 && recoverable; 562 } 563 564 int machine_check_e500(struct pt_regs *regs) 565 { 566 unsigned long reason = get_mc_reason(regs); 567 568 if (reason & MCSR_BUS_RBERR) { 569 if (fsl_rio_mcheck_exception(regs)) 570 return 1; 571 if (fsl_pci_mcheck_exception(regs)) 572 return 1; 573 } 574 575 printk("Machine check in kernel mode.\n"); 576 printk("Caused by (from MCSR=%lx): ", reason); 577 578 if (reason & MCSR_MCP) 579 printk("Machine Check Signal\n"); 580 if (reason & MCSR_ICPERR) 581 printk("Instruction Cache Parity Error\n"); 582 if (reason & MCSR_DCP_PERR) 583 printk("Data Cache Push Parity Error\n"); 584 if (reason & MCSR_DCPERR) 585 printk("Data Cache Parity Error\n"); 586 if (reason & MCSR_BUS_IAERR) 587 printk("Bus - Instruction Address Error\n"); 588 if (reason & MCSR_BUS_RAERR) 589 printk("Bus - Read Address Error\n"); 590 if (reason & MCSR_BUS_WAERR) 591 printk("Bus - Write Address Error\n"); 592 if (reason & MCSR_BUS_IBERR) 593 printk("Bus - Instruction Data Error\n"); 594 if (reason & MCSR_BUS_RBERR) 595 printk("Bus - Read Data Bus Error\n"); 596 if (reason & MCSR_BUS_WBERR) 597 printk("Bus - Read Data Bus Error\n"); 598 if (reason & MCSR_BUS_IPERR) 599 printk("Bus - Instruction Parity Error\n"); 600 if (reason & MCSR_BUS_RPERR) 601 printk("Bus - Read Parity Error\n"); 602 603 return 0; 604 } 605 606 int machine_check_generic(struct pt_regs *regs) 607 { 608 return 0; 609 } 610 #elif defined(CONFIG_E200) 611 int machine_check_e200(struct pt_regs *regs) 612 { 613 unsigned long reason = get_mc_reason(regs); 614 615 printk("Machine check in kernel mode.\n"); 616 printk("Caused by (from MCSR=%lx): ", reason); 617 618 if (reason & MCSR_MCP) 619 printk("Machine Check Signal\n"); 620 if (reason & MCSR_CP_PERR) 621 printk("Cache Push Parity Error\n"); 622 if (reason & MCSR_CPERR) 623 printk("Cache Parity Error\n"); 624 if (reason & MCSR_EXCP_ERR) 625 printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n"); 626 if (reason & MCSR_BUS_IRERR) 627 printk("Bus - Read Bus Error on instruction fetch\n"); 628 if (reason & MCSR_BUS_DRERR) 629 printk("Bus - Read Bus Error on data load\n"); 630 if (reason & MCSR_BUS_WRERR) 631 printk("Bus - Write Bus Error on buffered store or cache line push\n"); 632 633 return 0; 634 } 635 #else 636 int machine_check_generic(struct pt_regs *regs) 637 { 638 unsigned long reason = get_mc_reason(regs); 639 640 printk("Machine check in kernel mode.\n"); 641 printk("Caused by (from SRR1=%lx): ", reason); 642 switch (reason & 0x601F0000) { 643 case 0x80000: 644 printk("Machine check signal\n"); 645 break; 646 case 0: /* for 601 */ 647 case 0x40000: 648 case 0x140000: /* 7450 MSS error and TEA */ 649 printk("Transfer error ack signal\n"); 650 break; 651 case 0x20000: 652 printk("Data parity error signal\n"); 653 break; 654 case 0x10000: 655 printk("Address parity error signal\n"); 656 break; 657 case 0x20000000: 658 printk("L1 Data Cache error\n"); 659 break; 660 case 0x40000000: 661 printk("L1 Instruction Cache error\n"); 662 break; 663 case 0x00100000: 664 printk("L2 data cache parity error\n"); 665 break; 666 default: 667 printk("Unknown values in msr\n"); 668 } 669 return 0; 670 } 671 #endif /* everything else */ 672 673 void machine_check_exception(struct pt_regs *regs) 674 { 675 enum ctx_state prev_state = exception_enter(); 676 int recover = 0; 677 678 __get_cpu_var(irq_stat).mce_exceptions++; 679 680 /* See if any 

void machine_check_exception(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();
        int recover = 0;

        __get_cpu_var(irq_stat).mce_exceptions++;

        /* See if any machine dependent calls. In theory, we would want
         * to call the CPU first, and call the ppc_md. one if the CPU
         * one returns a positive number. However there is existing code
         * that assumes the board gets a first chance, so let's keep it
         * that way for now and fix things later. --BenH.
         */
        if (ppc_md.machine_check_exception)
                recover = ppc_md.machine_check_exception(regs);
        else if (cur_cpu_spec->machine_check)
                recover = cur_cpu_spec->machine_check(regs);

        if (recover > 0)
                goto bail;

#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
        /* the qspan pci read routines can cause machine checks -- Cort
         *
         * yuck !!! that totally needs to go away ! There are better ways
         * to deal with that than having a wart in the mcheck handler.
         * -- BenH
         */
        bad_page_fault(regs, regs->dar, SIGBUS);
        goto bail;
#endif

        if (debugger_fault_handler(regs))
                goto bail;

        if (check_io_access(regs))
                goto bail;

        die("Machine check", regs, SIGBUS);

        /* Must die if the interrupt is not recoverable */
        if (!(regs->msr & MSR_RI))
                panic("Unrecoverable Machine check");

bail:
        exception_exit(prev_state);
}

void SMIException(struct pt_regs *regs)
{
        die("System Management Interrupt", regs, SIGABRT);
}

void unknown_exception(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();

        printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
               regs->nip, regs->msr, regs->trap);

        _exception(SIGTRAP, regs, 0, 0);

        exception_exit(prev_state);
}

void instruction_breakpoint_exception(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();

        if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
                       5, SIGTRAP) == NOTIFY_STOP)
                goto bail;
        if (debugger_iabr_match(regs))
                goto bail;
        _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);

bail:
        exception_exit(prev_state);
}

void RunModeException(struct pt_regs *regs)
{
        _exception(SIGTRAP, regs, 0, 0);
}

void __kprobes single_step_exception(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();

        clear_single_step(regs);

        if (notify_die(DIE_SSTEP, "single_step", regs, 5,
                       5, SIGTRAP) == NOTIFY_STOP)
                goto bail;
        if (debugger_sstep(regs))
                goto bail;

        _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);

bail:
        exception_exit(prev_state);
}

/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception. This was pointed out
 * by Kumar Gala. -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
        if (single_stepping(regs))
                single_step_exception(regs);
}

static inline int __parse_fpscr(unsigned long fpscr)
{
        int ret = 0;

        /* Invalid operation */
        if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
                ret = FPE_FLTINV;

        /* Overflow */
        else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
                ret = FPE_FLTOVF;

        /* Underflow */
        else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
                ret = FPE_FLTUND;

        /* Divide by zero */
        else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
                ret = FPE_FLTDIV;

        /* Inexact result */
        else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
                ret = FPE_FLTRES;

        return ret;
}

static void parse_fpe(struct pt_regs *regs)
{
        int code = 0;

        flush_fp_to_thread(current);

        code = __parse_fpscr(current->thread.fpscr.val);

        _exception(SIGFPE, regs, code, regs->nip);
}
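/*
 * Worked example for __parse_fpscr(): a divide-by-zero with the matching
 * trap enabled sets both FPSCR_ZE (the enable bit) and FPSCR_ZX (the
 * sticky exception bit), so the function returns FPE_FLTDIV and the
 * SIGFPE delivered by parse_fpe() carries that si_code.
 */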

/*
 * Illegal instruction emulation support. Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault. Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits. In this case, matching lots of
 * bits is faster and easier.
 *
 */
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
        u8 rT = (instword >> 21) & 0x1f;
        u8 rA = (instword >> 16) & 0x1f;
        u8 NB_RB = (instword >> 11) & 0x1f;
        u32 num_bytes;
        unsigned long EA;
        int pos = 0;

        /* Early out if we are an invalid form of lswx */
        if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
                if ((rT == rA) || (rT == NB_RB))
                        return -EINVAL;

        EA = (rA == 0) ? 0 : regs->gpr[rA];

        switch (instword & PPC_INST_STRING_MASK) {
        case PPC_INST_LSWX:
        case PPC_INST_STSWX:
                EA += NB_RB;
                num_bytes = regs->xer & 0x7f;
                break;
        case PPC_INST_LSWI:
        case PPC_INST_STSWI:
                num_bytes = (NB_RB == 0) ? 32 : NB_RB;
                break;
        default:
                return -EINVAL;
        }

        while (num_bytes != 0) {
                u8 val;
                u32 shift = 8 * (3 - (pos & 0x3));

                /* if process is 32-bit, clear upper 32 bits of EA */
                if ((regs->msr & MSR_64BIT) == 0)
                        EA &= 0xFFFFFFFF;

                switch ((instword & PPC_INST_STRING_MASK)) {
                case PPC_INST_LSWX:
                case PPC_INST_LSWI:
                        if (get_user(val, (u8 __user *)EA))
                                return -EFAULT;
                        /* first time updating this reg,
                         * zero it out */
                        if (pos == 0)
                                regs->gpr[rT] = 0;
                        regs->gpr[rT] |= val << shift;
                        break;
                case PPC_INST_STSWI:
                case PPC_INST_STSWX:
                        val = regs->gpr[rT] >> shift;
                        if (put_user(val, (u8 __user *)EA))
                                return -EFAULT;
                        break;
                }
                /* move EA to next address */
                EA += 1;
                num_bytes--;

                /* manage our position within the register */
                if (++pos == 4) {
                        pos = 0;
                        if (++rT == 32)
                                rT = 0;
                }
        }

        return 0;
}

static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
        u32 ra, rs;
        unsigned long tmp;

        ra = (instword >> 16) & 0x1f;
        rs = (instword >> 21) & 0x1f;

        tmp = regs->gpr[rs];
        tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
        tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
        tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
        regs->gpr[ra] = tmp;

        return 0;
}
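/*
 * The three steps above are the classic SWAR byte popcount. Worked
 * example on the byte 0xff: 0xff - 0x55 = 0xaa (two set bits per 2-bit
 * field), then (0xaa & 0x33) + ((0xaa >> 2) & 0x33) = 0x44 (four per
 * nibble), then (0x44 + 0x04) & 0x0f = 0x08, i.e. eight set bits, left
 * in place per byte exactly as popcntb defines.
 */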

static int emulate_isel(struct pt_regs *regs, u32 instword)
{
        u8 rT = (instword >> 21) & 0x1f;
        u8 rA = (instword >> 16) & 0x1f;
        u8 rB = (instword >> 11) & 0x1f;
        u8 BC = (instword >> 6) & 0x1f;
        u8 bit;
        unsigned long tmp;

        tmp = (rA == 0) ? 0 : regs->gpr[rA];
        bit = (regs->ccr >> (31 - BC)) & 0x1;

        regs->gpr[rT] = bit ? tmp : regs->gpr[rB];

        return 0;
}
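/*
 * isel semantics as implemented above:
 * rT = CR[BC] ? (rA == 0 ? 0 : GPR[rA]) : GPR[rB], i.e. the selected
 * condition-register bit picks between rA (with the usual "rA == 0 reads
 * as zero" convention) and rB.
 */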

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_abort_check(struct pt_regs *regs, int cause)
{
        /* If we're emulating a load/store in an active transaction, we cannot
         * emulate it as the kernel operates in transaction suspended context.
         * We need to abort the transaction. This creates a persistent TM
         * abort so tell the user what caused it with a new code.
         */
        if (MSR_TM_TRANSACTIONAL(regs->msr)) {
                tm_enable();
                tm_abort(cause);
                return true;
        }
        return false;
}
#else
static inline bool tm_abort_check(struct pt_regs *regs, int reason)
{
        return false;
}
#endif

static int emulate_instruction(struct pt_regs *regs)
{
        u32 instword;
        u32 rd;

        if (!user_mode(regs) || (regs->msr & MSR_LE))
                return -EINVAL;
        CHECK_FULL_REGS(regs);

        if (get_user(instword, (u32 __user *)(regs->nip)))
                return -EFAULT;

        /* Emulate the mfspr rD, PVR. */
        if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
                PPC_WARN_EMULATED(mfpvr, regs);
                rd = (instword >> 21) & 0x1f;
                regs->gpr[rd] = mfspr(SPRN_PVR);
                return 0;
        }

        /* Emulating the dcba insn is just a no-op. */
        if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
                PPC_WARN_EMULATED(dcba, regs);
                return 0;
        }

        /* Emulate the mcrxr insn. */
        if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
                int shift = (instword >> 21) & 0x1c;
                unsigned long msk = 0xf0000000UL >> shift;

                PPC_WARN_EMULATED(mcrxr, regs);
                regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
                regs->xer &= ~0xf0000000UL;
                return 0;
        }

        /* Emulate load/store string insn. */
        if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
                if (tm_abort_check(regs,
                                   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
                        return -EINVAL;
                PPC_WARN_EMULATED(string, regs);
                return emulate_string_inst(regs, instword);
        }

        /* Emulate the popcntb (Population Count Bytes) instruction. */
        if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
                PPC_WARN_EMULATED(popcntb, regs);
                return emulate_popcntb_inst(regs, instword);
        }

        /* Emulate isel (Integer Select) instruction */
        if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
                PPC_WARN_EMULATED(isel, regs);
                return emulate_isel(regs, instword);
        }

#ifdef CONFIG_PPC64
        /* Emulate the mfspr rD, DSCR. */
        if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
                PPC_INST_MFSPR_DSCR_USER) ||
             ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
                PPC_INST_MFSPR_DSCR)) &&
                        cpu_has_feature(CPU_FTR_DSCR)) {
                PPC_WARN_EMULATED(mfdscr, regs);
                rd = (instword >> 21) & 0x1f;
                regs->gpr[rd] = mfspr(SPRN_DSCR);
                return 0;
        }
        /* Emulate the mtspr DSCR, rD. */
        if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
                PPC_INST_MTSPR_DSCR_USER) ||
             ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
                PPC_INST_MTSPR_DSCR)) &&
                        cpu_has_feature(CPU_FTR_DSCR)) {
                PPC_WARN_EMULATED(mtdscr, regs);
                rd = (instword >> 21) & 0x1f;
                current->thread.dscr = regs->gpr[rd];
                current->thread.dscr_inherit = 1;
                mtspr(SPRN_DSCR, current->thread.dscr);
                return 0;
        }
#endif

        return -EINVAL;
}

int is_valid_bugaddr(unsigned long addr)
{
        return is_kernel_addr(addr);
}

#ifdef CONFIG_MATH_EMULATION
static int emulate_math(struct pt_regs *regs)
{
        int ret;
        extern int do_mathemu(struct pt_regs *regs);

        ret = do_mathemu(regs);
        if (ret >= 0)
                PPC_WARN_EMULATED(math, regs);

        switch (ret) {
        case 0:
                emulate_single_step(regs);
                return 0;
        case 1: {
                int code = 0;
                code = __parse_fpscr(current->thread.fpscr.val);
                _exception(SIGFPE, regs, code, regs->nip);
                return 0;
        }
        case -EFAULT:
                _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
                return 0;
        }

        return -1;
}
#else
static inline int emulate_math(struct pt_regs *regs) { return -1; }
#endif

void __kprobes program_check_exception(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();
        unsigned int reason = get_reason(regs);

        /* We can now get here via a FP Unavailable exception if the core
         * has no FPU, in that case the reason flags will be 0 */

        if (reason & REASON_FP) {
                /* IEEE FP exception */
                parse_fpe(regs);
                goto bail;
        }
        if (reason & REASON_TRAP) {
                /* Debugger is first in line to stop recursive faults in
                 * rcu_lock, notify_die, or atomic_notifier_call_chain */
                if (debugger_bpt(regs))
                        goto bail;

                /* trap exception */
                if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
                                == NOTIFY_STOP)
                        goto bail;

                if (!(regs->msr & MSR_PR) &&  /* not user-mode */
                    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
                        regs->nip += 4;
                        goto bail;
                }
                _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
                goto bail;
        }
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        if (reason & REASON_TM) {
                /* This is a TM "Bad Thing Exception" program check.
                 * This occurs when:
                 * -  An rfid/hrfid/mtmsrd attempts to cause an illegal
                 *    transition in TM states.
                 * -  A trechkpt is attempted when transactional.
                 * -  A treclaim is attempted when non transactional.
                 * -  A tend is illegally attempted.
                 * -  writing a TM SPR when transactional.
                 */
                if (!user_mode(regs) &&
                    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
                        regs->nip += 4;
                        goto bail;
                }
                /* If usermode caused this, it's done something illegal and
                 * gets a SIGILL slap on the wrist. We call it an illegal
                 * operand to distinguish from the instruction just being bad
                 * (e.g. executing a 'tend' on a CPU without TM!); it's an
                 * illegal /placement/ of a valid instruction.
                 */
                if (user_mode(regs)) {
                        _exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
                        goto bail;
                } else {
                        printk(KERN_EMERG "Unexpected TM Bad Thing exception "
                               "at %lx (msr 0x%x)\n", regs->nip, reason);
                        die("Unrecoverable exception", regs, SIGABRT);
                }
        }
#endif

        /* We restore the interrupt state now */
        if (!arch_irq_disabled_regs(regs))
                local_irq_enable();

        /* (reason & REASON_ILLEGAL) would be the obvious thing here,
         * but there seems to be a hardware bug on the 405GP (RevD)
         * that means ESR is sometimes set incorrectly - either to
         * ESR_DST (!?) or 0. In the process of chasing this with the
         * hardware people - not sure if it can happen on any illegal
         * instruction or only on FP instructions, whether there is a
         * pattern to occurrences etc. -dgibson 31/Mar/2003
         */
        if (!emulate_math(regs))
                goto bail;

        /* Try to emulate it if we should. */
        if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
                switch (emulate_instruction(regs)) {
                case 0:
                        regs->nip += 4;
                        emulate_single_step(regs);
                        goto bail;
                case -EFAULT:
                        _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
                        goto bail;
                }
        }

        if (reason & REASON_PRIVILEGED)
                _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
        else
                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);

bail:
        exception_exit(prev_state);
}
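/*
 * Note on the REASON_TRAP path above: kernel BUG()/WARN() sites compile
 * down to trap instructions, which is why report_bug() is consulted for
 * kernel-mode traps; a WARN site (BUG_TRAP_TYPE_WARN) is logged and then
 * skipped by advancing regs->nip past the 4-byte trap instruction, while
 * a user-mode trap is delivered as SIGTRAP/TRAP_BRKPT.
 */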

/*
 * This occurs when running in hypervisor mode on POWER6 or later
 * and an illegal instruction is encountered.
 */
void __kprobes emulation_assist_interrupt(struct pt_regs *regs)
{
        regs->msr |= REASON_ILLEGAL;
        program_check_exception(regs);
}

void alignment_exception(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();
        int sig, code, fixed = 0;

        /* We restore the interrupt state now */
        if (!arch_irq_disabled_regs(regs))
                local_irq_enable();

        if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
                goto bail;

        /* we don't implement logging of alignment exceptions */
        if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
                fixed = fix_alignment(regs);

        if (fixed == 1) {
                regs->nip += 4; /* skip over emulated instruction */
                emulate_single_step(regs);
                goto bail;
        }

        /* Operand address was bad */
        if (fixed == -EFAULT) {
                sig = SIGSEGV;
                code = SEGV_ACCERR;
        } else {
                sig = SIGBUS;
                code = BUS_ADRALN;
        }
        if (user_mode(regs))
                _exception(sig, regs, code, regs->dar);
        else
                bad_page_fault(regs, regs->dar, sig);

bail:
        exception_exit(prev_state);
}

void StackOverflow(struct pt_regs *regs)
{
        printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
               current, regs->gpr[1]);
        debugger(regs);
        show_regs(regs);
        panic("kernel stack overflow");
}

void nonrecoverable_exception(struct pt_regs *regs)
{
        printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
               regs->nip, regs->msr);
        debugger(regs);
        die("nonrecoverable exception", regs, SIGKILL);
}

void trace_syscall(struct pt_regs *regs)
{
        printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n",
               current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0],
               regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
}

void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();

        printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
                          "%lx at %lx\n", regs->trap, regs->nip);
        die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);

        exception_exit(prev_state);
}

void altivec_unavailable_exception(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();

        if (user_mode(regs)) {
                /* A user program has executed an altivec instruction,
                   but this kernel doesn't support altivec. */
                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
                goto bail;
        }

        printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
                          "%lx at %lx\n", regs->trap, regs->nip);
        die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);

bail:
        exception_exit(prev_state);
}

void vsx_unavailable_exception(struct pt_regs *regs)
{
        if (user_mode(regs)) {
                /* A user program has executed a vsx instruction,
                   but this kernel doesn't support vsx. */
                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
                return;
        }

        printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
                          "%lx at %lx\n", regs->trap, regs->nip);
        die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}
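/*
 * For facility_unavailable_exception() below: the interrupt cause lives
 * in the topmost byte of the FSCR (or of the HFSCR for the hypervisor
 * variant, trap != 0xf60), hence the "value >> 56"; that byte indexes
 * facility_strings[], so for example a value of 2 names the DSCR facility.
 */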

void facility_unavailable_exception(struct pt_regs *regs)
{
        static char *facility_strings[] = {
                "FPU",
                "VMX/VSX",
                "DSCR",
                "PMU SPRs",
                "BHRB",
                "TM",
                "AT",
                "EBB",
                "TAR",
        };
        char *facility, *prefix;
        u64 value;

        if (regs->trap == 0xf60) {
                value = mfspr(SPRN_FSCR);
                prefix = "";
        } else {
                value = mfspr(SPRN_HFSCR);
                prefix = "Hypervisor ";
        }

        value = value >> 56;

        /* We restore the interrupt state now */
        if (!arch_irq_disabled_regs(regs))
                local_irq_enable();

        if (value < ARRAY_SIZE(facility_strings))
                facility = facility_strings[value];
        else
                facility = "unknown";

        pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
               prefix, facility, regs->nip, regs->msr);

        if (user_mode(regs)) {
                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
                return;
        }

        die("Unexpected facility unavailable exception", regs, SIGABRT);
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM

extern void do_load_up_fpu(struct pt_regs *regs);

void fp_unavailable_tm(struct pt_regs *regs)
{
        /* Note:  This does not handle any kind of FP laziness. */

        TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
                 regs->nip, regs->msr);
        tm_enable();

        /* We can only have got here if the task started using FP after
         * beginning the transaction. So, the transactional regs are just a
         * copy of the checkpointed ones. But, we still need to recheckpoint
         * as we're enabling FP for the process; it will return, abort the
         * transaction, and probably retry but now with FP enabled. So the
         * checkpointed FP registers need to be loaded.
         */
        tm_reclaim(&current->thread, current->thread.regs->msr,
                   TM_CAUSE_FAC_UNAV);
        /* Reclaim didn't save out any FPRs to transact_fprs. */

        /* Enable FP for the task: */
        regs->msr |= (MSR_FP | current->thread.fpexc_mode);

        /* This loads and recheckpoints the FP registers from
         * thread.fpr[]. They will remain in registers after the
         * checkpoint so we don't need to reload them after.
         */
        tm_recheckpoint(&current->thread, regs->msr);
}

#ifdef CONFIG_ALTIVEC
extern void do_load_up_altivec(struct pt_regs *regs);

void altivec_unavailable_tm(struct pt_regs *regs)
{
        /* See the comments in fp_unavailable_tm(). This function operates
         * the same way.
         */

        TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
                 "MSR=%lx\n",
                 regs->nip, regs->msr);
        tm_enable();
        tm_reclaim(&current->thread, current->thread.regs->msr,
                   TM_CAUSE_FAC_UNAV);
        regs->msr |= MSR_VEC;
        tm_recheckpoint(&current->thread, regs->msr);
        current->thread.used_vr = 1;
}
#endif

#ifdef CONFIG_VSX
void vsx_unavailable_tm(struct pt_regs *regs)
{
        /* See the comments in fp_unavailable_tm(). This works similarly,
         * though we're loading both FP and VEC registers in here.
         *
         * If FP isn't in use, load FP regs. If VEC isn't in use, load VEC
         * regs. Either way, set MSR_VSX.
         */

        TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx,"
                 "MSR=%lx\n",
                 regs->nip, regs->msr);

        tm_enable();
        /* This reclaims FP and/or VR regs if they're already enabled */
        tm_reclaim(&current->thread, current->thread.regs->msr,
                   TM_CAUSE_FAC_UNAV);

        regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode |
                MSR_VSX;
        /* This loads & recheckpoints FP and VRs. */
        tm_recheckpoint(&current->thread, regs->msr);
        current->thread.used_vsr = 1;
}
#endif
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

void performance_monitor_exception(struct pt_regs *regs)
{
        __get_cpu_var(irq_stat).pmu_irqs++;

        perf_irq(regs);
}

#ifdef CONFIG_8xx
void SoftwareEmulation(struct pt_regs *regs)
{
        CHECK_FULL_REGS(regs);

        if (!user_mode(regs)) {
                debugger(regs);
                die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
        }

        if (!emulate_math(regs))
                return;

        _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
#endif /* CONFIG_8xx */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
{
        int changed = 0;
        /*
         * Determine the cause of the debug event, clear the
         * event flags and send a trap to the handler. Torez
         */
        if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
                dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
                current->thread.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
                do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
                             5);
                changed |= 0x01;
        } else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
                dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
                do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT,
                             6);
                changed |= 0x01;
        } else if (debug_status & DBSR_IAC1) {
                current->thread.dbcr0 &= ~DBCR0_IAC1;
                dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
                do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
                             1);
                changed |= 0x01;
        } else if (debug_status & DBSR_IAC2) {
                current->thread.dbcr0 &= ~DBCR0_IAC2;
                do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
                             2);
                changed |= 0x01;
        } else if (debug_status & DBSR_IAC3) {
                current->thread.dbcr0 &= ~DBCR0_IAC3;
                dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
                do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
                             3);
                changed |= 0x01;
        } else if (debug_status & DBSR_IAC4) {
                current->thread.dbcr0 &= ~DBCR0_IAC4;
                do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
                             4);
                changed |= 0x01;
        }
        /*
         * At the point this routine was called, the MSR(DE) was turned off.
         * Check all other debug flags and see if that bit needs to be turned
         * back on or not.
         */
        if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, current->thread.dbcr1))
                regs->msr |= MSR_DE;
        else
                /* Make sure the IDM flag is off */
                current->thread.dbcr0 &= ~DBCR0_IDM;

        if (changed & 0x01)
                mtspr(SPRN_DBCR0, current->thread.dbcr0);
}

void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
{
        current->thread.dbsr = debug_status;

        /* Hack alert: On BookE, Branch Taken stops on the branch itself, while
         * on server, it stops on the target of the branch. In order to simulate
         * the server behaviour, we thus restart right away with a single step
         * instead of stopping here when hitting a BT
         */
        if (debug_status & DBSR_BT) {
                regs->msr &= ~MSR_DE;

                /* Disable BT */
                mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
                /* Clear the BT event */
                mtspr(SPRN_DBSR, DBSR_BT);

                /* Do the single step trick only when coming from userspace */
                if (user_mode(regs)) {
                        current->thread.dbcr0 &= ~DBCR0_BT;
                        current->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
                        regs->msr |= MSR_DE;
                        return;
                }

                if (notify_die(DIE_SSTEP, "block_step", regs, 5,
                               5, SIGTRAP) == NOTIFY_STOP) {
                        return;
                }
                if (debugger_sstep(regs))
                        return;
        } else if (debug_status & DBSR_IC) {    /* Instruction complete */
                regs->msr &= ~MSR_DE;

                /* Disable instruction completion */
                mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
                /* Clear the instruction completion event */
                mtspr(SPRN_DBSR, DBSR_IC);

                if (notify_die(DIE_SSTEP, "single_step", regs, 5,
                               5, SIGTRAP) == NOTIFY_STOP) {
                        return;
                }

                if (debugger_sstep(regs))
                        return;

                if (user_mode(regs)) {
                        current->thread.dbcr0 &= ~DBCR0_IC;
                        if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0,
                                               current->thread.dbcr1))
                                regs->msr |= MSR_DE;
                        else
                                /* Make sure the IDM bit is off */
                                current->thread.dbcr0 &= ~DBCR0_IDM;
                }

                _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
        } else
                handle_debug(regs, debug_status);
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
{
        printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
               regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */

#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
        int err;

        if (!user_mode(regs)) {
                printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
                       " at %lx\n", regs->nip);
                die("Kernel VMX/Altivec assist exception", regs, SIGILL);
        }

        flush_altivec_to_thread(current);

        PPC_WARN_EMULATED(altivec, regs);
        err = emulate_altivec(regs);
        if (err == 0) {
                regs->nip += 4;         /* skip emulated instruction */
                emulate_single_step(regs);
                return;
        }

        if (err == -EFAULT) {
                /* got an error reading the instruction */
                _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
        } else {
                /* didn't recognize the instruction */
                /* XXX quick hack for now: set the non-Java bit in the VSCR */
                printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
                                   "in %s at %lx\n", current->comm, regs->nip);
                current->thread.vscr.u[3] |= 0x10000;
        }
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
void vsx_assist_exception(struct pt_regs *regs)
{
        if (!user_mode(regs)) {
                printk(KERN_EMERG "VSX assist exception in kernel mode"
                       " at %lx\n", regs->nip);
                die("Kernel VSX assist exception", regs, SIGILL);
        }

        flush_vsx_to_thread(current);
        printk(KERN_INFO "VSX assist not supported at %lx\n", regs->nip);
        _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
                           unsigned long error_code)
{
        /* We treat cache locking instructions from the user
         * as priv ops, in the future we could try to do
         * something smarter
         */
        if (error_code & (ESR_DLK|ESR_ILK))
                _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
        return;
}
#endif /* CONFIG_FSL_BOOKE */

#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
        extern int do_spe_mathemu(struct pt_regs *regs);
        unsigned long spefscr;
        int fpexc_mode;
        int code = 0;
        int err;

        flush_spe_to_thread(current);

        spefscr = current->thread.spefscr;
        fpexc_mode = current->thread.fpexc_mode;

        if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF))
                code = FPE_FLTOVF;
        else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND))
                code = FPE_FLTUND;
        else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
                code = FPE_FLTDIV;
        else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV))
                code = FPE_FLTINV;
        else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
                code = FPE_FLTRES;

        err = do_spe_mathemu(regs);
        if (err == 0) {
                regs->nip += 4;         /* skip emulated instruction */
                emulate_single_step(regs);
                return;
        }

        if (err == -EFAULT) {
                /* got an error reading the instruction */
                _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
        } else if (err == -EINVAL) {
                /* didn't recognize the instruction */
                printk(KERN_ERR "unrecognized spe instruction "
                       "in %s at %lx\n", current->comm, regs->nip);
        } else {
                _exception(SIGFPE, regs, code, regs->nip);
        }

        return;
}

void SPEFloatingPointRoundException(struct pt_regs *regs)
{
        extern int speround_handler(struct pt_regs *regs);
        int err;

        preempt_disable();
        if (regs->msr & MSR_SPE)
                giveup_spe(current);
        preempt_enable();

        regs->nip -= 4;
        err = speround_handler(regs);
        if (err == 0) {
                regs->nip += 4;         /* skip emulated instruction */
                emulate_single_step(regs);
                return;
        }

        if (err == -EFAULT) {
                /* got an error reading the instruction */
                _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
        } else if (err == -EINVAL) {
                /* didn't recognize the instruction */
                printk(KERN_ERR "unrecognized spe instruction "
                       "in %s at %lx\n", current->comm, regs->nip);
        } else {
                _exception(SIGFPE, regs, 0, regs->nip);
                return;
        }
}
#endif

/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0. This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
        printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
               regs->trap, regs->nip);
        die("Unrecoverable exception", regs, SIGABRT);
}

#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
        /* Generic WatchdogHandler, implement your own */
        mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
        return;
}

void WatchdogException(struct pt_regs *regs)
{
        printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
        WatchdogHandler(regs);
}
#endif

/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
        printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
               regs->gpr[1], regs->nip);
        die("Bad kernel stack pointer", regs, SIGABRT);
}

void __init trap_init(void)
{
}


#ifdef CONFIG_PPC_EMULATED_STATS

#define WARN_EMULATED_SETUP(type)       .type = { .name = #type }

struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
        WARN_EMULATED_SETUP(altivec),
#endif
        WARN_EMULATED_SETUP(dcba),
        WARN_EMULATED_SETUP(dcbz),
        WARN_EMULATED_SETUP(fp_pair),
        WARN_EMULATED_SETUP(isel),
        WARN_EMULATED_SETUP(mcrxr),
        WARN_EMULATED_SETUP(mfpvr),
        WARN_EMULATED_SETUP(multiple),
        WARN_EMULATED_SETUP(popcntb),
        WARN_EMULATED_SETUP(spe),
        WARN_EMULATED_SETUP(string),
        WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
        WARN_EMULATED_SETUP(math),
#endif
#ifdef CONFIG_VSX
        WARN_EMULATED_SETUP(vsx),
#endif
#ifdef CONFIG_PPC64
        WARN_EMULATED_SETUP(mfdscr),
        WARN_EMULATED_SETUP(mtdscr),
#endif
};

u32 ppc_warn_emulated;

void ppc_warn_emulated_print(const char *type)
{
        pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
                            type);
}

static int __init ppc_warn_emulated_init(void)
{
        struct dentry *dir, *d;
        unsigned int i;
        struct ppc_emulated_entry *entries = (void *)&ppc_emulated;

        if (!powerpc_debugfs_root)
                return -ENODEV;

        dir = debugfs_create_dir("emulated_instructions",
                                 powerpc_debugfs_root);
        if (!dir)
                return -ENOMEM;

        d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
                               &ppc_warn_emulated);
        if (!d)
                goto fail;

        for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
                d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
                                       (u32 *)&entries[i].val.counter);
                if (!d)
                        goto fail;
        }

        return 0;

fail:
        debugfs_remove_recursive(dir);
        return -ENOMEM;
}

device_initcall(ppc_warn_emulated_init);

#endif /* CONFIG_PPC_EMULATED_STATS */
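/*
 * Usage note (a sketch; paths assume debugfs is mounted at
 * /sys/kernel/debug): the per-instruction counters created above appear
 * under /sys/kernel/debug/powerpc/emulated_instructions/, and writing a
 * non-zero value to the do_warn file there enables the rate-limited
 * "used emulated ..." messages printed by ppc_warn_emulated_print().
 */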