/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright 2007-2010 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Modified by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras (paulus@samba.org)
 */

/*
 * This file handles the architecture-dependent parts of hardware exceptions
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/debugfs.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>

#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#include <asm/reg.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
#include <asm/tm.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/rio.h>
#include <asm/fadump.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#include <sysdev/fsl_pci.h>

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_break_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

/* Transactional Memory trap debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...)	printk(KERN_INFO x)
#else
#define TM_DEBUG(x...)	do { } while(0)
#endif

/*
 * Trap & Exception support
 */

#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
	mutex_lock(&pmac_backlight_mutex);
	if (pmac_backlight) {
		struct backlight_properties *props;

		props = &pmac_backlight->props;
		props->brightness = props->max_brightness;
		props->power = FB_BLANK_UNBLANK;
		backlight_update_status(pmac_backlight);
	}
	mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;

static unsigned __kprobes long oops_begin(struct pt_regs *regs)
{
	int cpu;
	unsigned long flags;

	if (debugger(regs))
		return 1;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	if (machine_is(powermac))
		pmac_backlight_unblank();
	return flags;
}

static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
			       int signr)
{
	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	oops_exit();
	printk("\n");
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);

	crash_fadump(regs, "die oops");

	/*
	 * A system reset (0x100) is a request to dump, so we always send
	 * it through the crashdump code.
	 */
	if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) {
		crash_kexec(regs);

		/*
		 * We aren't the primary crash CPU. We need to send it
		 * to a holding pattern to avoid it ending up in the panic
		 * code.
		 */
		crash_kexec_secondary(regs);
	}

	if (!signr)
		return;

	/*
	 * While our oops output is serialised by a spinlock, output
	 * from panic() called below can race and corrupt it. If we
	 * know we are going to panic, delay for 1 second so we have a
	 * chance to get clean backtraces from all CPUs that are oopsing.
	 */
	if (in_interrupt() || panic_on_oops || !current->pid ||
	    is_global_init(current)) {
		mdelay(MSEC_PER_SEC);
	}

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	do_exit(signr);
}
static int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP NR_CPUS=%d ", NR_CPUS);
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC ");
#endif
#ifdef CONFIG_NUMA
	printk("NUMA ");
#endif
	printk("%s\n", ppc_md.name ? ppc_md.name : "");

	if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
		return 1;

	print_modules();
	show_regs(regs);

	return 0;
}

void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin(regs);

	if (__die(str, regs, err))
		err = 0;
	oops_end(flags, regs, err);
}

void user_single_step_siginfo(struct task_struct *tsk,
			      struct pt_regs *regs, siginfo_t *info)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGTRAP;
	info->si_code = TRAP_TRACE;
	info->si_addr = (void __user *)regs->nip;
}

void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	siginfo_t info;
	const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
		"at %08lx nip %08lx lr %08lx code %x\n";
	const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
		"at %016lx nip %016lx lr %016lx code %x\n";

	if (!user_mode(regs)) {
		die("Exception in kernel mode", regs, signr);
		return;
	}

	if (show_unhandled_signals && unhandled_signal(current, signr)) {
		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
				   current->comm, current->pid, signr,
				   addr, regs->nip, regs->link, code);
	}

	if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
		local_irq_enable();

	current->thread.trap_nr = code;
	memset(&info, 0, sizeof(info));
	info.si_signo = signr;
	info.si_code = code;
	info.si_addr = (void __user *) addr;
	force_sig_info(signr, &info, current);
}
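/*
 * Note on _exception() above: 'code' serves both as the si_code seen by
 * userspace (TRAP_BRKPT, FPE_FLTDIV, ILL_PRVOPC, ...) and as the value
 * recorded in current->thread.trap_nr, while 'addr' becomes si_addr.
 * Callers in this file pass regs->nip for instruction-related faults
 * and regs->dar for data address faults.
 */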
#ifdef CONFIG_PPC64
void system_reset_exception(struct pt_regs *regs)
{
	/* See if any machine dependent calls */
	if (ppc_md.system_reset_exception) {
		if (ppc_md.system_reset_exception(regs))
			return;
	}

	die("System Reset", regs, SIGABRT);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable System Reset");

	/* What should we do here? We could issue a shutdown or hard reset. */
}
#endif

/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == 0x60000000)		/* nop */
			nip -= 2;
		else if (*nip == 0x4c00012c)	/* isync */
			--nip;
		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
			/* sync or twi */
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100)? "OUT to": "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			regs->msr |= MSR_RI;
			regs->nip = entry->fixup;
			return 1;
		}
	}
#endif /* CONFIG_PPC32 */
	return 0;
}
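/*
 * For illustration only (the real sequences live in asm/io.h): a PPC32
 * port accessor is expected to look roughly like
 *
 *	sync
 *	lbzx	rT,0,rB		; rB = _IO_BASE + port, the faulting access
 *	twi	0,rT,0
 *	isync
 *	nop
 *
 * check_io_access() walks backwards from wherever the machine check left
 * the NIP (the nop, the isync, or the twi/sync) to the load or store
 * itself; X-form stores have bit 0x100 set in the extended opcode, which
 * is how the "OUT to"/"IN from" direction is decided.
 */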
"OUT to": "IN from", 329 regs->gpr[rb] - _IO_BASE, nip); 330 regs->msr |= MSR_RI; 331 regs->nip = entry->fixup; 332 return 1; 333 } 334 } 335 #endif /* CONFIG_PPC32 */ 336 return 0; 337 } 338 339 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 340 /* On 4xx, the reason for the machine check or program exception 341 is in the ESR. */ 342 #define get_reason(regs) ((regs)->dsisr) 343 #ifndef CONFIG_FSL_BOOKE 344 #define get_mc_reason(regs) ((regs)->dsisr) 345 #else 346 #define get_mc_reason(regs) (mfspr(SPRN_MCSR)) 347 #endif 348 #define REASON_FP ESR_FP 349 #define REASON_ILLEGAL (ESR_PIL | ESR_PUO) 350 #define REASON_PRIVILEGED ESR_PPR 351 #define REASON_TRAP ESR_PTR 352 353 /* single-step stuff */ 354 #define single_stepping(regs) (current->thread.debug.dbcr0 & DBCR0_IC) 355 #define clear_single_step(regs) (current->thread.debug.dbcr0 &= ~DBCR0_IC) 356 357 #else 358 /* On non-4xx, the reason for the machine check or program 359 exception is in the MSR. */ 360 #define get_reason(regs) ((regs)->msr) 361 #define get_mc_reason(regs) ((regs)->msr) 362 #define REASON_TM 0x200000 363 #define REASON_FP 0x100000 364 #define REASON_ILLEGAL 0x80000 365 #define REASON_PRIVILEGED 0x40000 366 #define REASON_TRAP 0x20000 367 368 #define single_stepping(regs) ((regs)->msr & MSR_SE) 369 #define clear_single_step(regs) ((regs)->msr &= ~MSR_SE) 370 #endif 371 372 #if defined(CONFIG_4xx) 373 int machine_check_4xx(struct pt_regs *regs) 374 { 375 unsigned long reason = get_mc_reason(regs); 376 377 if (reason & ESR_IMCP) { 378 printk("Instruction"); 379 mtspr(SPRN_ESR, reason & ~ESR_IMCP); 380 } else 381 printk("Data"); 382 printk(" machine check in kernel mode.\n"); 383 384 return 0; 385 } 386 387 int machine_check_440A(struct pt_regs *regs) 388 { 389 unsigned long reason = get_mc_reason(regs); 390 391 printk("Machine check in kernel mode.\n"); 392 if (reason & ESR_IMCP){ 393 printk("Instruction Synchronous Machine Check exception\n"); 394 mtspr(SPRN_ESR, reason & ~ESR_IMCP); 395 } 396 else { 397 u32 mcsr = mfspr(SPRN_MCSR); 398 if (mcsr & MCSR_IB) 399 printk("Instruction Read PLB Error\n"); 400 if (mcsr & MCSR_DRB) 401 printk("Data Read PLB Error\n"); 402 if (mcsr & MCSR_DWB) 403 printk("Data Write PLB Error\n"); 404 if (mcsr & MCSR_TLBP) 405 printk("TLB Parity Error\n"); 406 if (mcsr & MCSR_ICP){ 407 flush_instruction_cache(); 408 printk("I-Cache Parity Error\n"); 409 } 410 if (mcsr & MCSR_DCSP) 411 printk("D-Cache Search Parity Error\n"); 412 if (mcsr & MCSR_DCFP) 413 printk("D-Cache Flush Parity Error\n"); 414 if (mcsr & MCSR_IMPE) 415 printk("Machine Check exception is imprecise\n"); 416 417 /* Clear MCSR */ 418 mtspr(SPRN_MCSR, mcsr); 419 } 420 return 0; 421 } 422 423 int machine_check_47x(struct pt_regs *regs) 424 { 425 unsigned long reason = get_mc_reason(regs); 426 u32 mcsr; 427 428 printk(KERN_ERR "Machine check in kernel mode.\n"); 429 if (reason & ESR_IMCP) { 430 printk(KERN_ERR 431 "Instruction Synchronous Machine Check exception\n"); 432 mtspr(SPRN_ESR, reason & ~ESR_IMCP); 433 return 0; 434 } 435 mcsr = mfspr(SPRN_MCSR); 436 if (mcsr & MCSR_IB) 437 printk(KERN_ERR "Instruction Read PLB Error\n"); 438 if (mcsr & MCSR_DRB) 439 printk(KERN_ERR "Data Read PLB Error\n"); 440 if (mcsr & MCSR_DWB) 441 printk(KERN_ERR "Data Write PLB Error\n"); 442 if (mcsr & MCSR_TLBP) 443 printk(KERN_ERR "TLB Parity Error\n"); 444 if (mcsr & MCSR_ICP) { 445 flush_instruction_cache(); 446 printk(KERN_ERR "I-Cache Parity Error\n"); 447 } 448 if (mcsr & MCSR_DCSP) 449 printk(KERN_ERR "D-Cache Search Parity Error\n"); 450 if (mcsr & 
#if defined(CONFIG_4xx)
int machine_check_4xx(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	if (reason & ESR_IMCP) {
		printk("Instruction");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	} else
		printk("Data");
	printk(" machine check in kernel mode.\n");

	return 0;
}

int machine_check_440A(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	if (reason & ESR_IMCP) {
		printk("Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	} else {
		u32 mcsr = mfspr(SPRN_MCSR);
		if (mcsr & MCSR_IB)
			printk("Instruction Read PLB Error\n");
		if (mcsr & MCSR_DRB)
			printk("Data Read PLB Error\n");
		if (mcsr & MCSR_DWB)
			printk("Data Write PLB Error\n");
		if (mcsr & MCSR_TLBP)
			printk("TLB Parity Error\n");
		if (mcsr & MCSR_ICP) {
			flush_instruction_cache();
			printk("I-Cache Parity Error\n");
		}
		if (mcsr & MCSR_DCSP)
			printk("D-Cache Search Parity Error\n");
		if (mcsr & MCSR_DCFP)
			printk("D-Cache Flush Parity Error\n");
		if (mcsr & MCSR_IMPE)
			printk("Machine Check exception is imprecise\n");

		/* Clear MCSR */
		mtspr(SPRN_MCSR, mcsr);
	}
	return 0;
}

int machine_check_47x(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);
	u32 mcsr;

	printk(KERN_ERR "Machine check in kernel mode.\n");
	if (reason & ESR_IMCP) {
		printk(KERN_ERR
		       "Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
		return 0;
	}
	mcsr = mfspr(SPRN_MCSR);
	if (mcsr & MCSR_IB)
		printk(KERN_ERR "Instruction Read PLB Error\n");
	if (mcsr & MCSR_DRB)
		printk(KERN_ERR "Data Read PLB Error\n");
	if (mcsr & MCSR_DWB)
		printk(KERN_ERR "Data Write PLB Error\n");
	if (mcsr & MCSR_TLBP)
		printk(KERN_ERR "TLB Parity Error\n");
	if (mcsr & MCSR_ICP) {
		flush_instruction_cache();
		printk(KERN_ERR "I-Cache Parity Error\n");
	}
	if (mcsr & MCSR_DCSP)
		printk(KERN_ERR "D-Cache Search Parity Error\n");
	if (mcsr & PPC47x_MCSR_GPR)
		printk(KERN_ERR "GPR Parity Error\n");
	if (mcsr & PPC47x_MCSR_FPR)
		printk(KERN_ERR "FPR Parity Error\n");
	if (mcsr & PPC47x_MCSR_IPR)
		printk(KERN_ERR "Machine Check exception is imprecise\n");

	/* Clear MCSR */
	mtspr(SPRN_MCSR, mcsr);

	return 0;
}
"Effective" : "Physical", addr); 555 } 556 557 silent_out: 558 mtspr(SPRN_MCSR, mcsr); 559 return mfspr(SPRN_MCSR) == 0 && recoverable; 560 } 561 562 int machine_check_e500(struct pt_regs *regs) 563 { 564 unsigned long reason = get_mc_reason(regs); 565 566 if (reason & MCSR_BUS_RBERR) { 567 if (fsl_rio_mcheck_exception(regs)) 568 return 1; 569 if (fsl_pci_mcheck_exception(regs)) 570 return 1; 571 } 572 573 printk("Machine check in kernel mode.\n"); 574 printk("Caused by (from MCSR=%lx): ", reason); 575 576 if (reason & MCSR_MCP) 577 printk("Machine Check Signal\n"); 578 if (reason & MCSR_ICPERR) 579 printk("Instruction Cache Parity Error\n"); 580 if (reason & MCSR_DCP_PERR) 581 printk("Data Cache Push Parity Error\n"); 582 if (reason & MCSR_DCPERR) 583 printk("Data Cache Parity Error\n"); 584 if (reason & MCSR_BUS_IAERR) 585 printk("Bus - Instruction Address Error\n"); 586 if (reason & MCSR_BUS_RAERR) 587 printk("Bus - Read Address Error\n"); 588 if (reason & MCSR_BUS_WAERR) 589 printk("Bus - Write Address Error\n"); 590 if (reason & MCSR_BUS_IBERR) 591 printk("Bus - Instruction Data Error\n"); 592 if (reason & MCSR_BUS_RBERR) 593 printk("Bus - Read Data Bus Error\n"); 594 if (reason & MCSR_BUS_WBERR) 595 printk("Bus - Read Data Bus Error\n"); 596 if (reason & MCSR_BUS_IPERR) 597 printk("Bus - Instruction Parity Error\n"); 598 if (reason & MCSR_BUS_RPERR) 599 printk("Bus - Read Parity Error\n"); 600 601 return 0; 602 } 603 604 int machine_check_generic(struct pt_regs *regs) 605 { 606 return 0; 607 } 608 #elif defined(CONFIG_E200) 609 int machine_check_e200(struct pt_regs *regs) 610 { 611 unsigned long reason = get_mc_reason(regs); 612 613 printk("Machine check in kernel mode.\n"); 614 printk("Caused by (from MCSR=%lx): ", reason); 615 616 if (reason & MCSR_MCP) 617 printk("Machine Check Signal\n"); 618 if (reason & MCSR_CP_PERR) 619 printk("Cache Push Parity Error\n"); 620 if (reason & MCSR_CPERR) 621 printk("Cache Parity Error\n"); 622 if (reason & MCSR_EXCP_ERR) 623 printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n"); 624 if (reason & MCSR_BUS_IRERR) 625 printk("Bus - Read Bus Error on instruction fetch\n"); 626 if (reason & MCSR_BUS_DRERR) 627 printk("Bus - Read Bus Error on data load\n"); 628 if (reason & MCSR_BUS_WRERR) 629 printk("Bus - Write Bus Error on buffered store or cache line push\n"); 630 631 return 0; 632 } 633 #else 634 int machine_check_generic(struct pt_regs *regs) 635 { 636 unsigned long reason = get_mc_reason(regs); 637 638 printk("Machine check in kernel mode.\n"); 639 printk("Caused by (from SRR1=%lx): ", reason); 640 switch (reason & 0x601F0000) { 641 case 0x80000: 642 printk("Machine check signal\n"); 643 break; 644 case 0: /* for 601 */ 645 case 0x40000: 646 case 0x140000: /* 7450 MSS error and TEA */ 647 printk("Transfer error ack signal\n"); 648 break; 649 case 0x20000: 650 printk("Data parity error signal\n"); 651 break; 652 case 0x10000: 653 printk("Address parity error signal\n"); 654 break; 655 case 0x20000000: 656 printk("L1 Data Cache error\n"); 657 break; 658 case 0x40000000: 659 printk("L1 Instruction Cache error\n"); 660 break; 661 case 0x00100000: 662 printk("L2 data cache parity error\n"); 663 break; 664 default: 665 printk("Unknown values in msr\n"); 666 } 667 return 0; 668 } 669 #endif /* everything else */ 670 671 void machine_check_exception(struct pt_regs *regs) 672 { 673 enum ctx_state prev_state = exception_enter(); 674 int recover = 0; 675 676 __get_cpu_var(irq_stat).mce_exceptions++; 677 678 /* See if any 
int machine_check_e500(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	if (reason & MCSR_BUS_RBERR) {
		if (fsl_rio_mcheck_exception(regs))
			return 1;
		if (fsl_pci_mcheck_exception(regs))
			return 1;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_ICPERR)
		printk("Instruction Cache Parity Error\n");
	if (reason & MCSR_DCP_PERR)
		printk("Data Cache Push Parity Error\n");
	if (reason & MCSR_DCPERR)
		printk("Data Cache Parity Error\n");
	if (reason & MCSR_BUS_IAERR)
		printk("Bus - Instruction Address Error\n");
	if (reason & MCSR_BUS_RAERR)
		printk("Bus - Read Address Error\n");
	if (reason & MCSR_BUS_WAERR)
		printk("Bus - Write Address Error\n");
	if (reason & MCSR_BUS_IBERR)
		printk("Bus - Instruction Data Error\n");
	if (reason & MCSR_BUS_RBERR)
		printk("Bus - Read Data Bus Error\n");
	if (reason & MCSR_BUS_WBERR)
		printk("Bus - Write Data Bus Error\n");
	if (reason & MCSR_BUS_IPERR)
		printk("Bus - Instruction Parity Error\n");
	if (reason & MCSR_BUS_RPERR)
		printk("Bus - Read Parity Error\n");

	return 0;
}

int machine_check_generic(struct pt_regs *regs)
{
	return 0;
}
#elif defined(CONFIG_E200)
int machine_check_e200(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_CP_PERR)
		printk("Cache Push Parity Error\n");
	if (reason & MCSR_CPERR)
		printk("Cache Parity Error\n");
	if (reason & MCSR_EXCP_ERR)
		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
	if (reason & MCSR_BUS_IRERR)
		printk("Bus - Read Bus Error on instruction fetch\n");
	if (reason & MCSR_BUS_DRERR)
		printk("Bus - Read Bus Error on data load\n");
	if (reason & MCSR_BUS_WRERR)
		printk("Bus - Write Bus Error on buffered store or cache line push\n");

	return 0;
}
#else
int machine_check_generic(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from SRR1=%lx): ", reason);
	switch (reason & 0x601F0000) {
	case 0x80000:
		printk("Machine check signal\n");
		break;
	case 0:		/* for 601 */
	case 0x40000:
	case 0x140000:	/* 7450 MSS error and TEA */
		printk("Transfer error ack signal\n");
		break;
	case 0x20000:
		printk("Data parity error signal\n");
		break;
	case 0x10000:
		printk("Address parity error signal\n");
		break;
	case 0x20000000:
		printk("L1 Data Cache error\n");
		break;
	case 0x40000000:
		printk("L1 Instruction Cache error\n");
		break;
	case 0x00100000:
		printk("L2 data cache parity error\n");
		break;
	default:
		printk("Unknown values in msr\n");
	}
	return 0;
}
#endif /* everything else */

void machine_check_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	int recover = 0;

	__get_cpu_var(irq_stat).mce_exceptions++;

	/* See if any machine dependent calls want to handle it. In theory,
	 * we would want to call the CPU first, and call the ppc_md. one if
	 * the CPU one returns a positive number. However there is existing
	 * code that assumes the board gets a first chance, so let's keep it
	 * that way for now and fix things later. --BenH.
	 */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);
	else if (cur_cpu_spec->machine_check)
		recover = cur_cpu_spec->machine_check(regs);

	if (recover > 0)
		goto bail;

#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
	/* the qspan pci read routines can cause machine checks -- Cort
	 *
	 * yuck !!! that totally needs to go away ! There are better ways
	 * to deal with that than having a wart in the mcheck handler.
	 * -- BenH
	 */
	bad_page_fault(regs, regs->dar, SIGBUS);
	goto bail;
#endif

	if (debugger_fault_handler(regs))
		goto bail;

	if (check_io_access(regs))
		goto bail;

	die("Machine check", regs, SIGBUS);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable Machine check");

bail:
	exception_exit(prev_state);
}
void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}

void unknown_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, 0, 0);

	exception_exit(prev_state);
}

void instruction_breakpoint_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_iabr_match(regs))
		goto bail;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);

bail:
	exception_exit(prev_state);
}

void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, 0, 0);
}

void __kprobes single_step_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	clear_single_step(regs);

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_sstep(regs))
		goto bail;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);

bail:
	exception_exit(prev_state);
}

/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception.  This was pointed out
 * by Kumar Gala.  -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs))
		single_step_exception(regs);
}

static inline int __parse_fpscr(unsigned long fpscr)
{
	int ret = 0;

	/* Invalid operation */
	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
		ret = FPE_FLTINV;

	/* Overflow */
	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
		ret = FPE_FLTOVF;

	/* Underflow */
	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
		ret = FPE_FLTUND;

	/* Divide by zero */
	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
		ret = FPE_FLTDIV;

	/* Inexact result */
	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
		ret = FPE_FLTRES;

	return ret;
}
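/*
 * Worked example for __parse_fpscr(): a 1.0/0.0 executed with
 * zero-divide exceptions enabled sets both FPSCR_ZE (the enable bit)
 * and FPSCR_ZX (the sticky status bit), so the function reports
 * FPE_FLTDIV.  The if/else ordering means that when several enabled
 * exceptions are pending at once, invalid operation wins, then
 * overflow, and so on down to inexact.
 */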
static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;

	flush_fp_to_thread(current);

	code = __parse_fpscr(current->thread.fp_state.fpscr);

	_exception(SIGFPE, regs, code, regs->nip);
}

/*
 * Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 *
 */
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & PPC_INST_STRING_MASK) {
	case PPC_INST_LSWX:
	case PPC_INST_STSWX:
		EA += NB_RB;
		num_bytes = regs->xer & 0x7f;
		break;
	case PPC_INST_LSWI:
	case PPC_INST_STSWI:
		num_bytes = (NB_RB == 0) ? 32 : NB_RB;
		break;
	default:
		return -EINVAL;
	}

	while (num_bytes != 0) {
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));

		/* if process is 32-bit, clear upper 32 bits of EA */
		if ((regs->msr & MSR_64BIT) == 0)
			EA &= 0xFFFFFFFF;

		switch ((instword & PPC_INST_STRING_MASK)) {
		case PPC_INST_LSWX:
		case PPC_INST_LSWI:
			if (get_user(val, (u8 __user *)EA))
				return -EFAULT;
			/* first time updating this reg,
			 * zero it out */
			if (pos == 0)
				regs->gpr[rT] = 0;
			regs->gpr[rT] |= val << shift;
			break;
		case PPC_INST_STSWI:
		case PPC_INST_STSWX:
			val = regs->gpr[rT] >> shift;
			if (put_user(val, (u8 __user *)EA))
				return -EFAULT;
			break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}
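/*
 * Worked example (illustrative): "lswi r5,r4,6" loads 6 bytes starting
 * at EA = r4.  Bytes fill each destination register from its
 * most-significant byte downwards; after 4 bytes the destination
 * advances from r5 to r6 (wrapping 31 -> 0), and because a register is
 * zeroed the first time it is touched, the trailing 2 bytes end up
 * left-justified in r6 with the rest of the word clear.
 */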
static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
	u32 ra, rs;
	unsigned long tmp;

	ra = (instword >> 16) & 0x1f;
	rs = (instword >> 21) & 0x1f;

	tmp = regs->gpr[rs];
	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
	regs->gpr[ra] = tmp;

	return 0;
}
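/*
 * The three mask-and-add steps above are the classic SWAR population
 * count: reduce each 2-bit field to the count of its set bits, sum
 * adjacent 2-bit counts into 4-bit fields, then add the two nibbles of
 * each byte.  The final mask with 0x0f0f... keeps the totals separate
 * per byte, which matches popcntb's definition: a population count for
 * every byte of the source register, not a single total.
 */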
static int emulate_isel(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 rB = (instword >> 11) & 0x1f;
	u8 BC = (instword >> 6) & 0x1f;
	u8 bit;
	unsigned long tmp;

	tmp = (rA == 0) ? 0 : regs->gpr[rA];
	bit = (regs->ccr >> (31 - BC)) & 0x1;

	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];

	return 0;
}
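/*
 * isel rT,rA,rB,BC selects on a single CR bit: rT = CR[BC] ? rA : rB,
 * where rA == 0 means the literal value 0 rather than r0.  For example
 * "isel r3,r4,r5,0" gives r3 = r4 if CR0[LT] is set and r3 = r5
 * otherwise, which is the bit the shift by (31 - BC) above extracts.
 */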
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_abort_check(struct pt_regs *regs, int cause)
{
	/* If we're emulating a load/store in an active transaction, we cannot
	 * emulate it as the kernel operates in transaction suspended context.
	 * We need to abort the transaction.  This creates a persistent TM
	 * abort so tell the user what caused it with a new code.
	 */
	if (MSR_TM_TRANSACTIONAL(regs->msr)) {
		tm_enable();
		tm_abort(cause);
		return true;
	}
	return false;
}
#else
static inline bool tm_abort_check(struct pt_regs *regs, int reason)
{
	return false;
}
#endif

static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	if (!user_mode(regs))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
		PPC_WARN_EMULATED(mfpvr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op.  */
	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
		PPC_WARN_EMULATED(dcba, regs);
		return 0;
	}

	/* Emulate the mcrxr insn.  */
	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		PPC_WARN_EMULATED(mcrxr, regs);
		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
		if (tm_abort_check(regs,
				   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
			return -EINVAL;
		PPC_WARN_EMULATED(string, regs);
		return emulate_string_inst(regs, instword);
	}

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
		PPC_WARN_EMULATED(popcntb, regs);
		return emulate_popcntb_inst(regs, instword);
	}

	/* Emulate isel (Integer Select) instruction */
	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
		PPC_WARN_EMULATED(isel, regs);
		return emulate_isel(regs, instword);
	}

	/* Emulate sync instruction variants */
	if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
		PPC_WARN_EMULATED(sync, regs);
		asm volatile("sync");
		return 0;
	}

#ifdef CONFIG_PPC64
	/* Emulate the mfspr rD, DSCR. */
	if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
		PPC_INST_MFSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
		PPC_INST_MFSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mfdscr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_DSCR);
		return 0;
	}
	/* Emulate the mtspr DSCR, rD. */
	if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
		PPC_INST_MTSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
		PPC_INST_MTSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mtdscr, regs);
		rd = (instword >> 21) & 0x1f;
		current->thread.dscr = regs->gpr[rd];
		current->thread.dscr_inherit = 1;
		mtspr(SPRN_DSCR, current->thread.dscr);
		return 0;
	}
#endif

	return -EINVAL;
}
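/*
 * emulate_instruction() returns 0 on success (the caller then advances
 * the NIP past the emulated instruction and replays any pending single
 * step), -EFAULT when fetching the instruction or touching user memory
 * failed, and -EINVAL for anything it does not emulate.
 */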
int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}

#ifdef CONFIG_MATH_EMULATION
static int emulate_math(struct pt_regs *regs)
{
	int ret;
	extern int do_mathemu(struct pt_regs *regs);

	ret = do_mathemu(regs);
	if (ret >= 0)
		PPC_WARN_EMULATED(math, regs);

	switch (ret) {
	case 0:
		emulate_single_step(regs);
		return 0;
	case 1: {
		int code = 0;
		code = __parse_fpscr(current->thread.fp_state.fpscr);
		_exception(SIGFPE, regs, code, regs->nip);
		return 0;
	}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return 0;
	}

	return -1;
}
#else
static inline int emulate_math(struct pt_regs *regs) { return -1; }
#endif

void __kprobes program_check_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	unsigned int reason = get_reason(regs);

	/* We can now get here via a FP Unavailable exception if the core
	 * has no FPU, in that case the reason flags will be 0 */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		goto bail;
	}
	if (reason & REASON_TRAP) {
		/* Debugger is first in line to stop recursive faults in
		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
		if (debugger_bpt(regs))
			goto bail;

		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
				== NOTIFY_STOP)
			goto bail;

		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		goto bail;
	}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (reason & REASON_TM) {
		/* This is a TM "Bad Thing Exception" program check.
		 * This occurs when:
		 * -  An rfid/hrfid/mtmsrd attempts to cause an illegal
		 *    transition in TM states.
		 * -  A trechkpt is attempted when transactional.
		 * -  A treclaim is attempted when non transactional.
		 * -  A tend is illegally attempted.
		 * -  writing a TM SPR when transactional.
		 */
		if (!user_mode(regs) &&
		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		/* If usermode caused this, it's done something illegal and
		 * gets a SIGILL slap on the wrist.  We call it an illegal
		 * operand to distinguish from the instruction just being bad
		 * (e.g. executing a 'tend' on a CPU without TM!); it's an
		 * illegal /placement/ of a valid instruction.
		 */
		if (user_mode(regs)) {
			_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
			goto bail;
		} else {
			printk(KERN_EMERG "Unexpected TM Bad Thing exception "
			       "at %lx (msr 0x%x)\n", regs->nip, reason);
			die("Unrecoverable exception", regs, SIGABRT);
		}
	}
#endif

	/*
	 * If we took the program check in the kernel skip down to sending a
	 * SIGILL. The subsequent cases all relate to emulating instructions
	 * which we should only do for userspace. We also do not want to enable
	 * interrupts for kernel faults because that might lead to further
	 * faults, and lose the context of the original exception.
	 */
	if (!user_mode(regs))
		goto sigill;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0.  In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurrences etc. -dgibson 31/Mar/2003
	 */
	if (!emulate_math(regs))
		goto bail;

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs->nip += 4;
			emulate_single_step(regs);
			goto bail;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			goto bail;
		}
	}

sigill:
	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);

bail:
	exception_exit(prev_state);
}
/*
 * This occurs when running in hypervisor mode on POWER6 or later
 * and an illegal instruction is encountered.
 */
void __kprobes emulation_assist_interrupt(struct pt_regs *regs)
{
	regs->msr |= REASON_ILLEGAL;
	program_check_exception(regs);
}

void alignment_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	int sig, code, fixed = 0;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
		goto bail;

	/* we don't implement logging of alignment exceptions */
	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		fixed = fix_alignment(regs);

	if (fixed == 1) {
		regs->nip += 4;	/* skip over emulated instruction */
		emulate_single_step(regs);
		goto bail;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		sig = SIGSEGV;
		code = SEGV_ACCERR;
	} else {
		sig = SIGBUS;
		code = BUS_ADRALN;
	}
	if (user_mode(regs))
		_exception(sig, regs, code, regs->dar);
	else
		bad_page_fault(regs, regs->dar, sig);

bail:
	exception_exit(prev_state);
}

void StackOverflow(struct pt_regs *regs)
{
	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
	       current, regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}

void nonrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
	       regs->nip, regs->msr);
	debugger(regs);
	die("nonrecoverable exception", regs, SIGKILL);
}

void trace_syscall(struct pt_regs *regs)
{
	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n",
	       current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0],
	       regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
}

void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);

	exception_exit(prev_state);
}

void altivec_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (user_mode(regs)) {
		/* A user program has executed an altivec instruction,
		   but this kernel doesn't support altivec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		goto bail;
	}

	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);

bail:
	exception_exit(prev_state);
}

void vsx_unavailable_exception(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/* A user program has executed a vsx instruction,
		   but this kernel doesn't support vsx. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}

#ifdef CONFIG_PPC64
void facility_unavailable_exception(struct pt_regs *regs)
{
	static char *facility_strings[] = {
		[FSCR_FP_LG] = "FPU",
		[FSCR_VECVSX_LG] = "VMX/VSX",
		[FSCR_DSCR_LG] = "DSCR",
		[FSCR_PM_LG] = "PMU SPRs",
		[FSCR_BHRB_LG] = "BHRB",
		[FSCR_TM_LG] = "TM",
		[FSCR_EBB_LG] = "EBB",
		[FSCR_TAR_LG] = "TAR",
	};
	char *facility = "unknown";
	u64 value;
	u8 status;
	bool hv;

	hv = (regs->trap == 0xf80);
	if (hv)
		value = mfspr(SPRN_HFSCR);
	else
		value = mfspr(SPRN_FSCR);

	status = value >> 56;
	if (status == FSCR_DSCR_LG) {
		/* User is accessing the DSCR.  Set the inherit bit and allow
		 * the user to set it directly in future by setting via the
		 * FSCR DSCR bit.  We always leave HFSCR DSCR set.
		 */
		current->thread.dscr_inherit = 1;
		mtspr(SPRN_FSCR, value | FSCR_DSCR);
		return;
	}

	if ((status < ARRAY_SIZE(facility_strings)) &&
	    facility_strings[status])
		facility = facility_strings[status];

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
	       hv ? "Hypervisor " : "", facility, regs->nip, regs->msr);

	if (user_mode(regs)) {
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	die("Unexpected facility unavailable exception", regs, SIGABRT);
}
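/*
 * The status decode above relies on the (H)FSCR layout: the interrupt
 * cause ("IC") field occupies the top byte of the register, hence
 * "value >> 56", and its value indexes facility_strings[] via the
 * FSCR_*_LG bit numbers.
 */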
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM

void fp_unavailable_tm(struct pt_regs *regs)
{
	/* Note:  This does not handle any kind of FP laziness. */

	TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
		 regs->nip, regs->msr);
	tm_enable();

	/* We can only have got here if the task started using FP after
	 * beginning the transaction.  So, the transactional regs are just a
	 * copy of the checkpointed ones.  But, we still need to recheckpoint
	 * as we're enabling FP for the process; it will return, abort the
	 * transaction, and probably retry but now with FP enabled.  So the
	 * checkpointed FP registers need to be loaded.
	 */
	tm_reclaim(&current->thread, current->thread.regs->msr,
		   TM_CAUSE_FAC_UNAV);
	/* Reclaim didn't save out any FPRs to transact_fprs. */

	/* Enable FP for the task: */
	regs->msr |= (MSR_FP | current->thread.fpexc_mode);

	/* This loads and recheckpoints the FP registers from
	 * thread.fpr[].  They will remain in registers after the
	 * checkpoint so we don't need to reload them after.
	 */
	tm_recheckpoint(&current->thread, regs->msr);
}

#ifdef CONFIG_ALTIVEC
void altivec_unavailable_tm(struct pt_regs *regs)
{
	/* See the comments in fp_unavailable_tm().  This function operates
	 * the same way.
	 */

	TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
		 "MSR=%lx\n",
		 regs->nip, regs->msr);
	tm_enable();
	tm_reclaim(&current->thread, current->thread.regs->msr,
		   TM_CAUSE_FAC_UNAV);
	regs->msr |= MSR_VEC;
	tm_recheckpoint(&current->thread, regs->msr);
	current->thread.used_vr = 1;
}
#endif

#ifdef CONFIG_VSX
void vsx_unavailable_tm(struct pt_regs *regs)
{
	/* See the comments in fp_unavailable_tm().  This works similarly,
	 * though we're loading both FP and VEC registers in here.
	 *
	 * If FP isn't in use, load FP regs.  If VEC isn't in use, load VEC
	 * regs.  Either way, set MSR_VSX.
	 */

	TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx,"
		 "MSR=%lx\n",
		 regs->nip, regs->msr);

	tm_enable();
	/* This reclaims FP and/or VR regs if they're already enabled */
	tm_reclaim(&current->thread, current->thread.regs->msr,
		   TM_CAUSE_FAC_UNAV);

	regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode |
		MSR_VSX;
	/* This loads & recheckpoints FP and VRs. */
	tm_recheckpoint(&current->thread, regs->msr);
	current->thread.used_vsr = 1;
}
#endif
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
void performance_monitor_exception(struct pt_regs *regs)
{
	__get_cpu_var(irq_stat).pmu_irqs++;

	perf_irq(regs);
}

#ifdef CONFIG_8xx
void SoftwareEmulation(struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);

	if (!user_mode(regs)) {
		debugger(regs);
		die("Kernel Mode Unimplemented Instruction or SW FPU Emulation",
			regs, SIGFPE);
	}

	if (!emulate_math(regs))
		return;

	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
#endif /* CONFIG_8xx */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
{
	int changed = 0;
	/*
	 * Determine the cause of the debug event, clear the
	 * event flags and send a trap to the handler. Torez
	 */
	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
			     5);
		changed |= 0x01;
	} else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
		dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
		do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT,
			     6);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC1) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
			     1);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC2) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
			     2);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC3) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
			     3);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC4) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
			     4);
		changed |= 0x01;
	}
	/*
	 * At the point this routine was called, the MSR(DE) was turned off.
	 * Check all other debug flags and see if that bit needs to be turned
	 * back on or not.
	 */
	if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
			       current->thread.debug.dbcr1))
		regs->msr |= MSR_DE;
	else
		/* Make sure the IDM flag is off */
		current->thread.debug.dbcr0 &= ~DBCR0_IDM;

	if (changed & 0x01)
		mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
}

void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	current->thread.debug.dbsr = debug_status;

	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
	 * on server, it stops on the target of the branch. In order to simulate
	 * the server behaviour, we thus restart right away with a single step
	 * instead of stopping here when hitting a BT
	 */
	if (debug_status & DBSR_BT) {
		regs->msr &= ~MSR_DE;

		/* Disable BT */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
		/* Clear the BT event */
		mtspr(SPRN_DBSR, DBSR_BT);

		/* Do the single step trick only when coming from userspace */
		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_BT;
			current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
			regs->msr |= MSR_DE;
			return;
		}

		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}
		if (debugger_sstep(regs))
			return;
	} else if (debug_status & DBSR_IC) {	/* Instruction complete */
		regs->msr &= ~MSR_DE;

		/* Disable instruction completion */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
		/* Clear the instruction completion event */
		mtspr(SPRN_DBSR, DBSR_IC);

		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}

		if (debugger_sstep(regs))
			return;

		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_IC;
			if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
					       current->thread.debug.dbcr1))
				regs->msr |= MSR_DE;
			else
				/* Make sure the IDM bit is off */
				current->thread.debug.dbcr0 &= ~DBCR0_IDM;
		}

		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
	} else
		handle_debug(regs, debug_status);
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */

#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	flush_altivec_to_thread(current);

	PPC_WARN_EMULATED(altivec, regs);
	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
				   "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vr_state.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
void vsx_assist_exception(struct pt_regs *regs)
{
	if (!user_mode(regs)) {
		printk(KERN_EMERG "VSX assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VSX assist exception", regs, SIGILL);
	}

	flush_vsx_to_thread(current);
	printk(KERN_INFO "VSX assist not supported at %lx\n", regs->nip);
	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	/* We treat cache locking instructions from the user
	 * as priv ops, in the future we could try to do
	 * something smarter
	 */
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	return;
}
#endif /* CONFIG_FSL_BOOKE */

#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
	extern int do_spe_mathemu(struct pt_regs *regs);
	unsigned long spefscr;
	int fpexc_mode;
	int code = 0;
	int err;

	flush_spe_to_thread(current);

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF))
		code = FPE_FLTOVF;
	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND))
		code = FPE_FLTUND;
	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
		code = FPE_FLTDIV;
	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV))
		code = FPE_FLTINV;
	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
		code = FPE_FLTRES;

	err = do_spe_mathemu(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, code, regs->nip);
	}

	return;
}

void SPEFloatingPointRoundException(struct pt_regs *regs)
{
	extern int speround_handler(struct pt_regs *regs);
	int err;

	preempt_disable();
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	preempt_enable();

	regs->nip -= 4;
	err = speround_handler(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, 0, regs->nip);
		return;
	}
}
#endif

/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
	       regs->trap, regs->nip);
	die("Unrecoverable exception", regs, SIGABRT);
}

#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
	return;
}

void WatchdogException(struct pt_regs *regs)
{
	printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif

/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
	       regs->gpr[1], regs->nip);
	die("Bad kernel stack pointer", regs, SIGABRT);
}

void __init trap_init(void)
{
}


#ifdef CONFIG_PPC_EMULATED_STATS

#define WARN_EMULATED_SETUP(type)	.type = { .name = #type }

struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
	WARN_EMULATED_SETUP(altivec),
#endif
	WARN_EMULATED_SETUP(dcba),
	WARN_EMULATED_SETUP(dcbz),
	WARN_EMULATED_SETUP(fp_pair),
	WARN_EMULATED_SETUP(isel),
	WARN_EMULATED_SETUP(mcrxr),
	WARN_EMULATED_SETUP(mfpvr),
	WARN_EMULATED_SETUP(multiple),
	WARN_EMULATED_SETUP(popcntb),
	WARN_EMULATED_SETUP(spe),
	WARN_EMULATED_SETUP(string),
	WARN_EMULATED_SETUP(sync),
	WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
	WARN_EMULATED_SETUP(math),
#endif
#ifdef CONFIG_VSX
	WARN_EMULATED_SETUP(vsx),
#endif
#ifdef CONFIG_PPC64
	WARN_EMULATED_SETUP(mfdscr),
	WARN_EMULATED_SETUP(mtdscr),
#endif
};
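/*
 * Each counter above is exposed through debugfs by
 * ppc_warn_emulated_init() below, typically (assuming debugfs is
 * mounted in the usual place) as
 * /sys/kernel/debug/powerpc/emulated_instructions/<name>, next to a
 * writable "do_warn" switch that controls the ratelimited warnings.
 */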
u32 ppc_warn_emulated;

void ppc_warn_emulated_print(const char *type)
{
	pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
			    type);
}

static int __init ppc_warn_emulated_init(void)
{
	struct dentry *dir, *d;
	unsigned int i;
	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;

	if (!powerpc_debugfs_root)
		return -ENODEV;

	dir = debugfs_create_dir("emulated_instructions",
				 powerpc_debugfs_root);
	if (!dir)
		return -ENOMEM;

	d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
			       &ppc_warn_emulated);
	if (!d)
		goto fail;

	for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
		d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
				       (u32 *)&entries[i].val.counter);
		if (!d)
			goto fail;
	}

	return 0;

fail:
	debugfs_remove_recursive(dir);
	return -ENOMEM;
}

device_initcall(ppc_warn_emulated_init);

#endif /* CONFIG_PPC_EMULATED_STATS */