1 /* 2 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 3 * Copyright 2007-2010 Freescale Semiconductor, Inc. 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License 7 * as published by the Free Software Foundation; either version 8 * 2 of the License, or (at your option) any later version. 9 * 10 * Modified by Cort Dougan (cort@cs.nmt.edu) 11 * and Paul Mackerras (paulus@samba.org) 12 */ 13 14 /* 15 * This file handles the architecture-dependent parts of hardware exceptions 16 */ 17 18 #include <linux/errno.h> 19 #include <linux/sched.h> 20 #include <linux/kernel.h> 21 #include <linux/mm.h> 22 #include <linux/stddef.h> 23 #include <linux/unistd.h> 24 #include <linux/ptrace.h> 25 #include <linux/user.h> 26 #include <linux/interrupt.h> 27 #include <linux/init.h> 28 #include <linux/module.h> 29 #include <linux/prctl.h> 30 #include <linux/delay.h> 31 #include <linux/kprobes.h> 32 #include <linux/kexec.h> 33 #include <linux/backlight.h> 34 #include <linux/bug.h> 35 #include <linux/kdebug.h> 36 #include <linux/debugfs.h> 37 #include <linux/ratelimit.h> 38 #include <linux/context_tracking.h> 39 40 #include <asm/emulated_ops.h> 41 #include <asm/pgtable.h> 42 #include <asm/uaccess.h> 43 #include <asm/io.h> 44 #include <asm/machdep.h> 45 #include <asm/rtas.h> 46 #include <asm/pmc.h> 47 #include <asm/reg.h> 48 #ifdef CONFIG_PMAC_BACKLIGHT 49 #include <asm/backlight.h> 50 #endif 51 #ifdef CONFIG_PPC64 52 #include <asm/firmware.h> 53 #include <asm/processor.h> 54 #include <asm/tm.h> 55 #endif 56 #include <asm/kexec.h> 57 #include <asm/ppc-opcode.h> 58 #include <asm/rio.h> 59 #include <asm/fadump.h> 60 #include <asm/switch_to.h> 61 #include <asm/tm.h> 62 #include <asm/debug.h> 63 #include <sysdev/fsl_pci.h> 64 65 #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) 66 int (*__debugger)(struct pt_regs *regs) __read_mostly; 67 int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly; 68 int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly; 69 int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly; 70 int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly; 71 int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly; 72 int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly; 73 74 EXPORT_SYMBOL(__debugger); 75 EXPORT_SYMBOL(__debugger_ipi); 76 EXPORT_SYMBOL(__debugger_bpt); 77 EXPORT_SYMBOL(__debugger_sstep); 78 EXPORT_SYMBOL(__debugger_iabr_match); 79 EXPORT_SYMBOL(__debugger_break_match); 80 EXPORT_SYMBOL(__debugger_fault_handler); 81 #endif 82 83 /* Transactional Memory trap debug */ 84 #ifdef TM_DEBUG_SW 85 #define TM_DEBUG(x...) printk(KERN_INFO x) 86 #else 87 #define TM_DEBUG(x...) do { } while(0) 88 #endif 89 90 /* 91 * Trap & Exception support 92 */ 93 94 #ifdef CONFIG_PMAC_BACKLIGHT 95 static void pmac_backlight_unblank(void) 96 { 97 mutex_lock(&pmac_backlight_mutex); 98 if (pmac_backlight) { 99 struct backlight_properties *props; 100 101 props = &pmac_backlight->props; 102 props->brightness = props->max_brightness; 103 props->power = FB_BLANK_UNBLANK; 104 backlight_update_status(pmac_backlight); 105 } 106 mutex_unlock(&pmac_backlight_mutex); 107 } 108 #else 109 static inline void pmac_backlight_unblank(void) { } 110 #endif 111 112 static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; 113 static int die_owner = -1; 114 static unsigned int die_nest_count; 115 static int die_counter; 116 117 static unsigned __kprobes long oops_begin(struct pt_regs *regs) 118 { 119 int cpu; 120 unsigned long flags; 121 122 if (debugger(regs)) 123 return 1; 124 125 oops_enter(); 126 127 /* racy, but better than risking deadlock. */ 128 raw_local_irq_save(flags); 129 cpu = smp_processor_id(); 130 if (!arch_spin_trylock(&die_lock)) { 131 if (cpu == die_owner) 132 /* nested oops. should stop eventually */; 133 else 134 arch_spin_lock(&die_lock); 135 } 136 die_nest_count++; 137 die_owner = cpu; 138 console_verbose(); 139 bust_spinlocks(1); 140 if (machine_is(powermac)) 141 pmac_backlight_unblank(); 142 return flags; 143 } 144 145 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, 146 int signr) 147 { 148 bust_spinlocks(0); 149 die_owner = -1; 150 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); 151 die_nest_count--; 152 oops_exit(); 153 printk("\n"); 154 if (!die_nest_count) 155 /* Nest count reaches zero, release the lock. */ 156 arch_spin_unlock(&die_lock); 157 raw_local_irq_restore(flags); 158 159 crash_fadump(regs, "die oops"); 160 161 /* 162 * A system reset (0x100) is a request to dump, so we always send 163 * it through the crashdump code. 164 */ 165 if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) { 166 crash_kexec(regs); 167 168 /* 169 * We aren't the primary crash CPU. We need to send it 170 * to a holding pattern to avoid it ending up in the panic 171 * code. 172 */ 173 crash_kexec_secondary(regs); 174 } 175 176 if (!signr) 177 return; 178 179 /* 180 * While our oops output is serialised by a spinlock, output 181 * from panic() called below can race and corrupt it. If we 182 * know we are going to panic, delay for 1 second so we have a 183 * chance to get clean backtraces from all CPUs that are oopsing. 184 */ 185 if (in_interrupt() || panic_on_oops || !current->pid || 186 is_global_init(current)) { 187 mdelay(MSEC_PER_SEC); 188 } 189 190 if (in_interrupt()) 191 panic("Fatal exception in interrupt"); 192 if (panic_on_oops) 193 panic("Fatal exception"); 194 do_exit(signr); 195 } 196 197 static int __kprobes __die(const char *str, struct pt_regs *regs, long err) 198 { 199 printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); 200 #ifdef CONFIG_PREEMPT 201 printk("PREEMPT "); 202 #endif 203 #ifdef CONFIG_SMP 204 printk("SMP NR_CPUS=%d ", NR_CPUS); 205 #endif 206 #ifdef CONFIG_DEBUG_PAGEALLOC 207 printk("DEBUG_PAGEALLOC "); 208 #endif 209 #ifdef CONFIG_NUMA 210 printk("NUMA "); 211 #endif 212 printk("%s\n", ppc_md.name ? ppc_md.name : ""); 213 214 if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP) 215 return 1; 216 217 print_modules(); 218 show_regs(regs); 219 220 return 0; 221 } 222 223 void die(const char *str, struct pt_regs *regs, long err) 224 { 225 unsigned long flags = oops_begin(regs); 226 227 if (__die(str, regs, err)) 228 err = 0; 229 oops_end(flags, regs, err); 230 } 231 232 void user_single_step_siginfo(struct task_struct *tsk, 233 struct pt_regs *regs, siginfo_t *info) 234 { 235 memset(info, 0, sizeof(*info)); 236 info->si_signo = SIGTRAP; 237 info->si_code = TRAP_TRACE; 238 info->si_addr = (void __user *)regs->nip; 239 } 240 241 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) 242 { 243 siginfo_t info; 244 const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \ 245 "at %08lx nip %08lx lr %08lx code %x\n"; 246 const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \ 247 "at %016lx nip %016lx lr %016lx code %x\n"; 248 249 if (!user_mode(regs)) { 250 die("Exception in kernel mode", regs, signr); 251 return; 252 } 253 254 if (show_unhandled_signals && unhandled_signal(current, signr)) { 255 printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32, 256 current->comm, current->pid, signr, 257 addr, regs->nip, regs->link, code); 258 } 259 260 if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs)) 261 local_irq_enable(); 262 263 current->thread.trap_nr = code; 264 memset(&info, 0, sizeof(info)); 265 info.si_signo = signr; 266 info.si_code = code; 267 info.si_addr = (void __user *) addr; 268 force_sig_info(signr, &info, current); 269 } 270 271 #ifdef CONFIG_PPC64 272 void system_reset_exception(struct pt_regs *regs) 273 { 274 /* See if any machine dependent calls */ 275 if (ppc_md.system_reset_exception) { 276 if (ppc_md.system_reset_exception(regs)) 277 return; 278 } 279 280 die("System Reset", regs, SIGABRT); 281 282 /* Must die if the interrupt is not recoverable */ 283 if (!(regs->msr & MSR_RI)) 284 panic("Unrecoverable System Reset"); 285 286 /* What should we do here? We could issue a shutdown or hard reset. */ 287 } 288 289 /* 290 * This function is called in real mode. Strictly no printk's please. 291 * 292 * regs->nip and regs->msr contains srr0 and ssr1. 293 */ 294 long machine_check_early(struct pt_regs *regs) 295 { 296 long handled = 0; 297 298 __get_cpu_var(irq_stat).mce_exceptions++; 299 300 if (cur_cpu_spec && cur_cpu_spec->machine_check_early) 301 handled = cur_cpu_spec->machine_check_early(regs); 302 return handled; 303 } 304 305 #endif 306 307 /* 308 * I/O accesses can cause machine checks on powermacs. 309 * Check if the NIP corresponds to the address of a sync 310 * instruction for which there is an entry in the exception 311 * table. 312 * Note that the 601 only takes a machine check on TEA 313 * (transfer error ack) signal assertion, and does not 314 * set any of the top 16 bits of SRR1. 315 * -- paulus. 316 */ 317 static inline int check_io_access(struct pt_regs *regs) 318 { 319 #ifdef CONFIG_PPC32 320 unsigned long msr = regs->msr; 321 const struct exception_table_entry *entry; 322 unsigned int *nip = (unsigned int *)regs->nip; 323 324 if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000))) 325 && (entry = search_exception_tables(regs->nip)) != NULL) { 326 /* 327 * Check that it's a sync instruction, or somewhere 328 * in the twi; isync; nop sequence that inb/inw/inl uses. 329 * As the address is in the exception table 330 * we should be able to read the instr there. 331 * For the debug message, we look at the preceding 332 * load or store. 333 */ 334 if (*nip == 0x60000000) /* nop */ 335 nip -= 2; 336 else if (*nip == 0x4c00012c) /* isync */ 337 --nip; 338 if (*nip == 0x7c0004ac || (*nip >> 26) == 3) { 339 /* sync or twi */ 340 unsigned int rb; 341 342 --nip; 343 rb = (*nip >> 11) & 0x1f; 344 printk(KERN_DEBUG "%s bad port %lx at %p\n", 345 (*nip & 0x100)? "OUT to": "IN from", 346 regs->gpr[rb] - _IO_BASE, nip); 347 regs->msr |= MSR_RI; 348 regs->nip = entry->fixup; 349 return 1; 350 } 351 } 352 #endif /* CONFIG_PPC32 */ 353 return 0; 354 } 355 356 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 357 /* On 4xx, the reason for the machine check or program exception 358 is in the ESR. */ 359 #define get_reason(regs) ((regs)->dsisr) 360 #ifndef CONFIG_FSL_BOOKE 361 #define get_mc_reason(regs) ((regs)->dsisr) 362 #else 363 #define get_mc_reason(regs) (mfspr(SPRN_MCSR)) 364 #endif 365 #define REASON_FP ESR_FP 366 #define REASON_ILLEGAL (ESR_PIL | ESR_PUO) 367 #define REASON_PRIVILEGED ESR_PPR 368 #define REASON_TRAP ESR_PTR 369 370 /* single-step stuff */ 371 #define single_stepping(regs) (current->thread.debug.dbcr0 & DBCR0_IC) 372 #define clear_single_step(regs) (current->thread.debug.dbcr0 &= ~DBCR0_IC) 373 374 #else 375 /* On non-4xx, the reason for the machine check or program 376 exception is in the MSR. */ 377 #define get_reason(regs) ((regs)->msr) 378 #define get_mc_reason(regs) ((regs)->msr) 379 #define REASON_TM 0x200000 380 #define REASON_FP 0x100000 381 #define REASON_ILLEGAL 0x80000 382 #define REASON_PRIVILEGED 0x40000 383 #define REASON_TRAP 0x20000 384 385 #define single_stepping(regs) ((regs)->msr & MSR_SE) 386 #define clear_single_step(regs) ((regs)->msr &= ~MSR_SE) 387 #endif 388 389 #if defined(CONFIG_4xx) 390 int machine_check_4xx(struct pt_regs *regs) 391 { 392 unsigned long reason = get_mc_reason(regs); 393 394 if (reason & ESR_IMCP) { 395 printk("Instruction"); 396 mtspr(SPRN_ESR, reason & ~ESR_IMCP); 397 } else 398 printk("Data"); 399 printk(" machine check in kernel mode.\n"); 400 401 return 0; 402 } 403 404 int machine_check_440A(struct pt_regs *regs) 405 { 406 unsigned long reason = get_mc_reason(regs); 407 408 printk("Machine check in kernel mode.\n"); 409 if (reason & ESR_IMCP){ 410 printk("Instruction Synchronous Machine Check exception\n"); 411 mtspr(SPRN_ESR, reason & ~ESR_IMCP); 412 } 413 else { 414 u32 mcsr = mfspr(SPRN_MCSR); 415 if (mcsr & MCSR_IB) 416 printk("Instruction Read PLB Error\n"); 417 if (mcsr & MCSR_DRB) 418 printk("Data Read PLB Error\n"); 419 if (mcsr & MCSR_DWB) 420 printk("Data Write PLB Error\n"); 421 if (mcsr & MCSR_TLBP) 422 printk("TLB Parity Error\n"); 423 if (mcsr & MCSR_ICP){ 424 flush_instruction_cache(); 425 printk("I-Cache Parity Error\n"); 426 } 427 if (mcsr & MCSR_DCSP) 428 printk("D-Cache Search Parity Error\n"); 429 if (mcsr & MCSR_DCFP) 430 printk("D-Cache Flush Parity Error\n"); 431 if (mcsr & MCSR_IMPE) 432 printk("Machine Check exception is imprecise\n"); 433 434 /* Clear MCSR */ 435 mtspr(SPRN_MCSR, mcsr); 436 } 437 return 0; 438 } 439 440 int machine_check_47x(struct pt_regs *regs) 441 { 442 unsigned long reason = get_mc_reason(regs); 443 u32 mcsr; 444 445 printk(KERN_ERR "Machine check in kernel mode.\n"); 446 if (reason & ESR_IMCP) { 447 printk(KERN_ERR 448 "Instruction Synchronous Machine Check exception\n"); 449 mtspr(SPRN_ESR, reason & ~ESR_IMCP); 450 return 0; 451 } 452 mcsr = mfspr(SPRN_MCSR); 453 if (mcsr & MCSR_IB) 454 printk(KERN_ERR "Instruction Read PLB Error\n"); 455 if (mcsr & MCSR_DRB) 456 printk(KERN_ERR "Data Read PLB Error\n"); 457 if (mcsr & MCSR_DWB) 458 printk(KERN_ERR "Data Write PLB Error\n"); 459 if (mcsr & MCSR_TLBP) 460 printk(KERN_ERR "TLB Parity Error\n"); 461 if (mcsr & MCSR_ICP) { 462 flush_instruction_cache(); 463 printk(KERN_ERR "I-Cache Parity Error\n"); 464 } 465 if (mcsr & MCSR_DCSP) 466 printk(KERN_ERR "D-Cache Search Parity Error\n"); 467 if (mcsr & PPC47x_MCSR_GPR) 468 printk(KERN_ERR "GPR Parity Error\n"); 469 if (mcsr & PPC47x_MCSR_FPR) 470 printk(KERN_ERR "FPR Parity Error\n"); 471 if (mcsr & PPC47x_MCSR_IPR) 472 printk(KERN_ERR "Machine Check exception is imprecise\n"); 473 474 /* Clear MCSR */ 475 mtspr(SPRN_MCSR, mcsr); 476 477 return 0; 478 } 479 #elif defined(CONFIG_E500) 480 int machine_check_e500mc(struct pt_regs *regs) 481 { 482 unsigned long mcsr = mfspr(SPRN_MCSR); 483 unsigned long reason = mcsr; 484 int recoverable = 1; 485 486 if (reason & MCSR_LD) { 487 recoverable = fsl_rio_mcheck_exception(regs); 488 if (recoverable == 1) 489 goto silent_out; 490 } 491 492 printk("Machine check in kernel mode.\n"); 493 printk("Caused by (from MCSR=%lx): ", reason); 494 495 if (reason & MCSR_MCP) 496 printk("Machine Check Signal\n"); 497 498 if (reason & MCSR_ICPERR) { 499 printk("Instruction Cache Parity Error\n"); 500 501 /* 502 * This is recoverable by invalidating the i-cache. 503 */ 504 mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI); 505 while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI) 506 ; 507 508 /* 509 * This will generally be accompanied by an instruction 510 * fetch error report -- only treat MCSR_IF as fatal 511 * if it wasn't due to an L1 parity error. 512 */ 513 reason &= ~MCSR_IF; 514 } 515 516 if (reason & MCSR_DCPERR_MC) { 517 printk("Data Cache Parity Error\n"); 518 519 /* 520 * In write shadow mode we auto-recover from the error, but it 521 * may still get logged and cause a machine check. We should 522 * only treat the non-write shadow case as non-recoverable. 523 */ 524 if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS)) 525 recoverable = 0; 526 } 527 528 if (reason & MCSR_L2MMU_MHIT) { 529 printk("Hit on multiple TLB entries\n"); 530 recoverable = 0; 531 } 532 533 if (reason & MCSR_NMI) 534 printk("Non-maskable interrupt\n"); 535 536 if (reason & MCSR_IF) { 537 printk("Instruction Fetch Error Report\n"); 538 recoverable = 0; 539 } 540 541 if (reason & MCSR_LD) { 542 printk("Load Error Report\n"); 543 recoverable = 0; 544 } 545 546 if (reason & MCSR_ST) { 547 printk("Store Error Report\n"); 548 recoverable = 0; 549 } 550 551 if (reason & MCSR_LDG) { 552 printk("Guarded Load Error Report\n"); 553 recoverable = 0; 554 } 555 556 if (reason & MCSR_TLBSYNC) 557 printk("Simultaneous tlbsync operations\n"); 558 559 if (reason & MCSR_BSL2_ERR) { 560 printk("Level 2 Cache Error\n"); 561 recoverable = 0; 562 } 563 564 if (reason & MCSR_MAV) { 565 u64 addr; 566 567 addr = mfspr(SPRN_MCAR); 568 addr |= (u64)mfspr(SPRN_MCARU) << 32; 569 570 printk("Machine Check %s Address: %#llx\n", 571 reason & MCSR_MEA ? "Effective" : "Physical", addr); 572 } 573 574 silent_out: 575 mtspr(SPRN_MCSR, mcsr); 576 return mfspr(SPRN_MCSR) == 0 && recoverable; 577 } 578 579 int machine_check_e500(struct pt_regs *regs) 580 { 581 unsigned long reason = get_mc_reason(regs); 582 583 if (reason & MCSR_BUS_RBERR) { 584 if (fsl_rio_mcheck_exception(regs)) 585 return 1; 586 if (fsl_pci_mcheck_exception(regs)) 587 return 1; 588 } 589 590 printk("Machine check in kernel mode.\n"); 591 printk("Caused by (from MCSR=%lx): ", reason); 592 593 if (reason & MCSR_MCP) 594 printk("Machine Check Signal\n"); 595 if (reason & MCSR_ICPERR) 596 printk("Instruction Cache Parity Error\n"); 597 if (reason & MCSR_DCP_PERR) 598 printk("Data Cache Push Parity Error\n"); 599 if (reason & MCSR_DCPERR) 600 printk("Data Cache Parity Error\n"); 601 if (reason & MCSR_BUS_IAERR) 602 printk("Bus - Instruction Address Error\n"); 603 if (reason & MCSR_BUS_RAERR) 604 printk("Bus - Read Address Error\n"); 605 if (reason & MCSR_BUS_WAERR) 606 printk("Bus - Write Address Error\n"); 607 if (reason & MCSR_BUS_IBERR) 608 printk("Bus - Instruction Data Error\n"); 609 if (reason & MCSR_BUS_RBERR) 610 printk("Bus - Read Data Bus Error\n"); 611 if (reason & MCSR_BUS_WBERR) 612 printk("Bus - Read Data Bus Error\n"); 613 if (reason & MCSR_BUS_IPERR) 614 printk("Bus - Instruction Parity Error\n"); 615 if (reason & MCSR_BUS_RPERR) 616 printk("Bus - Read Parity Error\n"); 617 618 return 0; 619 } 620 621 int machine_check_generic(struct pt_regs *regs) 622 { 623 return 0; 624 } 625 #elif defined(CONFIG_E200) 626 int machine_check_e200(struct pt_regs *regs) 627 { 628 unsigned long reason = get_mc_reason(regs); 629 630 printk("Machine check in kernel mode.\n"); 631 printk("Caused by (from MCSR=%lx): ", reason); 632 633 if (reason & MCSR_MCP) 634 printk("Machine Check Signal\n"); 635 if (reason & MCSR_CP_PERR) 636 printk("Cache Push Parity Error\n"); 637 if (reason & MCSR_CPERR) 638 printk("Cache Parity Error\n"); 639 if (reason & MCSR_EXCP_ERR) 640 printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n"); 641 if (reason & MCSR_BUS_IRERR) 642 printk("Bus - Read Bus Error on instruction fetch\n"); 643 if (reason & MCSR_BUS_DRERR) 644 printk("Bus - Read Bus Error on data load\n"); 645 if (reason & MCSR_BUS_WRERR) 646 printk("Bus - Write Bus Error on buffered store or cache line push\n"); 647 648 return 0; 649 } 650 #else 651 int machine_check_generic(struct pt_regs *regs) 652 { 653 unsigned long reason = get_mc_reason(regs); 654 655 printk("Machine check in kernel mode.\n"); 656 printk("Caused by (from SRR1=%lx): ", reason); 657 switch (reason & 0x601F0000) { 658 case 0x80000: 659 printk("Machine check signal\n"); 660 break; 661 case 0: /* for 601 */ 662 case 0x40000: 663 case 0x140000: /* 7450 MSS error and TEA */ 664 printk("Transfer error ack signal\n"); 665 break; 666 case 0x20000: 667 printk("Data parity error signal\n"); 668 break; 669 case 0x10000: 670 printk("Address parity error signal\n"); 671 break; 672 case 0x20000000: 673 printk("L1 Data Cache error\n"); 674 break; 675 case 0x40000000: 676 printk("L1 Instruction Cache error\n"); 677 break; 678 case 0x00100000: 679 printk("L2 data cache parity error\n"); 680 break; 681 default: 682 printk("Unknown values in msr\n"); 683 } 684 return 0; 685 } 686 #endif /* everything else */ 687 688 void machine_check_exception(struct pt_regs *regs) 689 { 690 enum ctx_state prev_state = exception_enter(); 691 int recover = 0; 692 693 __get_cpu_var(irq_stat).mce_exceptions++; 694 695 /* See if any machine dependent calls. In theory, we would want 696 * to call the CPU first, and call the ppc_md. one if the CPU 697 * one returns a positive number. However there is existing code 698 * that assumes the board gets a first chance, so let's keep it 699 * that way for now and fix things later. --BenH. 700 */ 701 if (ppc_md.machine_check_exception) 702 recover = ppc_md.machine_check_exception(regs); 703 else if (cur_cpu_spec->machine_check) 704 recover = cur_cpu_spec->machine_check(regs); 705 706 if (recover > 0) 707 goto bail; 708 709 #if defined(CONFIG_8xx) && defined(CONFIG_PCI) 710 /* the qspan pci read routines can cause machine checks -- Cort 711 * 712 * yuck !!! that totally needs to go away ! There are better ways 713 * to deal with that than having a wart in the mcheck handler. 714 * -- BenH 715 */ 716 bad_page_fault(regs, regs->dar, SIGBUS); 717 goto bail; 718 #endif 719 720 if (debugger_fault_handler(regs)) 721 goto bail; 722 723 if (check_io_access(regs)) 724 goto bail; 725 726 die("Machine check", regs, SIGBUS); 727 728 /* Must die if the interrupt is not recoverable */ 729 if (!(regs->msr & MSR_RI)) 730 panic("Unrecoverable Machine check"); 731 732 bail: 733 exception_exit(prev_state); 734 } 735 736 void SMIException(struct pt_regs *regs) 737 { 738 die("System Management Interrupt", regs, SIGABRT); 739 } 740 741 void unknown_exception(struct pt_regs *regs) 742 { 743 enum ctx_state prev_state = exception_enter(); 744 745 printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", 746 regs->nip, regs->msr, regs->trap); 747 748 _exception(SIGTRAP, regs, 0, 0); 749 750 exception_exit(prev_state); 751 } 752 753 void instruction_breakpoint_exception(struct pt_regs *regs) 754 { 755 enum ctx_state prev_state = exception_enter(); 756 757 if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5, 758 5, SIGTRAP) == NOTIFY_STOP) 759 goto bail; 760 if (debugger_iabr_match(regs)) 761 goto bail; 762 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); 763 764 bail: 765 exception_exit(prev_state); 766 } 767 768 void RunModeException(struct pt_regs *regs) 769 { 770 _exception(SIGTRAP, regs, 0, 0); 771 } 772 773 void __kprobes single_step_exception(struct pt_regs *regs) 774 { 775 enum ctx_state prev_state = exception_enter(); 776 777 clear_single_step(regs); 778 779 if (notify_die(DIE_SSTEP, "single_step", regs, 5, 780 5, SIGTRAP) == NOTIFY_STOP) 781 goto bail; 782 if (debugger_sstep(regs)) 783 goto bail; 784 785 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); 786 787 bail: 788 exception_exit(prev_state); 789 } 790 791 /* 792 * After we have successfully emulated an instruction, we have to 793 * check if the instruction was being single-stepped, and if so, 794 * pretend we got a single-step exception. This was pointed out 795 * by Kumar Gala. -- paulus 796 */ 797 static void emulate_single_step(struct pt_regs *regs) 798 { 799 if (single_stepping(regs)) 800 single_step_exception(regs); 801 } 802 803 static inline int __parse_fpscr(unsigned long fpscr) 804 { 805 int ret = 0; 806 807 /* Invalid operation */ 808 if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX)) 809 ret = FPE_FLTINV; 810 811 /* Overflow */ 812 else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX)) 813 ret = FPE_FLTOVF; 814 815 /* Underflow */ 816 else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX)) 817 ret = FPE_FLTUND; 818 819 /* Divide by zero */ 820 else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX)) 821 ret = FPE_FLTDIV; 822 823 /* Inexact result */ 824 else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX)) 825 ret = FPE_FLTRES; 826 827 return ret; 828 } 829 830 static void parse_fpe(struct pt_regs *regs) 831 { 832 int code = 0; 833 834 flush_fp_to_thread(current); 835 836 code = __parse_fpscr(current->thread.fp_state.fpscr); 837 838 _exception(SIGFPE, regs, code, regs->nip); 839 } 840 841 /* 842 * Illegal instruction emulation support. Originally written to 843 * provide the PVR to user applications using the mfspr rd, PVR. 844 * Return non-zero if we can't emulate, or -EFAULT if the associated 845 * memory access caused an access fault. Return zero on success. 846 * 847 * There are a couple of ways to do this, either "decode" the instruction 848 * or directly match lots of bits. In this case, matching lots of 849 * bits is faster and easier. 850 * 851 */ 852 static int emulate_string_inst(struct pt_regs *regs, u32 instword) 853 { 854 u8 rT = (instword >> 21) & 0x1f; 855 u8 rA = (instword >> 16) & 0x1f; 856 u8 NB_RB = (instword >> 11) & 0x1f; 857 u32 num_bytes; 858 unsigned long EA; 859 int pos = 0; 860 861 /* Early out if we are an invalid form of lswx */ 862 if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX) 863 if ((rT == rA) || (rT == NB_RB)) 864 return -EINVAL; 865 866 EA = (rA == 0) ? 0 : regs->gpr[rA]; 867 868 switch (instword & PPC_INST_STRING_MASK) { 869 case PPC_INST_LSWX: 870 case PPC_INST_STSWX: 871 EA += NB_RB; 872 num_bytes = regs->xer & 0x7f; 873 break; 874 case PPC_INST_LSWI: 875 case PPC_INST_STSWI: 876 num_bytes = (NB_RB == 0) ? 32 : NB_RB; 877 break; 878 default: 879 return -EINVAL; 880 } 881 882 while (num_bytes != 0) 883 { 884 u8 val; 885 u32 shift = 8 * (3 - (pos & 0x3)); 886 887 /* if process is 32-bit, clear upper 32 bits of EA */ 888 if ((regs->msr & MSR_64BIT) == 0) 889 EA &= 0xFFFFFFFF; 890 891 switch ((instword & PPC_INST_STRING_MASK)) { 892 case PPC_INST_LSWX: 893 case PPC_INST_LSWI: 894 if (get_user(val, (u8 __user *)EA)) 895 return -EFAULT; 896 /* first time updating this reg, 897 * zero it out */ 898 if (pos == 0) 899 regs->gpr[rT] = 0; 900 regs->gpr[rT] |= val << shift; 901 break; 902 case PPC_INST_STSWI: 903 case PPC_INST_STSWX: 904 val = regs->gpr[rT] >> shift; 905 if (put_user(val, (u8 __user *)EA)) 906 return -EFAULT; 907 break; 908 } 909 /* move EA to next address */ 910 EA += 1; 911 num_bytes--; 912 913 /* manage our position within the register */ 914 if (++pos == 4) { 915 pos = 0; 916 if (++rT == 32) 917 rT = 0; 918 } 919 } 920 921 return 0; 922 } 923 924 static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword) 925 { 926 u32 ra,rs; 927 unsigned long tmp; 928 929 ra = (instword >> 16) & 0x1f; 930 rs = (instword >> 21) & 0x1f; 931 932 tmp = regs->gpr[rs]; 933 tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL); 934 tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL); 935 tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL; 936 regs->gpr[ra] = tmp; 937 938 return 0; 939 } 940 941 static int emulate_isel(struct pt_regs *regs, u32 instword) 942 { 943 u8 rT = (instword >> 21) & 0x1f; 944 u8 rA = (instword >> 16) & 0x1f; 945 u8 rB = (instword >> 11) & 0x1f; 946 u8 BC = (instword >> 6) & 0x1f; 947 u8 bit; 948 unsigned long tmp; 949 950 tmp = (rA == 0) ? 0 : regs->gpr[rA]; 951 bit = (regs->ccr >> (31 - BC)) & 0x1; 952 953 regs->gpr[rT] = bit ? tmp : regs->gpr[rB]; 954 955 return 0; 956 } 957 958 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 959 static inline bool tm_abort_check(struct pt_regs *regs, int cause) 960 { 961 /* If we're emulating a load/store in an active transaction, we cannot 962 * emulate it as the kernel operates in transaction suspended context. 963 * We need to abort the transaction. This creates a persistent TM 964 * abort so tell the user what caused it with a new code. 965 */ 966 if (MSR_TM_TRANSACTIONAL(regs->msr)) { 967 tm_enable(); 968 tm_abort(cause); 969 return true; 970 } 971 return false; 972 } 973 #else 974 static inline bool tm_abort_check(struct pt_regs *regs, int reason) 975 { 976 return false; 977 } 978 #endif 979 980 static int emulate_instruction(struct pt_regs *regs) 981 { 982 u32 instword; 983 u32 rd; 984 985 if (!user_mode(regs)) 986 return -EINVAL; 987 CHECK_FULL_REGS(regs); 988 989 if (get_user(instword, (u32 __user *)(regs->nip))) 990 return -EFAULT; 991 992 /* Emulate the mfspr rD, PVR. */ 993 if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) { 994 PPC_WARN_EMULATED(mfpvr, regs); 995 rd = (instword >> 21) & 0x1f; 996 regs->gpr[rd] = mfspr(SPRN_PVR); 997 return 0; 998 } 999 1000 /* Emulating the dcba insn is just a no-op. */ 1001 if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) { 1002 PPC_WARN_EMULATED(dcba, regs); 1003 return 0; 1004 } 1005 1006 /* Emulate the mcrxr insn. */ 1007 if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) { 1008 int shift = (instword >> 21) & 0x1c; 1009 unsigned long msk = 0xf0000000UL >> shift; 1010 1011 PPC_WARN_EMULATED(mcrxr, regs); 1012 regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk); 1013 regs->xer &= ~0xf0000000UL; 1014 return 0; 1015 } 1016 1017 /* Emulate load/store string insn. */ 1018 if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) { 1019 if (tm_abort_check(regs, 1020 TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT)) 1021 return -EINVAL; 1022 PPC_WARN_EMULATED(string, regs); 1023 return emulate_string_inst(regs, instword); 1024 } 1025 1026 /* Emulate the popcntb (Population Count Bytes) instruction. */ 1027 if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) { 1028 PPC_WARN_EMULATED(popcntb, regs); 1029 return emulate_popcntb_inst(regs, instword); 1030 } 1031 1032 /* Emulate isel (Integer Select) instruction */ 1033 if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) { 1034 PPC_WARN_EMULATED(isel, regs); 1035 return emulate_isel(regs, instword); 1036 } 1037 1038 /* Emulate sync instruction variants */ 1039 if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) { 1040 PPC_WARN_EMULATED(sync, regs); 1041 asm volatile("sync"); 1042 return 0; 1043 } 1044 1045 #ifdef CONFIG_PPC64 1046 /* Emulate the mfspr rD, DSCR. */ 1047 if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) == 1048 PPC_INST_MFSPR_DSCR_USER) || 1049 ((instword & PPC_INST_MFSPR_DSCR_MASK) == 1050 PPC_INST_MFSPR_DSCR)) && 1051 cpu_has_feature(CPU_FTR_DSCR)) { 1052 PPC_WARN_EMULATED(mfdscr, regs); 1053 rd = (instword >> 21) & 0x1f; 1054 regs->gpr[rd] = mfspr(SPRN_DSCR); 1055 return 0; 1056 } 1057 /* Emulate the mtspr DSCR, rD. */ 1058 if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) == 1059 PPC_INST_MTSPR_DSCR_USER) || 1060 ((instword & PPC_INST_MTSPR_DSCR_MASK) == 1061 PPC_INST_MTSPR_DSCR)) && 1062 cpu_has_feature(CPU_FTR_DSCR)) { 1063 PPC_WARN_EMULATED(mtdscr, regs); 1064 rd = (instword >> 21) & 0x1f; 1065 current->thread.dscr = regs->gpr[rd]; 1066 current->thread.dscr_inherit = 1; 1067 mtspr(SPRN_DSCR, current->thread.dscr); 1068 return 0; 1069 } 1070 #endif 1071 1072 return -EINVAL; 1073 } 1074 1075 int is_valid_bugaddr(unsigned long addr) 1076 { 1077 return is_kernel_addr(addr); 1078 } 1079 1080 #ifdef CONFIG_MATH_EMULATION 1081 static int emulate_math(struct pt_regs *regs) 1082 { 1083 int ret; 1084 extern int do_mathemu(struct pt_regs *regs); 1085 1086 ret = do_mathemu(regs); 1087 if (ret >= 0) 1088 PPC_WARN_EMULATED(math, regs); 1089 1090 switch (ret) { 1091 case 0: 1092 emulate_single_step(regs); 1093 return 0; 1094 case 1: { 1095 int code = 0; 1096 code = __parse_fpscr(current->thread.fp_state.fpscr); 1097 _exception(SIGFPE, regs, code, regs->nip); 1098 return 0; 1099 } 1100 case -EFAULT: 1101 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 1102 return 0; 1103 } 1104 1105 return -1; 1106 } 1107 #else 1108 static inline int emulate_math(struct pt_regs *regs) { return -1; } 1109 #endif 1110 1111 void __kprobes program_check_exception(struct pt_regs *regs) 1112 { 1113 enum ctx_state prev_state = exception_enter(); 1114 unsigned int reason = get_reason(regs); 1115 1116 /* We can now get here via a FP Unavailable exception if the core 1117 * has no FPU, in that case the reason flags will be 0 */ 1118 1119 if (reason & REASON_FP) { 1120 /* IEEE FP exception */ 1121 parse_fpe(regs); 1122 goto bail; 1123 } 1124 if (reason & REASON_TRAP) { 1125 /* Debugger is first in line to stop recursive faults in 1126 * rcu_lock, notify_die, or atomic_notifier_call_chain */ 1127 if (debugger_bpt(regs)) 1128 goto bail; 1129 1130 /* trap exception */ 1131 if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP) 1132 == NOTIFY_STOP) 1133 goto bail; 1134 1135 if (!(regs->msr & MSR_PR) && /* not user-mode */ 1136 report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { 1137 regs->nip += 4; 1138 goto bail; 1139 } 1140 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); 1141 goto bail; 1142 } 1143 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1144 if (reason & REASON_TM) { 1145 /* This is a TM "Bad Thing Exception" program check. 1146 * This occurs when: 1147 * - An rfid/hrfid/mtmsrd attempts to cause an illegal 1148 * transition in TM states. 1149 * - A trechkpt is attempted when transactional. 1150 * - A treclaim is attempted when non transactional. 1151 * - A tend is illegally attempted. 1152 * - writing a TM SPR when transactional. 1153 */ 1154 if (!user_mode(regs) && 1155 report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { 1156 regs->nip += 4; 1157 goto bail; 1158 } 1159 /* If usermode caused this, it's done something illegal and 1160 * gets a SIGILL slap on the wrist. We call it an illegal 1161 * operand to distinguish from the instruction just being bad 1162 * (e.g. executing a 'tend' on a CPU without TM!); it's an 1163 * illegal /placement/ of a valid instruction. 1164 */ 1165 if (user_mode(regs)) { 1166 _exception(SIGILL, regs, ILL_ILLOPN, regs->nip); 1167 goto bail; 1168 } else { 1169 printk(KERN_EMERG "Unexpected TM Bad Thing exception " 1170 "at %lx (msr 0x%x)\n", regs->nip, reason); 1171 die("Unrecoverable exception", regs, SIGABRT); 1172 } 1173 } 1174 #endif 1175 1176 /* 1177 * If we took the program check in the kernel skip down to sending a 1178 * SIGILL. The subsequent cases all relate to emulating instructions 1179 * which we should only do for userspace. We also do not want to enable 1180 * interrupts for kernel faults because that might lead to further 1181 * faults, and loose the context of the original exception. 1182 */ 1183 if (!user_mode(regs)) 1184 goto sigill; 1185 1186 /* We restore the interrupt state now */ 1187 if (!arch_irq_disabled_regs(regs)) 1188 local_irq_enable(); 1189 1190 /* (reason & REASON_ILLEGAL) would be the obvious thing here, 1191 * but there seems to be a hardware bug on the 405GP (RevD) 1192 * that means ESR is sometimes set incorrectly - either to 1193 * ESR_DST (!?) or 0. In the process of chasing this with the 1194 * hardware people - not sure if it can happen on any illegal 1195 * instruction or only on FP instructions, whether there is a 1196 * pattern to occurrences etc. -dgibson 31/Mar/2003 1197 */ 1198 if (!emulate_math(regs)) 1199 goto bail; 1200 1201 /* Try to emulate it if we should. */ 1202 if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) { 1203 switch (emulate_instruction(regs)) { 1204 case 0: 1205 regs->nip += 4; 1206 emulate_single_step(regs); 1207 goto bail; 1208 case -EFAULT: 1209 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 1210 goto bail; 1211 } 1212 } 1213 1214 sigill: 1215 if (reason & REASON_PRIVILEGED) 1216 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); 1217 else 1218 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1219 1220 bail: 1221 exception_exit(prev_state); 1222 } 1223 1224 /* 1225 * This occurs when running in hypervisor mode on POWER6 or later 1226 * and an illegal instruction is encountered. 1227 */ 1228 void __kprobes emulation_assist_interrupt(struct pt_regs *regs) 1229 { 1230 regs->msr |= REASON_ILLEGAL; 1231 program_check_exception(regs); 1232 } 1233 1234 void alignment_exception(struct pt_regs *regs) 1235 { 1236 enum ctx_state prev_state = exception_enter(); 1237 int sig, code, fixed = 0; 1238 1239 /* We restore the interrupt state now */ 1240 if (!arch_irq_disabled_regs(regs)) 1241 local_irq_enable(); 1242 1243 if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT)) 1244 goto bail; 1245 1246 /* we don't implement logging of alignment exceptions */ 1247 if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) 1248 fixed = fix_alignment(regs); 1249 1250 if (fixed == 1) { 1251 regs->nip += 4; /* skip over emulated instruction */ 1252 emulate_single_step(regs); 1253 goto bail; 1254 } 1255 1256 /* Operand address was bad */ 1257 if (fixed == -EFAULT) { 1258 sig = SIGSEGV; 1259 code = SEGV_ACCERR; 1260 } else { 1261 sig = SIGBUS; 1262 code = BUS_ADRALN; 1263 } 1264 if (user_mode(regs)) 1265 _exception(sig, regs, code, regs->dar); 1266 else 1267 bad_page_fault(regs, regs->dar, sig); 1268 1269 bail: 1270 exception_exit(prev_state); 1271 } 1272 1273 void StackOverflow(struct pt_regs *regs) 1274 { 1275 printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n", 1276 current, regs->gpr[1]); 1277 debugger(regs); 1278 show_regs(regs); 1279 panic("kernel stack overflow"); 1280 } 1281 1282 void nonrecoverable_exception(struct pt_regs *regs) 1283 { 1284 printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n", 1285 regs->nip, regs->msr); 1286 debugger(regs); 1287 die("nonrecoverable exception", regs, SIGKILL); 1288 } 1289 1290 void trace_syscall(struct pt_regs *regs) 1291 { 1292 printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n", 1293 current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0], 1294 regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted()); 1295 } 1296 1297 void kernel_fp_unavailable_exception(struct pt_regs *regs) 1298 { 1299 enum ctx_state prev_state = exception_enter(); 1300 1301 printk(KERN_EMERG "Unrecoverable FP Unavailable Exception " 1302 "%lx at %lx\n", regs->trap, regs->nip); 1303 die("Unrecoverable FP Unavailable Exception", regs, SIGABRT); 1304 1305 exception_exit(prev_state); 1306 } 1307 1308 void altivec_unavailable_exception(struct pt_regs *regs) 1309 { 1310 enum ctx_state prev_state = exception_enter(); 1311 1312 if (user_mode(regs)) { 1313 /* A user program has executed an altivec instruction, 1314 but this kernel doesn't support altivec. */ 1315 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1316 goto bail; 1317 } 1318 1319 printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception " 1320 "%lx at %lx\n", regs->trap, regs->nip); 1321 die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT); 1322 1323 bail: 1324 exception_exit(prev_state); 1325 } 1326 1327 void vsx_unavailable_exception(struct pt_regs *regs) 1328 { 1329 if (user_mode(regs)) { 1330 /* A user program has executed an vsx instruction, 1331 but this kernel doesn't support vsx. */ 1332 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1333 return; 1334 } 1335 1336 printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception " 1337 "%lx at %lx\n", regs->trap, regs->nip); 1338 die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT); 1339 } 1340 1341 #ifdef CONFIG_PPC64 1342 void facility_unavailable_exception(struct pt_regs *regs) 1343 { 1344 static char *facility_strings[] = { 1345 [FSCR_FP_LG] = "FPU", 1346 [FSCR_VECVSX_LG] = "VMX/VSX", 1347 [FSCR_DSCR_LG] = "DSCR", 1348 [FSCR_PM_LG] = "PMU SPRs", 1349 [FSCR_BHRB_LG] = "BHRB", 1350 [FSCR_TM_LG] = "TM", 1351 [FSCR_EBB_LG] = "EBB", 1352 [FSCR_TAR_LG] = "TAR", 1353 }; 1354 char *facility = "unknown"; 1355 u64 value; 1356 u8 status; 1357 bool hv; 1358 1359 hv = (regs->trap == 0xf80); 1360 if (hv) 1361 value = mfspr(SPRN_HFSCR); 1362 else 1363 value = mfspr(SPRN_FSCR); 1364 1365 status = value >> 56; 1366 if (status == FSCR_DSCR_LG) { 1367 /* User is acessing the DSCR. Set the inherit bit and allow 1368 * the user to set it directly in future by setting via the 1369 * FSCR DSCR bit. We always leave HFSCR DSCR set. 1370 */ 1371 current->thread.dscr_inherit = 1; 1372 mtspr(SPRN_FSCR, value | FSCR_DSCR); 1373 return; 1374 } 1375 1376 if ((status < ARRAY_SIZE(facility_strings)) && 1377 facility_strings[status]) 1378 facility = facility_strings[status]; 1379 1380 /* We restore the interrupt state now */ 1381 if (!arch_irq_disabled_regs(regs)) 1382 local_irq_enable(); 1383 1384 pr_err_ratelimited( 1385 "%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n", 1386 hv ? "Hypervisor " : "", facility, regs->nip, regs->msr); 1387 1388 if (user_mode(regs)) { 1389 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1390 return; 1391 } 1392 1393 die("Unexpected facility unavailable exception", regs, SIGABRT); 1394 } 1395 #endif 1396 1397 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1398 1399 void fp_unavailable_tm(struct pt_regs *regs) 1400 { 1401 /* Note: This does not handle any kind of FP laziness. */ 1402 1403 TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n", 1404 regs->nip, regs->msr); 1405 1406 /* We can only have got here if the task started using FP after 1407 * beginning the transaction. So, the transactional regs are just a 1408 * copy of the checkpointed ones. But, we still need to recheckpoint 1409 * as we're enabling FP for the process; it will return, abort the 1410 * transaction, and probably retry but now with FP enabled. So the 1411 * checkpointed FP registers need to be loaded. 1412 */ 1413 tm_reclaim_current(TM_CAUSE_FAC_UNAV); 1414 /* Reclaim didn't save out any FPRs to transact_fprs. */ 1415 1416 /* Enable FP for the task: */ 1417 regs->msr |= (MSR_FP | current->thread.fpexc_mode); 1418 1419 /* This loads and recheckpoints the FP registers from 1420 * thread.fpr[]. They will remain in registers after the 1421 * checkpoint so we don't need to reload them after. 1422 * If VMX is in use, the VRs now hold checkpointed values, 1423 * so we don't want to load the VRs from the thread_struct. 1424 */ 1425 tm_recheckpoint(¤t->thread, MSR_FP); 1426 1427 /* If VMX is in use, get the transactional values back */ 1428 if (regs->msr & MSR_VEC) { 1429 do_load_up_transact_altivec(¤t->thread); 1430 /* At this point all the VSX state is loaded, so enable it */ 1431 regs->msr |= MSR_VSX; 1432 } 1433 } 1434 1435 void altivec_unavailable_tm(struct pt_regs *regs) 1436 { 1437 /* See the comments in fp_unavailable_tm(). This function operates 1438 * the same way. 1439 */ 1440 1441 TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx," 1442 "MSR=%lx\n", 1443 regs->nip, regs->msr); 1444 tm_reclaim_current(TM_CAUSE_FAC_UNAV); 1445 regs->msr |= MSR_VEC; 1446 tm_recheckpoint(¤t->thread, MSR_VEC); 1447 current->thread.used_vr = 1; 1448 1449 if (regs->msr & MSR_FP) { 1450 do_load_up_transact_fpu(¤t->thread); 1451 regs->msr |= MSR_VSX; 1452 } 1453 } 1454 1455 void vsx_unavailable_tm(struct pt_regs *regs) 1456 { 1457 unsigned long orig_msr = regs->msr; 1458 1459 /* See the comments in fp_unavailable_tm(). This works similarly, 1460 * though we're loading both FP and VEC registers in here. 1461 * 1462 * If FP isn't in use, load FP regs. If VEC isn't in use, load VEC 1463 * regs. Either way, set MSR_VSX. 1464 */ 1465 1466 TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx," 1467 "MSR=%lx\n", 1468 regs->nip, regs->msr); 1469 1470 current->thread.used_vsr = 1; 1471 1472 /* If FP and VMX are already loaded, we have all the state we need */ 1473 if ((orig_msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) { 1474 regs->msr |= MSR_VSX; 1475 return; 1476 } 1477 1478 /* This reclaims FP and/or VR regs if they're already enabled */ 1479 tm_reclaim_current(TM_CAUSE_FAC_UNAV); 1480 1481 regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode | 1482 MSR_VSX; 1483 1484 /* This loads & recheckpoints FP and VRs; but we have 1485 * to be sure not to overwrite previously-valid state. 1486 */ 1487 tm_recheckpoint(¤t->thread, regs->msr & ~orig_msr); 1488 1489 if (orig_msr & MSR_FP) 1490 do_load_up_transact_fpu(¤t->thread); 1491 if (orig_msr & MSR_VEC) 1492 do_load_up_transact_altivec(¤t->thread); 1493 } 1494 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 1495 1496 void performance_monitor_exception(struct pt_regs *regs) 1497 { 1498 __get_cpu_var(irq_stat).pmu_irqs++; 1499 1500 perf_irq(regs); 1501 } 1502 1503 #ifdef CONFIG_8xx 1504 void SoftwareEmulation(struct pt_regs *regs) 1505 { 1506 CHECK_FULL_REGS(regs); 1507 1508 if (!user_mode(regs)) { 1509 debugger(regs); 1510 die("Kernel Mode Unimplemented Instruction or SW FPU Emulation", 1511 regs, SIGFPE); 1512 } 1513 1514 if (!emulate_math(regs)) 1515 return; 1516 1517 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1518 } 1519 #endif /* CONFIG_8xx */ 1520 1521 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 1522 static void handle_debug(struct pt_regs *regs, unsigned long debug_status) 1523 { 1524 int changed = 0; 1525 /* 1526 * Determine the cause of the debug event, clear the 1527 * event flags and send a trap to the handler. Torez 1528 */ 1529 if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) { 1530 dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W); 1531 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE 1532 current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE; 1533 #endif 1534 do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT, 1535 5); 1536 changed |= 0x01; 1537 } else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) { 1538 dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W); 1539 do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT, 1540 6); 1541 changed |= 0x01; 1542 } else if (debug_status & DBSR_IAC1) { 1543 current->thread.debug.dbcr0 &= ~DBCR0_IAC1; 1544 dbcr_iac_range(current) &= ~DBCR_IAC12MODE; 1545 do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT, 1546 1); 1547 changed |= 0x01; 1548 } else if (debug_status & DBSR_IAC2) { 1549 current->thread.debug.dbcr0 &= ~DBCR0_IAC2; 1550 do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT, 1551 2); 1552 changed |= 0x01; 1553 } else if (debug_status & DBSR_IAC3) { 1554 current->thread.debug.dbcr0 &= ~DBCR0_IAC3; 1555 dbcr_iac_range(current) &= ~DBCR_IAC34MODE; 1556 do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT, 1557 3); 1558 changed |= 0x01; 1559 } else if (debug_status & DBSR_IAC4) { 1560 current->thread.debug.dbcr0 &= ~DBCR0_IAC4; 1561 do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT, 1562 4); 1563 changed |= 0x01; 1564 } 1565 /* 1566 * At the point this routine was called, the MSR(DE) was turned off. 1567 * Check all other debug flags and see if that bit needs to be turned 1568 * back on or not. 1569 */ 1570 if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0, 1571 current->thread.debug.dbcr1)) 1572 regs->msr |= MSR_DE; 1573 else 1574 /* Make sure the IDM flag is off */ 1575 current->thread.debug.dbcr0 &= ~DBCR0_IDM; 1576 1577 if (changed & 0x01) 1578 mtspr(SPRN_DBCR0, current->thread.debug.dbcr0); 1579 } 1580 1581 void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status) 1582 { 1583 current->thread.debug.dbsr = debug_status; 1584 1585 /* Hack alert: On BookE, Branch Taken stops on the branch itself, while 1586 * on server, it stops on the target of the branch. In order to simulate 1587 * the server behaviour, we thus restart right away with a single step 1588 * instead of stopping here when hitting a BT 1589 */ 1590 if (debug_status & DBSR_BT) { 1591 regs->msr &= ~MSR_DE; 1592 1593 /* Disable BT */ 1594 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT); 1595 /* Clear the BT event */ 1596 mtspr(SPRN_DBSR, DBSR_BT); 1597 1598 /* Do the single step trick only when coming from userspace */ 1599 if (user_mode(regs)) { 1600 current->thread.debug.dbcr0 &= ~DBCR0_BT; 1601 current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC; 1602 regs->msr |= MSR_DE; 1603 return; 1604 } 1605 1606 if (notify_die(DIE_SSTEP, "block_step", regs, 5, 1607 5, SIGTRAP) == NOTIFY_STOP) { 1608 return; 1609 } 1610 if (debugger_sstep(regs)) 1611 return; 1612 } else if (debug_status & DBSR_IC) { /* Instruction complete */ 1613 regs->msr &= ~MSR_DE; 1614 1615 /* Disable instruction completion */ 1616 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC); 1617 /* Clear the instruction completion event */ 1618 mtspr(SPRN_DBSR, DBSR_IC); 1619 1620 if (notify_die(DIE_SSTEP, "single_step", regs, 5, 1621 5, SIGTRAP) == NOTIFY_STOP) { 1622 return; 1623 } 1624 1625 if (debugger_sstep(regs)) 1626 return; 1627 1628 if (user_mode(regs)) { 1629 current->thread.debug.dbcr0 &= ~DBCR0_IC; 1630 if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0, 1631 current->thread.debug.dbcr1)) 1632 regs->msr |= MSR_DE; 1633 else 1634 /* Make sure the IDM bit is off */ 1635 current->thread.debug.dbcr0 &= ~DBCR0_IDM; 1636 } 1637 1638 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); 1639 } else 1640 handle_debug(regs, debug_status); 1641 } 1642 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ 1643 1644 #if !defined(CONFIG_TAU_INT) 1645 void TAUException(struct pt_regs *regs) 1646 { 1647 printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n", 1648 regs->nip, regs->msr, regs->trap, print_tainted()); 1649 } 1650 #endif /* CONFIG_INT_TAU */ 1651 1652 #ifdef CONFIG_ALTIVEC 1653 void altivec_assist_exception(struct pt_regs *regs) 1654 { 1655 int err; 1656 1657 if (!user_mode(regs)) { 1658 printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode" 1659 " at %lx\n", regs->nip); 1660 die("Kernel VMX/Altivec assist exception", regs, SIGILL); 1661 } 1662 1663 flush_altivec_to_thread(current); 1664 1665 PPC_WARN_EMULATED(altivec, regs); 1666 err = emulate_altivec(regs); 1667 if (err == 0) { 1668 regs->nip += 4; /* skip emulated instruction */ 1669 emulate_single_step(regs); 1670 return; 1671 } 1672 1673 if (err == -EFAULT) { 1674 /* got an error reading the instruction */ 1675 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); 1676 } else { 1677 /* didn't recognize the instruction */ 1678 /* XXX quick hack for now: set the non-Java bit in the VSCR */ 1679 printk_ratelimited(KERN_ERR "Unrecognized altivec instruction " 1680 "in %s at %lx\n", current->comm, regs->nip); 1681 current->thread.vr_state.vscr.u[3] |= 0x10000; 1682 } 1683 } 1684 #endif /* CONFIG_ALTIVEC */ 1685 1686 #ifdef CONFIG_VSX 1687 void vsx_assist_exception(struct pt_regs *regs) 1688 { 1689 if (!user_mode(regs)) { 1690 printk(KERN_EMERG "VSX assist exception in kernel mode" 1691 " at %lx\n", regs->nip); 1692 die("Kernel VSX assist exception", regs, SIGILL); 1693 } 1694 1695 flush_vsx_to_thread(current); 1696 printk(KERN_INFO "VSX assist not supported at %lx\n", regs->nip); 1697 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1698 } 1699 #endif /* CONFIG_VSX */ 1700 1701 #ifdef CONFIG_FSL_BOOKE 1702 void CacheLockingException(struct pt_regs *regs, unsigned long address, 1703 unsigned long error_code) 1704 { 1705 /* We treat cache locking instructions from the user 1706 * as priv ops, in the future we could try to do 1707 * something smarter 1708 */ 1709 if (error_code & (ESR_DLK|ESR_ILK)) 1710 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); 1711 return; 1712 } 1713 #endif /* CONFIG_FSL_BOOKE */ 1714 1715 #ifdef CONFIG_SPE 1716 void SPEFloatingPointException(struct pt_regs *regs) 1717 { 1718 extern int do_spe_mathemu(struct pt_regs *regs); 1719 unsigned long spefscr; 1720 int fpexc_mode; 1721 int code = 0; 1722 int err; 1723 1724 flush_spe_to_thread(current); 1725 1726 spefscr = current->thread.spefscr; 1727 fpexc_mode = current->thread.fpexc_mode; 1728 1729 if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) { 1730 code = FPE_FLTOVF; 1731 } 1732 else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) { 1733 code = FPE_FLTUND; 1734 } 1735 else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV)) 1736 code = FPE_FLTDIV; 1737 else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) { 1738 code = FPE_FLTINV; 1739 } 1740 else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES)) 1741 code = FPE_FLTRES; 1742 1743 err = do_spe_mathemu(regs); 1744 if (err == 0) { 1745 regs->nip += 4; /* skip emulated instruction */ 1746 emulate_single_step(regs); 1747 return; 1748 } 1749 1750 if (err == -EFAULT) { 1751 /* got an error reading the instruction */ 1752 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); 1753 } else if (err == -EINVAL) { 1754 /* didn't recognize the instruction */ 1755 printk(KERN_ERR "unrecognized spe instruction " 1756 "in %s at %lx\n", current->comm, regs->nip); 1757 } else { 1758 _exception(SIGFPE, regs, code, regs->nip); 1759 } 1760 1761 return; 1762 } 1763 1764 void SPEFloatingPointRoundException(struct pt_regs *regs) 1765 { 1766 extern int speround_handler(struct pt_regs *regs); 1767 int err; 1768 1769 preempt_disable(); 1770 if (regs->msr & MSR_SPE) 1771 giveup_spe(current); 1772 preempt_enable(); 1773 1774 regs->nip -= 4; 1775 err = speround_handler(regs); 1776 if (err == 0) { 1777 regs->nip += 4; /* skip emulated instruction */ 1778 emulate_single_step(regs); 1779 return; 1780 } 1781 1782 if (err == -EFAULT) { 1783 /* got an error reading the instruction */ 1784 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); 1785 } else if (err == -EINVAL) { 1786 /* didn't recognize the instruction */ 1787 printk(KERN_ERR "unrecognized spe instruction " 1788 "in %s at %lx\n", current->comm, regs->nip); 1789 } else { 1790 _exception(SIGFPE, regs, 0, regs->nip); 1791 return; 1792 } 1793 } 1794 #endif 1795 1796 /* 1797 * We enter here if we get an unrecoverable exception, that is, one 1798 * that happened at a point where the RI (recoverable interrupt) bit 1799 * in the MSR is 0. This indicates that SRR0/1 are live, and that 1800 * we therefore lost state by taking this exception. 1801 */ 1802 void unrecoverable_exception(struct pt_regs *regs) 1803 { 1804 printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n", 1805 regs->trap, regs->nip); 1806 die("Unrecoverable exception", regs, SIGABRT); 1807 } 1808 1809 #if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x) 1810 /* 1811 * Default handler for a Watchdog exception, 1812 * spins until a reboot occurs 1813 */ 1814 void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs) 1815 { 1816 /* Generic WatchdogHandler, implement your own */ 1817 mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE)); 1818 return; 1819 } 1820 1821 void WatchdogException(struct pt_regs *regs) 1822 { 1823 printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n"); 1824 WatchdogHandler(regs); 1825 } 1826 #endif 1827 1828 /* 1829 * We enter here if we discover during exception entry that we are 1830 * running in supervisor mode with a userspace value in the stack pointer. 1831 */ 1832 void kernel_bad_stack(struct pt_regs *regs) 1833 { 1834 printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n", 1835 regs->gpr[1], regs->nip); 1836 die("Bad kernel stack pointer", regs, SIGABRT); 1837 } 1838 1839 void __init trap_init(void) 1840 { 1841 } 1842 1843 1844 #ifdef CONFIG_PPC_EMULATED_STATS 1845 1846 #define WARN_EMULATED_SETUP(type) .type = { .name = #type } 1847 1848 struct ppc_emulated ppc_emulated = { 1849 #ifdef CONFIG_ALTIVEC 1850 WARN_EMULATED_SETUP(altivec), 1851 #endif 1852 WARN_EMULATED_SETUP(dcba), 1853 WARN_EMULATED_SETUP(dcbz), 1854 WARN_EMULATED_SETUP(fp_pair), 1855 WARN_EMULATED_SETUP(isel), 1856 WARN_EMULATED_SETUP(mcrxr), 1857 WARN_EMULATED_SETUP(mfpvr), 1858 WARN_EMULATED_SETUP(multiple), 1859 WARN_EMULATED_SETUP(popcntb), 1860 WARN_EMULATED_SETUP(spe), 1861 WARN_EMULATED_SETUP(string), 1862 WARN_EMULATED_SETUP(sync), 1863 WARN_EMULATED_SETUP(unaligned), 1864 #ifdef CONFIG_MATH_EMULATION 1865 WARN_EMULATED_SETUP(math), 1866 #endif 1867 #ifdef CONFIG_VSX 1868 WARN_EMULATED_SETUP(vsx), 1869 #endif 1870 #ifdef CONFIG_PPC64 1871 WARN_EMULATED_SETUP(mfdscr), 1872 WARN_EMULATED_SETUP(mtdscr), 1873 WARN_EMULATED_SETUP(lq_stq), 1874 #endif 1875 }; 1876 1877 u32 ppc_warn_emulated; 1878 1879 void ppc_warn_emulated_print(const char *type) 1880 { 1881 pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm, 1882 type); 1883 } 1884 1885 static int __init ppc_warn_emulated_init(void) 1886 { 1887 struct dentry *dir, *d; 1888 unsigned int i; 1889 struct ppc_emulated_entry *entries = (void *)&ppc_emulated; 1890 1891 if (!powerpc_debugfs_root) 1892 return -ENODEV; 1893 1894 dir = debugfs_create_dir("emulated_instructions", 1895 powerpc_debugfs_root); 1896 if (!dir) 1897 return -ENOMEM; 1898 1899 d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir, 1900 &ppc_warn_emulated); 1901 if (!d) 1902 goto fail; 1903 1904 for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) { 1905 d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir, 1906 (u32 *)&entries[i].val.counter); 1907 if (!d) 1908 goto fail; 1909 } 1910 1911 return 0; 1912 1913 fail: 1914 debugfs_remove_recursive(dir); 1915 return -ENOMEM; 1916 } 1917 1918 device_initcall(ppc_warn_emulated_init); 1919 1920 #endif /* CONFIG_PPC_EMULATED_STATS */ 1921