1 /* 2 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 3 * Copyright 2007-2010 Freescale Semiconductor, Inc. 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License 7 * as published by the Free Software Foundation; either version 8 * 2 of the License, or (at your option) any later version. 9 * 10 * Modified by Cort Dougan (cort@cs.nmt.edu) 11 * and Paul Mackerras (paulus@samba.org) 12 */ 13 14 /* 15 * This file handles the architecture-dependent parts of hardware exceptions 16 */ 17 18 #include <linux/errno.h> 19 #include <linux/sched.h> 20 #include <linux/kernel.h> 21 #include <linux/mm.h> 22 #include <linux/stddef.h> 23 #include <linux/unistd.h> 24 #include <linux/ptrace.h> 25 #include <linux/user.h> 26 #include <linux/interrupt.h> 27 #include <linux/init.h> 28 #include <linux/module.h> 29 #include <linux/prctl.h> 30 #include <linux/delay.h> 31 #include <linux/kprobes.h> 32 #include <linux/kexec.h> 33 #include <linux/backlight.h> 34 #include <linux/bug.h> 35 #include <linux/kdebug.h> 36 #include <linux/debugfs.h> 37 #include <linux/ratelimit.h> 38 #include <linux/context_tracking.h> 39 40 #include <asm/emulated_ops.h> 41 #include <asm/pgtable.h> 42 #include <asm/uaccess.h> 43 #include <asm/io.h> 44 #include <asm/machdep.h> 45 #include <asm/rtas.h> 46 #include <asm/pmc.h> 47 #include <asm/reg.h> 48 #ifdef CONFIG_PMAC_BACKLIGHT 49 #include <asm/backlight.h> 50 #endif 51 #ifdef CONFIG_PPC64 52 #include <asm/firmware.h> 53 #include <asm/processor.h> 54 #include <asm/tm.h> 55 #endif 56 #include <asm/kexec.h> 57 #include <asm/ppc-opcode.h> 58 #include <asm/rio.h> 59 #include <asm/fadump.h> 60 #include <asm/switch_to.h> 61 #include <asm/tm.h> 62 #include <asm/debug.h> 63 #include <sysdev/fsl_pci.h> 64 65 #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) 66 int (*__debugger)(struct pt_regs *regs) __read_mostly; 67 int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly; 68 int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly; 69 int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly; 70 int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly; 71 int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly; 72 int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly; 73 74 EXPORT_SYMBOL(__debugger); 75 EXPORT_SYMBOL(__debugger_ipi); 76 EXPORT_SYMBOL(__debugger_bpt); 77 EXPORT_SYMBOL(__debugger_sstep); 78 EXPORT_SYMBOL(__debugger_iabr_match); 79 EXPORT_SYMBOL(__debugger_break_match); 80 EXPORT_SYMBOL(__debugger_fault_handler); 81 #endif 82 83 /* Transactional Memory trap debug */ 84 #ifdef TM_DEBUG_SW 85 #define TM_DEBUG(x...) printk(KERN_INFO x) 86 #else 87 #define TM_DEBUG(x...) do { } while(0) 88 #endif 89 90 /* 91 * Trap & Exception support 92 */ 93 94 #ifdef CONFIG_PMAC_BACKLIGHT 95 static void pmac_backlight_unblank(void) 96 { 97 mutex_lock(&pmac_backlight_mutex); 98 if (pmac_backlight) { 99 struct backlight_properties *props; 100 101 props = &pmac_backlight->props; 102 props->brightness = props->max_brightness; 103 props->power = FB_BLANK_UNBLANK; 104 backlight_update_status(pmac_backlight); 105 } 106 mutex_unlock(&pmac_backlight_mutex); 107 } 108 #else 109 static inline void pmac_backlight_unblank(void) { } 110 #endif 111 112 static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; 113 static int die_owner = -1; 114 static unsigned int die_nest_count; 115 static int die_counter; 116 117 static unsigned __kprobes long oops_begin(struct pt_regs *regs) 118 { 119 int cpu; 120 unsigned long flags; 121 122 if (debugger(regs)) 123 return 1; 124 125 oops_enter(); 126 127 /* racy, but better than risking deadlock. */ 128 raw_local_irq_save(flags); 129 cpu = smp_processor_id(); 130 if (!arch_spin_trylock(&die_lock)) { 131 if (cpu == die_owner) 132 /* nested oops. should stop eventually */; 133 else 134 arch_spin_lock(&die_lock); 135 } 136 die_nest_count++; 137 die_owner = cpu; 138 console_verbose(); 139 bust_spinlocks(1); 140 if (machine_is(powermac)) 141 pmac_backlight_unblank(); 142 return flags; 143 } 144 145 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, 146 int signr) 147 { 148 bust_spinlocks(0); 149 die_owner = -1; 150 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); 151 die_nest_count--; 152 oops_exit(); 153 printk("\n"); 154 if (!die_nest_count) 155 /* Nest count reaches zero, release the lock. */ 156 arch_spin_unlock(&die_lock); 157 raw_local_irq_restore(flags); 158 159 crash_fadump(regs, "die oops"); 160 161 /* 162 * A system reset (0x100) is a request to dump, so we always send 163 * it through the crashdump code. 164 */ 165 if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) { 166 crash_kexec(regs); 167 168 /* 169 * We aren't the primary crash CPU. We need to send it 170 * to a holding pattern to avoid it ending up in the panic 171 * code. 172 */ 173 crash_kexec_secondary(regs); 174 } 175 176 if (!signr) 177 return; 178 179 /* 180 * While our oops output is serialised by a spinlock, output 181 * from panic() called below can race and corrupt it. If we 182 * know we are going to panic, delay for 1 second so we have a 183 * chance to get clean backtraces from all CPUs that are oopsing. 184 */ 185 if (in_interrupt() || panic_on_oops || !current->pid || 186 is_global_init(current)) { 187 mdelay(MSEC_PER_SEC); 188 } 189 190 if (in_interrupt()) 191 panic("Fatal exception in interrupt"); 192 if (panic_on_oops) 193 panic("Fatal exception"); 194 do_exit(signr); 195 } 196 197 static int __kprobes __die(const char *str, struct pt_regs *regs, long err) 198 { 199 printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); 200 #ifdef CONFIG_PREEMPT 201 printk("PREEMPT "); 202 #endif 203 #ifdef CONFIG_SMP 204 printk("SMP NR_CPUS=%d ", NR_CPUS); 205 #endif 206 #ifdef CONFIG_DEBUG_PAGEALLOC 207 printk("DEBUG_PAGEALLOC "); 208 #endif 209 #ifdef CONFIG_NUMA 210 printk("NUMA "); 211 #endif 212 printk("%s\n", ppc_md.name ? ppc_md.name : ""); 213 214 if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP) 215 return 1; 216 217 print_modules(); 218 show_regs(regs); 219 220 return 0; 221 } 222 223 void die(const char *str, struct pt_regs *regs, long err) 224 { 225 unsigned long flags = oops_begin(regs); 226 227 if (__die(str, regs, err)) 228 err = 0; 229 oops_end(flags, regs, err); 230 } 231 232 void user_single_step_siginfo(struct task_struct *tsk, 233 struct pt_regs *regs, siginfo_t *info) 234 { 235 memset(info, 0, sizeof(*info)); 236 info->si_signo = SIGTRAP; 237 info->si_code = TRAP_TRACE; 238 info->si_addr = (void __user *)regs->nip; 239 } 240 241 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) 242 { 243 siginfo_t info; 244 const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \ 245 "at %08lx nip %08lx lr %08lx code %x\n"; 246 const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \ 247 "at %016lx nip %016lx lr %016lx code %x\n"; 248 249 if (!user_mode(regs)) { 250 die("Exception in kernel mode", regs, signr); 251 return; 252 } 253 254 if (show_unhandled_signals && unhandled_signal(current, signr)) { 255 printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32, 256 current->comm, current->pid, signr, 257 addr, regs->nip, regs->link, code); 258 } 259 260 if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs)) 261 local_irq_enable(); 262 263 current->thread.trap_nr = code; 264 memset(&info, 0, sizeof(info)); 265 info.si_signo = signr; 266 info.si_code = code; 267 info.si_addr = (void __user *) addr; 268 force_sig_info(signr, &info, current); 269 } 270 271 #ifdef CONFIG_PPC64 272 void system_reset_exception(struct pt_regs *regs) 273 { 274 /* See if any machine dependent calls */ 275 if (ppc_md.system_reset_exception) { 276 if (ppc_md.system_reset_exception(regs)) 277 return; 278 } 279 280 die("System Reset", regs, SIGABRT); 281 282 /* Must die if the interrupt is not recoverable */ 283 if (!(regs->msr & MSR_RI)) 284 panic("Unrecoverable System Reset"); 285 286 /* What should we do here? We could issue a shutdown or hard reset. */ 287 } 288 289 /* 290 * This function is called in real mode. Strictly no printk's please. 291 * 292 * regs->nip and regs->msr contains srr0 and ssr1. 293 */ 294 long machine_check_early(struct pt_regs *regs) 295 { 296 long handled = 0; 297 298 if (cur_cpu_spec && cur_cpu_spec->machine_check_early) 299 handled = cur_cpu_spec->machine_check_early(regs); 300 return handled; 301 } 302 303 #endif 304 305 /* 306 * I/O accesses can cause machine checks on powermacs. 307 * Check if the NIP corresponds to the address of a sync 308 * instruction for which there is an entry in the exception 309 * table. 310 * Note that the 601 only takes a machine check on TEA 311 * (transfer error ack) signal assertion, and does not 312 * set any of the top 16 bits of SRR1. 313 * -- paulus. 314 */ 315 static inline int check_io_access(struct pt_regs *regs) 316 { 317 #ifdef CONFIG_PPC32 318 unsigned long msr = regs->msr; 319 const struct exception_table_entry *entry; 320 unsigned int *nip = (unsigned int *)regs->nip; 321 322 if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000))) 323 && (entry = search_exception_tables(regs->nip)) != NULL) { 324 /* 325 * Check that it's a sync instruction, or somewhere 326 * in the twi; isync; nop sequence that inb/inw/inl uses. 327 * As the address is in the exception table 328 * we should be able to read the instr there. 329 * For the debug message, we look at the preceding 330 * load or store. 331 */ 332 if (*nip == 0x60000000) /* nop */ 333 nip -= 2; 334 else if (*nip == 0x4c00012c) /* isync */ 335 --nip; 336 if (*nip == 0x7c0004ac || (*nip >> 26) == 3) { 337 /* sync or twi */ 338 unsigned int rb; 339 340 --nip; 341 rb = (*nip >> 11) & 0x1f; 342 printk(KERN_DEBUG "%s bad port %lx at %p\n", 343 (*nip & 0x100)? "OUT to": "IN from", 344 regs->gpr[rb] - _IO_BASE, nip); 345 regs->msr |= MSR_RI; 346 regs->nip = entry->fixup; 347 return 1; 348 } 349 } 350 #endif /* CONFIG_PPC32 */ 351 return 0; 352 } 353 354 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 355 /* On 4xx, the reason for the machine check or program exception 356 is in the ESR. */ 357 #define get_reason(regs) ((regs)->dsisr) 358 #ifndef CONFIG_FSL_BOOKE 359 #define get_mc_reason(regs) ((regs)->dsisr) 360 #else 361 #define get_mc_reason(regs) (mfspr(SPRN_MCSR)) 362 #endif 363 #define REASON_FP ESR_FP 364 #define REASON_ILLEGAL (ESR_PIL | ESR_PUO) 365 #define REASON_PRIVILEGED ESR_PPR 366 #define REASON_TRAP ESR_PTR 367 368 /* single-step stuff */ 369 #define single_stepping(regs) (current->thread.debug.dbcr0 & DBCR0_IC) 370 #define clear_single_step(regs) (current->thread.debug.dbcr0 &= ~DBCR0_IC) 371 372 #else 373 /* On non-4xx, the reason for the machine check or program 374 exception is in the MSR. */ 375 #define get_reason(regs) ((regs)->msr) 376 #define get_mc_reason(regs) ((regs)->msr) 377 #define REASON_TM 0x200000 378 #define REASON_FP 0x100000 379 #define REASON_ILLEGAL 0x80000 380 #define REASON_PRIVILEGED 0x40000 381 #define REASON_TRAP 0x20000 382 383 #define single_stepping(regs) ((regs)->msr & MSR_SE) 384 #define clear_single_step(regs) ((regs)->msr &= ~MSR_SE) 385 #endif 386 387 #if defined(CONFIG_4xx) 388 int machine_check_4xx(struct pt_regs *regs) 389 { 390 unsigned long reason = get_mc_reason(regs); 391 392 if (reason & ESR_IMCP) { 393 printk("Instruction"); 394 mtspr(SPRN_ESR, reason & ~ESR_IMCP); 395 } else 396 printk("Data"); 397 printk(" machine check in kernel mode.\n"); 398 399 return 0; 400 } 401 402 int machine_check_440A(struct pt_regs *regs) 403 { 404 unsigned long reason = get_mc_reason(regs); 405 406 printk("Machine check in kernel mode.\n"); 407 if (reason & ESR_IMCP){ 408 printk("Instruction Synchronous Machine Check exception\n"); 409 mtspr(SPRN_ESR, reason & ~ESR_IMCP); 410 } 411 else { 412 u32 mcsr = mfspr(SPRN_MCSR); 413 if (mcsr & MCSR_IB) 414 printk("Instruction Read PLB Error\n"); 415 if (mcsr & MCSR_DRB) 416 printk("Data Read PLB Error\n"); 417 if (mcsr & MCSR_DWB) 418 printk("Data Write PLB Error\n"); 419 if (mcsr & MCSR_TLBP) 420 printk("TLB Parity Error\n"); 421 if (mcsr & MCSR_ICP){ 422 flush_instruction_cache(); 423 printk("I-Cache Parity Error\n"); 424 } 425 if (mcsr & MCSR_DCSP) 426 printk("D-Cache Search Parity Error\n"); 427 if (mcsr & MCSR_DCFP) 428 printk("D-Cache Flush Parity Error\n"); 429 if (mcsr & MCSR_IMPE) 430 printk("Machine Check exception is imprecise\n"); 431 432 /* Clear MCSR */ 433 mtspr(SPRN_MCSR, mcsr); 434 } 435 return 0; 436 } 437 438 int machine_check_47x(struct pt_regs *regs) 439 { 440 unsigned long reason = get_mc_reason(regs); 441 u32 mcsr; 442 443 printk(KERN_ERR "Machine check in kernel mode.\n"); 444 if (reason & ESR_IMCP) { 445 printk(KERN_ERR 446 "Instruction Synchronous Machine Check exception\n"); 447 mtspr(SPRN_ESR, reason & ~ESR_IMCP); 448 return 0; 449 } 450 mcsr = mfspr(SPRN_MCSR); 451 if (mcsr & MCSR_IB) 452 printk(KERN_ERR "Instruction Read PLB Error\n"); 453 if (mcsr & MCSR_DRB) 454 printk(KERN_ERR "Data Read PLB Error\n"); 455 if (mcsr & MCSR_DWB) 456 printk(KERN_ERR "Data Write PLB Error\n"); 457 if (mcsr & MCSR_TLBP) 458 printk(KERN_ERR "TLB Parity Error\n"); 459 if (mcsr & MCSR_ICP) { 460 flush_instruction_cache(); 461 printk(KERN_ERR "I-Cache Parity Error\n"); 462 } 463 if (mcsr & MCSR_DCSP) 464 printk(KERN_ERR "D-Cache Search Parity Error\n"); 465 if (mcsr & PPC47x_MCSR_GPR) 466 printk(KERN_ERR "GPR Parity Error\n"); 467 if (mcsr & PPC47x_MCSR_FPR) 468 printk(KERN_ERR "FPR Parity Error\n"); 469 if (mcsr & PPC47x_MCSR_IPR) 470 printk(KERN_ERR "Machine Check exception is imprecise\n"); 471 472 /* Clear MCSR */ 473 mtspr(SPRN_MCSR, mcsr); 474 475 return 0; 476 } 477 #elif defined(CONFIG_E500) 478 int machine_check_e500mc(struct pt_regs *regs) 479 { 480 unsigned long mcsr = mfspr(SPRN_MCSR); 481 unsigned long reason = mcsr; 482 int recoverable = 1; 483 484 if (reason & MCSR_LD) { 485 recoverable = fsl_rio_mcheck_exception(regs); 486 if (recoverable == 1) 487 goto silent_out; 488 } 489 490 printk("Machine check in kernel mode.\n"); 491 printk("Caused by (from MCSR=%lx): ", reason); 492 493 if (reason & MCSR_MCP) 494 printk("Machine Check Signal\n"); 495 496 if (reason & MCSR_ICPERR) { 497 printk("Instruction Cache Parity Error\n"); 498 499 /* 500 * This is recoverable by invalidating the i-cache. 501 */ 502 mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI); 503 while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI) 504 ; 505 506 /* 507 * This will generally be accompanied by an instruction 508 * fetch error report -- only treat MCSR_IF as fatal 509 * if it wasn't due to an L1 parity error. 510 */ 511 reason &= ~MCSR_IF; 512 } 513 514 if (reason & MCSR_DCPERR_MC) { 515 printk("Data Cache Parity Error\n"); 516 517 /* 518 * In write shadow mode we auto-recover from the error, but it 519 * may still get logged and cause a machine check. We should 520 * only treat the non-write shadow case as non-recoverable. 521 */ 522 if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS)) 523 recoverable = 0; 524 } 525 526 if (reason & MCSR_L2MMU_MHIT) { 527 printk("Hit on multiple TLB entries\n"); 528 recoverable = 0; 529 } 530 531 if (reason & MCSR_NMI) 532 printk("Non-maskable interrupt\n"); 533 534 if (reason & MCSR_IF) { 535 printk("Instruction Fetch Error Report\n"); 536 recoverable = 0; 537 } 538 539 if (reason & MCSR_LD) { 540 printk("Load Error Report\n"); 541 recoverable = 0; 542 } 543 544 if (reason & MCSR_ST) { 545 printk("Store Error Report\n"); 546 recoverable = 0; 547 } 548 549 if (reason & MCSR_LDG) { 550 printk("Guarded Load Error Report\n"); 551 recoverable = 0; 552 } 553 554 if (reason & MCSR_TLBSYNC) 555 printk("Simultaneous tlbsync operations\n"); 556 557 if (reason & MCSR_BSL2_ERR) { 558 printk("Level 2 Cache Error\n"); 559 recoverable = 0; 560 } 561 562 if (reason & MCSR_MAV) { 563 u64 addr; 564 565 addr = mfspr(SPRN_MCAR); 566 addr |= (u64)mfspr(SPRN_MCARU) << 32; 567 568 printk("Machine Check %s Address: %#llx\n", 569 reason & MCSR_MEA ? "Effective" : "Physical", addr); 570 } 571 572 silent_out: 573 mtspr(SPRN_MCSR, mcsr); 574 return mfspr(SPRN_MCSR) == 0 && recoverable; 575 } 576 577 int machine_check_e500(struct pt_regs *regs) 578 { 579 unsigned long reason = get_mc_reason(regs); 580 581 if (reason & MCSR_BUS_RBERR) { 582 if (fsl_rio_mcheck_exception(regs)) 583 return 1; 584 if (fsl_pci_mcheck_exception(regs)) 585 return 1; 586 } 587 588 printk("Machine check in kernel mode.\n"); 589 printk("Caused by (from MCSR=%lx): ", reason); 590 591 if (reason & MCSR_MCP) 592 printk("Machine Check Signal\n"); 593 if (reason & MCSR_ICPERR) 594 printk("Instruction Cache Parity Error\n"); 595 if (reason & MCSR_DCP_PERR) 596 printk("Data Cache Push Parity Error\n"); 597 if (reason & MCSR_DCPERR) 598 printk("Data Cache Parity Error\n"); 599 if (reason & MCSR_BUS_IAERR) 600 printk("Bus - Instruction Address Error\n"); 601 if (reason & MCSR_BUS_RAERR) 602 printk("Bus - Read Address Error\n"); 603 if (reason & MCSR_BUS_WAERR) 604 printk("Bus - Write Address Error\n"); 605 if (reason & MCSR_BUS_IBERR) 606 printk("Bus - Instruction Data Error\n"); 607 if (reason & MCSR_BUS_RBERR) 608 printk("Bus - Read Data Bus Error\n"); 609 if (reason & MCSR_BUS_WBERR) 610 printk("Bus - Read Data Bus Error\n"); 611 if (reason & MCSR_BUS_IPERR) 612 printk("Bus - Instruction Parity Error\n"); 613 if (reason & MCSR_BUS_RPERR) 614 printk("Bus - Read Parity Error\n"); 615 616 return 0; 617 } 618 619 int machine_check_generic(struct pt_regs *regs) 620 { 621 return 0; 622 } 623 #elif defined(CONFIG_E200) 624 int machine_check_e200(struct pt_regs *regs) 625 { 626 unsigned long reason = get_mc_reason(regs); 627 628 printk("Machine check in kernel mode.\n"); 629 printk("Caused by (from MCSR=%lx): ", reason); 630 631 if (reason & MCSR_MCP) 632 printk("Machine Check Signal\n"); 633 if (reason & MCSR_CP_PERR) 634 printk("Cache Push Parity Error\n"); 635 if (reason & MCSR_CPERR) 636 printk("Cache Parity Error\n"); 637 if (reason & MCSR_EXCP_ERR) 638 printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n"); 639 if (reason & MCSR_BUS_IRERR) 640 printk("Bus - Read Bus Error on instruction fetch\n"); 641 if (reason & MCSR_BUS_DRERR) 642 printk("Bus - Read Bus Error on data load\n"); 643 if (reason & MCSR_BUS_WRERR) 644 printk("Bus - Write Bus Error on buffered store or cache line push\n"); 645 646 return 0; 647 } 648 #else 649 int machine_check_generic(struct pt_regs *regs) 650 { 651 unsigned long reason = get_mc_reason(regs); 652 653 printk("Machine check in kernel mode.\n"); 654 printk("Caused by (from SRR1=%lx): ", reason); 655 switch (reason & 0x601F0000) { 656 case 0x80000: 657 printk("Machine check signal\n"); 658 break; 659 case 0: /* for 601 */ 660 case 0x40000: 661 case 0x140000: /* 7450 MSS error and TEA */ 662 printk("Transfer error ack signal\n"); 663 break; 664 case 0x20000: 665 printk("Data parity error signal\n"); 666 break; 667 case 0x10000: 668 printk("Address parity error signal\n"); 669 break; 670 case 0x20000000: 671 printk("L1 Data Cache error\n"); 672 break; 673 case 0x40000000: 674 printk("L1 Instruction Cache error\n"); 675 break; 676 case 0x00100000: 677 printk("L2 data cache parity error\n"); 678 break; 679 default: 680 printk("Unknown values in msr\n"); 681 } 682 return 0; 683 } 684 #endif /* everything else */ 685 686 void machine_check_exception(struct pt_regs *regs) 687 { 688 enum ctx_state prev_state = exception_enter(); 689 int recover = 0; 690 691 __get_cpu_var(irq_stat).mce_exceptions++; 692 693 /* See if any machine dependent calls. In theory, we would want 694 * to call the CPU first, and call the ppc_md. one if the CPU 695 * one returns a positive number. However there is existing code 696 * that assumes the board gets a first chance, so let's keep it 697 * that way for now and fix things later. --BenH. 698 */ 699 if (ppc_md.machine_check_exception) 700 recover = ppc_md.machine_check_exception(regs); 701 else if (cur_cpu_spec->machine_check) 702 recover = cur_cpu_spec->machine_check(regs); 703 704 if (recover > 0) 705 goto bail; 706 707 #if defined(CONFIG_8xx) && defined(CONFIG_PCI) 708 /* the qspan pci read routines can cause machine checks -- Cort 709 * 710 * yuck !!! that totally needs to go away ! There are better ways 711 * to deal with that than having a wart in the mcheck handler. 712 * -- BenH 713 */ 714 bad_page_fault(regs, regs->dar, SIGBUS); 715 goto bail; 716 #endif 717 718 if (debugger_fault_handler(regs)) 719 goto bail; 720 721 if (check_io_access(regs)) 722 goto bail; 723 724 die("Machine check", regs, SIGBUS); 725 726 /* Must die if the interrupt is not recoverable */ 727 if (!(regs->msr & MSR_RI)) 728 panic("Unrecoverable Machine check"); 729 730 bail: 731 exception_exit(prev_state); 732 } 733 734 void SMIException(struct pt_regs *regs) 735 { 736 die("System Management Interrupt", regs, SIGABRT); 737 } 738 739 void unknown_exception(struct pt_regs *regs) 740 { 741 enum ctx_state prev_state = exception_enter(); 742 743 printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", 744 regs->nip, regs->msr, regs->trap); 745 746 _exception(SIGTRAP, regs, 0, 0); 747 748 exception_exit(prev_state); 749 } 750 751 void instruction_breakpoint_exception(struct pt_regs *regs) 752 { 753 enum ctx_state prev_state = exception_enter(); 754 755 if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5, 756 5, SIGTRAP) == NOTIFY_STOP) 757 goto bail; 758 if (debugger_iabr_match(regs)) 759 goto bail; 760 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); 761 762 bail: 763 exception_exit(prev_state); 764 } 765 766 void RunModeException(struct pt_regs *regs) 767 { 768 _exception(SIGTRAP, regs, 0, 0); 769 } 770 771 void __kprobes single_step_exception(struct pt_regs *regs) 772 { 773 enum ctx_state prev_state = exception_enter(); 774 775 clear_single_step(regs); 776 777 if (notify_die(DIE_SSTEP, "single_step", regs, 5, 778 5, SIGTRAP) == NOTIFY_STOP) 779 goto bail; 780 if (debugger_sstep(regs)) 781 goto bail; 782 783 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); 784 785 bail: 786 exception_exit(prev_state); 787 } 788 789 /* 790 * After we have successfully emulated an instruction, we have to 791 * check if the instruction was being single-stepped, and if so, 792 * pretend we got a single-step exception. This was pointed out 793 * by Kumar Gala. -- paulus 794 */ 795 static void emulate_single_step(struct pt_regs *regs) 796 { 797 if (single_stepping(regs)) 798 single_step_exception(regs); 799 } 800 801 static inline int __parse_fpscr(unsigned long fpscr) 802 { 803 int ret = 0; 804 805 /* Invalid operation */ 806 if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX)) 807 ret = FPE_FLTINV; 808 809 /* Overflow */ 810 else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX)) 811 ret = FPE_FLTOVF; 812 813 /* Underflow */ 814 else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX)) 815 ret = FPE_FLTUND; 816 817 /* Divide by zero */ 818 else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX)) 819 ret = FPE_FLTDIV; 820 821 /* Inexact result */ 822 else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX)) 823 ret = FPE_FLTRES; 824 825 return ret; 826 } 827 828 static void parse_fpe(struct pt_regs *regs) 829 { 830 int code = 0; 831 832 flush_fp_to_thread(current); 833 834 code = __parse_fpscr(current->thread.fp_state.fpscr); 835 836 _exception(SIGFPE, regs, code, regs->nip); 837 } 838 839 /* 840 * Illegal instruction emulation support. Originally written to 841 * provide the PVR to user applications using the mfspr rd, PVR. 842 * Return non-zero if we can't emulate, or -EFAULT if the associated 843 * memory access caused an access fault. Return zero on success. 844 * 845 * There are a couple of ways to do this, either "decode" the instruction 846 * or directly match lots of bits. In this case, matching lots of 847 * bits is faster and easier. 848 * 849 */ 850 static int emulate_string_inst(struct pt_regs *regs, u32 instword) 851 { 852 u8 rT = (instword >> 21) & 0x1f; 853 u8 rA = (instword >> 16) & 0x1f; 854 u8 NB_RB = (instword >> 11) & 0x1f; 855 u32 num_bytes; 856 unsigned long EA; 857 int pos = 0; 858 859 /* Early out if we are an invalid form of lswx */ 860 if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX) 861 if ((rT == rA) || (rT == NB_RB)) 862 return -EINVAL; 863 864 EA = (rA == 0) ? 0 : regs->gpr[rA]; 865 866 switch (instword & PPC_INST_STRING_MASK) { 867 case PPC_INST_LSWX: 868 case PPC_INST_STSWX: 869 EA += NB_RB; 870 num_bytes = regs->xer & 0x7f; 871 break; 872 case PPC_INST_LSWI: 873 case PPC_INST_STSWI: 874 num_bytes = (NB_RB == 0) ? 32 : NB_RB; 875 break; 876 default: 877 return -EINVAL; 878 } 879 880 while (num_bytes != 0) 881 { 882 u8 val; 883 u32 shift = 8 * (3 - (pos & 0x3)); 884 885 /* if process is 32-bit, clear upper 32 bits of EA */ 886 if ((regs->msr & MSR_64BIT) == 0) 887 EA &= 0xFFFFFFFF; 888 889 switch ((instword & PPC_INST_STRING_MASK)) { 890 case PPC_INST_LSWX: 891 case PPC_INST_LSWI: 892 if (get_user(val, (u8 __user *)EA)) 893 return -EFAULT; 894 /* first time updating this reg, 895 * zero it out */ 896 if (pos == 0) 897 regs->gpr[rT] = 0; 898 regs->gpr[rT] |= val << shift; 899 break; 900 case PPC_INST_STSWI: 901 case PPC_INST_STSWX: 902 val = regs->gpr[rT] >> shift; 903 if (put_user(val, (u8 __user *)EA)) 904 return -EFAULT; 905 break; 906 } 907 /* move EA to next address */ 908 EA += 1; 909 num_bytes--; 910 911 /* manage our position within the register */ 912 if (++pos == 4) { 913 pos = 0; 914 if (++rT == 32) 915 rT = 0; 916 } 917 } 918 919 return 0; 920 } 921 922 static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword) 923 { 924 u32 ra,rs; 925 unsigned long tmp; 926 927 ra = (instword >> 16) & 0x1f; 928 rs = (instword >> 21) & 0x1f; 929 930 tmp = regs->gpr[rs]; 931 tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL); 932 tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL); 933 tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL; 934 regs->gpr[ra] = tmp; 935 936 return 0; 937 } 938 939 static int emulate_isel(struct pt_regs *regs, u32 instword) 940 { 941 u8 rT = (instword >> 21) & 0x1f; 942 u8 rA = (instword >> 16) & 0x1f; 943 u8 rB = (instword >> 11) & 0x1f; 944 u8 BC = (instword >> 6) & 0x1f; 945 u8 bit; 946 unsigned long tmp; 947 948 tmp = (rA == 0) ? 0 : regs->gpr[rA]; 949 bit = (regs->ccr >> (31 - BC)) & 0x1; 950 951 regs->gpr[rT] = bit ? tmp : regs->gpr[rB]; 952 953 return 0; 954 } 955 956 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 957 static inline bool tm_abort_check(struct pt_regs *regs, int cause) 958 { 959 /* If we're emulating a load/store in an active transaction, we cannot 960 * emulate it as the kernel operates in transaction suspended context. 961 * We need to abort the transaction. This creates a persistent TM 962 * abort so tell the user what caused it with a new code. 963 */ 964 if (MSR_TM_TRANSACTIONAL(regs->msr)) { 965 tm_enable(); 966 tm_abort(cause); 967 return true; 968 } 969 return false; 970 } 971 #else 972 static inline bool tm_abort_check(struct pt_regs *regs, int reason) 973 { 974 return false; 975 } 976 #endif 977 978 static int emulate_instruction(struct pt_regs *regs) 979 { 980 u32 instword; 981 u32 rd; 982 983 if (!user_mode(regs)) 984 return -EINVAL; 985 CHECK_FULL_REGS(regs); 986 987 if (get_user(instword, (u32 __user *)(regs->nip))) 988 return -EFAULT; 989 990 /* Emulate the mfspr rD, PVR. */ 991 if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) { 992 PPC_WARN_EMULATED(mfpvr, regs); 993 rd = (instword >> 21) & 0x1f; 994 regs->gpr[rd] = mfspr(SPRN_PVR); 995 return 0; 996 } 997 998 /* Emulating the dcba insn is just a no-op. */ 999 if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) { 1000 PPC_WARN_EMULATED(dcba, regs); 1001 return 0; 1002 } 1003 1004 /* Emulate the mcrxr insn. */ 1005 if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) { 1006 int shift = (instword >> 21) & 0x1c; 1007 unsigned long msk = 0xf0000000UL >> shift; 1008 1009 PPC_WARN_EMULATED(mcrxr, regs); 1010 regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk); 1011 regs->xer &= ~0xf0000000UL; 1012 return 0; 1013 } 1014 1015 /* Emulate load/store string insn. */ 1016 if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) { 1017 if (tm_abort_check(regs, 1018 TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT)) 1019 return -EINVAL; 1020 PPC_WARN_EMULATED(string, regs); 1021 return emulate_string_inst(regs, instword); 1022 } 1023 1024 /* Emulate the popcntb (Population Count Bytes) instruction. */ 1025 if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) { 1026 PPC_WARN_EMULATED(popcntb, regs); 1027 return emulate_popcntb_inst(regs, instword); 1028 } 1029 1030 /* Emulate isel (Integer Select) instruction */ 1031 if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) { 1032 PPC_WARN_EMULATED(isel, regs); 1033 return emulate_isel(regs, instword); 1034 } 1035 1036 /* Emulate sync instruction variants */ 1037 if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) { 1038 PPC_WARN_EMULATED(sync, regs); 1039 asm volatile("sync"); 1040 return 0; 1041 } 1042 1043 #ifdef CONFIG_PPC64 1044 /* Emulate the mfspr rD, DSCR. */ 1045 if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) == 1046 PPC_INST_MFSPR_DSCR_USER) || 1047 ((instword & PPC_INST_MFSPR_DSCR_MASK) == 1048 PPC_INST_MFSPR_DSCR)) && 1049 cpu_has_feature(CPU_FTR_DSCR)) { 1050 PPC_WARN_EMULATED(mfdscr, regs); 1051 rd = (instword >> 21) & 0x1f; 1052 regs->gpr[rd] = mfspr(SPRN_DSCR); 1053 return 0; 1054 } 1055 /* Emulate the mtspr DSCR, rD. */ 1056 if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) == 1057 PPC_INST_MTSPR_DSCR_USER) || 1058 ((instword & PPC_INST_MTSPR_DSCR_MASK) == 1059 PPC_INST_MTSPR_DSCR)) && 1060 cpu_has_feature(CPU_FTR_DSCR)) { 1061 PPC_WARN_EMULATED(mtdscr, regs); 1062 rd = (instword >> 21) & 0x1f; 1063 current->thread.dscr = regs->gpr[rd]; 1064 current->thread.dscr_inherit = 1; 1065 mtspr(SPRN_DSCR, current->thread.dscr); 1066 return 0; 1067 } 1068 #endif 1069 1070 return -EINVAL; 1071 } 1072 1073 int is_valid_bugaddr(unsigned long addr) 1074 { 1075 return is_kernel_addr(addr); 1076 } 1077 1078 #ifdef CONFIG_MATH_EMULATION 1079 static int emulate_math(struct pt_regs *regs) 1080 { 1081 int ret; 1082 extern int do_mathemu(struct pt_regs *regs); 1083 1084 ret = do_mathemu(regs); 1085 if (ret >= 0) 1086 PPC_WARN_EMULATED(math, regs); 1087 1088 switch (ret) { 1089 case 0: 1090 emulate_single_step(regs); 1091 return 0; 1092 case 1: { 1093 int code = 0; 1094 code = __parse_fpscr(current->thread.fp_state.fpscr); 1095 _exception(SIGFPE, regs, code, regs->nip); 1096 return 0; 1097 } 1098 case -EFAULT: 1099 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 1100 return 0; 1101 } 1102 1103 return -1; 1104 } 1105 #else 1106 static inline int emulate_math(struct pt_regs *regs) { return -1; } 1107 #endif 1108 1109 void __kprobes program_check_exception(struct pt_regs *regs) 1110 { 1111 enum ctx_state prev_state = exception_enter(); 1112 unsigned int reason = get_reason(regs); 1113 1114 /* We can now get here via a FP Unavailable exception if the core 1115 * has no FPU, in that case the reason flags will be 0 */ 1116 1117 if (reason & REASON_FP) { 1118 /* IEEE FP exception */ 1119 parse_fpe(regs); 1120 goto bail; 1121 } 1122 if (reason & REASON_TRAP) { 1123 /* Debugger is first in line to stop recursive faults in 1124 * rcu_lock, notify_die, or atomic_notifier_call_chain */ 1125 if (debugger_bpt(regs)) 1126 goto bail; 1127 1128 /* trap exception */ 1129 if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP) 1130 == NOTIFY_STOP) 1131 goto bail; 1132 1133 if (!(regs->msr & MSR_PR) && /* not user-mode */ 1134 report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { 1135 regs->nip += 4; 1136 goto bail; 1137 } 1138 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); 1139 goto bail; 1140 } 1141 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1142 if (reason & REASON_TM) { 1143 /* This is a TM "Bad Thing Exception" program check. 1144 * This occurs when: 1145 * - An rfid/hrfid/mtmsrd attempts to cause an illegal 1146 * transition in TM states. 1147 * - A trechkpt is attempted when transactional. 1148 * - A treclaim is attempted when non transactional. 1149 * - A tend is illegally attempted. 1150 * - writing a TM SPR when transactional. 1151 */ 1152 if (!user_mode(regs) && 1153 report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { 1154 regs->nip += 4; 1155 goto bail; 1156 } 1157 /* If usermode caused this, it's done something illegal and 1158 * gets a SIGILL slap on the wrist. We call it an illegal 1159 * operand to distinguish from the instruction just being bad 1160 * (e.g. executing a 'tend' on a CPU without TM!); it's an 1161 * illegal /placement/ of a valid instruction. 1162 */ 1163 if (user_mode(regs)) { 1164 _exception(SIGILL, regs, ILL_ILLOPN, regs->nip); 1165 goto bail; 1166 } else { 1167 printk(KERN_EMERG "Unexpected TM Bad Thing exception " 1168 "at %lx (msr 0x%x)\n", regs->nip, reason); 1169 die("Unrecoverable exception", regs, SIGABRT); 1170 } 1171 } 1172 #endif 1173 1174 /* 1175 * If we took the program check in the kernel skip down to sending a 1176 * SIGILL. The subsequent cases all relate to emulating instructions 1177 * which we should only do for userspace. We also do not want to enable 1178 * interrupts for kernel faults because that might lead to further 1179 * faults, and loose the context of the original exception. 1180 */ 1181 if (!user_mode(regs)) 1182 goto sigill; 1183 1184 /* We restore the interrupt state now */ 1185 if (!arch_irq_disabled_regs(regs)) 1186 local_irq_enable(); 1187 1188 /* (reason & REASON_ILLEGAL) would be the obvious thing here, 1189 * but there seems to be a hardware bug on the 405GP (RevD) 1190 * that means ESR is sometimes set incorrectly - either to 1191 * ESR_DST (!?) or 0. In the process of chasing this with the 1192 * hardware people - not sure if it can happen on any illegal 1193 * instruction or only on FP instructions, whether there is a 1194 * pattern to occurrences etc. -dgibson 31/Mar/2003 1195 */ 1196 if (!emulate_math(regs)) 1197 goto bail; 1198 1199 /* Try to emulate it if we should. */ 1200 if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) { 1201 switch (emulate_instruction(regs)) { 1202 case 0: 1203 regs->nip += 4; 1204 emulate_single_step(regs); 1205 goto bail; 1206 case -EFAULT: 1207 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 1208 goto bail; 1209 } 1210 } 1211 1212 sigill: 1213 if (reason & REASON_PRIVILEGED) 1214 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); 1215 else 1216 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1217 1218 bail: 1219 exception_exit(prev_state); 1220 } 1221 1222 /* 1223 * This occurs when running in hypervisor mode on POWER6 or later 1224 * and an illegal instruction is encountered. 1225 */ 1226 void __kprobes emulation_assist_interrupt(struct pt_regs *regs) 1227 { 1228 regs->msr |= REASON_ILLEGAL; 1229 program_check_exception(regs); 1230 } 1231 1232 void alignment_exception(struct pt_regs *regs) 1233 { 1234 enum ctx_state prev_state = exception_enter(); 1235 int sig, code, fixed = 0; 1236 1237 /* We restore the interrupt state now */ 1238 if (!arch_irq_disabled_regs(regs)) 1239 local_irq_enable(); 1240 1241 if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT)) 1242 goto bail; 1243 1244 /* we don't implement logging of alignment exceptions */ 1245 if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) 1246 fixed = fix_alignment(regs); 1247 1248 if (fixed == 1) { 1249 regs->nip += 4; /* skip over emulated instruction */ 1250 emulate_single_step(regs); 1251 goto bail; 1252 } 1253 1254 /* Operand address was bad */ 1255 if (fixed == -EFAULT) { 1256 sig = SIGSEGV; 1257 code = SEGV_ACCERR; 1258 } else { 1259 sig = SIGBUS; 1260 code = BUS_ADRALN; 1261 } 1262 if (user_mode(regs)) 1263 _exception(sig, regs, code, regs->dar); 1264 else 1265 bad_page_fault(regs, regs->dar, sig); 1266 1267 bail: 1268 exception_exit(prev_state); 1269 } 1270 1271 void StackOverflow(struct pt_regs *regs) 1272 { 1273 printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n", 1274 current, regs->gpr[1]); 1275 debugger(regs); 1276 show_regs(regs); 1277 panic("kernel stack overflow"); 1278 } 1279 1280 void nonrecoverable_exception(struct pt_regs *regs) 1281 { 1282 printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n", 1283 regs->nip, regs->msr); 1284 debugger(regs); 1285 die("nonrecoverable exception", regs, SIGKILL); 1286 } 1287 1288 void trace_syscall(struct pt_regs *regs) 1289 { 1290 printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n", 1291 current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0], 1292 regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted()); 1293 } 1294 1295 void kernel_fp_unavailable_exception(struct pt_regs *regs) 1296 { 1297 enum ctx_state prev_state = exception_enter(); 1298 1299 printk(KERN_EMERG "Unrecoverable FP Unavailable Exception " 1300 "%lx at %lx\n", regs->trap, regs->nip); 1301 die("Unrecoverable FP Unavailable Exception", regs, SIGABRT); 1302 1303 exception_exit(prev_state); 1304 } 1305 1306 void altivec_unavailable_exception(struct pt_regs *regs) 1307 { 1308 enum ctx_state prev_state = exception_enter(); 1309 1310 if (user_mode(regs)) { 1311 /* A user program has executed an altivec instruction, 1312 but this kernel doesn't support altivec. */ 1313 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1314 goto bail; 1315 } 1316 1317 printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception " 1318 "%lx at %lx\n", regs->trap, regs->nip); 1319 die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT); 1320 1321 bail: 1322 exception_exit(prev_state); 1323 } 1324 1325 void vsx_unavailable_exception(struct pt_regs *regs) 1326 { 1327 if (user_mode(regs)) { 1328 /* A user program has executed an vsx instruction, 1329 but this kernel doesn't support vsx. */ 1330 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1331 return; 1332 } 1333 1334 printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception " 1335 "%lx at %lx\n", regs->trap, regs->nip); 1336 die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT); 1337 } 1338 1339 #ifdef CONFIG_PPC64 1340 void facility_unavailable_exception(struct pt_regs *regs) 1341 { 1342 static char *facility_strings[] = { 1343 [FSCR_FP_LG] = "FPU", 1344 [FSCR_VECVSX_LG] = "VMX/VSX", 1345 [FSCR_DSCR_LG] = "DSCR", 1346 [FSCR_PM_LG] = "PMU SPRs", 1347 [FSCR_BHRB_LG] = "BHRB", 1348 [FSCR_TM_LG] = "TM", 1349 [FSCR_EBB_LG] = "EBB", 1350 [FSCR_TAR_LG] = "TAR", 1351 }; 1352 char *facility = "unknown"; 1353 u64 value; 1354 u8 status; 1355 bool hv; 1356 1357 hv = (regs->trap == 0xf80); 1358 if (hv) 1359 value = mfspr(SPRN_HFSCR); 1360 else 1361 value = mfspr(SPRN_FSCR); 1362 1363 status = value >> 56; 1364 if (status == FSCR_DSCR_LG) { 1365 /* User is acessing the DSCR. Set the inherit bit and allow 1366 * the user to set it directly in future by setting via the 1367 * FSCR DSCR bit. We always leave HFSCR DSCR set. 1368 */ 1369 current->thread.dscr_inherit = 1; 1370 mtspr(SPRN_FSCR, value | FSCR_DSCR); 1371 return; 1372 } 1373 1374 if ((status < ARRAY_SIZE(facility_strings)) && 1375 facility_strings[status]) 1376 facility = facility_strings[status]; 1377 1378 /* We restore the interrupt state now */ 1379 if (!arch_irq_disabled_regs(regs)) 1380 local_irq_enable(); 1381 1382 pr_err_ratelimited( 1383 "%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n", 1384 hv ? "Hypervisor " : "", facility, regs->nip, regs->msr); 1385 1386 if (user_mode(regs)) { 1387 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1388 return; 1389 } 1390 1391 die("Unexpected facility unavailable exception", regs, SIGABRT); 1392 } 1393 #endif 1394 1395 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1396 1397 void fp_unavailable_tm(struct pt_regs *regs) 1398 { 1399 /* Note: This does not handle any kind of FP laziness. */ 1400 1401 TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n", 1402 regs->nip, regs->msr); 1403 1404 /* We can only have got here if the task started using FP after 1405 * beginning the transaction. So, the transactional regs are just a 1406 * copy of the checkpointed ones. But, we still need to recheckpoint 1407 * as we're enabling FP for the process; it will return, abort the 1408 * transaction, and probably retry but now with FP enabled. So the 1409 * checkpointed FP registers need to be loaded. 1410 */ 1411 tm_reclaim_current(TM_CAUSE_FAC_UNAV); 1412 /* Reclaim didn't save out any FPRs to transact_fprs. */ 1413 1414 /* Enable FP for the task: */ 1415 regs->msr |= (MSR_FP | current->thread.fpexc_mode); 1416 1417 /* This loads and recheckpoints the FP registers from 1418 * thread.fpr[]. They will remain in registers after the 1419 * checkpoint so we don't need to reload them after. 1420 * If VMX is in use, the VRs now hold checkpointed values, 1421 * so we don't want to load the VRs from the thread_struct. 1422 */ 1423 tm_recheckpoint(¤t->thread, MSR_FP); 1424 1425 /* If VMX is in use, get the transactional values back */ 1426 if (regs->msr & MSR_VEC) { 1427 do_load_up_transact_altivec(¤t->thread); 1428 /* At this point all the VSX state is loaded, so enable it */ 1429 regs->msr |= MSR_VSX; 1430 } 1431 } 1432 1433 void altivec_unavailable_tm(struct pt_regs *regs) 1434 { 1435 /* See the comments in fp_unavailable_tm(). This function operates 1436 * the same way. 1437 */ 1438 1439 TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx," 1440 "MSR=%lx\n", 1441 regs->nip, regs->msr); 1442 tm_reclaim_current(TM_CAUSE_FAC_UNAV); 1443 regs->msr |= MSR_VEC; 1444 tm_recheckpoint(¤t->thread, MSR_VEC); 1445 current->thread.used_vr = 1; 1446 1447 if (regs->msr & MSR_FP) { 1448 do_load_up_transact_fpu(¤t->thread); 1449 regs->msr |= MSR_VSX; 1450 } 1451 } 1452 1453 void vsx_unavailable_tm(struct pt_regs *regs) 1454 { 1455 unsigned long orig_msr = regs->msr; 1456 1457 /* See the comments in fp_unavailable_tm(). This works similarly, 1458 * though we're loading both FP and VEC registers in here. 1459 * 1460 * If FP isn't in use, load FP regs. If VEC isn't in use, load VEC 1461 * regs. Either way, set MSR_VSX. 1462 */ 1463 1464 TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx," 1465 "MSR=%lx\n", 1466 regs->nip, regs->msr); 1467 1468 current->thread.used_vsr = 1; 1469 1470 /* If FP and VMX are already loaded, we have all the state we need */ 1471 if ((orig_msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) { 1472 regs->msr |= MSR_VSX; 1473 return; 1474 } 1475 1476 /* This reclaims FP and/or VR regs if they're already enabled */ 1477 tm_reclaim_current(TM_CAUSE_FAC_UNAV); 1478 1479 regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode | 1480 MSR_VSX; 1481 1482 /* This loads & recheckpoints FP and VRs; but we have 1483 * to be sure not to overwrite previously-valid state. 1484 */ 1485 tm_recheckpoint(¤t->thread, regs->msr & ~orig_msr); 1486 1487 if (orig_msr & MSR_FP) 1488 do_load_up_transact_fpu(¤t->thread); 1489 if (orig_msr & MSR_VEC) 1490 do_load_up_transact_altivec(¤t->thread); 1491 } 1492 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 1493 1494 void performance_monitor_exception(struct pt_regs *regs) 1495 { 1496 __get_cpu_var(irq_stat).pmu_irqs++; 1497 1498 perf_irq(regs); 1499 } 1500 1501 #ifdef CONFIG_8xx 1502 void SoftwareEmulation(struct pt_regs *regs) 1503 { 1504 CHECK_FULL_REGS(regs); 1505 1506 if (!user_mode(regs)) { 1507 debugger(regs); 1508 die("Kernel Mode Unimplemented Instruction or SW FPU Emulation", 1509 regs, SIGFPE); 1510 } 1511 1512 if (!emulate_math(regs)) 1513 return; 1514 1515 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1516 } 1517 #endif /* CONFIG_8xx */ 1518 1519 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 1520 static void handle_debug(struct pt_regs *regs, unsigned long debug_status) 1521 { 1522 int changed = 0; 1523 /* 1524 * Determine the cause of the debug event, clear the 1525 * event flags and send a trap to the handler. Torez 1526 */ 1527 if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) { 1528 dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W); 1529 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE 1530 current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE; 1531 #endif 1532 do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT, 1533 5); 1534 changed |= 0x01; 1535 } else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) { 1536 dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W); 1537 do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT, 1538 6); 1539 changed |= 0x01; 1540 } else if (debug_status & DBSR_IAC1) { 1541 current->thread.debug.dbcr0 &= ~DBCR0_IAC1; 1542 dbcr_iac_range(current) &= ~DBCR_IAC12MODE; 1543 do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT, 1544 1); 1545 changed |= 0x01; 1546 } else if (debug_status & DBSR_IAC2) { 1547 current->thread.debug.dbcr0 &= ~DBCR0_IAC2; 1548 do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT, 1549 2); 1550 changed |= 0x01; 1551 } else if (debug_status & DBSR_IAC3) { 1552 current->thread.debug.dbcr0 &= ~DBCR0_IAC3; 1553 dbcr_iac_range(current) &= ~DBCR_IAC34MODE; 1554 do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT, 1555 3); 1556 changed |= 0x01; 1557 } else if (debug_status & DBSR_IAC4) { 1558 current->thread.debug.dbcr0 &= ~DBCR0_IAC4; 1559 do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT, 1560 4); 1561 changed |= 0x01; 1562 } 1563 /* 1564 * At the point this routine was called, the MSR(DE) was turned off. 1565 * Check all other debug flags and see if that bit needs to be turned 1566 * back on or not. 1567 */ 1568 if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0, 1569 current->thread.debug.dbcr1)) 1570 regs->msr |= MSR_DE; 1571 else 1572 /* Make sure the IDM flag is off */ 1573 current->thread.debug.dbcr0 &= ~DBCR0_IDM; 1574 1575 if (changed & 0x01) 1576 mtspr(SPRN_DBCR0, current->thread.debug.dbcr0); 1577 } 1578 1579 void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status) 1580 { 1581 current->thread.debug.dbsr = debug_status; 1582 1583 /* Hack alert: On BookE, Branch Taken stops on the branch itself, while 1584 * on server, it stops on the target of the branch. In order to simulate 1585 * the server behaviour, we thus restart right away with a single step 1586 * instead of stopping here when hitting a BT 1587 */ 1588 if (debug_status & DBSR_BT) { 1589 regs->msr &= ~MSR_DE; 1590 1591 /* Disable BT */ 1592 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT); 1593 /* Clear the BT event */ 1594 mtspr(SPRN_DBSR, DBSR_BT); 1595 1596 /* Do the single step trick only when coming from userspace */ 1597 if (user_mode(regs)) { 1598 current->thread.debug.dbcr0 &= ~DBCR0_BT; 1599 current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC; 1600 regs->msr |= MSR_DE; 1601 return; 1602 } 1603 1604 if (notify_die(DIE_SSTEP, "block_step", regs, 5, 1605 5, SIGTRAP) == NOTIFY_STOP) { 1606 return; 1607 } 1608 if (debugger_sstep(regs)) 1609 return; 1610 } else if (debug_status & DBSR_IC) { /* Instruction complete */ 1611 regs->msr &= ~MSR_DE; 1612 1613 /* Disable instruction completion */ 1614 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC); 1615 /* Clear the instruction completion event */ 1616 mtspr(SPRN_DBSR, DBSR_IC); 1617 1618 if (notify_die(DIE_SSTEP, "single_step", regs, 5, 1619 5, SIGTRAP) == NOTIFY_STOP) { 1620 return; 1621 } 1622 1623 if (debugger_sstep(regs)) 1624 return; 1625 1626 if (user_mode(regs)) { 1627 current->thread.debug.dbcr0 &= ~DBCR0_IC; 1628 if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0, 1629 current->thread.debug.dbcr1)) 1630 regs->msr |= MSR_DE; 1631 else 1632 /* Make sure the IDM bit is off */ 1633 current->thread.debug.dbcr0 &= ~DBCR0_IDM; 1634 } 1635 1636 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); 1637 } else 1638 handle_debug(regs, debug_status); 1639 } 1640 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ 1641 1642 #if !defined(CONFIG_TAU_INT) 1643 void TAUException(struct pt_regs *regs) 1644 { 1645 printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n", 1646 regs->nip, regs->msr, regs->trap, print_tainted()); 1647 } 1648 #endif /* CONFIG_INT_TAU */ 1649 1650 #ifdef CONFIG_ALTIVEC 1651 void altivec_assist_exception(struct pt_regs *regs) 1652 { 1653 int err; 1654 1655 if (!user_mode(regs)) { 1656 printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode" 1657 " at %lx\n", regs->nip); 1658 die("Kernel VMX/Altivec assist exception", regs, SIGILL); 1659 } 1660 1661 flush_altivec_to_thread(current); 1662 1663 PPC_WARN_EMULATED(altivec, regs); 1664 err = emulate_altivec(regs); 1665 if (err == 0) { 1666 regs->nip += 4; /* skip emulated instruction */ 1667 emulate_single_step(regs); 1668 return; 1669 } 1670 1671 if (err == -EFAULT) { 1672 /* got an error reading the instruction */ 1673 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); 1674 } else { 1675 /* didn't recognize the instruction */ 1676 /* XXX quick hack for now: set the non-Java bit in the VSCR */ 1677 printk_ratelimited(KERN_ERR "Unrecognized altivec instruction " 1678 "in %s at %lx\n", current->comm, regs->nip); 1679 current->thread.vr_state.vscr.u[3] |= 0x10000; 1680 } 1681 } 1682 #endif /* CONFIG_ALTIVEC */ 1683 1684 #ifdef CONFIG_VSX 1685 void vsx_assist_exception(struct pt_regs *regs) 1686 { 1687 if (!user_mode(regs)) { 1688 printk(KERN_EMERG "VSX assist exception in kernel mode" 1689 " at %lx\n", regs->nip); 1690 die("Kernel VSX assist exception", regs, SIGILL); 1691 } 1692 1693 flush_vsx_to_thread(current); 1694 printk(KERN_INFO "VSX assist not supported at %lx\n", regs->nip); 1695 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1696 } 1697 #endif /* CONFIG_VSX */ 1698 1699 #ifdef CONFIG_FSL_BOOKE 1700 void CacheLockingException(struct pt_regs *regs, unsigned long address, 1701 unsigned long error_code) 1702 { 1703 /* We treat cache locking instructions from the user 1704 * as priv ops, in the future we could try to do 1705 * something smarter 1706 */ 1707 if (error_code & (ESR_DLK|ESR_ILK)) 1708 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); 1709 return; 1710 } 1711 #endif /* CONFIG_FSL_BOOKE */ 1712 1713 #ifdef CONFIG_SPE 1714 void SPEFloatingPointException(struct pt_regs *regs) 1715 { 1716 extern int do_spe_mathemu(struct pt_regs *regs); 1717 unsigned long spefscr; 1718 int fpexc_mode; 1719 int code = 0; 1720 int err; 1721 1722 flush_spe_to_thread(current); 1723 1724 spefscr = current->thread.spefscr; 1725 fpexc_mode = current->thread.fpexc_mode; 1726 1727 if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) { 1728 code = FPE_FLTOVF; 1729 } 1730 else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) { 1731 code = FPE_FLTUND; 1732 } 1733 else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV)) 1734 code = FPE_FLTDIV; 1735 else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) { 1736 code = FPE_FLTINV; 1737 } 1738 else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES)) 1739 code = FPE_FLTRES; 1740 1741 err = do_spe_mathemu(regs); 1742 if (err == 0) { 1743 regs->nip += 4; /* skip emulated instruction */ 1744 emulate_single_step(regs); 1745 return; 1746 } 1747 1748 if (err == -EFAULT) { 1749 /* got an error reading the instruction */ 1750 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); 1751 } else if (err == -EINVAL) { 1752 /* didn't recognize the instruction */ 1753 printk(KERN_ERR "unrecognized spe instruction " 1754 "in %s at %lx\n", current->comm, regs->nip); 1755 } else { 1756 _exception(SIGFPE, regs, code, regs->nip); 1757 } 1758 1759 return; 1760 } 1761 1762 void SPEFloatingPointRoundException(struct pt_regs *regs) 1763 { 1764 extern int speround_handler(struct pt_regs *regs); 1765 int err; 1766 1767 preempt_disable(); 1768 if (regs->msr & MSR_SPE) 1769 giveup_spe(current); 1770 preempt_enable(); 1771 1772 regs->nip -= 4; 1773 err = speround_handler(regs); 1774 if (err == 0) { 1775 regs->nip += 4; /* skip emulated instruction */ 1776 emulate_single_step(regs); 1777 return; 1778 } 1779 1780 if (err == -EFAULT) { 1781 /* got an error reading the instruction */ 1782 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); 1783 } else if (err == -EINVAL) { 1784 /* didn't recognize the instruction */ 1785 printk(KERN_ERR "unrecognized spe instruction " 1786 "in %s at %lx\n", current->comm, regs->nip); 1787 } else { 1788 _exception(SIGFPE, regs, 0, regs->nip); 1789 return; 1790 } 1791 } 1792 #endif 1793 1794 /* 1795 * We enter here if we get an unrecoverable exception, that is, one 1796 * that happened at a point where the RI (recoverable interrupt) bit 1797 * in the MSR is 0. This indicates that SRR0/1 are live, and that 1798 * we therefore lost state by taking this exception. 1799 */ 1800 void unrecoverable_exception(struct pt_regs *regs) 1801 { 1802 printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n", 1803 regs->trap, regs->nip); 1804 die("Unrecoverable exception", regs, SIGABRT); 1805 } 1806 1807 #if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x) 1808 /* 1809 * Default handler for a Watchdog exception, 1810 * spins until a reboot occurs 1811 */ 1812 void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs) 1813 { 1814 /* Generic WatchdogHandler, implement your own */ 1815 mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE)); 1816 return; 1817 } 1818 1819 void WatchdogException(struct pt_regs *regs) 1820 { 1821 printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n"); 1822 WatchdogHandler(regs); 1823 } 1824 #endif 1825 1826 /* 1827 * We enter here if we discover during exception entry that we are 1828 * running in supervisor mode with a userspace value in the stack pointer. 1829 */ 1830 void kernel_bad_stack(struct pt_regs *regs) 1831 { 1832 printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n", 1833 regs->gpr[1], regs->nip); 1834 die("Bad kernel stack pointer", regs, SIGABRT); 1835 } 1836 1837 void __init trap_init(void) 1838 { 1839 } 1840 1841 1842 #ifdef CONFIG_PPC_EMULATED_STATS 1843 1844 #define WARN_EMULATED_SETUP(type) .type = { .name = #type } 1845 1846 struct ppc_emulated ppc_emulated = { 1847 #ifdef CONFIG_ALTIVEC 1848 WARN_EMULATED_SETUP(altivec), 1849 #endif 1850 WARN_EMULATED_SETUP(dcba), 1851 WARN_EMULATED_SETUP(dcbz), 1852 WARN_EMULATED_SETUP(fp_pair), 1853 WARN_EMULATED_SETUP(isel), 1854 WARN_EMULATED_SETUP(mcrxr), 1855 WARN_EMULATED_SETUP(mfpvr), 1856 WARN_EMULATED_SETUP(multiple), 1857 WARN_EMULATED_SETUP(popcntb), 1858 WARN_EMULATED_SETUP(spe), 1859 WARN_EMULATED_SETUP(string), 1860 WARN_EMULATED_SETUP(sync), 1861 WARN_EMULATED_SETUP(unaligned), 1862 #ifdef CONFIG_MATH_EMULATION 1863 WARN_EMULATED_SETUP(math), 1864 #endif 1865 #ifdef CONFIG_VSX 1866 WARN_EMULATED_SETUP(vsx), 1867 #endif 1868 #ifdef CONFIG_PPC64 1869 WARN_EMULATED_SETUP(mfdscr), 1870 WARN_EMULATED_SETUP(mtdscr), 1871 WARN_EMULATED_SETUP(lq_stq), 1872 #endif 1873 }; 1874 1875 u32 ppc_warn_emulated; 1876 1877 void ppc_warn_emulated_print(const char *type) 1878 { 1879 pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm, 1880 type); 1881 } 1882 1883 static int __init ppc_warn_emulated_init(void) 1884 { 1885 struct dentry *dir, *d; 1886 unsigned int i; 1887 struct ppc_emulated_entry *entries = (void *)&ppc_emulated; 1888 1889 if (!powerpc_debugfs_root) 1890 return -ENODEV; 1891 1892 dir = debugfs_create_dir("emulated_instructions", 1893 powerpc_debugfs_root); 1894 if (!dir) 1895 return -ENOMEM; 1896 1897 d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir, 1898 &ppc_warn_emulated); 1899 if (!d) 1900 goto fail; 1901 1902 for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) { 1903 d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir, 1904 (u32 *)&entries[i].val.counter); 1905 if (!d) 1906 goto fail; 1907 } 1908 1909 return 0; 1910 1911 fail: 1912 debugfs_remove_recursive(dir); 1913 return -ENOMEM; 1914 } 1915 1916 device_initcall(ppc_warn_emulated_init); 1917 1918 #endif /* CONFIG_PPC_EMULATED_STATS */ 1919