/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright 2007-2010 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Modified by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras (paulus@samba.org)
 */

/*
 * This file handles the architecture-dependent parts of hardware exceptions
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/debugfs.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>

#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#include <asm/reg.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
#include <asm/tm.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/rio.h>
#include <asm/fadump.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#include <asm/asm-prototypes.h>
#include <asm/hmi.h>
#include <sysdev/fsl_pci.h>

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_break_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

/* Transactional Memory trap debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...)	printk(KERN_INFO x)
#else
#define TM_DEBUG(x...)	do { } while (0)
#endif

/*
 * Trap & Exception support
 */

#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
	mutex_lock(&pmac_backlight_mutex);
	if (pmac_backlight) {
		struct backlight_properties *props;

		props = &pmac_backlight->props;
		props->brightness = props->max_brightness;
		props->power = FB_BLANK_UNBLANK;
		backlight_update_status(pmac_backlight);
	}
	mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;

static unsigned __kprobes long oops_begin(struct pt_regs *regs)
{
	int cpu;
	unsigned long flags;

	if (debugger(regs))
		return 1;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	if (machine_is(powermac))
		pmac_backlight_unblank();
	return flags;
}

static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
			       int signr)
{
	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	oops_exit();
	printk("\n");
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);

	crash_fadump(regs, "die oops");

	/*
	 * A system reset (0x100) is a request to dump, so we always send
	 * it through the crashdump code.
	 */
	if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) {
		crash_kexec(regs);

		/*
		 * We aren't the primary crash CPU. We need to send it
		 * to a holding pattern to avoid it ending up in the panic
		 * code.
		 */
		crash_kexec_secondary(regs);
	}

	if (!signr)
		return;

	/*
	 * While our oops output is serialised by a spinlock, output
	 * from panic() called below can race and corrupt it. If we
	 * know we are going to panic, delay for 1 second so we have a
	 * chance to get clean backtraces from all CPUs that are oopsing.
	 */
	if (in_interrupt() || panic_on_oops || !current->pid ||
	    is_global_init(current)) {
		mdelay(MSEC_PER_SEC);
	}

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	do_exit(signr);
}

static int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP NR_CPUS=%d ", NR_CPUS);
#endif
	if (debug_pagealloc_enabled())
		printk("DEBUG_PAGEALLOC ");
#ifdef CONFIG_NUMA
	printk("NUMA ");
#endif
	printk("%s\n", ppc_md.name ? ppc_md.name : "");
ppc_md.name : ""); 214 215 if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP) 216 return 1; 217 218 print_modules(); 219 show_regs(regs); 220 221 return 0; 222 } 223 224 void die(const char *str, struct pt_regs *regs, long err) 225 { 226 unsigned long flags = oops_begin(regs); 227 228 if (__die(str, regs, err)) 229 err = 0; 230 oops_end(flags, regs, err); 231 } 232 233 void user_single_step_siginfo(struct task_struct *tsk, 234 struct pt_regs *regs, siginfo_t *info) 235 { 236 memset(info, 0, sizeof(*info)); 237 info->si_signo = SIGTRAP; 238 info->si_code = TRAP_TRACE; 239 info->si_addr = (void __user *)regs->nip; 240 } 241 242 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) 243 { 244 siginfo_t info; 245 const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \ 246 "at %08lx nip %08lx lr %08lx code %x\n"; 247 const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \ 248 "at %016lx nip %016lx lr %016lx code %x\n"; 249 250 if (!user_mode(regs)) { 251 die("Exception in kernel mode", regs, signr); 252 return; 253 } 254 255 if (show_unhandled_signals && unhandled_signal(current, signr)) { 256 printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32, 257 current->comm, current->pid, signr, 258 addr, regs->nip, regs->link, code); 259 } 260 261 if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs)) 262 local_irq_enable(); 263 264 current->thread.trap_nr = code; 265 memset(&info, 0, sizeof(info)); 266 info.si_signo = signr; 267 info.si_code = code; 268 info.si_addr = (void __user *) addr; 269 force_sig_info(signr, &info, current); 270 } 271 272 #ifdef CONFIG_PPC64 273 void system_reset_exception(struct pt_regs *regs) 274 { 275 /* See if any machine dependent calls */ 276 if (ppc_md.system_reset_exception) { 277 if (ppc_md.system_reset_exception(regs)) 278 return; 279 } 280 281 die("System Reset", regs, SIGABRT); 282 283 /* Must die if the interrupt is not recoverable */ 284 if (!(regs->msr & MSR_RI)) 285 panic("Unrecoverable System Reset"); 286 287 /* What should we do here? We could issue a shutdown or hard reset. */ 288 } 289 290 /* 291 * This function is called in real mode. Strictly no printk's please. 292 * 293 * regs->nip and regs->msr contains srr0 and ssr1. 294 */ 295 long machine_check_early(struct pt_regs *regs) 296 { 297 long handled = 0; 298 299 __this_cpu_inc(irq_stat.mce_exceptions); 300 301 add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); 302 303 if (cur_cpu_spec && cur_cpu_spec->machine_check_early) 304 handled = cur_cpu_spec->machine_check_early(regs); 305 return handled; 306 } 307 308 long hmi_exception_realmode(struct pt_regs *regs) 309 { 310 __this_cpu_inc(irq_stat.hmi_exceptions); 311 312 wait_for_subcore_guest_exit(); 313 314 if (ppc_md.hmi_exception_early) 315 ppc_md.hmi_exception_early(regs); 316 317 wait_for_tb_resync(); 318 319 return 0; 320 } 321 322 #endif 323 324 /* 325 * I/O accesses can cause machine checks on powermacs. 326 * Check if the NIP corresponds to the address of a sync 327 * instruction for which there is an entry in the exception 328 * table. 329 * Note that the 601 only takes a machine check on TEA 330 * (transfer error ack) signal assertion, and does not 331 * set any of the top 16 bits of SRR1. 332 * -- paulus. 
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == 0x60000000)		/* nop */
			nip -= 2;
		else if (*nip == 0x4c00012c)	/* isync */
			--nip;
		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
			/* sync or twi */
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100) ? "OUT to" : "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			regs->msr |= MSR_RI;
			regs->nip = entry->fixup;
			return 1;
		}
	}
#endif /* CONFIG_PPC32 */
	return 0;
}

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * On 4xx, the reason for the machine check or program exception
 * is in the ESR.
 */
#define get_reason(regs)	((regs)->dsisr)
#ifndef CONFIG_FSL_BOOKE
#define get_mc_reason(regs)	((regs)->dsisr)
#else
#define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
#endif
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR

/* single-step stuff */
#define single_stepping(regs)	(current->thread.debug.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.debug.dbcr0 &= ~DBCR0_IC)

#else
/*
 * On non-4xx, the reason for the machine check or program
 * exception is in the MSR.
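 *
 * (These are the same REASON_* bits that program_check_exception()
 * further down tests against get_reason(regs); on these CPUs the
 * program-interrupt status bits arrive in SRR1 and land in regs->msr.)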
 */
#define get_reason(regs)	((regs)->msr)
#define get_mc_reason(regs)	((regs)->msr)
#define REASON_TM		0x200000
#define REASON_FP		0x100000
#define REASON_ILLEGAL		0x80000
#define REASON_PRIVILEGED	0x40000
#define REASON_TRAP		0x20000

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#endif

#if defined(CONFIG_4xx)
int machine_check_4xx(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	if (reason & ESR_IMCP) {
		printk("Instruction");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	} else
		printk("Data");
	printk(" machine check in kernel mode.\n");

	return 0;
}

int machine_check_440A(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	if (reason & ESR_IMCP) {
		printk("Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	} else {
		u32 mcsr = mfspr(SPRN_MCSR);
		if (mcsr & MCSR_IB)
			printk("Instruction Read PLB Error\n");
		if (mcsr & MCSR_DRB)
			printk("Data Read PLB Error\n");
		if (mcsr & MCSR_DWB)
			printk("Data Write PLB Error\n");
		if (mcsr & MCSR_TLBP)
			printk("TLB Parity Error\n");
		if (mcsr & MCSR_ICP) {
			flush_instruction_cache();
			printk("I-Cache Parity Error\n");
		}
		if (mcsr & MCSR_DCSP)
			printk("D-Cache Search Parity Error\n");
		if (mcsr & MCSR_DCFP)
			printk("D-Cache Flush Parity Error\n");
		if (mcsr & MCSR_IMPE)
			printk("Machine Check exception is imprecise\n");

		/* Clear MCSR */
		mtspr(SPRN_MCSR, mcsr);
	}
	return 0;
}

int machine_check_47x(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);
	u32 mcsr;

	printk(KERN_ERR "Machine check in kernel mode.\n");
	if (reason & ESR_IMCP) {
		printk(KERN_ERR
		       "Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
		return 0;
	}
	mcsr = mfspr(SPRN_MCSR);
	if (mcsr & MCSR_IB)
		printk(KERN_ERR "Instruction Read PLB Error\n");
	if (mcsr & MCSR_DRB)
		printk(KERN_ERR "Data Read PLB Error\n");
	if (mcsr & MCSR_DWB)
		printk(KERN_ERR "Data Write PLB Error\n");
	if (mcsr & MCSR_TLBP)
		printk(KERN_ERR "TLB Parity Error\n");
	if (mcsr & MCSR_ICP) {
		flush_instruction_cache();
		printk(KERN_ERR "I-Cache Parity Error\n");
	}
	if (mcsr & MCSR_DCSP)
		printk(KERN_ERR "D-Cache Search Parity Error\n");
	if (mcsr & PPC47x_MCSR_GPR)
		printk(KERN_ERR "GPR Parity Error\n");
	if (mcsr & PPC47x_MCSR_FPR)
		printk(KERN_ERR "FPR Parity Error\n");
	if (mcsr & PPC47x_MCSR_IPR)
		printk(KERN_ERR "Machine Check exception is imprecise\n");

	/* Clear MCSR */
	mtspr(SPRN_MCSR, mcsr);

	return 0;
}
#elif defined(CONFIG_E500)
int machine_check_e500mc(struct pt_regs *regs)
{
	unsigned long mcsr = mfspr(SPRN_MCSR);
	unsigned long reason = mcsr;
	int recoverable = 1;

	if (reason & MCSR_LD) {
		recoverable = fsl_rio_mcheck_exception(regs);
		if (recoverable == 1)
			goto silent_out;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");

	if (reason & MCSR_ICPERR) {
		printk("Instruction Cache Parity Error\n");

		/*
		 * This is recoverable by invalidating the i-cache.
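		 *
		 * (Setting L1CSR1[ICFI] requests a flash invalidate of the
		 * whole i-cache; hardware clears the bit again once the
		 * invalidate is done, which is what the loop below spins on.)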
		 */
		mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
		while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
			;

		/*
		 * This will generally be accompanied by an instruction
		 * fetch error report -- only treat MCSR_IF as fatal
		 * if it wasn't due to an L1 parity error.
		 */
		reason &= ~MCSR_IF;
	}

	if (reason & MCSR_DCPERR_MC) {
		printk("Data Cache Parity Error\n");

		/*
		 * In write shadow mode we auto-recover from the error, but it
		 * may still get logged and cause a machine check. We should
		 * only treat the non-write shadow case as non-recoverable.
		 */
		if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
			recoverable = 0;
	}

	if (reason & MCSR_L2MMU_MHIT) {
		printk("Hit on multiple TLB entries\n");
		recoverable = 0;
	}

	if (reason & MCSR_NMI)
		printk("Non-maskable interrupt\n");

	if (reason & MCSR_IF) {
		printk("Instruction Fetch Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LD) {
		printk("Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_ST) {
		printk("Store Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LDG) {
		printk("Guarded Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_TLBSYNC)
		printk("Simultaneous tlbsync operations\n");

	if (reason & MCSR_BSL2_ERR) {
		printk("Level 2 Cache Error\n");
		recoverable = 0;
	}

	if (reason & MCSR_MAV) {
		u64 addr;

		addr = mfspr(SPRN_MCAR);
		addr |= (u64)mfspr(SPRN_MCARU) << 32;

		printk("Machine Check %s Address: %#llx\n",
		       reason & MCSR_MEA ? "Effective" : "Physical", addr);
	}

silent_out:
	mtspr(SPRN_MCSR, mcsr);
	return mfspr(SPRN_MCSR) == 0 && recoverable;
}

int machine_check_e500(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	if (reason & MCSR_BUS_RBERR) {
		if (fsl_rio_mcheck_exception(regs))
			return 1;
		if (fsl_pci_mcheck_exception(regs))
			return 1;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_ICPERR)
		printk("Instruction Cache Parity Error\n");
	if (reason & MCSR_DCP_PERR)
		printk("Data Cache Push Parity Error\n");
	if (reason & MCSR_DCPERR)
		printk("Data Cache Parity Error\n");
	if (reason & MCSR_BUS_IAERR)
		printk("Bus - Instruction Address Error\n");
	if (reason & MCSR_BUS_RAERR)
		printk("Bus - Read Address Error\n");
	if (reason & MCSR_BUS_WAERR)
		printk("Bus - Write Address Error\n");
	if (reason & MCSR_BUS_IBERR)
		printk("Bus - Instruction Data Error\n");
	if (reason & MCSR_BUS_RBERR)
		printk("Bus - Read Data Bus Error\n");
	if (reason & MCSR_BUS_WBERR)
		printk("Bus - Write Data Bus Error\n");
	if (reason & MCSR_BUS_IPERR)
		printk("Bus - Instruction Parity Error\n");
	if (reason & MCSR_BUS_RPERR)
		printk("Bus - Read Parity Error\n");

	return 0;
}

int machine_check_generic(struct pt_regs *regs)
{
	return 0;
}
#elif defined(CONFIG_E200)
int machine_check_e200(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_CP_PERR)
Error\n"); 654 if (reason & MCSR_CPERR) 655 printk("Cache Parity Error\n"); 656 if (reason & MCSR_EXCP_ERR) 657 printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n"); 658 if (reason & MCSR_BUS_IRERR) 659 printk("Bus - Read Bus Error on instruction fetch\n"); 660 if (reason & MCSR_BUS_DRERR) 661 printk("Bus - Read Bus Error on data load\n"); 662 if (reason & MCSR_BUS_WRERR) 663 printk("Bus - Write Bus Error on buffered store or cache line push\n"); 664 665 return 0; 666 } 667 #else 668 int machine_check_generic(struct pt_regs *regs) 669 { 670 unsigned long reason = get_mc_reason(regs); 671 672 printk("Machine check in kernel mode.\n"); 673 printk("Caused by (from SRR1=%lx): ", reason); 674 switch (reason & 0x601F0000) { 675 case 0x80000: 676 printk("Machine check signal\n"); 677 break; 678 case 0: /* for 601 */ 679 case 0x40000: 680 case 0x140000: /* 7450 MSS error and TEA */ 681 printk("Transfer error ack signal\n"); 682 break; 683 case 0x20000: 684 printk("Data parity error signal\n"); 685 break; 686 case 0x10000: 687 printk("Address parity error signal\n"); 688 break; 689 case 0x20000000: 690 printk("L1 Data Cache error\n"); 691 break; 692 case 0x40000000: 693 printk("L1 Instruction Cache error\n"); 694 break; 695 case 0x00100000: 696 printk("L2 data cache parity error\n"); 697 break; 698 default: 699 printk("Unknown values in msr\n"); 700 } 701 return 0; 702 } 703 #endif /* everything else */ 704 705 void machine_check_exception(struct pt_regs *regs) 706 { 707 enum ctx_state prev_state = exception_enter(); 708 int recover = 0; 709 710 __this_cpu_inc(irq_stat.mce_exceptions); 711 712 /* See if any machine dependent calls. In theory, we would want 713 * to call the CPU first, and call the ppc_md. one if the CPU 714 * one returns a positive number. However there is existing code 715 * that assumes the board gets a first chance, so let's keep it 716 * that way for now and fix things later. --BenH. 717 */ 718 if (ppc_md.machine_check_exception) 719 recover = ppc_md.machine_check_exception(regs); 720 else if (cur_cpu_spec->machine_check) 721 recover = cur_cpu_spec->machine_check(regs); 722 723 if (recover > 0) 724 goto bail; 725 726 #if defined(CONFIG_8xx) && defined(CONFIG_PCI) 727 /* the qspan pci read routines can cause machine checks -- Cort 728 * 729 * yuck !!! that totally needs to go away ! There are better ways 730 * to deal with that than having a wart in the mcheck handler. 
	 * -- BenH
	 */
	bad_page_fault(regs, regs->dar, SIGBUS);
	goto bail;
#endif

	if (debugger_fault_handler(regs))
		goto bail;

	if (check_io_access(regs))
		goto bail;

	die("Machine check", regs, SIGBUS);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable Machine check");

bail:
	exception_exit(prev_state);
}

void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}

void handle_hmi_exception(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	irq_enter();

	if (ppc_md.handle_hmi_exception)
		ppc_md.handle_hmi_exception(regs);

	irq_exit();
	set_irq_regs(old_regs);
}

void unknown_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, 0, 0);

	exception_exit(prev_state);
}

void instruction_breakpoint_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
		       5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_iabr_match(regs))
		goto bail;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);

bail:
	exception_exit(prev_state);
}

void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, 0, 0);
}

void __kprobes single_step_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	clear_single_step(regs);

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
		       5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_sstep(regs))
		goto bail;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);

bail:
	exception_exit(prev_state);
}

/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception. This was pointed out
 * by Kumar Gala. -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs))
		single_step_exception(regs);
}

static inline int __parse_fpscr(unsigned long fpscr)
{
	int ret = 0;

	/* Invalid operation */
	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
		ret = FPE_FLTINV;

	/* Overflow */
	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
		ret = FPE_FLTOVF;

	/* Underflow */
	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
		ret = FPE_FLTUND;

	/* Divide by zero */
	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
		ret = FPE_FLTDIV;

	/* Inexact result */
	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
		ret = FPE_FLTRES;

	return ret;
}

static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;

	flush_fp_to_thread(current);

	code = __parse_fpscr(current->thread.fp_state.fpscr);

	_exception(SIGFPE, regs, code, regs->nip);
}

/*
 * Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 *
 */
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & PPC_INST_STRING_MASK) {
	case PPC_INST_LSWX:
	case PPC_INST_STSWX:
		EA += NB_RB;
		num_bytes = regs->xer & 0x7f;
		break;
	case PPC_INST_LSWI:
	case PPC_INST_STSWI:
		num_bytes = (NB_RB == 0) ? 32 : NB_RB;
		break;
	default:
		return -EINVAL;
	}

	while (num_bytes != 0) {
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));

		/* if process is 32-bit, clear upper 32 bits of EA */
		if ((regs->msr & MSR_64BIT) == 0)
			EA &= 0xFFFFFFFF;

		switch ((instword & PPC_INST_STRING_MASK)) {
		case PPC_INST_LSWX:
		case PPC_INST_LSWI:
			if (get_user(val, (u8 __user *)EA))
				return -EFAULT;
			/* first time updating this reg,
			 * zero it out */
			if (pos == 0)
				regs->gpr[rT] = 0;
			regs->gpr[rT] |= val << shift;
			break;
		case PPC_INST_STSWI:
		case PPC_INST_STSWX:
			val = regs->gpr[rT] >> shift;
			if (put_user(val, (u8 __user *)EA))
				return -EFAULT;
			break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}

static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
	u32 ra, rs;
	unsigned long tmp;

	ra = (instword >> 16) & 0x1f;
	rs = (instword >> 21) & 0x1f;

	tmp = regs->gpr[rs];
	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
	regs->gpr[ra] = tmp;

	return 0;
}

static int emulate_isel(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 rB = (instword >> 11) & 0x1f;
	u8 BC = (instword >> 6) & 0x1f;
	u8 bit;
	unsigned long tmp;

	tmp = (rA == 0) ? 0 : regs->gpr[rA];
	bit = (regs->ccr >> (31 - BC)) & 0x1;

	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_abort_check(struct pt_regs *regs, int cause)
{
	/* If we're emulating a load/store in an active transaction, we cannot
	 * emulate it as the kernel operates in transaction suspended context.
	 * We need to abort the transaction. This creates a persistent TM
	 * abort so tell the user what caused it with a new code.
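	 *
	 * (Callers OR TM_CAUSE_PERSISTENT into the cause, e.g.
	 * TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT below, so userspace can
	 * see that simply retrying the transaction will not help.)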
	 */
	if (MSR_TM_TRANSACTIONAL(regs->msr)) {
		tm_enable();
		tm_abort(cause);
		return true;
	}
	return false;
}
#else
static inline bool tm_abort_check(struct pt_regs *regs, int reason)
{
	return false;
}
#endif

static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	if (!user_mode(regs))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
		PPC_WARN_EMULATED(mfpvr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op.  */
	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
		PPC_WARN_EMULATED(dcba, regs);
		return 0;
	}

	/* Emulate the mcrxr insn.  */
	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		PPC_WARN_EMULATED(mcrxr, regs);
		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
		if (tm_abort_check(regs,
				   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
			return -EINVAL;
		PPC_WARN_EMULATED(string, regs);
		return emulate_string_inst(regs, instword);
	}

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
		PPC_WARN_EMULATED(popcntb, regs);
		return emulate_popcntb_inst(regs, instword);
	}

	/* Emulate isel (Integer Select) instruction */
	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
		PPC_WARN_EMULATED(isel, regs);
		return emulate_isel(regs, instword);
	}

	/* Emulate sync instruction variants */
	if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
		PPC_WARN_EMULATED(sync, regs);
		asm volatile("sync");
		return 0;
	}

#ifdef CONFIG_PPC64
	/* Emulate the mfspr rD, DSCR. */
	if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
		PPC_INST_MFSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
		PPC_INST_MFSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mfdscr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_DSCR);
		return 0;
	}
	/* Emulate the mtspr DSCR, rD.
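	 *
	 * (Both the user SPR number, 0x03, and the privileged number,
	 * 0x11, are matched here; on a successful write we also set
	 * dscr_inherit, mirroring what facility_unavailable_exception()
	 * does further down.)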
	 */
	if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
		PPC_INST_MTSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
		PPC_INST_MTSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mtdscr, regs);
		rd = (instword >> 21) & 0x1f;
		current->thread.dscr = regs->gpr[rd];
		current->thread.dscr_inherit = 1;
		mtspr(SPRN_DSCR, current->thread.dscr);
		return 0;
	}
#endif

	return -EINVAL;
}

int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}

#ifdef CONFIG_MATH_EMULATION
static int emulate_math(struct pt_regs *regs)
{
	int ret;
	extern int do_mathemu(struct pt_regs *regs);

	ret = do_mathemu(regs);
	if (ret >= 0)
		PPC_WARN_EMULATED(math, regs);

	switch (ret) {
	case 0:
		emulate_single_step(regs);
		return 0;
	case 1: {
		int code = 0;
		code = __parse_fpscr(current->thread.fp_state.fpscr);
		_exception(SIGFPE, regs, code, regs->nip);
		return 0;
	}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return 0;
	}

	return -1;
}
#else
static inline int emulate_math(struct pt_regs *regs) { return -1; }
#endif

void __kprobes program_check_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	unsigned int reason = get_reason(regs);

	/* We can now get here via a FP Unavailable exception if the core
	 * has no FPU, in that case the reason flags will be 0 */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		goto bail;
	}
	if (reason & REASON_TRAP) {
		unsigned long bugaddr;
		/* Debugger is first in line to stop recursive faults in
		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
		if (debugger_bpt(regs))
			goto bail;

		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
		    == NOTIFY_STOP)
			goto bail;

		bugaddr = regs->nip;
		/*
		 * Fixup bugaddr for BUG_ON() in real mode
		 */
		if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR))
			bugaddr += PAGE_OFFSET;

		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
		    report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		goto bail;
	}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (reason & REASON_TM) {
		/* This is a TM "Bad Thing Exception" program check.
		 * This occurs when:
		 * -  An rfid/hrfid/mtmsrd attempts to cause an illegal
		 *    transition in TM states.
		 * -  A trechkpt is attempted when transactional.
		 * -  A treclaim is attempted when non transactional.
		 * -  A tend is illegally attempted.
		 * -  writing a TM SPR when transactional.
		 */
		if (!user_mode(regs) &&
		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		/* If usermode caused this, it's done something illegal and
		 * gets a SIGILL slap on the wrist.  We call it an illegal
		 * operand to distinguish from the instruction just being bad
		 * (e.g. executing a 'tend' on a CPU without TM!); it's an
		 * illegal /placement/ of a valid instruction.
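		 *
		 * (For example, 'trechkpt' is a valid instruction, but
		 * issuing it while transactional is an illegal placement
		 * and lands here, per the list above.)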
1203 */ 1204 if (user_mode(regs)) { 1205 _exception(SIGILL, regs, ILL_ILLOPN, regs->nip); 1206 goto bail; 1207 } else { 1208 printk(KERN_EMERG "Unexpected TM Bad Thing exception " 1209 "at %lx (msr 0x%x)\n", regs->nip, reason); 1210 die("Unrecoverable exception", regs, SIGABRT); 1211 } 1212 } 1213 #endif 1214 1215 /* 1216 * If we took the program check in the kernel skip down to sending a 1217 * SIGILL. The subsequent cases all relate to emulating instructions 1218 * which we should only do for userspace. We also do not want to enable 1219 * interrupts for kernel faults because that might lead to further 1220 * faults, and loose the context of the original exception. 1221 */ 1222 if (!user_mode(regs)) 1223 goto sigill; 1224 1225 /* We restore the interrupt state now */ 1226 if (!arch_irq_disabled_regs(regs)) 1227 local_irq_enable(); 1228 1229 /* (reason & REASON_ILLEGAL) would be the obvious thing here, 1230 * but there seems to be a hardware bug on the 405GP (RevD) 1231 * that means ESR is sometimes set incorrectly - either to 1232 * ESR_DST (!?) or 0. In the process of chasing this with the 1233 * hardware people - not sure if it can happen on any illegal 1234 * instruction or only on FP instructions, whether there is a 1235 * pattern to occurrences etc. -dgibson 31/Mar/2003 1236 */ 1237 if (!emulate_math(regs)) 1238 goto bail; 1239 1240 /* Try to emulate it if we should. */ 1241 if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) { 1242 switch (emulate_instruction(regs)) { 1243 case 0: 1244 regs->nip += 4; 1245 emulate_single_step(regs); 1246 goto bail; 1247 case -EFAULT: 1248 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 1249 goto bail; 1250 } 1251 } 1252 1253 sigill: 1254 if (reason & REASON_PRIVILEGED) 1255 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); 1256 else 1257 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1258 1259 bail: 1260 exception_exit(prev_state); 1261 } 1262 1263 /* 1264 * This occurs when running in hypervisor mode on POWER6 or later 1265 * and an illegal instruction is encountered. 
 */
void __kprobes emulation_assist_interrupt(struct pt_regs *regs)
{
	regs->msr |= REASON_ILLEGAL;
	program_check_exception(regs);
}

void alignment_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	int sig, code, fixed = 0;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
		goto bail;

	/* we don't implement logging of alignment exceptions */
	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		fixed = fix_alignment(regs);

	if (fixed == 1) {
		regs->nip += 4;	/* skip over emulated instruction */
		emulate_single_step(regs);
		goto bail;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		sig = SIGSEGV;
		code = SEGV_ACCERR;
	} else {
		sig = SIGBUS;
		code = BUS_ADRALN;
	}
	if (user_mode(regs))
		_exception(sig, regs, code, regs->dar);
	else
		bad_page_fault(regs, regs->dar, sig);

bail:
	exception_exit(prev_state);
}

void StackOverflow(struct pt_regs *regs)
{
	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
	       current, regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}

void nonrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
	       regs->nip, regs->msr);
	debugger(regs);
	die("nonrecoverable exception", regs, SIGKILL);
}

void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
	       "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);

	exception_exit(prev_state);
}

void altivec_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (user_mode(regs)) {
		/* A user program has executed an altivec instruction,
		   but this kernel doesn't support altivec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		goto bail;
	}

	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
	       "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);

bail:
	exception_exit(prev_state);
}

void vsx_unavailable_exception(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/* A user program has executed a vsx instruction,
		 * but this kernel doesn't support vsx.
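		 *
		 * (With CONFIG_VSX the instruction would instead have been
		 * handled by the VSX load-up path; getting here means the
		 * facility simply isn't built in, so SIGILL is the right
		 * answer.)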
		 */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
	       "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}

#ifdef CONFIG_PPC64
void facility_unavailable_exception(struct pt_regs *regs)
{
	static char *facility_strings[] = {
		[FSCR_FP_LG] = "FPU",
		[FSCR_VECVSX_LG] = "VMX/VSX",
		[FSCR_DSCR_LG] = "DSCR",
		[FSCR_PM_LG] = "PMU SPRs",
		[FSCR_BHRB_LG] = "BHRB",
		[FSCR_TM_LG] = "TM",
		[FSCR_EBB_LG] = "EBB",
		[FSCR_TAR_LG] = "TAR",
		[FSCR_LM_LG] = "LM",
	};
	char *facility = "unknown";
	u64 value;
	u32 instword, rd;
	u8 status;
	bool hv;

	hv = (regs->trap == 0xf80);
	if (hv)
		value = mfspr(SPRN_HFSCR);
	else
		value = mfspr(SPRN_FSCR);

	status = value >> 56;
	if (status == FSCR_DSCR_LG) {
		/*
		 * User is accessing the DSCR register using the problem
		 * state only SPR number (0x03) either through a mfspr or
		 * a mtspr instruction. If it is a write attempt through
		 * a mtspr, then we set the inherit bit. This also allows
		 * the user to write or read the register directly in the
		 * future by setting via the FSCR DSCR bit. But in case it
		 * is a read DSCR attempt through a mfspr instruction, we
		 * just emulate the instruction instead. This code path will
		 * always emulate all the mfspr instructions till the user
		 * has attempted at least one mtspr instruction. This way it
		 * preserves the same behaviour when the user is accessing
		 * the DSCR through privilege level only SPR number (0x11)
		 * which is emulated through illegal instruction exception.
		 * We always leave HFSCR DSCR set.
		 */
		if (get_user(instword, (u32 __user *)(regs->nip))) {
			pr_err("Failed to fetch the user instruction\n");
			return;
		}

		/* Write into DSCR (mtspr 0x03, RS) */
		if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
				== PPC_INST_MTSPR_DSCR_USER) {
			rd = (instword >> 21) & 0x1f;
			current->thread.dscr = regs->gpr[rd];
			current->thread.dscr_inherit = 1;
			current->thread.fscr |= FSCR_DSCR;
			mtspr(SPRN_FSCR, current->thread.fscr);
		}

		/* Read from DSCR (mfspr RT, 0x03) */
		if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
				== PPC_INST_MFSPR_DSCR_USER) {
			if (emulate_instruction(regs)) {
				pr_err("DSCR based mfspr emulation failed\n");
				return;
			}
			regs->nip += 4;
			emulate_single_step(regs);
		}
		return;
	} else if ((status == FSCR_LM_LG) && cpu_has_feature(CPU_FTR_ARCH_300)) {
		/*
		 * This process has touched LM, so turn it on forever
		 * for this process
		 */
		current->thread.fscr |= FSCR_LM;
		mtspr(SPRN_FSCR, current->thread.fscr);
		return;
	}

	if ((status < ARRAY_SIZE(facility_strings)) &&
	    facility_strings[status])
		facility = facility_strings[status];

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	pr_err_ratelimited(
		"%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
"Hypervisor " : "", facility, regs->nip, regs->msr); 1464 1465 if (user_mode(regs)) { 1466 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1467 return; 1468 } 1469 1470 die("Unexpected facility unavailable exception", regs, SIGABRT); 1471 } 1472 #endif 1473 1474 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1475 1476 void fp_unavailable_tm(struct pt_regs *regs) 1477 { 1478 /* Note: This does not handle any kind of FP laziness. */ 1479 1480 TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n", 1481 regs->nip, regs->msr); 1482 1483 /* We can only have got here if the task started using FP after 1484 * beginning the transaction. So, the transactional regs are just a 1485 * copy of the checkpointed ones. But, we still need to recheckpoint 1486 * as we're enabling FP for the process; it will return, abort the 1487 * transaction, and probably retry but now with FP enabled. So the 1488 * checkpointed FP registers need to be loaded. 1489 */ 1490 tm_reclaim_current(TM_CAUSE_FAC_UNAV); 1491 /* Reclaim didn't save out any FPRs to transact_fprs. */ 1492 1493 /* Enable FP for the task: */ 1494 regs->msr |= (MSR_FP | current->thread.fpexc_mode); 1495 1496 /* This loads and recheckpoints the FP registers from 1497 * thread.fpr[]. They will remain in registers after the 1498 * checkpoint so we don't need to reload them after. 1499 * If VMX is in use, the VRs now hold checkpointed values, 1500 * so we don't want to load the VRs from the thread_struct. 1501 */ 1502 tm_recheckpoint(¤t->thread, MSR_FP); 1503 1504 /* If VMX is in use, get the transactional values back */ 1505 if (regs->msr & MSR_VEC) { 1506 do_load_up_transact_altivec(¤t->thread); 1507 /* At this point all the VSX state is loaded, so enable it */ 1508 regs->msr |= MSR_VSX; 1509 } 1510 } 1511 1512 void altivec_unavailable_tm(struct pt_regs *regs) 1513 { 1514 /* See the comments in fp_unavailable_tm(). This function operates 1515 * the same way. 1516 */ 1517 1518 TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx," 1519 "MSR=%lx\n", 1520 regs->nip, regs->msr); 1521 tm_reclaim_current(TM_CAUSE_FAC_UNAV); 1522 regs->msr |= MSR_VEC; 1523 tm_recheckpoint(¤t->thread, MSR_VEC); 1524 current->thread.used_vr = 1; 1525 1526 if (regs->msr & MSR_FP) { 1527 do_load_up_transact_fpu(¤t->thread); 1528 regs->msr |= MSR_VSX; 1529 } 1530 } 1531 1532 void vsx_unavailable_tm(struct pt_regs *regs) 1533 { 1534 unsigned long orig_msr = regs->msr; 1535 1536 /* See the comments in fp_unavailable_tm(). This works similarly, 1537 * though we're loading both FP and VEC registers in here. 1538 * 1539 * If FP isn't in use, load FP regs. If VEC isn't in use, load VEC 1540 * regs. Either way, set MSR_VSX. 1541 */ 1542 1543 TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx," 1544 "MSR=%lx\n", 1545 regs->nip, regs->msr); 1546 1547 current->thread.used_vsr = 1; 1548 1549 /* If FP and VMX are already loaded, we have all the state we need */ 1550 if ((orig_msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) { 1551 regs->msr |= MSR_VSX; 1552 return; 1553 } 1554 1555 /* This reclaims FP and/or VR regs if they're already enabled */ 1556 tm_reclaim_current(TM_CAUSE_FAC_UNAV); 1557 1558 regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode | 1559 MSR_VSX; 1560 1561 /* This loads & recheckpoints FP and VRs; but we have 1562 * to be sure not to overwrite previously-valid state. 
	 */
	tm_recheckpoint(&current->thread, regs->msr & ~orig_msr);

	if (orig_msr & MSR_FP)
		do_load_up_transact_fpu(&current->thread);
	if (orig_msr & MSR_VEC)
		do_load_up_transact_altivec(&current->thread);
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

void performance_monitor_exception(struct pt_regs *regs)
{
	__this_cpu_inc(irq_stat.pmu_irqs);

	perf_irq(regs);
}

#ifdef CONFIG_8xx
void SoftwareEmulation(struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);

	if (!user_mode(regs)) {
		debugger(regs);
		die("Kernel Mode Unimplemented Instruction or SW FPU Emulation",
			regs, SIGFPE);
	}

	if (!emulate_math(regs))
		return;

	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
#endif /* CONFIG_8xx */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
{
	int changed = 0;
	/*
	 * Determine the cause of the debug event, clear the
	 * event flags and send a trap to the handler. Torez
	 */
	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
			     5);
		changed |= 0x01;
	} else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
		dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
		do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT,
			     6);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC1) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
			     1);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC2) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
			     2);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC3) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
			     3);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC4) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
			     4);
		changed |= 0x01;
	}
	/*
	 * At the point this routine was called, the MSR(DE) was turned off.
	 * Check all other debug flags and see if that bit needs to be turned
	 * back on or not.
	 */
	if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
			       current->thread.debug.dbcr1))
		regs->msr |= MSR_DE;
	else
		/* Make sure the IDM flag is off */
		current->thread.debug.dbcr0 &= ~DBCR0_IDM;

	if (changed & 0x01)
		mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
}

void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	current->thread.debug.dbsr = debug_status;

	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
	 * on server, it stops on the target of the branch. In order to simulate
	 * the server behaviour, we thus restart right away with a single step
	 * instead of stopping here when hitting a BT
	 */
	if (debug_status & DBSR_BT) {
		regs->msr &= ~MSR_DE;

		/* Disable BT */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
		/* Clear the BT event */
		mtspr(SPRN_DBSR, DBSR_BT);

		/* Do the single step trick only when coming from userspace */
		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_BT;
			current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
			regs->msr |= MSR_DE;
			return;
		}

		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}
		if (debugger_sstep(regs))
			return;
	} else if (debug_status & DBSR_IC) {	/* Instruction complete */
		regs->msr &= ~MSR_DE;

		/* Disable instruction completion */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
		/* Clear the instruction completion event */
		mtspr(SPRN_DBSR, DBSR_IC);

		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}

		if (debugger_sstep(regs))
			return;

		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_IC;
			if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
					       current->thread.debug.dbcr1))
				regs->msr |= MSR_DE;
			else
				/* Make sure the IDM bit is off */
				current->thread.debug.dbcr0 &= ~DBCR0_IDM;
		}

		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
	} else
		handle_debug(regs, debug_status);
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */

#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	flush_altivec_to_thread(current);

	PPC_WARN_EMULATED(altivec, regs);
	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
				   "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vr_state.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	/* We treat cache locking instructions from the user
	 * as priv ops, in the future we could try to do
	 * something smarter
	 */
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	return;
}
#endif /* CONFIG_FSL_BOOKE */

#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
	extern int do_spe_mathemu(struct pt_regs *regs);
	unsigned long spefscr;
	int fpexc_mode;
	int code = 0;
	int err;

	flush_spe_to_thread(current);

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
		code = FPE_FLTOVF;
	} else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
		code = FPE_FLTUND;
	} else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV)) {
		code = FPE_FLTDIV;
	} else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
		code = FPE_FLTINV;
	} else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES)) {
		code = FPE_FLTRES;
	}

	err = do_spe_mathemu(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, code, regs->nip);
	}

	return;
}

void SPEFloatingPointRoundException(struct pt_regs *regs)
{
	extern int speround_handler(struct pt_regs *regs);
	int err;

	preempt_disable();
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	preempt_enable();

	regs->nip -= 4;
	err = speround_handler(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, 0, regs->nip);
		return;
	}
}
#endif

/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
	       regs->trap, regs->nip);
	die("Unrecoverable exception", regs, SIGABRT);
}

#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR) & (~TCR_WIE));
	return;
}

void WatchdogException(struct pt_regs *regs)
{
	printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif

/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
	       regs->gpr[1], regs->nip);
	die("Bad kernel stack pointer", regs, SIGABRT);
}

void __init trap_init(void)
{
}


#ifdef CONFIG_PPC_EMULATED_STATS

#define WARN_EMULATED_SETUP(type)	.type = { .name = #type }

struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
	WARN_EMULATED_SETUP(altivec),
#endif
	WARN_EMULATED_SETUP(dcba),
	WARN_EMULATED_SETUP(dcbz),
	WARN_EMULATED_SETUP(fp_pair),
	WARN_EMULATED_SETUP(isel),
	WARN_EMULATED_SETUP(mcrxr),
	WARN_EMULATED_SETUP(mfpvr),
	WARN_EMULATED_SETUP(multiple),
	WARN_EMULATED_SETUP(popcntb),
	WARN_EMULATED_SETUP(spe),
	WARN_EMULATED_SETUP(string),
	WARN_EMULATED_SETUP(sync),
	WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
	WARN_EMULATED_SETUP(math),
#endif
#ifdef CONFIG_VSX
	WARN_EMULATED_SETUP(vsx),
#endif
#ifdef CONFIG_PPC64
	WARN_EMULATED_SETUP(mfdscr),
	WARN_EMULATED_SETUP(mtdscr),
	WARN_EMULATED_SETUP(lq_stq),
#endif
};

u32 ppc_warn_emulated;

void ppc_warn_emulated_print(const char *type)
{
	pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
			    type);
}

static int __init ppc_warn_emulated_init(void)
{
	struct dentry *dir, *d;
	unsigned int i;
	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;

	if (!powerpc_debugfs_root)
		return -ENODEV;

	dir = debugfs_create_dir("emulated_instructions",
				 powerpc_debugfs_root);
	if (!dir)
		return -ENOMEM;

	d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
			       &ppc_warn_emulated);
	if (!d)
		goto fail;

	for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
		d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
				       (u32 *)&entries[i].val.counter);
		if (!d)
			goto fail;
	}

	return 0;

fail:
	debugfs_remove_recursive(dir);
	return -ENOMEM;
}

device_initcall(ppc_warn_emulated_init);

#endif /* CONFIG_PPC_EMULATED_STATS */