/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright 2007-2010 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Modified by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras (paulus@samba.org)
 */

/*
 * This file handles the architecture-dependent parts of hardware exceptions
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/extable.h>
#include <linux/module.h>	/* print_modules */
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>

#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/debugfs.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#include <asm/reg.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
#include <asm/tm.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/rio.h>
#include <asm/fadump.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#include <asm/asm-prototypes.h>
#include <asm/hmi.h>
#include <sysdev/fsl_pci.h>
#include <asm/kprobes.h>

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_break_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

/* Transactional Memory trap debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while(0)
#endif
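/*
 * Illustrative note on the hooks above: they are populated by an in-kernel
 * debugger at its own init time, not here. For example, xmon's setup code
 * assigns roughly (sketch only, see arch/powerpc/xmon/ for the real thing):
 *
 *	__debugger = xmon;
 *	__debugger_bpt = xmon_bpt;
 *	__debugger_sstep = xmon_sstep;
 *
 * The debugger_*() wrappers used throughout this file simply call these
 * pointers when they are non-NULL.
 */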
/*
 * Trap & Exception support
 */

#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
	mutex_lock(&pmac_backlight_mutex);
	if (pmac_backlight) {
		struct backlight_properties *props;

		props = &pmac_backlight->props;
		props->brightness = props->max_brightness;
		props->power = FB_BLANK_UNBLANK;
		backlight_update_status(pmac_backlight);
	}
	mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif

/*
 * If oops/die is expected to crash the machine, return true here.
 *
 * This should not be expected to be 100% accurate, there may be
 * notifiers registered or other unexpected conditions that may bring
 * down the kernel. Or if the current process in the kernel is holding
 * locks or has other critical state, the kernel may become effectively
 * unusable anyway.
 */
bool die_will_crash(void)
{
	if (should_fadump_crash())
		return true;
	if (kexec_should_crash(current))
		return true;
	if (in_interrupt() || panic_on_oops ||
			!current->pid || is_global_init(current))
		return true;

	return false;
}

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;

static unsigned long oops_begin(struct pt_regs *regs)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	if (machine_is(powermac))
		pmac_backlight_unblank();
	return flags;
}
NOKPROBE_SYMBOL(oops_begin);

static void oops_end(unsigned long flags, struct pt_regs *regs,
		     int signr)
{
	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	oops_exit();
	printk("\n");
	if (!die_nest_count) {
		/* Nest count reaches zero, release the lock. */
		die_owner = -1;
		arch_spin_unlock(&die_lock);
	}
	raw_local_irq_restore(flags);

	crash_fadump(regs, "die oops");

	if (kexec_should_crash(current))
		crash_kexec(regs);

	if (!signr)
		return;

	/*
	 * While our oops output is serialised by a spinlock, output
	 * from panic() called below can race and corrupt it. If we
	 * know we are going to panic, delay for 1 second so we have a
	 * chance to get clean backtraces from all CPUs that are oopsing.
	 */
	if (in_interrupt() || panic_on_oops || !current->pid ||
	    is_global_init(current)) {
		mdelay(MSEC_PER_SEC);
	}

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);
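/*
 * Illustrative sketch of the typical flow through these helpers: a fatal
 * kernel fault reaches die() below, which brackets the report roughly as
 *
 *	flags = oops_begin(regs);	(take die_lock, verbose console)
 *	__die("Oops message", regs, err);	(banner, modules, registers)
 *	oops_end(flags, regs, signr);	(taint, maybe kexec/panic/do_exit)
 *
 * This summarises the code that follows rather than adding a new API.
 */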
static int __die(const char *str, struct pt_regs *regs, long err)
{
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);

	if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
		printk("LE ");
	else
		printk("BE ");

	if (IS_ENABLED(CONFIG_PREEMPT))
		pr_cont("PREEMPT ");

	if (IS_ENABLED(CONFIG_SMP))
		pr_cont("SMP NR_CPUS=%d ", NR_CPUS);

	if (debug_pagealloc_enabled())
		pr_cont("DEBUG_PAGEALLOC ");

	if (IS_ENABLED(CONFIG_NUMA))
		pr_cont("NUMA ");

	pr_cont("%s\n", ppc_md.name ? ppc_md.name : "");

	if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
		return 1;

	print_modules();
	show_regs(regs);

	return 0;
}
NOKPROBE_SYMBOL(__die);

void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags;

	if (debugger(regs))
		return;

	flags = oops_begin(regs);
	if (__die(str, regs, err))
		err = 0;
	oops_end(flags, regs, err);
}
NOKPROBE_SYMBOL(die);

void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs, siginfo_t *info)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGTRAP;
	info->si_code = TRAP_TRACE;
	info->si_addr = (void __user *)regs->nip;
}

void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	siginfo_t info;
	const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
		"at %08lx nip %08lx lr %08lx code %x\n";
	const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
		"at %016lx nip %016lx lr %016lx code %x\n";

	if (!user_mode(regs)) {
		die("Exception in kernel mode", regs, signr);
		return;
	}

	if (show_unhandled_signals && unhandled_signal(current, signr)) {
		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
				   current->comm, current->pid, signr,
				   addr, regs->nip, regs->link, code);
	}

	if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
		local_irq_enable();

	current->thread.trap_nr = code;
	memset(&info, 0, sizeof(info));
	info.si_signo = signr;
	info.si_code = code;
	info.si_addr = (void __user *) addr;
	force_sig_info(signr, &info, current);
}
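/*
 * Illustrative usage note: most handlers in this file report faults to
 * userspace through _exception() above, e.g. an illegal instruction is
 * typically raised as
 *
 *	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
 *
 * which fills in a siginfo and forces the signal on the current task,
 * or falls through to die() if the fault was in kernel mode.
 */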
void system_reset_exception(struct pt_regs *regs)
{
	/*
	 * Avoid crashes in case of nested NMI exceptions. Recoverability
	 * is determined by RI and in_nmi
	 */
	bool nested = in_nmi();
	if (!nested)
		nmi_enter();

	__this_cpu_inc(irq_stat.sreset_irqs);

	/* See if any machine dependent calls */
	if (ppc_md.system_reset_exception) {
		if (ppc_md.system_reset_exception(regs))
			goto out;
	}

	if (debugger(regs))
		goto out;

	/*
	 * A system reset is a request to dump, so we always send
	 * it through the crashdump code (if fadump or kdump are
	 * registered).
	 */
	crash_fadump(regs, "System Reset");

	crash_kexec(regs);

	/*
	 * We aren't the primary crash CPU. We need to send it
	 * to a holding pattern to avoid it ending up in the panic
	 * code.
	 */
	crash_kexec_secondary(regs);

	/*
	 * No debugger or crash dump registered, print logs then
	 * panic.
	 */
	__die("System Reset", regs, SIGABRT);

	mdelay(2*MSEC_PER_SEC); /* Wait a little while for others to print */
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	nmi_panic(regs, "System Reset");

out:
#ifdef CONFIG_PPC_BOOK3S_64
	BUG_ON(get_paca()->in_nmi == 0);
	if (get_paca()->in_nmi > 1)
		nmi_panic(regs, "Unrecoverable nested System Reset");
#endif
	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		nmi_panic(regs, "Unrecoverable System Reset");

	if (!nested)
		nmi_exit();

	/* What should we do here? We could issue a shutdown or hard reset. */
}

/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == PPC_INST_NOP)
			nip -= 2;
		else if (*nip == PPC_INST_ISYNC)
			--nip;
		if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) {
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100) ? "OUT to" : "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			regs->msr |= MSR_RI;
			regs->nip = extable_fixup(entry);
			return 1;
		}
	}
#endif /* CONFIG_PPC32 */
	return 0;
}

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)	((regs)->dsisr)
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR

/* single-step stuff */
#define single_stepping(regs)	(current->thread.debug.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.debug.dbcr0 &= ~DBCR0_IC)

#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define REASON_TM		SRR1_PROGTM
#define REASON_FP		SRR1_PROGFPE
#define REASON_ILLEGAL		SRR1_PROGILL
#define REASON_PRIVILEGED	SRR1_PROGPRIV
#define REASON_TRAP		SRR1_PROGTRAP

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#endif
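/*
 * Illustrative example of how the REASON_* abstraction above is consumed:
 * an illegal instruction taken on a 4xx core shows up as ESR bits, while
 * on server CPUs it is an SRR1 bit, but program_check_exception() further
 * down only ever writes
 *
 *	unsigned int reason = get_reason(regs);
 *	if (reason & REASON_ILLEGAL)
 *		...
 *
 * so the same handler works for both register layouts.
 */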
#if defined(CONFIG_E500)
int machine_check_e500mc(struct pt_regs *regs)
{
	unsigned long mcsr = mfspr(SPRN_MCSR);
	unsigned long pvr = mfspr(SPRN_PVR);
	unsigned long reason = mcsr;
	int recoverable = 1;

	if (reason & MCSR_LD) {
		recoverable = fsl_rio_mcheck_exception(regs);
		if (recoverable == 1)
			goto silent_out;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");

	if (reason & MCSR_ICPERR) {
		printk("Instruction Cache Parity Error\n");

		/*
		 * This is recoverable by invalidating the i-cache.
		 */
		mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
		while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
			;

		/*
		 * This will generally be accompanied by an instruction
		 * fetch error report -- only treat MCSR_IF as fatal
		 * if it wasn't due to an L1 parity error.
		 */
		reason &= ~MCSR_IF;
	}

	if (reason & MCSR_DCPERR_MC) {
		printk("Data Cache Parity Error\n");

		/*
		 * In write shadow mode we auto-recover from the error, but it
		 * may still get logged and cause a machine check. We should
		 * only treat the non-write shadow case as non-recoverable.
		 */
		/* On e6500 core, L1 DCWS (Data cache write shadow mode) bit
		 * is not implemented but L1 data cache always runs in write
		 * shadow mode. Hence on data cache parity errors HW will
		 * automatically invalidate the L1 Data Cache.
		 */
		if (PVR_VER(pvr) != PVR_VER_E6500) {
			if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
				recoverable = 0;
		}
	}

	if (reason & MCSR_L2MMU_MHIT) {
		printk("Hit on multiple TLB entries\n");
		recoverable = 0;
	}

	if (reason & MCSR_NMI)
		printk("Non-maskable interrupt\n");

	if (reason & MCSR_IF) {
		printk("Instruction Fetch Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LD) {
		printk("Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_ST) {
		printk("Store Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LDG) {
		printk("Guarded Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_TLBSYNC)
		printk("Simultaneous tlbsync operations\n");

	if (reason & MCSR_BSL2_ERR) {
		printk("Level 2 Cache Error\n");
		recoverable = 0;
	}

	if (reason & MCSR_MAV) {
		u64 addr;

		addr = mfspr(SPRN_MCAR);
		addr |= (u64)mfspr(SPRN_MCARU) << 32;

		printk("Machine Check %s Address: %#llx\n",
		       reason & MCSR_MEA ? "Effective" : "Physical", addr);
	}
"Effective" : "Physical", addr); 537 } 538 539 silent_out: 540 mtspr(SPRN_MCSR, mcsr); 541 return mfspr(SPRN_MCSR) == 0 && recoverable; 542 } 543 544 int machine_check_e500(struct pt_regs *regs) 545 { 546 unsigned long reason = mfspr(SPRN_MCSR); 547 548 if (reason & MCSR_BUS_RBERR) { 549 if (fsl_rio_mcheck_exception(regs)) 550 return 1; 551 if (fsl_pci_mcheck_exception(regs)) 552 return 1; 553 } 554 555 printk("Machine check in kernel mode.\n"); 556 printk("Caused by (from MCSR=%lx): ", reason); 557 558 if (reason & MCSR_MCP) 559 printk("Machine Check Signal\n"); 560 if (reason & MCSR_ICPERR) 561 printk("Instruction Cache Parity Error\n"); 562 if (reason & MCSR_DCP_PERR) 563 printk("Data Cache Push Parity Error\n"); 564 if (reason & MCSR_DCPERR) 565 printk("Data Cache Parity Error\n"); 566 if (reason & MCSR_BUS_IAERR) 567 printk("Bus - Instruction Address Error\n"); 568 if (reason & MCSR_BUS_RAERR) 569 printk("Bus - Read Address Error\n"); 570 if (reason & MCSR_BUS_WAERR) 571 printk("Bus - Write Address Error\n"); 572 if (reason & MCSR_BUS_IBERR) 573 printk("Bus - Instruction Data Error\n"); 574 if (reason & MCSR_BUS_RBERR) 575 printk("Bus - Read Data Bus Error\n"); 576 if (reason & MCSR_BUS_WBERR) 577 printk("Bus - Write Data Bus Error\n"); 578 if (reason & MCSR_BUS_IPERR) 579 printk("Bus - Instruction Parity Error\n"); 580 if (reason & MCSR_BUS_RPERR) 581 printk("Bus - Read Parity Error\n"); 582 583 return 0; 584 } 585 586 int machine_check_generic(struct pt_regs *regs) 587 { 588 return 0; 589 } 590 #elif defined(CONFIG_E200) 591 int machine_check_e200(struct pt_regs *regs) 592 { 593 unsigned long reason = mfspr(SPRN_MCSR); 594 595 printk("Machine check in kernel mode.\n"); 596 printk("Caused by (from MCSR=%lx): ", reason); 597 598 if (reason & MCSR_MCP) 599 printk("Machine Check Signal\n"); 600 if (reason & MCSR_CP_PERR) 601 printk("Cache Push Parity Error\n"); 602 if (reason & MCSR_CPERR) 603 printk("Cache Parity Error\n"); 604 if (reason & MCSR_EXCP_ERR) 605 printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n"); 606 if (reason & MCSR_BUS_IRERR) 607 printk("Bus - Read Bus Error on instruction fetch\n"); 608 if (reason & MCSR_BUS_DRERR) 609 printk("Bus - Read Bus Error on data load\n"); 610 if (reason & MCSR_BUS_WRERR) 611 printk("Bus - Write Bus Error on buffered store or cache line push\n"); 612 613 return 0; 614 } 615 #elif defined(CONFIG_PPC32) 616 int machine_check_generic(struct pt_regs *regs) 617 { 618 unsigned long reason = regs->msr; 619 620 printk("Machine check in kernel mode.\n"); 621 printk("Caused by (from SRR1=%lx): ", reason); 622 switch (reason & 0x601F0000) { 623 case 0x80000: 624 printk("Machine check signal\n"); 625 break; 626 case 0: /* for 601 */ 627 case 0x40000: 628 case 0x140000: /* 7450 MSS error and TEA */ 629 printk("Transfer error ack signal\n"); 630 break; 631 case 0x20000: 632 printk("Data parity error signal\n"); 633 break; 634 case 0x10000: 635 printk("Address parity error signal\n"); 636 break; 637 case 0x20000000: 638 printk("L1 Data Cache error\n"); 639 break; 640 case 0x40000000: 641 printk("L1 Instruction Cache error\n"); 642 break; 643 case 0x00100000: 644 printk("L2 data cache parity error\n"); 645 break; 646 default: 647 printk("Unknown values in msr\n"); 648 } 649 return 0; 650 } 651 #endif /* everything else */ 652 653 void machine_check_exception(struct pt_regs *regs) 654 { 655 int recover = 0; 656 bool nested = in_nmi(); 657 if (!nested) 658 nmi_enter(); 659 660 /* 64s accounts the mce in machine_check_early 
void machine_check_exception(struct pt_regs *regs)
{
	int recover = 0;
	bool nested = in_nmi();
	if (!nested)
		nmi_enter();

	/* 64s accounts the mce in machine_check_early when in HVMODE */
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !cpu_has_feature(CPU_FTR_HVMODE))
		__this_cpu_inc(irq_stat.mce_exceptions);

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	/* See if any machine dependent calls. In theory, we would want
	 * to call the CPU first, and call the ppc_md. one if the CPU
	 * one returns a positive number. However there is existing code
	 * that assumes the board gets a first chance, so let's keep it
	 * that way for now and fix things later. --BenH.
	 */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);
	else if (cur_cpu_spec->machine_check)
		recover = cur_cpu_spec->machine_check(regs);

	if (recover > 0)
		goto bail;

	if (debugger_fault_handler(regs))
		goto bail;

	if (check_io_access(regs))
		goto bail;

	die("Machine check", regs, SIGBUS);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		nmi_panic(regs, "Unrecoverable Machine check");

bail:
	if (!nested)
		nmi_exit();
}

void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}

void handle_hmi_exception(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	irq_enter();

	if (ppc_md.handle_hmi_exception)
		ppc_md.handle_hmi_exception(regs);

	irq_exit();
	set_irq_regs(old_regs);
}

void unknown_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, 0, 0);

	exception_exit(prev_state);
}

void instruction_breakpoint_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_iabr_match(regs))
		goto bail;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);

bail:
	exception_exit(prev_state);
}

void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, 0, 0);
}

void single_step_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	clear_single_step(regs);

	if (kprobe_post_handler(regs))
		return;

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_sstep(regs))
		goto bail;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);

bail:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(single_step_exception);
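/*
 * Illustrative example of why the helper below exists: suppose a ptrace
 * single-step is pending (MSR_SE set) and the stepped instruction is one
 * we emulate, e.g. mfspr rD,PVR. The hardware never executes it, so no
 * trace interrupt fires. After the emulation code does regs->nip += 4 it
 * calls emulate_single_step(), which fakes the SIGTRAP the debugger was
 * expecting.
 */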
/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception. This was pointed out
 * by Kumar Gala.
 *  -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs))
		single_step_exception(regs);
}

static inline int __parse_fpscr(unsigned long fpscr)
{
	int ret = 0;

	/* Invalid operation */
	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
		ret = FPE_FLTINV;

	/* Overflow */
	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
		ret = FPE_FLTOVF;

	/* Underflow */
	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
		ret = FPE_FLTUND;

	/* Divide by zero */
	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
		ret = FPE_FLTDIV;

	/* Inexact result */
	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
		ret = FPE_FLTRES;

	return ret;
}

static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;

	flush_fp_to_thread(current);

	code = __parse_fpscr(current->thread.fp_state.fpscr);

	_exception(SIGFPE, regs, code, regs->nip);
}
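/*
 * Illustrative note on __parse_fpscr() above: each test pairs an FPSCR
 * enable bit with its sticky exception bit, e.g. FPSCR_ZE (zero-divide
 * exception enable) with FPSCR_ZX (zero-divide exception occurred).
 * Only an exception that is both enabled and flagged is translated to a
 * siginfo code, so a division by zero with ZE set ends up delivering
 * SIGFPE with si_code == FPE_FLTDIV.
 */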
/*
 * Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 *
 */
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & PPC_INST_STRING_MASK) {
	case PPC_INST_LSWX:
	case PPC_INST_STSWX:
		EA += NB_RB;
		num_bytes = regs->xer & 0x7f;
		break;
	case PPC_INST_LSWI:
	case PPC_INST_STSWI:
		num_bytes = (NB_RB == 0) ? 32 : NB_RB;
		break;
	default:
		return -EINVAL;
	}

	while (num_bytes != 0)
	{
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));

		/* if process is 32-bit, clear upper 32 bits of EA */
		if ((regs->msr & MSR_64BIT) == 0)
			EA &= 0xFFFFFFFF;

		switch ((instword & PPC_INST_STRING_MASK)) {
			case PPC_INST_LSWX:
			case PPC_INST_LSWI:
				if (get_user(val, (u8 __user *)EA))
					return -EFAULT;
				/* first time updating this reg,
				 * zero it out */
				if (pos == 0)
					regs->gpr[rT] = 0;
				regs->gpr[rT] |= val << shift;
				break;
			case PPC_INST_STSWI:
			case PPC_INST_STSWX:
				val = regs->gpr[rT] >> shift;
				if (put_user(val, (u8 __user *)EA))
					return -EFAULT;
				break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}

static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
	u32 ra, rs;
	unsigned long tmp;

	ra = (instword >> 16) & 0x1f;
	rs = (instword >> 21) & 0x1f;

	tmp = regs->gpr[rs];
	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
	regs->gpr[ra] = tmp;

	return 0;
}

static int emulate_isel(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 rB = (instword >> 11) & 0x1f;
	u8 BC = (instword >> 6) & 0x1f;
	u8 bit;
	unsigned long tmp;

	tmp = (rA == 0) ? 0 : regs->gpr[rA];
	bit = (regs->ccr >> (31 - BC)) & 0x1;

	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_abort_check(struct pt_regs *regs, int cause)
{
	/* If we're emulating a load/store in an active transaction, we cannot
	 * emulate it as the kernel operates in transaction suspended context.
	 * We need to abort the transaction.  This creates a persistent TM
	 * abort so tell the user what caused it with a new code.
	 */
	if (MSR_TM_TRANSACTIONAL(regs->msr)) {
		tm_enable();
		tm_abort(cause);
		return true;
	}
	return false;
}
#else
static inline bool tm_abort_check(struct pt_regs *regs, int reason)
{
	return false;
}
#endif
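/*
 * Illustrative trigger for the emulation below: a userspace read of the
 * PVR, which is a privileged SPR, takes a program check and lands here.
 * A minimal sketch of such user code (not part of this file):
 *
 *	unsigned long pvr;
 *	asm volatile("mfspr %0, 287" : "=r" (pvr));	// SPR 287 == PVR
 *
 * emulate_instruction() recognises the encoding, writes the real PVR into
 * the target GPR, and the caller then advances NIP past the instruction.
 */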
static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	if (!user_mode(regs))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
		PPC_WARN_EMULATED(mfpvr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op.  */
	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
		PPC_WARN_EMULATED(dcba, regs);
		return 0;
	}

	/* Emulate the mcrxr insn.  */
	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		PPC_WARN_EMULATED(mcrxr, regs);
		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
		if (tm_abort_check(regs,
				   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
			return -EINVAL;
		PPC_WARN_EMULATED(string, regs);
		return emulate_string_inst(regs, instword);
	}

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
		PPC_WARN_EMULATED(popcntb, regs);
		return emulate_popcntb_inst(regs, instword);
	}

	/* Emulate isel (Integer Select) instruction */
	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
		PPC_WARN_EMULATED(isel, regs);
		return emulate_isel(regs, instword);
	}

	/* Emulate sync instruction variants */
	if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
		PPC_WARN_EMULATED(sync, regs);
		asm volatile("sync");
		return 0;
	}

#ifdef CONFIG_PPC64
	/* Emulate the mfspr rD, DSCR. */
	if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
		PPC_INST_MFSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
		PPC_INST_MFSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mfdscr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_DSCR);
		return 0;
	}
	/* Emulate the mtspr DSCR, rD. */
	if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
		PPC_INST_MTSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
		PPC_INST_MTSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mtdscr, regs);
		rd = (instword >> 21) & 0x1f;
		current->thread.dscr = regs->gpr[rd];
		current->thread.dscr_inherit = 1;
		mtspr(SPRN_DSCR, current->thread.dscr);
		return 0;
	}
#endif

	return -EINVAL;
}

int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}

#ifdef CONFIG_MATH_EMULATION
static int emulate_math(struct pt_regs *regs)
{
	int ret;
	extern int do_mathemu(struct pt_regs *regs);

	ret = do_mathemu(regs);
	if (ret >= 0)
		PPC_WARN_EMULATED(math, regs);

	switch (ret) {
	case 0:
		emulate_single_step(regs);
		return 0;
	case 1: {
		int code = 0;
		code = __parse_fpscr(current->thread.fp_state.fpscr);
		_exception(SIGFPE, regs, code, regs->nip);
		return 0;
	}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return 0;
	}

	return -1;
}
#else
static inline int emulate_math(struct pt_regs *regs) { return -1; }
#endif
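/*
 * Illustrative background for the REASON_TRAP handling below: BUG() and
 * WARN_ON() on powerpc are built from a conditional trap instruction plus
 * a bug_table entry keyed by the trap address. That is why the handler
 * passes regs->nip to report_bug(); a BUG_TRAP_TYPE_WARN result means
 * "warn and continue", so the trap is skipped with regs->nip += 4.
 */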
void program_check_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	unsigned int reason = get_reason(regs);

	/* We can now get here via a FP Unavailable exception if the core
	 * has no FPU, in that case the reason flags will be 0 */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		goto bail;
	}
	if (reason & REASON_TRAP) {
		unsigned long bugaddr;
		/* Debugger is first in line to stop recursive faults in
		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
		if (debugger_bpt(regs))
			goto bail;

		if (kprobe_handler(regs))
			goto bail;

		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
				== NOTIFY_STOP)
			goto bail;

		bugaddr = regs->nip;
		/*
		 * Fixup bugaddr for BUG_ON() in real mode
		 */
		if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR))
			bugaddr += PAGE_OFFSET;

		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
		    report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		goto bail;
	}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (reason & REASON_TM) {
		/* This is a TM "Bad Thing Exception" program check.
		 * This occurs when:
		 * -  An rfid/hrfid/mtmsrd attempts to cause an illegal
		 *    transition in TM states.
		 * -  A trechkpt is attempted when transactional.
		 * -  A treclaim is attempted when non transactional.
		 * -  A tend is illegally attempted.
		 * -  writing a TM SPR when transactional.
		 */
		if (!user_mode(regs) &&
		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		/* If usermode caused this, it's done something illegal and
		 * gets a SIGILL slap on the wrist.  We call it an illegal
		 * operand to distinguish from the instruction just being bad
		 * (e.g. executing a 'tend' on a CPU without TM!); it's an
		 * illegal /placement/ of a valid instruction.
		 */
		if (user_mode(regs)) {
			_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
			goto bail;
		} else {
			printk(KERN_EMERG "Unexpected TM Bad Thing exception "
			       "at %lx (msr 0x%x)\n", regs->nip, reason);
			die("Unrecoverable exception", regs, SIGABRT);
		}
	}
#endif

	/*
	 * If we took the program check in the kernel skip down to sending a
	 * SIGILL. The subsequent cases all relate to emulating instructions
	 * which we should only do for userspace. We also do not want to enable
	 * interrupts for kernel faults because that might lead to further
	 * faults, and lose the context of the original exception.
	 */
	if (!user_mode(regs))
		goto sigill;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0.  In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurrences etc. -dgibson 31/Mar/2003
	 */
	if (!emulate_math(regs))
		goto bail;

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs->nip += 4;
			emulate_single_step(regs);
			goto bail;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			goto bail;
		}
	}

sigill:
	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);

bail:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(program_check_exception);
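/*
 * Illustrative note on the trick used below: the handler ORs
 * REASON_ILLEGAL (an SRR1 program-check bit) into regs->msr, which is
 * exactly what get_reason() reads on non-4xx CPUs, and then reuses
 * program_check_exception() as if an ordinary illegal-instruction
 * program check had been taken.
 */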
/*
 * This occurs when running in hypervisor mode on POWER6 or later
 * and an illegal instruction is encountered.
 */
void emulation_assist_interrupt(struct pt_regs *regs)
{
	regs->msr |= REASON_ILLEGAL;
	program_check_exception(regs);
}
NOKPROBE_SYMBOL(emulation_assist_interrupt);

void alignment_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	int sig, code, fixed = 0;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
		goto bail;

	/* we don't implement logging of alignment exceptions */
	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		fixed = fix_alignment(regs);

	if (fixed == 1) {
		regs->nip += 4;	/* skip over emulated instruction */
		emulate_single_step(regs);
		goto bail;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		sig = SIGSEGV;
		code = SEGV_ACCERR;
	} else {
		sig = SIGBUS;
		code = BUS_ADRALN;
	}
	if (user_mode(regs))
		_exception(sig, regs, code, regs->dar);
	else
		bad_page_fault(regs, regs->dar, sig);

bail:
	exception_exit(prev_state);
}

void slb_miss_bad_addr(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (user_mode(regs))
		_exception(SIGSEGV, regs, SEGV_BNDERR, regs->dar);
	else
		bad_page_fault(regs, regs->dar, SIGSEGV);

	exception_exit(prev_state);
}

void StackOverflow(struct pt_regs *regs)
{
	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
	       current, regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}

void nonrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
	       regs->nip, regs->msr);
	debugger(regs);
	die("nonrecoverable exception", regs, SIGKILL);
}

void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);

	exception_exit(prev_state);
}

void altivec_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (user_mode(regs)) {
		/* A user program has executed an altivec instruction,
		   but this kernel doesn't support altivec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		goto bail;
	}

	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);

bail:
	exception_exit(prev_state);
}
void vsx_unavailable_exception(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/* A user program has executed a vsx instruction,
		   but this kernel doesn't support vsx. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}

#ifdef CONFIG_PPC64
static void tm_unavailable(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (user_mode(regs)) {
		current->thread.load_tm++;
		regs->msr |= MSR_TM;
		tm_enable();
		tm_restore_sprs(&current->thread);
		return;
	}
#endif
	pr_emerg("Unrecoverable TM Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
}

void facility_unavailable_exception(struct pt_regs *regs)
{
	static char *facility_strings[] = {
		[FSCR_FP_LG] = "FPU",
		[FSCR_VECVSX_LG] = "VMX/VSX",
		[FSCR_DSCR_LG] = "DSCR",
		[FSCR_PM_LG] = "PMU SPRs",
		[FSCR_BHRB_LG] = "BHRB",
		[FSCR_TM_LG] = "TM",
		[FSCR_EBB_LG] = "EBB",
		[FSCR_TAR_LG] = "TAR",
		[FSCR_MSGP_LG] = "MSGP",
		[FSCR_SCV_LG] = "SCV",
	};
	char *facility = "unknown";
	u64 value;
	u32 instword, rd;
	u8 status;
	bool hv;

	hv = (regs->trap == 0xf80);
	if (hv)
		value = mfspr(SPRN_HFSCR);
	else
		value = mfspr(SPRN_FSCR);

	status = value >> 56;
	if (status == FSCR_DSCR_LG) {
		/*
		 * User is accessing the DSCR register using the problem
		 * state only SPR number (0x03) either through a mfspr or
		 * a mtspr instruction. If it is a write attempt through
		 * a mtspr, then we set the inherit bit. This also allows
		 * the user to write or read the register directly in the
		 * future by setting via the FSCR DSCR bit. But in case it
		 * is a read DSCR attempt through a mfspr instruction, we
		 * just emulate the instruction instead. This code path will
		 * always emulate all the mfspr instructions till the user
		 * has attempted at least one mtspr instruction. This way it
		 * preserves the same behaviour when the user is accessing
		 * the DSCR through privilege level only SPR number (0x11)
		 * which is emulated through illegal instruction exception.
		 * We always leave HFSCR DSCR set.
		 */
		if (get_user(instword, (u32 __user *)(regs->nip))) {
			pr_err("Failed to fetch the user instruction\n");
			return;
		}

		/* Write into DSCR (mtspr 0x03, RS) */
		if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
				== PPC_INST_MTSPR_DSCR_USER) {
			rd = (instword >> 21) & 0x1f;
			current->thread.dscr = regs->gpr[rd];
			current->thread.dscr_inherit = 1;
			current->thread.fscr |= FSCR_DSCR;
			mtspr(SPRN_FSCR, current->thread.fscr);
		}

		/* Read from DSCR (mfspr RT, 0x03) */
		if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
				== PPC_INST_MFSPR_DSCR_USER) {
			if (emulate_instruction(regs)) {
				pr_err("DSCR based mfspr emulation failed\n");
				return;
			}
			regs->nip += 4;
			emulate_single_step(regs);
		}
		return;
	}
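	/*
	 * Illustrative note: "status" above is the interruption cause field
	 * from the top byte of the FSCR/HFSCR image (hence the value >> 56),
	 * and its value is the bit number of the offending facility, which
	 * is why the FSCR_*_LG log constants both match it and index
	 * facility_strings[]. The next special case handles TM.
	 */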
	if (status == FSCR_TM_LG) {
		/*
		 * If we're here then the hardware is TM aware because it
		 * generated an exception with FSCR_TM set.
		 *
		 * If cpu_has_feature(CPU_FTR_TM) is false, then either firmware
		 * told us not to do TM, or the kernel is not built with TM
		 * support.
		 *
		 * If both of those things are true, then userspace can spam the
		 * console by triggering the printk() below just by continually
		 * doing tbegin (or any TM instruction). So in that case just
		 * send the process a SIGILL immediately.
		 */
		if (!cpu_has_feature(CPU_FTR_TM))
			goto out;

		tm_unavailable(regs);
		return;
	}

	if ((hv || status >= 2) &&
	    (status < ARRAY_SIZE(facility_strings)) &&
	    facility_strings[status])
		facility = facility_strings[status];

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n",
		hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr);

out:
	if (user_mode(regs)) {
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	die("Unexpected facility unavailable exception", regs, SIGABRT);
}
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM

void fp_unavailable_tm(struct pt_regs *regs)
{
	/* Note:  This does not handle any kind of FP laziness. */

	TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
		 regs->nip, regs->msr);

	/* We can only have got here if the task started using FP after
	 * beginning the transaction.  So, the transactional regs are just a
	 * copy of the checkpointed ones.  But, we still need to recheckpoint
	 * as we're enabling FP for the process; it will return, abort the
	 * transaction, and probably retry but now with FP enabled.  So the
	 * checkpointed FP registers need to be loaded.
	 */
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
	/* Reclaim didn't save out any FPRs to transact_fprs. */

	/* Enable FP for the task: */
	regs->msr |= (MSR_FP | current->thread.fpexc_mode);

	/* This loads and recheckpoints the FP registers from
	 * thread.fpr[].  They will remain in registers after the
	 * checkpoint so we don't need to reload them after.
	 * If VMX is in use, the VRs now hold checkpointed values,
	 * so we don't want to load the VRs from the thread_struct.
	 */
	tm_recheckpoint(&current->thread, MSR_FP);

	/* If VMX is in use, get the transactional values back */
	if (regs->msr & MSR_VEC) {
		msr_check_and_set(MSR_VEC);
		load_vr_state(&current->thread.vr_state);
		/* At this point all the VSX state is loaded, so enable it */
		regs->msr |= MSR_VSX;
	}
}

void altivec_unavailable_tm(struct pt_regs *regs)
{
	/* See the comments in fp_unavailable_tm().  This function operates
	 * the same way.
	 */

	TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
		 "MSR=%lx\n",
		 regs->nip, regs->msr);
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
	regs->msr |= MSR_VEC;
	tm_recheckpoint(&current->thread, MSR_VEC);
	current->thread.used_vr = 1;

	if (regs->msr & MSR_FP) {
		msr_check_and_set(MSR_FP);
		load_fp_state(&current->thread.fp_state);
		regs->msr |= MSR_VSX;
	}
}
void vsx_unavailable_tm(struct pt_regs *regs)
{
	unsigned long orig_msr = regs->msr;

	/* See the comments in fp_unavailable_tm().  This works similarly,
	 * though we're loading both FP and VEC registers in here.
	 *
	 * If FP isn't in use, load FP regs.  If VEC isn't in use, load VEC
	 * regs.  Either way, set MSR_VSX.
	 */

	TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx,"
		 "MSR=%lx\n",
		 regs->nip, regs->msr);

	current->thread.used_vsr = 1;

	/* If FP and VMX are already loaded, we have all the state we need */
	if ((orig_msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) {
		regs->msr |= MSR_VSX;
		return;
	}

	/* This reclaims FP and/or VR regs if they're already enabled */
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);

	regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode |
		MSR_VSX;

	/* This loads & recheckpoints FP and VRs; but we have
	 * to be sure not to overwrite previously-valid state.
	 */
	tm_recheckpoint(&current->thread, regs->msr & ~orig_msr);

	msr_check_and_set(orig_msr & (MSR_FP | MSR_VEC));

	if (orig_msr & MSR_FP)
		load_fp_state(&current->thread.fp_state);
	if (orig_msr & MSR_VEC)
		load_vr_state(&current->thread.vr_state);
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

void performance_monitor_exception(struct pt_regs *regs)
{
	__this_cpu_inc(irq_stat.pmu_irqs);

	perf_irq(regs);
}
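/*
 * Illustrative overview of the BookE debug path that follows: hardware
 * latches the event cause in DBSR while the enables live in DBCR0-DBCR2.
 * A data-address-compare hit, for instance, flows roughly as
 *
 *	DBSR_DAC1R/W set -> handle_debug() clears the DBCR enable,
 *	sends TRAP_HWBKPT via do_send_trap(), then decides whether
 *	MSR_DE must stay on based on DBCR_ACTIVE_EVENTS().
 *
 * This summarises the code below rather than adding behaviour.
 */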
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
{
	int changed = 0;
	/*
	 * Determine the cause of the debug event, clear the
	 * event flags and send a trap to the handler. Torez
	 */
	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
			     5);
		changed |= 0x01;
	} else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
		dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
		do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT,
			     6);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC1) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
			     1);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC2) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
			     2);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC3) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
			     3);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC4) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
			     4);
		changed |= 0x01;
	}
	/*
	 * At the point this routine was called, the MSR(DE) was turned off.
	 * Check all other debug flags and see if that bit needs to be turned
	 * back on or not.
	 */
	if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
			       current->thread.debug.dbcr1))
		regs->msr |= MSR_DE;
	else
		/* Make sure the IDM flag is off */
		current->thread.debug.dbcr0 &= ~DBCR0_IDM;

	if (changed & 0x01)
		mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
}

void DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	current->thread.debug.dbsr = debug_status;

	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
	 * on server, it stops on the target of the branch. In order to simulate
	 * the server behaviour, we thus restart right away with a single step
	 * instead of stopping here when hitting a BT
	 */
	if (debug_status & DBSR_BT) {
		regs->msr &= ~MSR_DE;

		/* Disable BT */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
		/* Clear the BT event */
		mtspr(SPRN_DBSR, DBSR_BT);

		/* Do the single step trick only when coming from userspace */
		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_BT;
			current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
			regs->msr |= MSR_DE;
			return;
		}

		if (kprobe_post_handler(regs))
			return;

		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}
		if (debugger_sstep(regs))
			return;
	} else if (debug_status & DBSR_IC) { 	/* Instruction complete */
		regs->msr &= ~MSR_DE;

		/* Disable instruction completion */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
		/* Clear the instruction completion event */
		mtspr(SPRN_DBSR, DBSR_IC);

		if (kprobe_post_handler(regs))
			return;

		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}

		if (debugger_sstep(regs))
			return;

		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_IC;
			if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
					       current->thread.debug.dbcr1))
				regs->msr |= MSR_DE;
			else
				/* Make sure the IDM bit is off */
				current->thread.debug.dbcr0 &= ~DBCR0_IDM;
		}

		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
	} else
		handle_debug(regs, debug_status);
}
NOKPROBE_SYMBOL(DebugException);
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */
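/*
 * Illustrative note for the AltiVec assist handler below: when emulation
 * fails on an unrecognised instruction, the handler sets 0x10000 in the
 * last word of the saved VSCR image. That bit is the VSCR[NJ] ("non-Java")
 * mode bit, which makes subsequent vector arithmetic flush denormals to
 * zero instead of trapping for assistance again.
 */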
#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	flush_altivec_to_thread(current);

	PPC_WARN_EMULATED(altivec, regs);
	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
				   "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vr_state.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	/* We treat cache locking instructions from the user
	 * as priv ops, in the future we could try to do
	 * something smarter
	 */
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	return;
}
#endif /* CONFIG_FSL_BOOKE */

#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
	extern int do_spe_mathemu(struct pt_regs *regs);
	unsigned long spefscr;
	int fpexc_mode;
	int code = 0;
	int err;

	flush_spe_to_thread(current);

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
		code = FPE_FLTOVF;
	}
	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
		code = FPE_FLTUND;
	}
	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
		code = FPE_FLTDIV;
	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
		code = FPE_FLTINV;
	}
	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
		code = FPE_FLTRES;

	err = do_spe_mathemu(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, code, regs->nip);
	}

	return;
}

void SPEFloatingPointRoundException(struct pt_regs *regs)
{
	extern int speround_handler(struct pt_regs *regs);
	int err;

	preempt_disable();
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	preempt_enable();

	regs->nip -= 4;
	err = speround_handler(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, 0, regs->nip);
		return;
	}
}
#endif
/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
	       regs->trap, regs->nip);
	die("Unrecoverable exception", regs, SIGABRT);
}
NOKPROBE_SYMBOL(unrecoverable_exception);

#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
	return;
}

void WatchdogException(struct pt_regs *regs)
{
	printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif

/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
	       regs->gpr[1], regs->nip);
	die("Bad kernel stack pointer", regs, SIGABRT);
}
NOKPROBE_SYMBOL(kernel_bad_stack);

void __init trap_init(void)
{
}


#ifdef CONFIG_PPC_EMULATED_STATS

#define WARN_EMULATED_SETUP(type)	.type = { .name = #type }

struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
	WARN_EMULATED_SETUP(altivec),
#endif
	WARN_EMULATED_SETUP(dcba),
	WARN_EMULATED_SETUP(dcbz),
	WARN_EMULATED_SETUP(fp_pair),
	WARN_EMULATED_SETUP(isel),
	WARN_EMULATED_SETUP(mcrxr),
	WARN_EMULATED_SETUP(mfpvr),
	WARN_EMULATED_SETUP(multiple),
	WARN_EMULATED_SETUP(popcntb),
	WARN_EMULATED_SETUP(spe),
	WARN_EMULATED_SETUP(string),
	WARN_EMULATED_SETUP(sync),
	WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
	WARN_EMULATED_SETUP(math),
#endif
#ifdef CONFIG_VSX
	WARN_EMULATED_SETUP(vsx),
#endif
#ifdef CONFIG_PPC64
	WARN_EMULATED_SETUP(mfdscr),
	WARN_EMULATED_SETUP(mtdscr),
	WARN_EMULATED_SETUP(lq_stq),
#endif
};

u32 ppc_warn_emulated;

void ppc_warn_emulated_print(const char *type)
{
	pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
			    type);
}

static int __init ppc_warn_emulated_init(void)
{
	struct dentry *dir, *d;
	unsigned int i;
	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;

	if (!powerpc_debugfs_root)
		return -ENODEV;

	dir = debugfs_create_dir("emulated_instructions",
				 powerpc_debugfs_root);
	if (!dir)
		return -ENOMEM;

	d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
			       &ppc_warn_emulated);
	if (!d)
		goto fail;

	for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
		d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
				       (u32 *)&entries[i].val.counter);
		if (!d)
			goto fail;
	}

	return 0;

fail:
	debugfs_remove_recursive(dir);
	return -ENOMEM;
}

device_initcall(ppc_warn_emulated_init);

#endif /* CONFIG_PPC_EMULATED_STATS */
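/*
 * Illustrative usage of the stats above (assuming debugfs is mounted at
 * /sys/kernel/debug and powerpc_debugfs_root is the "powerpc" directory
 * there):
 *
 *	# echo 1 > /sys/kernel/debug/powerpc/emulated_instructions/do_warn
 *	# cat /sys/kernel/debug/powerpc/emulated_instructions/mfpvr
 *
 * Each counter file reports how often the corresponding instruction has
 * been emulated since boot.
 */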