1 /* 2 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 3 * Copyright 2007-2010 Freescale Semiconductor, Inc. 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License 7 * as published by the Free Software Foundation; either version 8 * 2 of the License, or (at your option) any later version. 9 * 10 * Modified by Cort Dougan (cort@cs.nmt.edu) 11 * and Paul Mackerras (paulus@samba.org) 12 */ 13 14 /* 15 * This file handles the architecture-dependent parts of hardware exceptions 16 */ 17 18 #include <linux/errno.h> 19 #include <linux/sched.h> 20 #include <linux/kernel.h> 21 #include <linux/mm.h> 22 #include <linux/stddef.h> 23 #include <linux/unistd.h> 24 #include <linux/ptrace.h> 25 #include <linux/user.h> 26 #include <linux/interrupt.h> 27 #include <linux/init.h> 28 #include <linux/module.h> 29 #include <linux/prctl.h> 30 #include <linux/delay.h> 31 #include <linux/kprobes.h> 32 #include <linux/kexec.h> 33 #include <linux/backlight.h> 34 #include <linux/bug.h> 35 #include <linux/kdebug.h> 36 #include <linux/debugfs.h> 37 #include <linux/ratelimit.h> 38 #include <linux/context_tracking.h> 39 40 #include <asm/emulated_ops.h> 41 #include <asm/pgtable.h> 42 #include <asm/uaccess.h> 43 #include <asm/io.h> 44 #include <asm/machdep.h> 45 #include <asm/rtas.h> 46 #include <asm/pmc.h> 47 #include <asm/reg.h> 48 #ifdef CONFIG_PMAC_BACKLIGHT 49 #include <asm/backlight.h> 50 #endif 51 #ifdef CONFIG_PPC64 52 #include <asm/firmware.h> 53 #include <asm/processor.h> 54 #include <asm/tm.h> 55 #endif 56 #include <asm/kexec.h> 57 #include <asm/ppc-opcode.h> 58 #include <asm/rio.h> 59 #include <asm/fadump.h> 60 #include <asm/switch_to.h> 61 #include <asm/tm.h> 62 #include <asm/debug.h> 63 #include <sysdev/fsl_pci.h> 64 65 #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) 66 int (*__debugger)(struct pt_regs *regs) __read_mostly; 67 int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly; 68 int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly; 69 int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly; 70 int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly; 71 int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly; 72 int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly; 73 74 EXPORT_SYMBOL(__debugger); 75 EXPORT_SYMBOL(__debugger_ipi); 76 EXPORT_SYMBOL(__debugger_bpt); 77 EXPORT_SYMBOL(__debugger_sstep); 78 EXPORT_SYMBOL(__debugger_iabr_match); 79 EXPORT_SYMBOL(__debugger_break_match); 80 EXPORT_SYMBOL(__debugger_fault_handler); 81 #endif 82 83 /* Transactional Memory trap debug */ 84 #ifdef TM_DEBUG_SW 85 #define TM_DEBUG(x...) printk(KERN_INFO x) 86 #else 87 #define TM_DEBUG(x...) do { } while(0) 88 #endif 89 90 /* 91 * Trap & Exception support 92 */ 93 94 #ifdef CONFIG_PMAC_BACKLIGHT 95 static void pmac_backlight_unblank(void) 96 { 97 mutex_lock(&pmac_backlight_mutex); 98 if (pmac_backlight) { 99 struct backlight_properties *props; 100 101 props = &pmac_backlight->props; 102 props->brightness = props->max_brightness; 103 props->power = FB_BLANK_UNBLANK; 104 backlight_update_status(pmac_backlight); 105 } 106 mutex_unlock(&pmac_backlight_mutex); 107 } 108 #else 109 static inline void pmac_backlight_unblank(void) { } 110 #endif 111 112 static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; 113 static int die_owner = -1; 114 static unsigned int die_nest_count; 115 static int die_counter; 116 117 static unsigned __kprobes long oops_begin(struct pt_regs *regs) 118 { 119 int cpu; 120 unsigned long flags; 121 122 if (debugger(regs)) 123 return 1; 124 125 oops_enter(); 126 127 /* racy, but better than risking deadlock. */ 128 raw_local_irq_save(flags); 129 cpu = smp_processor_id(); 130 if (!arch_spin_trylock(&die_lock)) { 131 if (cpu == die_owner) 132 /* nested oops. should stop eventually */; 133 else 134 arch_spin_lock(&die_lock); 135 } 136 die_nest_count++; 137 die_owner = cpu; 138 console_verbose(); 139 bust_spinlocks(1); 140 if (machine_is(powermac)) 141 pmac_backlight_unblank(); 142 return flags; 143 } 144 145 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, 146 int signr) 147 { 148 bust_spinlocks(0); 149 die_owner = -1; 150 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); 151 die_nest_count--; 152 oops_exit(); 153 printk("\n"); 154 if (!die_nest_count) 155 /* Nest count reaches zero, release the lock. */ 156 arch_spin_unlock(&die_lock); 157 raw_local_irq_restore(flags); 158 159 crash_fadump(regs, "die oops"); 160 161 /* 162 * A system reset (0x100) is a request to dump, so we always send 163 * it through the crashdump code. 164 */ 165 if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) { 166 crash_kexec(regs); 167 168 /* 169 * We aren't the primary crash CPU. We need to send it 170 * to a holding pattern to avoid it ending up in the panic 171 * code. 172 */ 173 crash_kexec_secondary(regs); 174 } 175 176 if (!signr) 177 return; 178 179 /* 180 * While our oops output is serialised by a spinlock, output 181 * from panic() called below can race and corrupt it. If we 182 * know we are going to panic, delay for 1 second so we have a 183 * chance to get clean backtraces from all CPUs that are oopsing. 184 */ 185 if (in_interrupt() || panic_on_oops || !current->pid || 186 is_global_init(current)) { 187 mdelay(MSEC_PER_SEC); 188 } 189 190 if (in_interrupt()) 191 panic("Fatal exception in interrupt"); 192 if (panic_on_oops) 193 panic("Fatal exception"); 194 do_exit(signr); 195 } 196 197 static int __kprobes __die(const char *str, struct pt_regs *regs, long err) 198 { 199 printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); 200 #ifdef CONFIG_PREEMPT 201 printk("PREEMPT "); 202 #endif 203 #ifdef CONFIG_SMP 204 printk("SMP NR_CPUS=%d ", NR_CPUS); 205 #endif 206 if (debug_pagealloc_enabled()) 207 printk("DEBUG_PAGEALLOC "); 208 #ifdef CONFIG_NUMA 209 printk("NUMA "); 210 #endif 211 printk("%s\n", ppc_md.name ? ppc_md.name : ""); 212 213 if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP) 214 return 1; 215 216 print_modules(); 217 show_regs(regs); 218 219 return 0; 220 } 221 222 void die(const char *str, struct pt_regs *regs, long err) 223 { 224 unsigned long flags = oops_begin(regs); 225 226 if (__die(str, regs, err)) 227 err = 0; 228 oops_end(flags, regs, err); 229 } 230 231 void user_single_step_siginfo(struct task_struct *tsk, 232 struct pt_regs *regs, siginfo_t *info) 233 { 234 memset(info, 0, sizeof(*info)); 235 info->si_signo = SIGTRAP; 236 info->si_code = TRAP_TRACE; 237 info->si_addr = (void __user *)regs->nip; 238 } 239 240 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) 241 { 242 siginfo_t info; 243 const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \ 244 "at %08lx nip %08lx lr %08lx code %x\n"; 245 const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \ 246 "at %016lx nip %016lx lr %016lx code %x\n"; 247 248 if (!user_mode(regs)) { 249 die("Exception in kernel mode", regs, signr); 250 return; 251 } 252 253 if (show_unhandled_signals && unhandled_signal(current, signr)) { 254 printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32, 255 current->comm, current->pid, signr, 256 addr, regs->nip, regs->link, code); 257 } 258 259 if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs)) 260 local_irq_enable(); 261 262 current->thread.trap_nr = code; 263 memset(&info, 0, sizeof(info)); 264 info.si_signo = signr; 265 info.si_code = code; 266 info.si_addr = (void __user *) addr; 267 force_sig_info(signr, &info, current); 268 } 269 270 #ifdef CONFIG_PPC64 271 void system_reset_exception(struct pt_regs *regs) 272 { 273 /* See if any machine dependent calls */ 274 if (ppc_md.system_reset_exception) { 275 if (ppc_md.system_reset_exception(regs)) 276 return; 277 } 278 279 die("System Reset", regs, SIGABRT); 280 281 /* Must die if the interrupt is not recoverable */ 282 if (!(regs->msr & MSR_RI)) 283 panic("Unrecoverable System Reset"); 284 285 /* What should we do here? We could issue a shutdown or hard reset. */ 286 } 287 288 /* 289 * This function is called in real mode. Strictly no printk's please. 290 * 291 * regs->nip and regs->msr contains srr0 and ssr1. 292 */ 293 long machine_check_early(struct pt_regs *regs) 294 { 295 long handled = 0; 296 297 __this_cpu_inc(irq_stat.mce_exceptions); 298 299 add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); 300 301 if (cur_cpu_spec && cur_cpu_spec->machine_check_early) 302 handled = cur_cpu_spec->machine_check_early(regs); 303 return handled; 304 } 305 306 long hmi_exception_realmode(struct pt_regs *regs) 307 { 308 __this_cpu_inc(irq_stat.hmi_exceptions); 309 310 if (ppc_md.hmi_exception_early) 311 ppc_md.hmi_exception_early(regs); 312 313 return 0; 314 } 315 316 #endif 317 318 /* 319 * I/O accesses can cause machine checks on powermacs. 320 * Check if the NIP corresponds to the address of a sync 321 * instruction for which there is an entry in the exception 322 * table. 323 * Note that the 601 only takes a machine check on TEA 324 * (transfer error ack) signal assertion, and does not 325 * set any of the top 16 bits of SRR1. 326 * -- paulus. 327 */ 328 static inline int check_io_access(struct pt_regs *regs) 329 { 330 #ifdef CONFIG_PPC32 331 unsigned long msr = regs->msr; 332 const struct exception_table_entry *entry; 333 unsigned int *nip = (unsigned int *)regs->nip; 334 335 if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000))) 336 && (entry = search_exception_tables(regs->nip)) != NULL) { 337 /* 338 * Check that it's a sync instruction, or somewhere 339 * in the twi; isync; nop sequence that inb/inw/inl uses. 340 * As the address is in the exception table 341 * we should be able to read the instr there. 342 * For the debug message, we look at the preceding 343 * load or store. 344 */ 345 if (*nip == 0x60000000) /* nop */ 346 nip -= 2; 347 else if (*nip == 0x4c00012c) /* isync */ 348 --nip; 349 if (*nip == 0x7c0004ac || (*nip >> 26) == 3) { 350 /* sync or twi */ 351 unsigned int rb; 352 353 --nip; 354 rb = (*nip >> 11) & 0x1f; 355 printk(KERN_DEBUG "%s bad port %lx at %p\n", 356 (*nip & 0x100)? "OUT to": "IN from", 357 regs->gpr[rb] - _IO_BASE, nip); 358 regs->msr |= MSR_RI; 359 regs->nip = entry->fixup; 360 return 1; 361 } 362 } 363 #endif /* CONFIG_PPC32 */ 364 return 0; 365 } 366 367 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 368 /* On 4xx, the reason for the machine check or program exception 369 is in the ESR. */ 370 #define get_reason(regs) ((regs)->dsisr) 371 #ifndef CONFIG_FSL_BOOKE 372 #define get_mc_reason(regs) ((regs)->dsisr) 373 #else 374 #define get_mc_reason(regs) (mfspr(SPRN_MCSR)) 375 #endif 376 #define REASON_FP ESR_FP 377 #define REASON_ILLEGAL (ESR_PIL | ESR_PUO) 378 #define REASON_PRIVILEGED ESR_PPR 379 #define REASON_TRAP ESR_PTR 380 381 /* single-step stuff */ 382 #define single_stepping(regs) (current->thread.debug.dbcr0 & DBCR0_IC) 383 #define clear_single_step(regs) (current->thread.debug.dbcr0 &= ~DBCR0_IC) 384 385 #else 386 /* On non-4xx, the reason for the machine check or program 387 exception is in the MSR. */ 388 #define get_reason(regs) ((regs)->msr) 389 #define get_mc_reason(regs) ((regs)->msr) 390 #define REASON_TM 0x200000 391 #define REASON_FP 0x100000 392 #define REASON_ILLEGAL 0x80000 393 #define REASON_PRIVILEGED 0x40000 394 #define REASON_TRAP 0x20000 395 396 #define single_stepping(regs) ((regs)->msr & MSR_SE) 397 #define clear_single_step(regs) ((regs)->msr &= ~MSR_SE) 398 #endif 399 400 #if defined(CONFIG_4xx) 401 int machine_check_4xx(struct pt_regs *regs) 402 { 403 unsigned long reason = get_mc_reason(regs); 404 405 if (reason & ESR_IMCP) { 406 printk("Instruction"); 407 mtspr(SPRN_ESR, reason & ~ESR_IMCP); 408 } else 409 printk("Data"); 410 printk(" machine check in kernel mode.\n"); 411 412 return 0; 413 } 414 415 int machine_check_440A(struct pt_regs *regs) 416 { 417 unsigned long reason = get_mc_reason(regs); 418 419 printk("Machine check in kernel mode.\n"); 420 if (reason & ESR_IMCP){ 421 printk("Instruction Synchronous Machine Check exception\n"); 422 mtspr(SPRN_ESR, reason & ~ESR_IMCP); 423 } 424 else { 425 u32 mcsr = mfspr(SPRN_MCSR); 426 if (mcsr & MCSR_IB) 427 printk("Instruction Read PLB Error\n"); 428 if (mcsr & MCSR_DRB) 429 printk("Data Read PLB Error\n"); 430 if (mcsr & MCSR_DWB) 431 printk("Data Write PLB Error\n"); 432 if (mcsr & MCSR_TLBP) 433 printk("TLB Parity Error\n"); 434 if (mcsr & MCSR_ICP){ 435 flush_instruction_cache(); 436 printk("I-Cache Parity Error\n"); 437 } 438 if (mcsr & MCSR_DCSP) 439 printk("D-Cache Search Parity Error\n"); 440 if (mcsr & MCSR_DCFP) 441 printk("D-Cache Flush Parity Error\n"); 442 if (mcsr & MCSR_IMPE) 443 printk("Machine Check exception is imprecise\n"); 444 445 /* Clear MCSR */ 446 mtspr(SPRN_MCSR, mcsr); 447 } 448 return 0; 449 } 450 451 int machine_check_47x(struct pt_regs *regs) 452 { 453 unsigned long reason = get_mc_reason(regs); 454 u32 mcsr; 455 456 printk(KERN_ERR "Machine check in kernel mode.\n"); 457 if (reason & ESR_IMCP) { 458 printk(KERN_ERR 459 "Instruction Synchronous Machine Check exception\n"); 460 mtspr(SPRN_ESR, reason & ~ESR_IMCP); 461 return 0; 462 } 463 mcsr = mfspr(SPRN_MCSR); 464 if (mcsr & MCSR_IB) 465 printk(KERN_ERR "Instruction Read PLB Error\n"); 466 if (mcsr & MCSR_DRB) 467 printk(KERN_ERR "Data Read PLB Error\n"); 468 if (mcsr & MCSR_DWB) 469 printk(KERN_ERR "Data Write PLB Error\n"); 470 if (mcsr & MCSR_TLBP) 471 printk(KERN_ERR "TLB Parity Error\n"); 472 if (mcsr & MCSR_ICP) { 473 flush_instruction_cache(); 474 printk(KERN_ERR "I-Cache Parity Error\n"); 475 } 476 if (mcsr & MCSR_DCSP) 477 printk(KERN_ERR "D-Cache Search Parity Error\n"); 478 if (mcsr & PPC47x_MCSR_GPR) 479 printk(KERN_ERR "GPR Parity Error\n"); 480 if (mcsr & PPC47x_MCSR_FPR) 481 printk(KERN_ERR "FPR Parity Error\n"); 482 if (mcsr & PPC47x_MCSR_IPR) 483 printk(KERN_ERR "Machine Check exception is imprecise\n"); 484 485 /* Clear MCSR */ 486 mtspr(SPRN_MCSR, mcsr); 487 488 return 0; 489 } 490 #elif defined(CONFIG_E500) 491 int machine_check_e500mc(struct pt_regs *regs) 492 { 493 unsigned long mcsr = mfspr(SPRN_MCSR); 494 unsigned long reason = mcsr; 495 int recoverable = 1; 496 497 if (reason & MCSR_LD) { 498 recoverable = fsl_rio_mcheck_exception(regs); 499 if (recoverable == 1) 500 goto silent_out; 501 } 502 503 printk("Machine check in kernel mode.\n"); 504 printk("Caused by (from MCSR=%lx): ", reason); 505 506 if (reason & MCSR_MCP) 507 printk("Machine Check Signal\n"); 508 509 if (reason & MCSR_ICPERR) { 510 printk("Instruction Cache Parity Error\n"); 511 512 /* 513 * This is recoverable by invalidating the i-cache. 514 */ 515 mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI); 516 while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI) 517 ; 518 519 /* 520 * This will generally be accompanied by an instruction 521 * fetch error report -- only treat MCSR_IF as fatal 522 * if it wasn't due to an L1 parity error. 523 */ 524 reason &= ~MCSR_IF; 525 } 526 527 if (reason & MCSR_DCPERR_MC) { 528 printk("Data Cache Parity Error\n"); 529 530 /* 531 * In write shadow mode we auto-recover from the error, but it 532 * may still get logged and cause a machine check. We should 533 * only treat the non-write shadow case as non-recoverable. 534 */ 535 if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS)) 536 recoverable = 0; 537 } 538 539 if (reason & MCSR_L2MMU_MHIT) { 540 printk("Hit on multiple TLB entries\n"); 541 recoverable = 0; 542 } 543 544 if (reason & MCSR_NMI) 545 printk("Non-maskable interrupt\n"); 546 547 if (reason & MCSR_IF) { 548 printk("Instruction Fetch Error Report\n"); 549 recoverable = 0; 550 } 551 552 if (reason & MCSR_LD) { 553 printk("Load Error Report\n"); 554 recoverable = 0; 555 } 556 557 if (reason & MCSR_ST) { 558 printk("Store Error Report\n"); 559 recoverable = 0; 560 } 561 562 if (reason & MCSR_LDG) { 563 printk("Guarded Load Error Report\n"); 564 recoverable = 0; 565 } 566 567 if (reason & MCSR_TLBSYNC) 568 printk("Simultaneous tlbsync operations\n"); 569 570 if (reason & MCSR_BSL2_ERR) { 571 printk("Level 2 Cache Error\n"); 572 recoverable = 0; 573 } 574 575 if (reason & MCSR_MAV) { 576 u64 addr; 577 578 addr = mfspr(SPRN_MCAR); 579 addr |= (u64)mfspr(SPRN_MCARU) << 32; 580 581 printk("Machine Check %s Address: %#llx\n", 582 reason & MCSR_MEA ? "Effective" : "Physical", addr); 583 } 584 585 silent_out: 586 mtspr(SPRN_MCSR, mcsr); 587 return mfspr(SPRN_MCSR) == 0 && recoverable; 588 } 589 590 int machine_check_e500(struct pt_regs *regs) 591 { 592 unsigned long reason = get_mc_reason(regs); 593 594 if (reason & MCSR_BUS_RBERR) { 595 if (fsl_rio_mcheck_exception(regs)) 596 return 1; 597 if (fsl_pci_mcheck_exception(regs)) 598 return 1; 599 } 600 601 printk("Machine check in kernel mode.\n"); 602 printk("Caused by (from MCSR=%lx): ", reason); 603 604 if (reason & MCSR_MCP) 605 printk("Machine Check Signal\n"); 606 if (reason & MCSR_ICPERR) 607 printk("Instruction Cache Parity Error\n"); 608 if (reason & MCSR_DCP_PERR) 609 printk("Data Cache Push Parity Error\n"); 610 if (reason & MCSR_DCPERR) 611 printk("Data Cache Parity Error\n"); 612 if (reason & MCSR_BUS_IAERR) 613 printk("Bus - Instruction Address Error\n"); 614 if (reason & MCSR_BUS_RAERR) 615 printk("Bus - Read Address Error\n"); 616 if (reason & MCSR_BUS_WAERR) 617 printk("Bus - Write Address Error\n"); 618 if (reason & MCSR_BUS_IBERR) 619 printk("Bus - Instruction Data Error\n"); 620 if (reason & MCSR_BUS_RBERR) 621 printk("Bus - Read Data Bus Error\n"); 622 if (reason & MCSR_BUS_WBERR) 623 printk("Bus - Write Data Bus Error\n"); 624 if (reason & MCSR_BUS_IPERR) 625 printk("Bus - Instruction Parity Error\n"); 626 if (reason & MCSR_BUS_RPERR) 627 printk("Bus - Read Parity Error\n"); 628 629 return 0; 630 } 631 632 int machine_check_generic(struct pt_regs *regs) 633 { 634 return 0; 635 } 636 #elif defined(CONFIG_E200) 637 int machine_check_e200(struct pt_regs *regs) 638 { 639 unsigned long reason = get_mc_reason(regs); 640 641 printk("Machine check in kernel mode.\n"); 642 printk("Caused by (from MCSR=%lx): ", reason); 643 644 if (reason & MCSR_MCP) 645 printk("Machine Check Signal\n"); 646 if (reason & MCSR_CP_PERR) 647 printk("Cache Push Parity Error\n"); 648 if (reason & MCSR_CPERR) 649 printk("Cache Parity Error\n"); 650 if (reason & MCSR_EXCP_ERR) 651 printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n"); 652 if (reason & MCSR_BUS_IRERR) 653 printk("Bus - Read Bus Error on instruction fetch\n"); 654 if (reason & MCSR_BUS_DRERR) 655 printk("Bus - Read Bus Error on data load\n"); 656 if (reason & MCSR_BUS_WRERR) 657 printk("Bus - Write Bus Error on buffered store or cache line push\n"); 658 659 return 0; 660 } 661 #else 662 int machine_check_generic(struct pt_regs *regs) 663 { 664 unsigned long reason = get_mc_reason(regs); 665 666 printk("Machine check in kernel mode.\n"); 667 printk("Caused by (from SRR1=%lx): ", reason); 668 switch (reason & 0x601F0000) { 669 case 0x80000: 670 printk("Machine check signal\n"); 671 break; 672 case 0: /* for 601 */ 673 case 0x40000: 674 case 0x140000: /* 7450 MSS error and TEA */ 675 printk("Transfer error ack signal\n"); 676 break; 677 case 0x20000: 678 printk("Data parity error signal\n"); 679 break; 680 case 0x10000: 681 printk("Address parity error signal\n"); 682 break; 683 case 0x20000000: 684 printk("L1 Data Cache error\n"); 685 break; 686 case 0x40000000: 687 printk("L1 Instruction Cache error\n"); 688 break; 689 case 0x00100000: 690 printk("L2 data cache parity error\n"); 691 break; 692 default: 693 printk("Unknown values in msr\n"); 694 } 695 return 0; 696 } 697 #endif /* everything else */ 698 699 void machine_check_exception(struct pt_regs *regs) 700 { 701 enum ctx_state prev_state = exception_enter(); 702 int recover = 0; 703 704 __this_cpu_inc(irq_stat.mce_exceptions); 705 706 /* See if any machine dependent calls. In theory, we would want 707 * to call the CPU first, and call the ppc_md. one if the CPU 708 * one returns a positive number. However there is existing code 709 * that assumes the board gets a first chance, so let's keep it 710 * that way for now and fix things later. --BenH. 711 */ 712 if (ppc_md.machine_check_exception) 713 recover = ppc_md.machine_check_exception(regs); 714 else if (cur_cpu_spec->machine_check) 715 recover = cur_cpu_spec->machine_check(regs); 716 717 if (recover > 0) 718 goto bail; 719 720 #if defined(CONFIG_8xx) && defined(CONFIG_PCI) 721 /* the qspan pci read routines can cause machine checks -- Cort 722 * 723 * yuck !!! that totally needs to go away ! There are better ways 724 * to deal with that than having a wart in the mcheck handler. 725 * -- BenH 726 */ 727 bad_page_fault(regs, regs->dar, SIGBUS); 728 goto bail; 729 #endif 730 731 if (debugger_fault_handler(regs)) 732 goto bail; 733 734 if (check_io_access(regs)) 735 goto bail; 736 737 die("Machine check", regs, SIGBUS); 738 739 /* Must die if the interrupt is not recoverable */ 740 if (!(regs->msr & MSR_RI)) 741 panic("Unrecoverable Machine check"); 742 743 bail: 744 exception_exit(prev_state); 745 } 746 747 void SMIException(struct pt_regs *regs) 748 { 749 die("System Management Interrupt", regs, SIGABRT); 750 } 751 752 void handle_hmi_exception(struct pt_regs *regs) 753 { 754 struct pt_regs *old_regs; 755 756 old_regs = set_irq_regs(regs); 757 irq_enter(); 758 759 if (ppc_md.handle_hmi_exception) 760 ppc_md.handle_hmi_exception(regs); 761 762 irq_exit(); 763 set_irq_regs(old_regs); 764 } 765 766 void unknown_exception(struct pt_regs *regs) 767 { 768 enum ctx_state prev_state = exception_enter(); 769 770 printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", 771 regs->nip, regs->msr, regs->trap); 772 773 _exception(SIGTRAP, regs, 0, 0); 774 775 exception_exit(prev_state); 776 } 777 778 void instruction_breakpoint_exception(struct pt_regs *regs) 779 { 780 enum ctx_state prev_state = exception_enter(); 781 782 if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5, 783 5, SIGTRAP) == NOTIFY_STOP) 784 goto bail; 785 if (debugger_iabr_match(regs)) 786 goto bail; 787 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); 788 789 bail: 790 exception_exit(prev_state); 791 } 792 793 void RunModeException(struct pt_regs *regs) 794 { 795 _exception(SIGTRAP, regs, 0, 0); 796 } 797 798 void __kprobes single_step_exception(struct pt_regs *regs) 799 { 800 enum ctx_state prev_state = exception_enter(); 801 802 clear_single_step(regs); 803 804 if (notify_die(DIE_SSTEP, "single_step", regs, 5, 805 5, SIGTRAP) == NOTIFY_STOP) 806 goto bail; 807 if (debugger_sstep(regs)) 808 goto bail; 809 810 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); 811 812 bail: 813 exception_exit(prev_state); 814 } 815 816 /* 817 * After we have successfully emulated an instruction, we have to 818 * check if the instruction was being single-stepped, and if so, 819 * pretend we got a single-step exception. This was pointed out 820 * by Kumar Gala. -- paulus 821 */ 822 static void emulate_single_step(struct pt_regs *regs) 823 { 824 if (single_stepping(regs)) 825 single_step_exception(regs); 826 } 827 828 static inline int __parse_fpscr(unsigned long fpscr) 829 { 830 int ret = 0; 831 832 /* Invalid operation */ 833 if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX)) 834 ret = FPE_FLTINV; 835 836 /* Overflow */ 837 else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX)) 838 ret = FPE_FLTOVF; 839 840 /* Underflow */ 841 else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX)) 842 ret = FPE_FLTUND; 843 844 /* Divide by zero */ 845 else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX)) 846 ret = FPE_FLTDIV; 847 848 /* Inexact result */ 849 else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX)) 850 ret = FPE_FLTRES; 851 852 return ret; 853 } 854 855 static void parse_fpe(struct pt_regs *regs) 856 { 857 int code = 0; 858 859 flush_fp_to_thread(current); 860 861 code = __parse_fpscr(current->thread.fp_state.fpscr); 862 863 _exception(SIGFPE, regs, code, regs->nip); 864 } 865 866 /* 867 * Illegal instruction emulation support. Originally written to 868 * provide the PVR to user applications using the mfspr rd, PVR. 869 * Return non-zero if we can't emulate, or -EFAULT if the associated 870 * memory access caused an access fault. Return zero on success. 871 * 872 * There are a couple of ways to do this, either "decode" the instruction 873 * or directly match lots of bits. In this case, matching lots of 874 * bits is faster and easier. 875 * 876 */ 877 static int emulate_string_inst(struct pt_regs *regs, u32 instword) 878 { 879 u8 rT = (instword >> 21) & 0x1f; 880 u8 rA = (instword >> 16) & 0x1f; 881 u8 NB_RB = (instword >> 11) & 0x1f; 882 u32 num_bytes; 883 unsigned long EA; 884 int pos = 0; 885 886 /* Early out if we are an invalid form of lswx */ 887 if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX) 888 if ((rT == rA) || (rT == NB_RB)) 889 return -EINVAL; 890 891 EA = (rA == 0) ? 0 : regs->gpr[rA]; 892 893 switch (instword & PPC_INST_STRING_MASK) { 894 case PPC_INST_LSWX: 895 case PPC_INST_STSWX: 896 EA += NB_RB; 897 num_bytes = regs->xer & 0x7f; 898 break; 899 case PPC_INST_LSWI: 900 case PPC_INST_STSWI: 901 num_bytes = (NB_RB == 0) ? 32 : NB_RB; 902 break; 903 default: 904 return -EINVAL; 905 } 906 907 while (num_bytes != 0) 908 { 909 u8 val; 910 u32 shift = 8 * (3 - (pos & 0x3)); 911 912 /* if process is 32-bit, clear upper 32 bits of EA */ 913 if ((regs->msr & MSR_64BIT) == 0) 914 EA &= 0xFFFFFFFF; 915 916 switch ((instword & PPC_INST_STRING_MASK)) { 917 case PPC_INST_LSWX: 918 case PPC_INST_LSWI: 919 if (get_user(val, (u8 __user *)EA)) 920 return -EFAULT; 921 /* first time updating this reg, 922 * zero it out */ 923 if (pos == 0) 924 regs->gpr[rT] = 0; 925 regs->gpr[rT] |= val << shift; 926 break; 927 case PPC_INST_STSWI: 928 case PPC_INST_STSWX: 929 val = regs->gpr[rT] >> shift; 930 if (put_user(val, (u8 __user *)EA)) 931 return -EFAULT; 932 break; 933 } 934 /* move EA to next address */ 935 EA += 1; 936 num_bytes--; 937 938 /* manage our position within the register */ 939 if (++pos == 4) { 940 pos = 0; 941 if (++rT == 32) 942 rT = 0; 943 } 944 } 945 946 return 0; 947 } 948 949 static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword) 950 { 951 u32 ra,rs; 952 unsigned long tmp; 953 954 ra = (instword >> 16) & 0x1f; 955 rs = (instword >> 21) & 0x1f; 956 957 tmp = regs->gpr[rs]; 958 tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL); 959 tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL); 960 tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL; 961 regs->gpr[ra] = tmp; 962 963 return 0; 964 } 965 966 static int emulate_isel(struct pt_regs *regs, u32 instword) 967 { 968 u8 rT = (instword >> 21) & 0x1f; 969 u8 rA = (instword >> 16) & 0x1f; 970 u8 rB = (instword >> 11) & 0x1f; 971 u8 BC = (instword >> 6) & 0x1f; 972 u8 bit; 973 unsigned long tmp; 974 975 tmp = (rA == 0) ? 0 : regs->gpr[rA]; 976 bit = (regs->ccr >> (31 - BC)) & 0x1; 977 978 regs->gpr[rT] = bit ? tmp : regs->gpr[rB]; 979 980 return 0; 981 } 982 983 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 984 static inline bool tm_abort_check(struct pt_regs *regs, int cause) 985 { 986 /* If we're emulating a load/store in an active transaction, we cannot 987 * emulate it as the kernel operates in transaction suspended context. 988 * We need to abort the transaction. This creates a persistent TM 989 * abort so tell the user what caused it with a new code. 990 */ 991 if (MSR_TM_TRANSACTIONAL(regs->msr)) { 992 tm_enable(); 993 tm_abort(cause); 994 return true; 995 } 996 return false; 997 } 998 #else 999 static inline bool tm_abort_check(struct pt_regs *regs, int reason) 1000 { 1001 return false; 1002 } 1003 #endif 1004 1005 static int emulate_instruction(struct pt_regs *regs) 1006 { 1007 u32 instword; 1008 u32 rd; 1009 1010 if (!user_mode(regs)) 1011 return -EINVAL; 1012 CHECK_FULL_REGS(regs); 1013 1014 if (get_user(instword, (u32 __user *)(regs->nip))) 1015 return -EFAULT; 1016 1017 /* Emulate the mfspr rD, PVR. */ 1018 if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) { 1019 PPC_WARN_EMULATED(mfpvr, regs); 1020 rd = (instword >> 21) & 0x1f; 1021 regs->gpr[rd] = mfspr(SPRN_PVR); 1022 return 0; 1023 } 1024 1025 /* Emulating the dcba insn is just a no-op. */ 1026 if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) { 1027 PPC_WARN_EMULATED(dcba, regs); 1028 return 0; 1029 } 1030 1031 /* Emulate the mcrxr insn. */ 1032 if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) { 1033 int shift = (instword >> 21) & 0x1c; 1034 unsigned long msk = 0xf0000000UL >> shift; 1035 1036 PPC_WARN_EMULATED(mcrxr, regs); 1037 regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk); 1038 regs->xer &= ~0xf0000000UL; 1039 return 0; 1040 } 1041 1042 /* Emulate load/store string insn. */ 1043 if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) { 1044 if (tm_abort_check(regs, 1045 TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT)) 1046 return -EINVAL; 1047 PPC_WARN_EMULATED(string, regs); 1048 return emulate_string_inst(regs, instword); 1049 } 1050 1051 /* Emulate the popcntb (Population Count Bytes) instruction. */ 1052 if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) { 1053 PPC_WARN_EMULATED(popcntb, regs); 1054 return emulate_popcntb_inst(regs, instword); 1055 } 1056 1057 /* Emulate isel (Integer Select) instruction */ 1058 if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) { 1059 PPC_WARN_EMULATED(isel, regs); 1060 return emulate_isel(regs, instword); 1061 } 1062 1063 /* Emulate sync instruction variants */ 1064 if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) { 1065 PPC_WARN_EMULATED(sync, regs); 1066 asm volatile("sync"); 1067 return 0; 1068 } 1069 1070 #ifdef CONFIG_PPC64 1071 /* Emulate the mfspr rD, DSCR. */ 1072 if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) == 1073 PPC_INST_MFSPR_DSCR_USER) || 1074 ((instword & PPC_INST_MFSPR_DSCR_MASK) == 1075 PPC_INST_MFSPR_DSCR)) && 1076 cpu_has_feature(CPU_FTR_DSCR)) { 1077 PPC_WARN_EMULATED(mfdscr, regs); 1078 rd = (instword >> 21) & 0x1f; 1079 regs->gpr[rd] = mfspr(SPRN_DSCR); 1080 return 0; 1081 } 1082 /* Emulate the mtspr DSCR, rD. */ 1083 if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) == 1084 PPC_INST_MTSPR_DSCR_USER) || 1085 ((instword & PPC_INST_MTSPR_DSCR_MASK) == 1086 PPC_INST_MTSPR_DSCR)) && 1087 cpu_has_feature(CPU_FTR_DSCR)) { 1088 PPC_WARN_EMULATED(mtdscr, regs); 1089 rd = (instword >> 21) & 0x1f; 1090 current->thread.dscr = regs->gpr[rd]; 1091 current->thread.dscr_inherit = 1; 1092 mtspr(SPRN_DSCR, current->thread.dscr); 1093 return 0; 1094 } 1095 #endif 1096 1097 return -EINVAL; 1098 } 1099 1100 int is_valid_bugaddr(unsigned long addr) 1101 { 1102 return is_kernel_addr(addr); 1103 } 1104 1105 #ifdef CONFIG_MATH_EMULATION 1106 static int emulate_math(struct pt_regs *regs) 1107 { 1108 int ret; 1109 extern int do_mathemu(struct pt_regs *regs); 1110 1111 ret = do_mathemu(regs); 1112 if (ret >= 0) 1113 PPC_WARN_EMULATED(math, regs); 1114 1115 switch (ret) { 1116 case 0: 1117 emulate_single_step(regs); 1118 return 0; 1119 case 1: { 1120 int code = 0; 1121 code = __parse_fpscr(current->thread.fp_state.fpscr); 1122 _exception(SIGFPE, regs, code, regs->nip); 1123 return 0; 1124 } 1125 case -EFAULT: 1126 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 1127 return 0; 1128 } 1129 1130 return -1; 1131 } 1132 #else 1133 static inline int emulate_math(struct pt_regs *regs) { return -1; } 1134 #endif 1135 1136 void __kprobes program_check_exception(struct pt_regs *regs) 1137 { 1138 enum ctx_state prev_state = exception_enter(); 1139 unsigned int reason = get_reason(regs); 1140 1141 /* We can now get here via a FP Unavailable exception if the core 1142 * has no FPU, in that case the reason flags will be 0 */ 1143 1144 if (reason & REASON_FP) { 1145 /* IEEE FP exception */ 1146 parse_fpe(regs); 1147 goto bail; 1148 } 1149 if (reason & REASON_TRAP) { 1150 unsigned long bugaddr; 1151 /* Debugger is first in line to stop recursive faults in 1152 * rcu_lock, notify_die, or atomic_notifier_call_chain */ 1153 if (debugger_bpt(regs)) 1154 goto bail; 1155 1156 /* trap exception */ 1157 if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP) 1158 == NOTIFY_STOP) 1159 goto bail; 1160 1161 bugaddr = regs->nip; 1162 /* 1163 * Fixup bugaddr for BUG_ON() in real mode 1164 */ 1165 if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR)) 1166 bugaddr += PAGE_OFFSET; 1167 1168 if (!(regs->msr & MSR_PR) && /* not user-mode */ 1169 report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) { 1170 regs->nip += 4; 1171 goto bail; 1172 } 1173 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); 1174 goto bail; 1175 } 1176 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1177 if (reason & REASON_TM) { 1178 /* This is a TM "Bad Thing Exception" program check. 1179 * This occurs when: 1180 * - An rfid/hrfid/mtmsrd attempts to cause an illegal 1181 * transition in TM states. 1182 * - A trechkpt is attempted when transactional. 1183 * - A treclaim is attempted when non transactional. 1184 * - A tend is illegally attempted. 1185 * - writing a TM SPR when transactional. 1186 */ 1187 if (!user_mode(regs) && 1188 report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { 1189 regs->nip += 4; 1190 goto bail; 1191 } 1192 /* If usermode caused this, it's done something illegal and 1193 * gets a SIGILL slap on the wrist. We call it an illegal 1194 * operand to distinguish from the instruction just being bad 1195 * (e.g. executing a 'tend' on a CPU without TM!); it's an 1196 * illegal /placement/ of a valid instruction. 1197 */ 1198 if (user_mode(regs)) { 1199 _exception(SIGILL, regs, ILL_ILLOPN, regs->nip); 1200 goto bail; 1201 } else { 1202 printk(KERN_EMERG "Unexpected TM Bad Thing exception " 1203 "at %lx (msr 0x%x)\n", regs->nip, reason); 1204 die("Unrecoverable exception", regs, SIGABRT); 1205 } 1206 } 1207 #endif 1208 1209 /* 1210 * If we took the program check in the kernel skip down to sending a 1211 * SIGILL. The subsequent cases all relate to emulating instructions 1212 * which we should only do for userspace. We also do not want to enable 1213 * interrupts for kernel faults because that might lead to further 1214 * faults, and loose the context of the original exception. 1215 */ 1216 if (!user_mode(regs)) 1217 goto sigill; 1218 1219 /* We restore the interrupt state now */ 1220 if (!arch_irq_disabled_regs(regs)) 1221 local_irq_enable(); 1222 1223 /* (reason & REASON_ILLEGAL) would be the obvious thing here, 1224 * but there seems to be a hardware bug on the 405GP (RevD) 1225 * that means ESR is sometimes set incorrectly - either to 1226 * ESR_DST (!?) or 0. In the process of chasing this with the 1227 * hardware people - not sure if it can happen on any illegal 1228 * instruction or only on FP instructions, whether there is a 1229 * pattern to occurrences etc. -dgibson 31/Mar/2003 1230 */ 1231 if (!emulate_math(regs)) 1232 goto bail; 1233 1234 /* Try to emulate it if we should. */ 1235 if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) { 1236 switch (emulate_instruction(regs)) { 1237 case 0: 1238 regs->nip += 4; 1239 emulate_single_step(regs); 1240 goto bail; 1241 case -EFAULT: 1242 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 1243 goto bail; 1244 } 1245 } 1246 1247 sigill: 1248 if (reason & REASON_PRIVILEGED) 1249 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); 1250 else 1251 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1252 1253 bail: 1254 exception_exit(prev_state); 1255 } 1256 1257 /* 1258 * This occurs when running in hypervisor mode on POWER6 or later 1259 * and an illegal instruction is encountered. 1260 */ 1261 void __kprobes emulation_assist_interrupt(struct pt_regs *regs) 1262 { 1263 regs->msr |= REASON_ILLEGAL; 1264 program_check_exception(regs); 1265 } 1266 1267 void alignment_exception(struct pt_regs *regs) 1268 { 1269 enum ctx_state prev_state = exception_enter(); 1270 int sig, code, fixed = 0; 1271 1272 /* We restore the interrupt state now */ 1273 if (!arch_irq_disabled_regs(regs)) 1274 local_irq_enable(); 1275 1276 if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT)) 1277 goto bail; 1278 1279 /* we don't implement logging of alignment exceptions */ 1280 if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) 1281 fixed = fix_alignment(regs); 1282 1283 if (fixed == 1) { 1284 regs->nip += 4; /* skip over emulated instruction */ 1285 emulate_single_step(regs); 1286 goto bail; 1287 } 1288 1289 /* Operand address was bad */ 1290 if (fixed == -EFAULT) { 1291 sig = SIGSEGV; 1292 code = SEGV_ACCERR; 1293 } else { 1294 sig = SIGBUS; 1295 code = BUS_ADRALN; 1296 } 1297 if (user_mode(regs)) 1298 _exception(sig, regs, code, regs->dar); 1299 else 1300 bad_page_fault(regs, regs->dar, sig); 1301 1302 bail: 1303 exception_exit(prev_state); 1304 } 1305 1306 void StackOverflow(struct pt_regs *regs) 1307 { 1308 printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n", 1309 current, regs->gpr[1]); 1310 debugger(regs); 1311 show_regs(regs); 1312 panic("kernel stack overflow"); 1313 } 1314 1315 void nonrecoverable_exception(struct pt_regs *regs) 1316 { 1317 printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n", 1318 regs->nip, regs->msr); 1319 debugger(regs); 1320 die("nonrecoverable exception", regs, SIGKILL); 1321 } 1322 1323 void kernel_fp_unavailable_exception(struct pt_regs *regs) 1324 { 1325 enum ctx_state prev_state = exception_enter(); 1326 1327 printk(KERN_EMERG "Unrecoverable FP Unavailable Exception " 1328 "%lx at %lx\n", regs->trap, regs->nip); 1329 die("Unrecoverable FP Unavailable Exception", regs, SIGABRT); 1330 1331 exception_exit(prev_state); 1332 } 1333 1334 void altivec_unavailable_exception(struct pt_regs *regs) 1335 { 1336 enum ctx_state prev_state = exception_enter(); 1337 1338 if (user_mode(regs)) { 1339 /* A user program has executed an altivec instruction, 1340 but this kernel doesn't support altivec. */ 1341 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1342 goto bail; 1343 } 1344 1345 printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception " 1346 "%lx at %lx\n", regs->trap, regs->nip); 1347 die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT); 1348 1349 bail: 1350 exception_exit(prev_state); 1351 } 1352 1353 void vsx_unavailable_exception(struct pt_regs *regs) 1354 { 1355 if (user_mode(regs)) { 1356 /* A user program has executed an vsx instruction, 1357 but this kernel doesn't support vsx. */ 1358 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1359 return; 1360 } 1361 1362 printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception " 1363 "%lx at %lx\n", regs->trap, regs->nip); 1364 die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT); 1365 } 1366 1367 #ifdef CONFIG_PPC64 1368 void facility_unavailable_exception(struct pt_regs *regs) 1369 { 1370 static char *facility_strings[] = { 1371 [FSCR_FP_LG] = "FPU", 1372 [FSCR_VECVSX_LG] = "VMX/VSX", 1373 [FSCR_DSCR_LG] = "DSCR", 1374 [FSCR_PM_LG] = "PMU SPRs", 1375 [FSCR_BHRB_LG] = "BHRB", 1376 [FSCR_TM_LG] = "TM", 1377 [FSCR_EBB_LG] = "EBB", 1378 [FSCR_TAR_LG] = "TAR", 1379 }; 1380 char *facility = "unknown"; 1381 u64 value; 1382 u32 instword, rd; 1383 u8 status; 1384 bool hv; 1385 1386 hv = (regs->trap == 0xf80); 1387 if (hv) 1388 value = mfspr(SPRN_HFSCR); 1389 else 1390 value = mfspr(SPRN_FSCR); 1391 1392 status = value >> 56; 1393 if (status == FSCR_DSCR_LG) { 1394 /* 1395 * User is accessing the DSCR register using the problem 1396 * state only SPR number (0x03) either through a mfspr or 1397 * a mtspr instruction. If it is a write attempt through 1398 * a mtspr, then we set the inherit bit. This also allows 1399 * the user to write or read the register directly in the 1400 * future by setting via the FSCR DSCR bit. But in case it 1401 * is a read DSCR attempt through a mfspr instruction, we 1402 * just emulate the instruction instead. This code path will 1403 * always emulate all the mfspr instructions till the user 1404 * has attempted at least one mtspr instruction. This way it 1405 * preserves the same behaviour when the user is accessing 1406 * the DSCR through privilege level only SPR number (0x11) 1407 * which is emulated through illegal instruction exception. 1408 * We always leave HFSCR DSCR set. 1409 */ 1410 if (get_user(instword, (u32 __user *)(regs->nip))) { 1411 pr_err("Failed to fetch the user instruction\n"); 1412 return; 1413 } 1414 1415 /* Write into DSCR (mtspr 0x03, RS) */ 1416 if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK) 1417 == PPC_INST_MTSPR_DSCR_USER) { 1418 rd = (instword >> 21) & 0x1f; 1419 current->thread.dscr = regs->gpr[rd]; 1420 current->thread.dscr_inherit = 1; 1421 mtspr(SPRN_FSCR, value | FSCR_DSCR); 1422 } 1423 1424 /* Read from DSCR (mfspr RT, 0x03) */ 1425 if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK) 1426 == PPC_INST_MFSPR_DSCR_USER) { 1427 if (emulate_instruction(regs)) { 1428 pr_err("DSCR based mfspr emulation failed\n"); 1429 return; 1430 } 1431 regs->nip += 4; 1432 emulate_single_step(regs); 1433 } 1434 return; 1435 } 1436 1437 if ((status < ARRAY_SIZE(facility_strings)) && 1438 facility_strings[status]) 1439 facility = facility_strings[status]; 1440 1441 /* We restore the interrupt state now */ 1442 if (!arch_irq_disabled_regs(regs)) 1443 local_irq_enable(); 1444 1445 pr_err_ratelimited( 1446 "%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n", 1447 hv ? "Hypervisor " : "", facility, regs->nip, regs->msr); 1448 1449 if (user_mode(regs)) { 1450 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1451 return; 1452 } 1453 1454 die("Unexpected facility unavailable exception", regs, SIGABRT); 1455 } 1456 #endif 1457 1458 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1459 1460 void fp_unavailable_tm(struct pt_regs *regs) 1461 { 1462 /* Note: This does not handle any kind of FP laziness. */ 1463 1464 TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n", 1465 regs->nip, regs->msr); 1466 1467 /* We can only have got here if the task started using FP after 1468 * beginning the transaction. So, the transactional regs are just a 1469 * copy of the checkpointed ones. But, we still need to recheckpoint 1470 * as we're enabling FP for the process; it will return, abort the 1471 * transaction, and probably retry but now with FP enabled. So the 1472 * checkpointed FP registers need to be loaded. 1473 */ 1474 tm_reclaim_current(TM_CAUSE_FAC_UNAV); 1475 /* Reclaim didn't save out any FPRs to transact_fprs. */ 1476 1477 /* Enable FP for the task: */ 1478 regs->msr |= (MSR_FP | current->thread.fpexc_mode); 1479 1480 /* This loads and recheckpoints the FP registers from 1481 * thread.fpr[]. They will remain in registers after the 1482 * checkpoint so we don't need to reload them after. 1483 * If VMX is in use, the VRs now hold checkpointed values, 1484 * so we don't want to load the VRs from the thread_struct. 1485 */ 1486 tm_recheckpoint(¤t->thread, MSR_FP); 1487 1488 /* If VMX is in use, get the transactional values back */ 1489 if (regs->msr & MSR_VEC) { 1490 do_load_up_transact_altivec(¤t->thread); 1491 /* At this point all the VSX state is loaded, so enable it */ 1492 regs->msr |= MSR_VSX; 1493 } 1494 } 1495 1496 void altivec_unavailable_tm(struct pt_regs *regs) 1497 { 1498 /* See the comments in fp_unavailable_tm(). This function operates 1499 * the same way. 1500 */ 1501 1502 TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx," 1503 "MSR=%lx\n", 1504 regs->nip, regs->msr); 1505 tm_reclaim_current(TM_CAUSE_FAC_UNAV); 1506 regs->msr |= MSR_VEC; 1507 tm_recheckpoint(¤t->thread, MSR_VEC); 1508 current->thread.used_vr = 1; 1509 1510 if (regs->msr & MSR_FP) { 1511 do_load_up_transact_fpu(¤t->thread); 1512 regs->msr |= MSR_VSX; 1513 } 1514 } 1515 1516 void vsx_unavailable_tm(struct pt_regs *regs) 1517 { 1518 unsigned long orig_msr = regs->msr; 1519 1520 /* See the comments in fp_unavailable_tm(). This works similarly, 1521 * though we're loading both FP and VEC registers in here. 1522 * 1523 * If FP isn't in use, load FP regs. If VEC isn't in use, load VEC 1524 * regs. Either way, set MSR_VSX. 1525 */ 1526 1527 TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx," 1528 "MSR=%lx\n", 1529 regs->nip, regs->msr); 1530 1531 current->thread.used_vsr = 1; 1532 1533 /* If FP and VMX are already loaded, we have all the state we need */ 1534 if ((orig_msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) { 1535 regs->msr |= MSR_VSX; 1536 return; 1537 } 1538 1539 /* This reclaims FP and/or VR regs if they're already enabled */ 1540 tm_reclaim_current(TM_CAUSE_FAC_UNAV); 1541 1542 regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode | 1543 MSR_VSX; 1544 1545 /* This loads & recheckpoints FP and VRs; but we have 1546 * to be sure not to overwrite previously-valid state. 1547 */ 1548 tm_recheckpoint(¤t->thread, regs->msr & ~orig_msr); 1549 1550 if (orig_msr & MSR_FP) 1551 do_load_up_transact_fpu(¤t->thread); 1552 if (orig_msr & MSR_VEC) 1553 do_load_up_transact_altivec(¤t->thread); 1554 } 1555 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 1556 1557 void performance_monitor_exception(struct pt_regs *regs) 1558 { 1559 __this_cpu_inc(irq_stat.pmu_irqs); 1560 1561 perf_irq(regs); 1562 } 1563 1564 #ifdef CONFIG_8xx 1565 void SoftwareEmulation(struct pt_regs *regs) 1566 { 1567 CHECK_FULL_REGS(regs); 1568 1569 if (!user_mode(regs)) { 1570 debugger(regs); 1571 die("Kernel Mode Unimplemented Instruction or SW FPU Emulation", 1572 regs, SIGFPE); 1573 } 1574 1575 if (!emulate_math(regs)) 1576 return; 1577 1578 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1579 } 1580 #endif /* CONFIG_8xx */ 1581 1582 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 1583 static void handle_debug(struct pt_regs *regs, unsigned long debug_status) 1584 { 1585 int changed = 0; 1586 /* 1587 * Determine the cause of the debug event, clear the 1588 * event flags and send a trap to the handler. Torez 1589 */ 1590 if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) { 1591 dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W); 1592 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE 1593 current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE; 1594 #endif 1595 do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT, 1596 5); 1597 changed |= 0x01; 1598 } else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) { 1599 dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W); 1600 do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT, 1601 6); 1602 changed |= 0x01; 1603 } else if (debug_status & DBSR_IAC1) { 1604 current->thread.debug.dbcr0 &= ~DBCR0_IAC1; 1605 dbcr_iac_range(current) &= ~DBCR_IAC12MODE; 1606 do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT, 1607 1); 1608 changed |= 0x01; 1609 } else if (debug_status & DBSR_IAC2) { 1610 current->thread.debug.dbcr0 &= ~DBCR0_IAC2; 1611 do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT, 1612 2); 1613 changed |= 0x01; 1614 } else if (debug_status & DBSR_IAC3) { 1615 current->thread.debug.dbcr0 &= ~DBCR0_IAC3; 1616 dbcr_iac_range(current) &= ~DBCR_IAC34MODE; 1617 do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT, 1618 3); 1619 changed |= 0x01; 1620 } else if (debug_status & DBSR_IAC4) { 1621 current->thread.debug.dbcr0 &= ~DBCR0_IAC4; 1622 do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT, 1623 4); 1624 changed |= 0x01; 1625 } 1626 /* 1627 * At the point this routine was called, the MSR(DE) was turned off. 1628 * Check all other debug flags and see if that bit needs to be turned 1629 * back on or not. 1630 */ 1631 if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0, 1632 current->thread.debug.dbcr1)) 1633 regs->msr |= MSR_DE; 1634 else 1635 /* Make sure the IDM flag is off */ 1636 current->thread.debug.dbcr0 &= ~DBCR0_IDM; 1637 1638 if (changed & 0x01) 1639 mtspr(SPRN_DBCR0, current->thread.debug.dbcr0); 1640 } 1641 1642 void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status) 1643 { 1644 current->thread.debug.dbsr = debug_status; 1645 1646 /* Hack alert: On BookE, Branch Taken stops on the branch itself, while 1647 * on server, it stops on the target of the branch. In order to simulate 1648 * the server behaviour, we thus restart right away with a single step 1649 * instead of stopping here when hitting a BT 1650 */ 1651 if (debug_status & DBSR_BT) { 1652 regs->msr &= ~MSR_DE; 1653 1654 /* Disable BT */ 1655 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT); 1656 /* Clear the BT event */ 1657 mtspr(SPRN_DBSR, DBSR_BT); 1658 1659 /* Do the single step trick only when coming from userspace */ 1660 if (user_mode(regs)) { 1661 current->thread.debug.dbcr0 &= ~DBCR0_BT; 1662 current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC; 1663 regs->msr |= MSR_DE; 1664 return; 1665 } 1666 1667 if (notify_die(DIE_SSTEP, "block_step", regs, 5, 1668 5, SIGTRAP) == NOTIFY_STOP) { 1669 return; 1670 } 1671 if (debugger_sstep(regs)) 1672 return; 1673 } else if (debug_status & DBSR_IC) { /* Instruction complete */ 1674 regs->msr &= ~MSR_DE; 1675 1676 /* Disable instruction completion */ 1677 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC); 1678 /* Clear the instruction completion event */ 1679 mtspr(SPRN_DBSR, DBSR_IC); 1680 1681 if (notify_die(DIE_SSTEP, "single_step", regs, 5, 1682 5, SIGTRAP) == NOTIFY_STOP) { 1683 return; 1684 } 1685 1686 if (debugger_sstep(regs)) 1687 return; 1688 1689 if (user_mode(regs)) { 1690 current->thread.debug.dbcr0 &= ~DBCR0_IC; 1691 if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0, 1692 current->thread.debug.dbcr1)) 1693 regs->msr |= MSR_DE; 1694 else 1695 /* Make sure the IDM bit is off */ 1696 current->thread.debug.dbcr0 &= ~DBCR0_IDM; 1697 } 1698 1699 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); 1700 } else 1701 handle_debug(regs, debug_status); 1702 } 1703 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ 1704 1705 #if !defined(CONFIG_TAU_INT) 1706 void TAUException(struct pt_regs *regs) 1707 { 1708 printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n", 1709 regs->nip, regs->msr, regs->trap, print_tainted()); 1710 } 1711 #endif /* CONFIG_INT_TAU */ 1712 1713 #ifdef CONFIG_ALTIVEC 1714 void altivec_assist_exception(struct pt_regs *regs) 1715 { 1716 int err; 1717 1718 if (!user_mode(regs)) { 1719 printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode" 1720 " at %lx\n", regs->nip); 1721 die("Kernel VMX/Altivec assist exception", regs, SIGILL); 1722 } 1723 1724 flush_altivec_to_thread(current); 1725 1726 PPC_WARN_EMULATED(altivec, regs); 1727 err = emulate_altivec(regs); 1728 if (err == 0) { 1729 regs->nip += 4; /* skip emulated instruction */ 1730 emulate_single_step(regs); 1731 return; 1732 } 1733 1734 if (err == -EFAULT) { 1735 /* got an error reading the instruction */ 1736 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); 1737 } else { 1738 /* didn't recognize the instruction */ 1739 /* XXX quick hack for now: set the non-Java bit in the VSCR */ 1740 printk_ratelimited(KERN_ERR "Unrecognized altivec instruction " 1741 "in %s at %lx\n", current->comm, regs->nip); 1742 current->thread.vr_state.vscr.u[3] |= 0x10000; 1743 } 1744 } 1745 #endif /* CONFIG_ALTIVEC */ 1746 1747 #ifdef CONFIG_FSL_BOOKE 1748 void CacheLockingException(struct pt_regs *regs, unsigned long address, 1749 unsigned long error_code) 1750 { 1751 /* We treat cache locking instructions from the user 1752 * as priv ops, in the future we could try to do 1753 * something smarter 1754 */ 1755 if (error_code & (ESR_DLK|ESR_ILK)) 1756 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); 1757 return; 1758 } 1759 #endif /* CONFIG_FSL_BOOKE */ 1760 1761 #ifdef CONFIG_SPE 1762 void SPEFloatingPointException(struct pt_regs *regs) 1763 { 1764 extern int do_spe_mathemu(struct pt_regs *regs); 1765 unsigned long spefscr; 1766 int fpexc_mode; 1767 int code = 0; 1768 int err; 1769 1770 flush_spe_to_thread(current); 1771 1772 spefscr = current->thread.spefscr; 1773 fpexc_mode = current->thread.fpexc_mode; 1774 1775 if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) { 1776 code = FPE_FLTOVF; 1777 } 1778 else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) { 1779 code = FPE_FLTUND; 1780 } 1781 else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV)) 1782 code = FPE_FLTDIV; 1783 else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) { 1784 code = FPE_FLTINV; 1785 } 1786 else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES)) 1787 code = FPE_FLTRES; 1788 1789 err = do_spe_mathemu(regs); 1790 if (err == 0) { 1791 regs->nip += 4; /* skip emulated instruction */ 1792 emulate_single_step(regs); 1793 return; 1794 } 1795 1796 if (err == -EFAULT) { 1797 /* got an error reading the instruction */ 1798 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); 1799 } else if (err == -EINVAL) { 1800 /* didn't recognize the instruction */ 1801 printk(KERN_ERR "unrecognized spe instruction " 1802 "in %s at %lx\n", current->comm, regs->nip); 1803 } else { 1804 _exception(SIGFPE, regs, code, regs->nip); 1805 } 1806 1807 return; 1808 } 1809 1810 void SPEFloatingPointRoundException(struct pt_regs *regs) 1811 { 1812 extern int speround_handler(struct pt_regs *regs); 1813 int err; 1814 1815 preempt_disable(); 1816 if (regs->msr & MSR_SPE) 1817 giveup_spe(current); 1818 preempt_enable(); 1819 1820 regs->nip -= 4; 1821 err = speround_handler(regs); 1822 if (err == 0) { 1823 regs->nip += 4; /* skip emulated instruction */ 1824 emulate_single_step(regs); 1825 return; 1826 } 1827 1828 if (err == -EFAULT) { 1829 /* got an error reading the instruction */ 1830 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); 1831 } else if (err == -EINVAL) { 1832 /* didn't recognize the instruction */ 1833 printk(KERN_ERR "unrecognized spe instruction " 1834 "in %s at %lx\n", current->comm, regs->nip); 1835 } else { 1836 _exception(SIGFPE, regs, 0, regs->nip); 1837 return; 1838 } 1839 } 1840 #endif 1841 1842 /* 1843 * We enter here if we get an unrecoverable exception, that is, one 1844 * that happened at a point where the RI (recoverable interrupt) bit 1845 * in the MSR is 0. This indicates that SRR0/1 are live, and that 1846 * we therefore lost state by taking this exception. 1847 */ 1848 void unrecoverable_exception(struct pt_regs *regs) 1849 { 1850 printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n", 1851 regs->trap, regs->nip); 1852 die("Unrecoverable exception", regs, SIGABRT); 1853 } 1854 1855 #if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x) 1856 /* 1857 * Default handler for a Watchdog exception, 1858 * spins until a reboot occurs 1859 */ 1860 void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs) 1861 { 1862 /* Generic WatchdogHandler, implement your own */ 1863 mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE)); 1864 return; 1865 } 1866 1867 void WatchdogException(struct pt_regs *regs) 1868 { 1869 printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n"); 1870 WatchdogHandler(regs); 1871 } 1872 #endif 1873 1874 /* 1875 * We enter here if we discover during exception entry that we are 1876 * running in supervisor mode with a userspace value in the stack pointer. 1877 */ 1878 void kernel_bad_stack(struct pt_regs *regs) 1879 { 1880 printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n", 1881 regs->gpr[1], regs->nip); 1882 die("Bad kernel stack pointer", regs, SIGABRT); 1883 } 1884 1885 void __init trap_init(void) 1886 { 1887 } 1888 1889 1890 #ifdef CONFIG_PPC_EMULATED_STATS 1891 1892 #define WARN_EMULATED_SETUP(type) .type = { .name = #type } 1893 1894 struct ppc_emulated ppc_emulated = { 1895 #ifdef CONFIG_ALTIVEC 1896 WARN_EMULATED_SETUP(altivec), 1897 #endif 1898 WARN_EMULATED_SETUP(dcba), 1899 WARN_EMULATED_SETUP(dcbz), 1900 WARN_EMULATED_SETUP(fp_pair), 1901 WARN_EMULATED_SETUP(isel), 1902 WARN_EMULATED_SETUP(mcrxr), 1903 WARN_EMULATED_SETUP(mfpvr), 1904 WARN_EMULATED_SETUP(multiple), 1905 WARN_EMULATED_SETUP(popcntb), 1906 WARN_EMULATED_SETUP(spe), 1907 WARN_EMULATED_SETUP(string), 1908 WARN_EMULATED_SETUP(sync), 1909 WARN_EMULATED_SETUP(unaligned), 1910 #ifdef CONFIG_MATH_EMULATION 1911 WARN_EMULATED_SETUP(math), 1912 #endif 1913 #ifdef CONFIG_VSX 1914 WARN_EMULATED_SETUP(vsx), 1915 #endif 1916 #ifdef CONFIG_PPC64 1917 WARN_EMULATED_SETUP(mfdscr), 1918 WARN_EMULATED_SETUP(mtdscr), 1919 WARN_EMULATED_SETUP(lq_stq), 1920 #endif 1921 }; 1922 1923 u32 ppc_warn_emulated; 1924 1925 void ppc_warn_emulated_print(const char *type) 1926 { 1927 pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm, 1928 type); 1929 } 1930 1931 static int __init ppc_warn_emulated_init(void) 1932 { 1933 struct dentry *dir, *d; 1934 unsigned int i; 1935 struct ppc_emulated_entry *entries = (void *)&ppc_emulated; 1936 1937 if (!powerpc_debugfs_root) 1938 return -ENODEV; 1939 1940 dir = debugfs_create_dir("emulated_instructions", 1941 powerpc_debugfs_root); 1942 if (!dir) 1943 return -ENOMEM; 1944 1945 d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir, 1946 &ppc_warn_emulated); 1947 if (!d) 1948 goto fail; 1949 1950 for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) { 1951 d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir, 1952 (u32 *)&entries[i].val.counter); 1953 if (!d) 1954 goto fail; 1955 } 1956 1957 return 0; 1958 1959 fail: 1960 debugfs_remove_recursive(dir); 1961 return -ENOMEM; 1962 } 1963 1964 device_initcall(ppc_warn_emulated_init); 1965 1966 #endif /* CONFIG_PPC_EMULATED_STATS */ 1967