/*
 * Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
 * Copyright 2007-2010 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Modified by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras (paulus@samba.org)
 */

/*
 * This file handles the architecture-dependent parts of hardware exceptions
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/debugfs.h>
#include <linux/ratelimit.h>

#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#ifdef CONFIG_PPC32
#include <asm/reg.h>
#endif
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/rio.h>

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_dabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_dabr_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

/*
 * Trap & Exception support
 */

#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
	mutex_lock(&pmac_backlight_mutex);
	if (pmac_backlight) {
		struct backlight_properties *props;

		props = &pmac_backlight->props;
		props->brightness = props->max_brightness;
		props->power = FB_BLANK_UNBLANK;
		backlight_update_status(pmac_backlight);
	}
	mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif

int die(const char *str, struct pt_regs *regs, long err)
{
	static struct {
		raw_spinlock_t lock;
		u32 lock_owner;
		int lock_owner_depth;
	} die = {
		.lock =			__RAW_SPIN_LOCK_UNLOCKED(die.lock),
		.lock_owner =		-1,
		.lock_owner_depth =	0
	};
	static int die_counter;
	unsigned long flags;

	if (debugger(regs))
		return 1;

	oops_enter();

	if (die.lock_owner != raw_smp_processor_id()) {
		console_verbose();
		raw_spin_lock_irqsave(&die.lock, flags);
		die.lock_owner = smp_processor_id();
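		/* This CPU now owns the lock; restart the recursion count. */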
		die.lock_owner_depth = 0;
		bust_spinlocks(1);
		if (machine_is(powermac))
			pmac_backlight_unblank();
	} else {
		local_save_flags(flags);
	}

	if (++die.lock_owner_depth < 3) {
		printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
		printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
		printk("SMP NR_CPUS=%d ", NR_CPUS);
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
		printk("DEBUG_PAGEALLOC ");
#endif
#ifdef CONFIG_NUMA
		printk("NUMA ");
#endif
		printk("%s\n", ppc_md.name ? ppc_md.name : "");

		if (notify_die(DIE_OOPS, str, regs, err, 255,
			       SIGSEGV) == NOTIFY_STOP)
			return 1;

		print_modules();
		show_regs(regs);
	} else {
		printk("Recursive die() failure, output suppressed\n");
	}

	bust_spinlocks(0);
	die.lock_owner = -1;
	add_taint(TAINT_DIE);
	oops_exit();
	printk("\n");
	raw_spin_unlock_irqrestore(&die.lock, flags);

	/*
	 * A system reset (0x100) is a request to dump, so we always send
	 * it through the crashdump code.
	 */
	if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) {
		crash_kexec(regs);

		/*
		 * We aren't the primary crash CPU. We need to send it
		 * to a holding pattern to avoid it ending up in the panic
		 * code.
		 */
		crash_kexec_secondary(regs);
	}

	/*
	 * While our oops output is serialised by a spinlock, output
	 * from panic() called below can race and corrupt it. If we
	 * know we are going to panic, delay for 1 second so we have a
	 * chance to get clean backtraces from all CPUs that are oopsing.
	 */
	if (in_interrupt() || panic_on_oops || !current->pid ||
	    is_global_init(current)) {
		mdelay(MSEC_PER_SEC);
	}

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	do_exit(err);

	return 0;
}

void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs, siginfo_t *info)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGTRAP;
	info->si_code = TRAP_TRACE;
	info->si_addr = (void __user *)regs->nip;
}

void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	siginfo_t info;
	const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
		"at %08lx nip %08lx lr %08lx code %x\n";
	const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
		"at %016lx nip %016lx lr %016lx code %x\n";

	if (!user_mode(regs)) {
		if (die("Exception in kernel mode", regs, signr))
			return;
	} else if (show_unhandled_signals &&
		   unhandled_signal(current, signr)) {
		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
				   current->comm, current->pid, signr,
				   addr, regs->nip, regs->link, code);
	}

	memset(&info, 0, sizeof(info));
	info.si_signo = signr;
	info.si_code = code;
	info.si_addr = (void __user *) addr;
	force_sig_info(signr, &info, current);
}

#ifdef CONFIG_PPC64
void system_reset_exception(struct pt_regs *regs)
{
	/* See if any machine dependent calls */
	if (ppc_md.system_reset_exception) {
		if (ppc_md.system_reset_exception(regs))
			return;
	}

	die("System Reset", regs, SIGABRT);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable System Reset");

	/* What should we do here? We could issue a shutdown or hard reset. */
}
#endif

/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == 0x60000000)		/* nop */
			nip -= 2;
		else if (*nip == 0x4c00012c)	/* isync */
			--nip;
		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
			/* sync or twi */
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100) ? "OUT to" : "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			regs->msr |= MSR_RI;
			regs->nip = entry->fixup;
			return 1;
		}
	}
#endif /* CONFIG_PPC32 */
	return 0;
}

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)	((regs)->dsisr)
#ifndef CONFIG_FSL_BOOKE
#define get_mc_reason(regs)	((regs)->dsisr)
#else
#define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
#endif
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR

/* single-step stuff */
#define single_stepping(regs)	(current->thread.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.dbcr0 &= ~DBCR0_IC)

#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define get_mc_reason(regs)	((regs)->msr)
#define REASON_FP		0x100000
#define REASON_ILLEGAL		0x80000
#define REASON_PRIVILEGED	0x40000
#define REASON_TRAP		0x20000

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#endif

#if defined(CONFIG_4xx)
int machine_check_4xx(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	if (reason & ESR_IMCP) {
		printk("Instruction");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	} else
		printk("Data");
	printk(" machine check in kernel mode.\n");

	return 0;
}

int machine_check_440A(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	if (reason & ESR_IMCP) {
		printk("Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	} else {
		u32 mcsr = mfspr(SPRN_MCSR);
		if (mcsr & MCSR_IB)
			printk("Instruction Read PLB Error\n");
		if (mcsr & MCSR_DRB)
			printk("Data Read PLB Error\n");
		if (mcsr & MCSR_DWB)
			printk("Data Write PLB Error\n");
		if (mcsr & MCSR_TLBP)
			printk("TLB Parity Error\n");
		if (mcsr & MCSR_ICP) {
			flush_instruction_cache();
			printk("I-Cache Parity Error\n");
		}
		if (mcsr & MCSR_DCSP)
			printk("D-Cache Search Parity Error\n");
		if (mcsr & MCSR_DCFP)
			printk("D-Cache Flush Parity Error\n");
		if (mcsr & MCSR_IMPE)
			printk("Machine Check exception is imprecise\n");

		/* Clear MCSR */
		mtspr(SPRN_MCSR, mcsr);
	}
	return 0;
}

int machine_check_47x(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);
	u32 mcsr;

	printk(KERN_ERR "Machine check in kernel mode.\n");
	if (reason & ESR_IMCP) {
		printk(KERN_ERR
		       "Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
		return 0;
	}
	mcsr = mfspr(SPRN_MCSR);
	if (mcsr & MCSR_IB)
		printk(KERN_ERR "Instruction Read PLB Error\n");
	if (mcsr & MCSR_DRB)
		printk(KERN_ERR "Data Read PLB Error\n");
	if (mcsr & MCSR_DWB)
		printk(KERN_ERR "Data Write PLB Error\n");
	if (mcsr & MCSR_TLBP)
		printk(KERN_ERR "TLB Parity Error\n");
	if (mcsr & MCSR_ICP) {
		flush_instruction_cache();
		printk(KERN_ERR "I-Cache Parity Error\n");
	}
	if (mcsr & MCSR_DCSP)
		printk(KERN_ERR "D-Cache Search Parity Error\n");
	if (mcsr & PPC47x_MCSR_GPR)
		printk(KERN_ERR "GPR Parity Error\n");
	if (mcsr & PPC47x_MCSR_FPR)
		printk(KERN_ERR "FPR Parity Error\n");
	if (mcsr & PPC47x_MCSR_IPR)
		printk(KERN_ERR "Machine Check exception is imprecise\n");

	/* Clear MCSR */
	mtspr(SPRN_MCSR, mcsr);

	return 0;
}
#elif defined(CONFIG_E500)
int machine_check_e500mc(struct pt_regs *regs)
{
	unsigned long mcsr = mfspr(SPRN_MCSR);
	unsigned long reason = mcsr;
	int recoverable = 1;

	if (reason & MCSR_LD) {
		recoverable = fsl_rio_mcheck_exception(regs);
		if (recoverable == 1)
			goto silent_out;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");

	if (reason & MCSR_ICPERR) {
		printk("Instruction Cache Parity Error\n");

		/*
		 * This is recoverable by invalidating the i-cache.
		 */
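		/*
		 * L1CSR1[ICFI] flash-invalidates the i-cache; hardware
		 * clears the bit again once the invalidate completes,
		 * hence the busy-wait below.
		 */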
		mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
		while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
			;

		/*
		 * This will generally be accompanied by an instruction
		 * fetch error report -- only treat MCSR_IF as fatal
		 * if it wasn't due to an L1 parity error.
		 */
		reason &= ~MCSR_IF;
	}

	if (reason & MCSR_DCPERR_MC) {
		printk("Data Cache Parity Error\n");

		/*
		 * In write shadow mode we auto-recover from the error, but it
		 * may still get logged and cause a machine check.  We should
		 * only treat the non-write shadow case as non-recoverable.
		 */
		if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
			recoverable = 0;
	}

	if (reason & MCSR_L2MMU_MHIT) {
		printk("Hit on multiple TLB entries\n");
		recoverable = 0;
	}

	if (reason & MCSR_NMI)
		printk("Non-maskable interrupt\n");

	if (reason & MCSR_IF) {
		printk("Instruction Fetch Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LD) {
		printk("Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_ST) {
		printk("Store Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LDG) {
		printk("Guarded Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_TLBSYNC)
		printk("Simultaneous tlbsync operations\n");

	if (reason & MCSR_BSL2_ERR) {
		printk("Level 2 Cache Error\n");
		recoverable = 0;
	}

	if (reason & MCSR_MAV) {
		u64 addr;

		addr = mfspr(SPRN_MCAR);
		addr |= (u64)mfspr(SPRN_MCARU) << 32;

		printk("Machine Check %s Address: %#llx\n",
		       reason & MCSR_MEA ? "Effective" : "Physical", addr);
	}

silent_out:
	mtspr(SPRN_MCSR, mcsr);
	return mfspr(SPRN_MCSR) == 0 && recoverable;
}

int machine_check_e500(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	if (reason & MCSR_BUS_RBERR) {
		if (fsl_rio_mcheck_exception(regs))
			return 1;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_ICPERR)
		printk("Instruction Cache Parity Error\n");
	if (reason & MCSR_DCP_PERR)
		printk("Data Cache Push Parity Error\n");
	if (reason & MCSR_DCPERR)
		printk("Data Cache Parity Error\n");
	if (reason & MCSR_BUS_IAERR)
		printk("Bus - Instruction Address Error\n");
	if (reason & MCSR_BUS_RAERR)
		printk("Bus - Read Address Error\n");
	if (reason & MCSR_BUS_WAERR)
		printk("Bus - Write Address Error\n");
	if (reason & MCSR_BUS_IBERR)
		printk("Bus - Instruction Data Error\n");
	if (reason & MCSR_BUS_RBERR)
		printk("Bus - Read Data Bus Error\n");
	if (reason & MCSR_BUS_WBERR)
		printk("Bus - Write Data Bus Error\n");
	if (reason & MCSR_BUS_IPERR)
		printk("Bus - Instruction Parity Error\n");
	if (reason & MCSR_BUS_RPERR)
		printk("Bus - Read Parity Error\n");

	return 0;
}

int machine_check_generic(struct pt_regs *regs)
{
	return 0;
}
#elif defined(CONFIG_E200)
int machine_check_e200(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_CP_PERR)
		printk("Cache Push Parity Error\n");
	if (reason & MCSR_CPERR)
		printk("Cache Parity Error\n");
	if (reason & MCSR_EXCP_ERR)
		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
	if (reason & MCSR_BUS_IRERR)
		printk("Bus - Read Bus Error on instruction fetch\n");
	if (reason & MCSR_BUS_DRERR)
		printk("Bus - Read Bus Error on data load\n");
	if (reason & MCSR_BUS_WRERR)
		printk("Bus - Write Bus Error on buffered store or cache line push\n");

	return 0;
}
#else
int machine_check_generic(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from SRR1=%lx): ", reason);
	switch (reason & 0x601F0000) {
	case 0x80000:
		printk("Machine check signal\n");
		break;
	case 0:		/* for 601 */
	case 0x40000:
	case 0x140000:	/* 7450 MSS error and TEA */
		printk("Transfer error ack signal\n");
		break;
	case 0x20000:
		printk("Data parity error signal\n");
		break;
	case 0x10000:
		printk("Address parity error signal\n");
		break;
	case 0x20000000:
		printk("L1 Data Cache error\n");
		break;
	case 0x40000000:
		printk("L1 Instruction Cache error\n");
		break;
	case 0x00100000:
		printk("L2 data cache parity error\n");
		break;
	default:
		printk("Unknown values in msr\n");
	}
	return 0;
}
#endif /* everything else */

void machine_check_exception(struct pt_regs *regs)
{
	int recover = 0;

	__get_cpu_var(irq_stat).mce_exceptions++;

	/* See if any machine dependent calls. In theory, we would want
	 * to call the CPU first, and call the ppc_md. one if the CPU
	 * one returns a positive number. However there is existing code
	 * that assumes the board gets a first chance, so let's keep it
	 * that way for now and fix things later. --BenH.
	 */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);
	else if (cur_cpu_spec->machine_check)
		recover = cur_cpu_spec->machine_check(regs);

	if (recover > 0)
		return;

#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
	/* the qspan pci read routines can cause machine checks -- Cort
	 *
	 * yuck !!! that totally needs to go away ! There are better ways
	 * to deal with that than having a wart in the mcheck handler.
	 * -- BenH
	 */
	bad_page_fault(regs, regs->dar, SIGBUS);
	return;
#endif

	if (debugger_fault_handler(regs))
		return;

	if (check_io_access(regs))
		return;

	die("Machine check", regs, SIGBUS);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable Machine check");
}

void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}

void unknown_exception(struct pt_regs *regs)
{
	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, 0, 0);
}

void instruction_breakpoint_exception(struct pt_regs *regs)
{
	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		return;
	if (debugger_iabr_match(regs))
		return;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
}

void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, 0, 0);
}

void __kprobes single_step_exception(struct pt_regs *regs)
{
	clear_single_step(regs);

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		return;
	if (debugger_sstep(regs))
		return;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
}

/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception.  This was pointed out
 * by Kumar Gala.  -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs))
		single_step_exception(regs);
}

static inline int __parse_fpscr(unsigned long fpscr)
{
	int ret = 0;

	/* Invalid operation */
	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
		ret = FPE_FLTINV;

	/* Overflow */
	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
		ret = FPE_FLTOVF;

	/* Underflow */
	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
		ret = FPE_FLTUND;

	/* Divide by zero */
	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
		ret = FPE_FLTDIV;

	/* Inexact result */
	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
		ret = FPE_FLTRES;

	return ret;
}

static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;

	flush_fp_to_thread(current);

	code = __parse_fpscr(current->thread.fpscr.val);

	_exception(SIGFPE, regs, code, regs->nip);
}

/*
 * Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 */
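/*
 * A sketch of the bit-matching approach: "mfspr rD,PVR" encodes as
 * 0x7c1f42a6 with only the destination register field (bits 21-25)
 * varying, so masking those five bits out of the instruction word
 * and comparing against the constant matches it for any rD.
 */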
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & PPC_INST_STRING_MASK) {
	case PPC_INST_LSWX:
	case PPC_INST_STSWX:
		EA += NB_RB;
		num_bytes = regs->xer & 0x7f;
		break;
	case PPC_INST_LSWI:
	case PPC_INST_STSWI:
		num_bytes = (NB_RB == 0) ? 32 : NB_RB;
		break;
	default:
		return -EINVAL;
	}

	while (num_bytes != 0) {
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));

		switch ((instword & PPC_INST_STRING_MASK)) {
		case PPC_INST_LSWX:
		case PPC_INST_LSWI:
			if (get_user(val, (u8 __user *)EA))
				return -EFAULT;
			/* first time updating this reg,
			 * zero it out */
			if (pos == 0)
				regs->gpr[rT] = 0;
			regs->gpr[rT] |= val << shift;
			break;
		case PPC_INST_STSWI:
		case PPC_INST_STSWX:
			val = regs->gpr[rT] >> shift;
			if (put_user(val, (u8 __user *)EA))
				return -EFAULT;
			break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}

static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
	u32 ra, rs;
	unsigned long tmp;

	ra = (instword >> 16) & 0x1f;
	rs = (instword >> 21) & 0x1f;

	/*
	 * Classic SWAR population count, performed independently in
	 * each byte: fold pairs of bits, then pairs of 2-bit sums,
	 * then pairs of nibble sums, so each byte ends up holding
	 * the number of bits that were set in it.
	 */
	tmp = regs->gpr[rs];
	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
	regs->gpr[ra] = tmp;

	return 0;
}

static int emulate_isel(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 rB = (instword >> 11) & 0x1f;
	u8 BC = (instword >> 6) & 0x1f;
	u8 bit;
	unsigned long tmp;

	tmp = (rA == 0) ? 0 : regs->gpr[rA];
	bit = (regs->ccr >> (31 - BC)) & 0x1;

	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];

	return 0;
}

static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	if (!user_mode(regs) || (regs->msr & MSR_LE))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
		PPC_WARN_EMULATED(mfpvr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op.  */
	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
		PPC_WARN_EMULATED(dcba, regs);
		return 0;
	}

	/* Emulate the mcrxr insn.  */
	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
		/* crfD sits at bit 23, so >>21 leaves it pre-multiplied by 4 */
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		PPC_WARN_EMULATED(mcrxr, regs);
		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
		PPC_WARN_EMULATED(string, regs);
		return emulate_string_inst(regs, instword);
	}

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
		PPC_WARN_EMULATED(popcntb, regs);
		return emulate_popcntb_inst(regs, instword);
	}

	/* Emulate isel (Integer Select) instruction */
	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
		PPC_WARN_EMULATED(isel, regs);
		return emulate_isel(regs, instword);
	}

#ifdef CONFIG_PPC64
	/* Emulate the mfspr rD, DSCR. */
	if (((instword & PPC_INST_MFSPR_DSCR_MASK) == PPC_INST_MFSPR_DSCR) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mfdscr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_DSCR);
		return 0;
	}
	/* Emulate the mtspr DSCR, rD. */
	if (((instword & PPC_INST_MTSPR_DSCR_MASK) == PPC_INST_MTSPR_DSCR) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mtdscr, regs);
		rd = (instword >> 21) & 0x1f;
		mtspr(SPRN_DSCR, regs->gpr[rd]);
		current->thread.dscr_inherit = 1;
		return 0;
	}
#endif

	return -EINVAL;
}

int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}

void __kprobes program_check_exception(struct pt_regs *regs)
{
	unsigned int reason = get_reason(regs);
	extern int do_mathemu(struct pt_regs *regs);

	/* We can now get here via a FP Unavailable exception if the core
	 * has no FPU, in that case the reason flags will be 0 */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		return;
	}
	if (reason & REASON_TRAP) {
		/* Debugger is first in line to stop recursive faults in
		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
		if (debugger_bpt(regs))
			return;

		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
				== NOTIFY_STOP)
			return;

		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			return;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		return;
	}

	local_irq_enable();

#ifdef CONFIG_MATH_EMULATION
	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0.  In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurrences etc. -dgibson 31/Mar/2003 */
	switch (do_mathemu(regs)) {
	case 0:
		emulate_single_step(regs);
		return;
	case 1: {
		int code = 0;
		code = __parse_fpscr(current->thread.fpscr.val);
		_exception(SIGFPE, regs, code, regs->nip);
		return;
	}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return;
	}
	/* fall through on any other errors */
#endif /* CONFIG_MATH_EMULATION */

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs->nip += 4;
			emulate_single_step(regs);
			return;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			return;
		}
	}

	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}

void alignment_exception(struct pt_regs *regs)
{
	int sig, code, fixed = 0;

	/* we don't implement logging of alignment exceptions */
	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		fixed = fix_alignment(regs);

	if (fixed == 1) {
		regs->nip += 4;	/* skip over emulated instruction */
		emulate_single_step(regs);
		return;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		sig = SIGSEGV;
		code = SEGV_ACCERR;
	} else {
		sig = SIGBUS;
		code = BUS_ADRALN;
	}
	if (user_mode(regs))
		_exception(sig, regs, code, regs->dar);
	else
		bad_page_fault(regs, regs->dar, sig);
}

void StackOverflow(struct pt_regs *regs)
{
	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
	       current, regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}

void nonrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
	       regs->nip, regs->msr);
	debugger(regs);
	die("nonrecoverable exception", regs, SIGKILL);
}

void trace_syscall(struct pt_regs *regs)
{
	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n",
	       current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0],
	       regs->ccr & 0x10000000 ? "Error=" : "", regs->gpr[3], print_tainted());
}

void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
}

void altivec_unavailable_exception(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/* A user program has executed an altivec instruction,
		   but this kernel doesn't support altivec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
}

void vsx_unavailable_exception(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/* A user program has executed a vsx instruction,
		   but this kernel doesn't support vsx. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}

void performance_monitor_exception(struct pt_regs *regs)
{
	__get_cpu_var(irq_stat).pmu_irqs++;

	perf_irq(regs);
}

#ifdef CONFIG_8xx
void SoftwareEmulation(struct pt_regs *regs)
{
	extern int do_mathemu(struct pt_regs *);
	extern int Soft_emulate_8xx(struct pt_regs *);
#if defined(CONFIG_MATH_EMULATION) || defined(CONFIG_8XX_MINIMAL_FPEMU)
	int errcode;
#endif

	CHECK_FULL_REGS(regs);

	if (!user_mode(regs)) {
		debugger(regs);
		die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
	}

#ifdef CONFIG_MATH_EMULATION
	errcode = do_mathemu(regs);
	if (errcode >= 0)
		PPC_WARN_EMULATED(math, regs);

	switch (errcode) {
	case 0:
		emulate_single_step(regs);
		return;
	case 1: {
		int code = 0;
		code = __parse_fpscr(current->thread.fpscr.val);
		_exception(SIGFPE, regs, code, regs->nip);
		return;
	}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return;
	default:
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
	errcode = Soft_emulate_8xx(regs);
	if (errcode >= 0)
		PPC_WARN_EMULATED(8xx, regs);

	switch (errcode) {
	case 0:
		emulate_single_step(regs);
		return;
	case 1:
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return;
	}
#else
	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
#endif
}
#endif /* CONFIG_8xx */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
{
	int changed = 0;
	/*
	 * Determine the cause of the debug event, clear the
	 * event flags and send a trap to the handler.
	 * Torez
	 */
	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		current->thread.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
			     5);
		changed |= 0x01;
	} else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
		dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
		do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT,
			     6);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC1) {
		current->thread.dbcr0 &= ~DBCR0_IAC1;
		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
			     1);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC2) {
		current->thread.dbcr0 &= ~DBCR0_IAC2;
		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
			     2);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC3) {
		current->thread.dbcr0 &= ~DBCR0_IAC3;
		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
			     3);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC4) {
		current->thread.dbcr0 &= ~DBCR0_IAC4;
		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
			     4);
		changed |= 0x01;
	}
	/*
	 * At the point this routine was called, the MSR(DE) was turned off.
	 * Check all other debug flags and see if that bit needs to be turned
	 * back on or not.
	 */
	if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, current->thread.dbcr1))
		regs->msr |= MSR_DE;
	else
		/* Make sure the IDM flag is off */
		current->thread.dbcr0 &= ~DBCR0_IDM;

	if (changed & 0x01)
		mtspr(SPRN_DBCR0, current->thread.dbcr0);
}
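/*
 * First-level handler for the BookE debug interrupt: branch-taken
 * and single-step (instruction complete) events are handled inline
 * below, anything else is passed on to handle_debug() above.
 */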
void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	current->thread.dbsr = debug_status;

	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
	 * on server, it stops on the target of the branch. In order to simulate
	 * the server behaviour, we thus restart right away with a single step
	 * instead of stopping here when hitting a BT
	 */
	if (debug_status & DBSR_BT) {
		regs->msr &= ~MSR_DE;

		/* Disable BT */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
		/* Clear the BT event */
		mtspr(SPRN_DBSR, DBSR_BT);

		/* Do the single step trick only when coming from userspace */
		if (user_mode(regs)) {
			current->thread.dbcr0 &= ~DBCR0_BT;
			current->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
			regs->msr |= MSR_DE;
			return;
		}

		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}
		if (debugger_sstep(regs))
			return;
	} else if (debug_status & DBSR_IC) {	/* Instruction complete */
		regs->msr &= ~MSR_DE;

		/* Disable instruction completion */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
		/* Clear the instruction completion event */
		mtspr(SPRN_DBSR, DBSR_IC);

		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}

		if (debugger_sstep(regs))
			return;

		if (user_mode(regs)) {
			current->thread.dbcr0 &= ~DBCR0_IC;
			if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0,
					       current->thread.dbcr1))
				regs->msr |= MSR_DE;
			else
				/* Make sure the IDM bit is off */
				current->thread.dbcr0 &= ~DBCR0_IDM;
		}

		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
	} else
		handle_debug(regs, debug_status);
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* !CONFIG_TAU_INT */

#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	flush_altivec_to_thread(current);

	PPC_WARN_EMULATED(altivec, regs);
	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
				   "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
void vsx_assist_exception(struct pt_regs *regs)
{
	if (!user_mode(regs)) {
		printk(KERN_EMERG "VSX assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VSX assist exception", regs, SIGILL);
	}

	flush_vsx_to_thread(current);
	printk(KERN_INFO "VSX assist not supported at %lx\n", regs->nip);
	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	/*
	 * We treat cache locking instructions from the user
	 * as priv ops, in the future we could try to do
	 * something smarter
	 */
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	return;
}
#endif /* CONFIG_FSL_BOOKE */

#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
	extern int do_spe_mathemu(struct pt_regs *regs);
	unsigned long spefscr;
	int fpexc_mode;
	int code = 0;
	int err;

	flush_spe_to_thread(current);

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF))
		code = FPE_FLTOVF;
	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND))
		code = FPE_FLTUND;
	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
		code = FPE_FLTDIV;
	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV))
		code = FPE_FLTINV;
	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
		code = FPE_FLTRES;

	err = do_spe_mathemu(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, code, regs->nip);
	}

	return;
}

void SPEFloatingPointRoundException(struct pt_regs *regs)
{
	extern int speround_handler(struct pt_regs *regs);
	int err;

	preempt_disable();
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	preempt_enable();

	/* back up to the instruction that raised the round exception */
	regs->nip -= 4;
	err = speround_handler(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, 0, regs->nip);
		return;
	}
}
#endif

/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
	       regs->trap, regs->nip);
	die("Unrecoverable exception", regs, SIGABRT);
}

#ifdef CONFIG_BOOKE_WDT
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR) & ~TCR_WIE);
	return;
}

void WatchdogException(struct pt_regs *regs)
{
	printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif

/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
	       regs->gpr[1], regs->nip);
	die("Bad kernel stack pointer", regs, SIGABRT);
}

void __init trap_init(void)
{
}


#ifdef CONFIG_PPC_EMULATED_STATS

#define WARN_EMULATED_SETUP(type)	.type = { .name = #type }

struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
	WARN_EMULATED_SETUP(altivec),
#endif
	WARN_EMULATED_SETUP(dcba),
	WARN_EMULATED_SETUP(dcbz),
	WARN_EMULATED_SETUP(fp_pair),
	WARN_EMULATED_SETUP(isel),
	WARN_EMULATED_SETUP(mcrxr),
	WARN_EMULATED_SETUP(mfpvr),
	WARN_EMULATED_SETUP(multiple),
	WARN_EMULATED_SETUP(popcntb),
	WARN_EMULATED_SETUP(spe),
	WARN_EMULATED_SETUP(string),
	WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
	WARN_EMULATED_SETUP(math),
#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
	WARN_EMULATED_SETUP(8xx),
#endif
#ifdef CONFIG_VSX
	WARN_EMULATED_SETUP(vsx),
#endif
#ifdef CONFIG_PPC64
	WARN_EMULATED_SETUP(mfdscr),
	WARN_EMULATED_SETUP(mtdscr),
#endif
};

u32 ppc_warn_emulated;

void ppc_warn_emulated_print(const char *type)
{
	pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
			    type);
}

static int __init ppc_warn_emulated_init(void)
{
	struct dentry *dir, *d;
	unsigned int i;
	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;

	if (!powerpc_debugfs_root)
		return -ENODEV;

	dir = debugfs_create_dir("emulated_instructions",
				 powerpc_debugfs_root);
	if (!dir)
		return -ENOMEM;

	d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
			       &ppc_warn_emulated);
	if (!d)
		goto fail;

	for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
		d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
				       (u32 *)&entries[i].val.counter);
		if (!d)
			goto fail;
	}

	return 0;

fail:
	debugfs_remove_recursive(dir);
	return -ENOMEM;
}

device_initcall(ppc_warn_emulated_init);

#endif /* CONFIG_PPC_EMULATED_STATS */