/*
 *  Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 *  Modified by Cort Dougan (cort@cs.nmt.edu)
 *  and Paul Mackerras (paulus@samba.org)
 */

/*
 * This file handles the architecture-dependent parts of hardware exceptions
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#ifdef CONFIG_PPC32
#include <asm/reg.h>
#endif
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
#endif
#include <asm/kexec.h>

#ifdef CONFIG_DEBUGGER
int (*__debugger)(struct pt_regs *regs);
int (*__debugger_ipi)(struct pt_regs *regs);
int (*__debugger_bpt)(struct pt_regs *regs);
int (*__debugger_sstep)(struct pt_regs *regs);
int (*__debugger_iabr_match)(struct pt_regs *regs);
int (*__debugger_dabr_match)(struct pt_regs *regs);
int (*__debugger_fault_handler)(struct pt_regs *regs);

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_dabr_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

/*
 * Trap & Exception support
 */

#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
	mutex_lock(&pmac_backlight_mutex);
	if (pmac_backlight) {
		struct backlight_properties *props;

		props = &pmac_backlight->props;
		props->brightness = props->max_brightness;
		props->power = FB_BLANK_UNBLANK;
		backlight_update_status(pmac_backlight);
	}
	mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif

int die(const char *str, struct pt_regs *regs, long err)
{
	static struct {
		spinlock_t lock;
		u32 lock_owner;
		int lock_owner_depth;
	} die = {
		.lock =			__SPIN_LOCK_UNLOCKED(die.lock),
		.lock_owner =		-1,
		.lock_owner_depth =	0
	};
	static int die_counter;
	unsigned long flags;

	if (debugger(regs))
		return 1;

	oops_enter();

	if (die.lock_owner != raw_smp_processor_id()) {
		console_verbose();
		spin_lock_irqsave(&die.lock, flags);
		die.lock_owner = smp_processor_id();
		die.lock_owner_depth = 0;
		bust_spinlocks(1);
		if (machine_is(powermac))
			pmac_backlight_unblank();
	} else {
		local_save_flags(flags);
	}

	if (++die.lock_owner_depth < 3) {
		printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
printk("PREEMPT "); 132 #endif 133 #ifdef CONFIG_SMP 134 printk("SMP NR_CPUS=%d ", NR_CPUS); 135 #endif 136 #ifdef CONFIG_DEBUG_PAGEALLOC 137 printk("DEBUG_PAGEALLOC "); 138 #endif 139 #ifdef CONFIG_NUMA 140 printk("NUMA "); 141 #endif 142 printk("%s\n", ppc_md.name ? ppc_md.name : ""); 143 144 print_modules(); 145 show_regs(regs); 146 } else { 147 printk("Recursive die() failure, output suppressed\n"); 148 } 149 150 bust_spinlocks(0); 151 die.lock_owner = -1; 152 add_taint(TAINT_DIE); 153 spin_unlock_irqrestore(&die.lock, flags); 154 155 if (kexec_should_crash(current) || 156 kexec_sr_activated(smp_processor_id())) 157 crash_kexec(regs); 158 crash_kexec_secondary(regs); 159 160 if (in_interrupt()) 161 panic("Fatal exception in interrupt"); 162 163 if (panic_on_oops) 164 panic("Fatal exception"); 165 166 oops_exit(); 167 do_exit(err); 168 169 return 0; 170 } 171 172 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) 173 { 174 siginfo_t info; 175 176 if (!user_mode(regs)) { 177 if (die("Exception in kernel mode", regs, signr)) 178 return; 179 } 180 181 memset(&info, 0, sizeof(info)); 182 info.si_signo = signr; 183 info.si_code = code; 184 info.si_addr = (void __user *) addr; 185 force_sig_info(signr, &info, current); 186 187 /* 188 * Init gets no signals that it doesn't have a handler for. 189 * That's all very well, but if it has caused a synchronous 190 * exception and we ignore the resulting signal, it will just 191 * generate the same exception over and over again and we get 192 * nowhere. Better to kill it and let the kernel panic. 193 */ 194 if (is_init(current)) { 195 __sighandler_t handler; 196 197 spin_lock_irq(¤t->sighand->siglock); 198 handler = current->sighand->action[signr-1].sa.sa_handler; 199 spin_unlock_irq(¤t->sighand->siglock); 200 if (handler == SIG_DFL) { 201 /* init has generated a synchronous exception 202 and it doesn't have a handler for the signal */ 203 printk(KERN_CRIT "init has generated signal %d " 204 "but has no handler for it\n", signr); 205 do_exit(signr); 206 } 207 } 208 } 209 210 #ifdef CONFIG_PPC64 211 void system_reset_exception(struct pt_regs *regs) 212 { 213 /* See if any machine dependent calls */ 214 if (ppc_md.system_reset_exception) { 215 if (ppc_md.system_reset_exception(regs)) 216 return; 217 } 218 219 #ifdef CONFIG_KEXEC 220 cpu_set(smp_processor_id(), cpus_in_sr); 221 #endif 222 223 die("System Reset", regs, SIGABRT); 224 225 /* 226 * Some CPUs when released from the debugger will execute this path. 227 * These CPUs entered the debugger via a soft-reset. If the CPU was 228 * hung before entering the debugger it will return to the hung 229 * state when exiting this function. This causes a problem in 230 * kdump since the hung CPU(s) will not respond to the IPI sent 231 * from kdump. To prevent the problem we call crash_kexec_secondary() 232 * here. If a kdump had not been initiated or we exit the debugger 233 * with the "exit and recover" command (x) crash_kexec_secondary() 234 * will return after 5ms and the CPU returns to its previous state. 235 */ 236 crash_kexec_secondary(regs); 237 238 /* Must die if the interrupt is not recoverable */ 239 if (!(regs->msr & MSR_RI)) 240 panic("Unrecoverable System Reset"); 241 242 /* What should we do here? We could issue a shutdown or hard reset. */ 243 } 244 #endif 245 246 /* 247 * I/O accesses can cause machine checks on powermacs. 248 * Check if the NIP corresponds to the address of a sync 249 * instruction for which there is an entry in the exception 250 * table. 
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == 0x60000000)		/* nop */
			nip -= 2;
		else if (*nip == 0x4c00012c)	/* isync */
			--nip;
		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
			/* sync or twi */
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100)? "OUT to": "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			regs->msr |= MSR_RI;
			regs->nip = entry->fixup;
			return 1;
		}
	}
#endif /* CONFIG_PPC32 */
	return 0;
}

#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)	((regs)->dsisr)
#ifndef CONFIG_FSL_BOOKE
#define get_mc_reason(regs)	((regs)->dsisr)
#else
#define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
#endif
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR

/* single-step stuff */
#define single_stepping(regs)	(current->thread.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.dbcr0 &= ~DBCR0_IC)

#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define get_mc_reason(regs)	((regs)->msr)
#define REASON_FP		0x100000
#define REASON_ILLEGAL		0x80000
#define REASON_PRIVILEGED	0x40000
#define REASON_TRAP		0x20000

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#endif

/*
 * This is a "fall-back" implementation for configurations
 * which don't provide platform-specific machine check info
 */
void __attribute__ ((weak))
platform_machine_check(struct pt_regs *regs)
{
}

void machine_check_exception(struct pt_regs *regs)
{
	int recover = 0;
	unsigned long reason = get_mc_reason(regs);

	/* See if any machine dependent calls */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);

	if (recover)
		return;

	if (user_mode(regs)) {
		regs->msr |= MSR_RI;
		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
		return;
	}

#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
	/* the qspan pci read routines can cause machine checks -- Cort */
	bad_page_fault(regs, regs->dar, SIGBUS);
	return;
#endif

	if (debugger_fault_handler(regs)) {
		regs->msr |= MSR_RI;
		return;
	}

	if (check_io_access(regs))
		return;

#if defined(CONFIG_4xx) && !defined(CONFIG_440A)
	if (reason & ESR_IMCP) {
		printk("Instruction");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	} else
		printk("Data");
	printk(" machine check in kernel mode.\n");
#elif defined(CONFIG_440A)
	printk("Machine check in kernel mode.\n");
	if (reason & ESR_IMCP){
		printk("Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	}
	else {
		u32 mcsr = mfspr(SPRN_MCSR);
		if (mcsr & MCSR_IB)
			printk("Instruction Read PLB Error\n");
		if (mcsr & MCSR_DRB)
			printk("Data Read PLB Error\n");
		if (mcsr & MCSR_DWB)
			printk("Data Write PLB Error\n");
		if (mcsr & MCSR_TLBP)
			printk("TLB Parity Error\n");
		if (mcsr & MCSR_ICP){
			flush_instruction_cache();
			printk("I-Cache Parity Error\n");
		}
		if (mcsr & MCSR_DCSP)
			printk("D-Cache Search Parity Error\n");
		if (mcsr & MCSR_DCFP)
			printk("D-Cache Flush Parity Error\n");
		if (mcsr & MCSR_IMPE)
			printk("Machine Check exception is imprecise\n");

		/* Clear MCSR */
		mtspr(SPRN_MCSR, mcsr);
	}
#elif defined (CONFIG_E500)
	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_ICPERR)
		printk("Instruction Cache Parity Error\n");
	if (reason & MCSR_DCP_PERR)
		printk("Data Cache Push Parity Error\n");
	if (reason & MCSR_DCPERR)
		printk("Data Cache Parity Error\n");
	if (reason & MCSR_GL_CI)
		printk("Guarded Load or Cache-Inhibited stwcx.\n");
	if (reason & MCSR_BUS_IAERR)
		printk("Bus - Instruction Address Error\n");
	if (reason & MCSR_BUS_RAERR)
		printk("Bus - Read Address Error\n");
	if (reason & MCSR_BUS_WAERR)
		printk("Bus - Write Address Error\n");
	if (reason & MCSR_BUS_IBERR)
		printk("Bus - Instruction Data Error\n");
	if (reason & MCSR_BUS_RBERR)
		printk("Bus - Read Data Bus Error\n");
	if (reason & MCSR_BUS_WBERR)
		printk("Bus - Write Data Bus Error\n");
	if (reason & MCSR_BUS_IPERR)
		printk("Bus - Instruction Parity Error\n");
	if (reason & MCSR_BUS_RPERR)
(reason & MCSR_BUS_RPERR) 434 printk("Bus - Read Parity Error\n"); 435 #elif defined (CONFIG_E200) 436 printk("Machine check in kernel mode.\n"); 437 printk("Caused by (from MCSR=%lx): ", reason); 438 439 if (reason & MCSR_MCP) 440 printk("Machine Check Signal\n"); 441 if (reason & MCSR_CP_PERR) 442 printk("Cache Push Parity Error\n"); 443 if (reason & MCSR_CPERR) 444 printk("Cache Parity Error\n"); 445 if (reason & MCSR_EXCP_ERR) 446 printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n"); 447 if (reason & MCSR_BUS_IRERR) 448 printk("Bus - Read Bus Error on instruction fetch\n"); 449 if (reason & MCSR_BUS_DRERR) 450 printk("Bus - Read Bus Error on data load\n"); 451 if (reason & MCSR_BUS_WRERR) 452 printk("Bus - Write Bus Error on buffered store or cache line push\n"); 453 #else /* !CONFIG_4xx && !CONFIG_E500 && !CONFIG_E200 */ 454 printk("Machine check in kernel mode.\n"); 455 printk("Caused by (from SRR1=%lx): ", reason); 456 switch (reason & 0x601F0000) { 457 case 0x80000: 458 printk("Machine check signal\n"); 459 break; 460 case 0: /* for 601 */ 461 case 0x40000: 462 case 0x140000: /* 7450 MSS error and TEA */ 463 printk("Transfer error ack signal\n"); 464 break; 465 case 0x20000: 466 printk("Data parity error signal\n"); 467 break; 468 case 0x10000: 469 printk("Address parity error signal\n"); 470 break; 471 case 0x20000000: 472 printk("L1 Data Cache error\n"); 473 break; 474 case 0x40000000: 475 printk("L1 Instruction Cache error\n"); 476 break; 477 case 0x00100000: 478 printk("L2 data cache parity error\n"); 479 break; 480 default: 481 printk("Unknown values in msr\n"); 482 } 483 #endif /* CONFIG_4xx */ 484 485 /* 486 * Optional platform-provided routine to print out 487 * additional info, e.g. bus error registers. 488 */ 489 platform_machine_check(regs); 490 491 if (debugger_fault_handler(regs)) 492 return; 493 die("Machine check", regs, SIGBUS); 494 495 /* Must die if the interrupt is not recoverable */ 496 if (!(regs->msr & MSR_RI)) 497 panic("Unrecoverable Machine check"); 498 } 499 500 void SMIException(struct pt_regs *regs) 501 { 502 die("System Management Interrupt", regs, SIGABRT); 503 } 504 505 void unknown_exception(struct pt_regs *regs) 506 { 507 printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", 508 regs->nip, regs->msr, regs->trap); 509 510 _exception(SIGTRAP, regs, 0, 0); 511 } 512 513 void instruction_breakpoint_exception(struct pt_regs *regs) 514 { 515 if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5, 516 5, SIGTRAP) == NOTIFY_STOP) 517 return; 518 if (debugger_iabr_match(regs)) 519 return; 520 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); 521 } 522 523 void RunModeException(struct pt_regs *regs) 524 { 525 _exception(SIGTRAP, regs, 0, 0); 526 } 527 528 void __kprobes single_step_exception(struct pt_regs *regs) 529 { 530 regs->msr &= ~(MSR_SE | MSR_BE); /* Turn off 'trace' bits */ 531 532 if (notify_die(DIE_SSTEP, "single_step", regs, 5, 533 5, SIGTRAP) == NOTIFY_STOP) 534 return; 535 if (debugger_sstep(regs)) 536 return; 537 538 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); 539 } 540 541 /* 542 * After we have successfully emulated an instruction, we have to 543 * check if the instruction was being single-stepped, and if so, 544 * pretend we got a single-step exception. This was pointed out 545 * by Kumar Gala. 
 *  -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs)) {
		clear_single_step(regs);
		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
	}
}

static inline int __parse_fpscr(unsigned long fpscr)
{
	int ret = 0;

	/* Invalid operation */
	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
		ret = FPE_FLTINV;

	/* Overflow */
	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
		ret = FPE_FLTOVF;

	/* Underflow */
	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
		ret = FPE_FLTUND;

	/* Divide by zero */
	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
		ret = FPE_FLTDIV;

	/* Inexact result */
	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
		ret = FPE_FLTRES;

	return ret;
}

static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;

	flush_fp_to_thread(current);

	code = __parse_fpscr(current->thread.fpscr.val);

	_exception(SIGFPE, regs, code, regs->nip);
}

/*
 * Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 *
 */
#define INST_MFSPR_PVR		0x7c1f42a6
#define INST_MFSPR_PVR_MASK	0xfc1fffff

#define INST_DCBA		0x7c0005ec
#define INST_DCBA_MASK		0xfc0007fe

#define INST_MCRXR		0x7c000400
#define INST_MCRXR_MASK		0xfc0007fe

#define INST_STRING		0x7c00042a
#define INST_STRING_MASK	0xfc0007fe
#define INST_STRING_GEN_MASK	0xfc00067e
#define INST_LSWI		0x7c0004aa
#define INST_LSWX		0x7c00042a
#define INST_STSWI		0x7c0005aa
#define INST_STSWX		0x7c00052a

#define INST_POPCNTB		0x7c0000f4
#define INST_POPCNTB_MASK	0xfc0007fe

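/*
 * Emulate the load/store string instructions (lswi/lswx/stswi/stswx).
 * They transfer num_bytes bytes between memory at EA and successive
 * GPRs starting at rT, four bytes per register with the first byte in
 * the most significant position, wrapping from r31 back to r0.  The
 * immediate forms take the byte count from the NB field (0 means 32);
 * the indexed forms take it from the low 7 bits of XER.
 */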
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & INST_STRING_MASK) == INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & INST_STRING_MASK) {
		case INST_LSWX:
		case INST_STSWX:
			EA += NB_RB;
			num_bytes = regs->xer & 0x7f;
			break;
		case INST_LSWI:
		case INST_STSWI:
			num_bytes = (NB_RB == 0) ? 32 : NB_RB;
			break;
		default:
			return -EINVAL;
	}

	while (num_bytes != 0)
	{
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));

		switch ((instword & INST_STRING_MASK)) {
			case INST_LSWX:
			case INST_LSWI:
				if (get_user(val, (u8 __user *)EA))
					return -EFAULT;
				/* first time updating this reg,
				 * zero it out */
				if (pos == 0)
					regs->gpr[rT] = 0;
				regs->gpr[rT] |= val << shift;
				break;
			case INST_STSWI:
			case INST_STSWX:
				val = regs->gpr[rT] >> shift;
				if (put_user(val, (u8 __user *)EA))
					return -EFAULT;
				break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}

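/*
 * Emulate popcntb: each byte of the destination register receives the
 * number of one bits in the corresponding byte of the source register.
 * The masking steps below are the standard parallel population-count
 * trick, applied to all bytes at once so that no byte's count carries
 * into its neighbour.
 */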
static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
	u32 ra,rs;
	unsigned long tmp;

	ra = (instword >> 16) & 0x1f;
	rs = (instword >> 21) & 0x1f;

	tmp = regs->gpr[rs];
	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
	regs->gpr[ra] = tmp;

	return 0;
}

static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	if (!user_mode(regs) || (regs->msr & MSR_LE))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op.  */
	if ((instword & INST_DCBA_MASK) == INST_DCBA)
		return 0;

	/* Emulate the mcrxr insn.  */
	if ((instword & INST_MCRXR_MASK) == INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & INST_STRING_GEN_MASK) == INST_STRING)
		return emulate_string_inst(regs, instword);

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & INST_POPCNTB_MASK) == INST_POPCNTB) {
		return emulate_popcntb_inst(regs, instword);
	}

	return -EINVAL;
}

int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}

void __kprobes program_check_exception(struct pt_regs *regs)
{
	unsigned int reason = get_reason(regs);
	extern int do_mathemu(struct pt_regs *regs);

	/* We can now get here via a FP Unavailable exception if the core
	 * has no FPU, in that case the reason flags will be 0 */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		return;
	}
	if (reason & REASON_TRAP) {
		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
				== NOTIFY_STOP)
			return;
		if (debugger_bpt(regs))
			return;

		if (!(regs->msr & MSR_PR) &&	/* not user-mode */
		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			return;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		return;
	}

	local_irq_enable();

#ifdef CONFIG_MATH_EMULATION
	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0.  In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurrences etc. -dgibson 31/Mar/2003 */
	switch (do_mathemu(regs)) {
	case 0:
		emulate_single_step(regs);
		return;
	case 1: {
			int code = 0;
			code = __parse_fpscr(current->thread.fpscr.val);
			_exception(SIGFPE, regs, code, regs->nip);
			return;
		}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return;
	}
	/* fall through on any other errors */
#endif /* CONFIG_MATH_EMULATION */

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs->nip += 4;
			emulate_single_step(regs);
			return;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			return;
		}
	}

	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}

void alignment_exception(struct pt_regs *regs)
{
	int sig, code, fixed = 0;

	/* we don't implement logging of alignment exceptions */
	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		fixed = fix_alignment(regs);

	if (fixed == 1) {
		regs->nip += 4;	/* skip over emulated instruction */
		emulate_single_step(regs);
		return;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		sig = SIGSEGV;
		code = SEGV_ACCERR;
	} else {
		sig = SIGBUS;
		code = BUS_ADRALN;
	}
	if (user_mode(regs))
		_exception(sig, regs, code, regs->dar);
	else
		bad_page_fault(regs, regs->dar, sig);
}

void StackOverflow(struct pt_regs *regs)
{
	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
	       current, regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}

void nonrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
	       regs->nip, regs->msr);
	debugger(regs);
	die("nonrecoverable exception", regs, SIGKILL);
}

void trace_syscall(struct pt_regs *regs)
{
	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n",
	       current, current->pid, regs->nip, regs->link, regs->gpr[0],
	       regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
}

void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
}

void altivec_unavailable_exception(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/* A user program has executed an altivec instruction,
		   but this kernel doesn't support altivec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
}

void performance_monitor_exception(struct pt_regs *regs)
{
	perf_irq(regs);
}

#ifdef CONFIG_8xx
void SoftwareEmulation(struct pt_regs *regs)
{
	extern int do_mathemu(struct pt_regs *);
	extern int Soft_emulate_8xx(struct pt_regs *);
	int errcode;

	CHECK_FULL_REGS(regs);

	if (!user_mode(regs)) {
		debugger(regs);
		die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
	}

#ifdef CONFIG_MATH_EMULATION
	errcode = do_mathemu(regs);

	switch (errcode) {
	case 0:
		emulate_single_step(regs);
		return;
	case 1: {
			int code = 0;
			code = __parse_fpscr(current->thread.fpscr.val);
			_exception(SIGFPE, regs, code, regs->nip);
			return;
		}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return;
	default:
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

#else
	errcode = Soft_emulate_8xx(regs);
	switch (errcode) {
	case 0:
		emulate_single_step(regs);
		return;
	case 1:
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return;
	}
#endif
}
#endif /* CONFIG_8xx */

#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)

void DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	if (debug_status & DBSR_IC) {	/* instruction completion */
		regs->msr &= ~MSR_DE;
		if (user_mode(regs)) {
			current->thread.dbcr0 &= ~DBCR0_IC;
		} else {
			/* Disable instruction completion */
			mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
			/* Clear the instruction completion event */
			mtspr(SPRN_DBSR, DBSR_IC);
			if (debugger_sstep(regs))
				return;
		}
		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
	}
}
#endif /* CONFIG_40x || CONFIG_BOOKE */

#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */

#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	flush_altivec_to_thread(current);

	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		if (printk_ratelimit())
			printk(KERN_ERR "Unrecognized altivec instruction "
			       "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	/* We treat cache locking instructions from the user
	 * as priv ops, in the future we could try to do
	 * something smarter
	 */
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	return;
}
#endif /* CONFIG_FSL_BOOKE */

#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
	unsigned long spefscr;
	int fpexc_mode;
	int code = 0;

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	/* Hardware does not necessarily set sticky
	 * underflow/overflow/invalid flags */
	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
		code = FPE_FLTOVF;
		spefscr |= SPEFSCR_FOVFS;
	}
	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
		code = FPE_FLTUND;
		spefscr |= SPEFSCR_FUNFS;
	}
	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
		code = FPE_FLTDIV;
	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
		code = FPE_FLTINV;
		spefscr |= SPEFSCR_FINVS;
	}
	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
		code = FPE_FLTRES;

	current->thread.spefscr = spefscr;

	_exception(SIGFPE, regs, code, regs->nip);
	return;
}
#endif

/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
	       regs->trap, regs->nip);
	die("Unrecoverable exception", regs, SIGABRT);
}

#ifdef CONFIG_BOOKE_WDT
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
	return;
}

void WatchdogException(struct pt_regs *regs)
{
	printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif

/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
	       regs->gpr[1], regs->nip);
	die("Bad kernel stack pointer", regs, SIGABRT);
}

void __init trap_init(void)
{
}