/*
 * 'traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 *
 * SuperH version: Copyright (C) 1999 Niibe Yutaka
 *                 Copyright (C) 2000 Philipp Rumpf
 *                 Copyright (C) 2000 David Howells
 *                 Copyright (C) 2002 - 2007 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/debug_locks.h>
#include <linux/kdebug.h>
#include <linux/kexec.h>
#include <linux/limits.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/fpu.h>
#include <asm/kprobes.h>

/* Exception vector numbers differ between SH-2/SH-2A and later parts. */
#ifdef CONFIG_CPU_SH2
# define TRAP_RESERVED_INST	4
# define TRAP_ILLEGAL_SLOT_INST	6
# define TRAP_ADDRESS_ERROR	9
# ifdef CONFIG_CPU_SH2A
# define TRAP_UBC		12
# define TRAP_FPU_ERROR		13
# define TRAP_DIVZERO_ERROR	17
# define TRAP_DIVOVF_ERROR	18
# endif
#else
#define TRAP_RESERVED_INST	12
#define TRAP_ILLEGAL_SLOT_INST	13
#endif

/*
 * Dump 32-bit words of memory between 'bottom' and 'top', eight words
 * per line.  Each word is fetched with __get_user() so that a faulting
 * address terminates the dump instead of raising a nested fault.
 */
static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
{
	unsigned long p;
	int i;

	printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);

	/* start on a 32-byte boundary so the printed rows line up */
	for (p = bottom & ~31; p < top; ) {
		printk("%04lx: ", p & 0xffff);

		for (i = 0; i < 8; i++, p += 4) {
			unsigned int val;

			if (p < bottom || p >= top)
				printk(" ");
			else {
				if (__get_user(val, (unsigned int __user *)p)) {
					printk("\n");
					return;
				}
				printk("%08x ", val);
			}
		}
		printk("\n");
	}
}

static DEFINE_SPINLOCK(die_lock);

/*
 * Fatal-error handler: print an oops (registers, modules, kernel stack),
 * notify die-chain listeners, then terminate the current task with
 * SIGSEGV.  Panics instead when running in interrupt context or when
 * panic_on_oops is set, and hands off to kexec when a crash kernel is
 * loaded.
 */
void die(const char * str, struct pt_regs * regs, long err)
{
	static int die_counter;

	oops_enter();

	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);

	print_modules();
	show_regs(regs);

	printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
	       task_pid_nr(current), task_stack_page(current) + 1);

	/* only dump the stack when the fault happened on a kernel stack */
	if (!user_mode(regs) || in_interrupt())
		dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
			 (unsigned long)task_stack_page(current));

	notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV);

	bust_spinlocks(0);
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die_lock);

	if (kexec_should_crash(current))
		crash_kexec(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	oops_exit();
	do_exit(SIGSEGV);
}

/* die() only if the trap was taken in kernel mode. */
static inline void die_if_kernel(const char *str, struct pt_regs *regs,
				 long err)
{
	if (!user_mode(regs))
		die(str, regs, err);
}

/*
 * try and fix up kernelspace address errors
 * - userspace errors just cause EFAULT to be returned, resulting in SEGV
 * - kernel/userspace interfaces cause a jump to an appropriate handler
 * - other kernel errors are bad
 */
static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
{
	if (!user_mode(regs)) {
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->pc);
		if (fixup) {
			regs->pc = fixup->fixup;
			return;
		}
		die(str, regs, err);
	}
}

/*
 * Sign-extend a 1- or 2-byte value that was stored into the low-order
 * bytes of a zeroed 4-byte register image.  'dst' points at the register
 * image; which bytes hold the value depends on endianness.
 */
static inline void sign_extend(unsigned int count, unsigned char *dst)
{
#ifdef __LITTLE_ENDIAN__
	if ((count == 1) && dst[0] & 0x80) {
		dst[1] = 0xff;
		dst[2] = 0xff;
		dst[3] = 0xff;
	}
	if ((count == 2) && dst[1] & 0x80) {
		dst[2] = 0xff;
		dst[3] = 0xff;
	}
#else
	if ((count == 1) && dst[3] & 0x80) {
		dst[2] = 0xff;
		dst[1] = 0xff;
		dst[0] = 0xff;
	}
	if ((count == 2) && dst[2] & 0x80) {
		dst[1] = 0xff;
		dst[0] = 0xff;
	}
#endif
}

/* Accessors used when fixing up unaligned accesses on behalf of userspace. */
static struct mem_access user_mem_access = {
	copy_from_user,
	copy_to_user,
};

/*
 * handle an instruction that does an unaligned memory access by emulating the
 * desired behaviour
 * - note that PC _may not_ point to the faulting instruction
 *   (if that instruction is in a branch delay slot)
 * - return 0 if emulation okay, -EFAULT on existential error
 */
static int handle_unaligned_ins(insn_size_t instruction, struct pt_regs *regs,
				struct mem_access *ma)
{
	int ret, index, count;
	unsigned long *rm, *rn;
	unsigned char *src, *dst;
	unsigned char __user *srcu, *dstu;

	index = (instruction>>8)&15;	/* 0x0F00 */
	rn = &regs->regs[index];

	index = (instruction>>4)&15;	/* 0x00F0 */
	rm = &regs->regs[index];

	/* operand size encoded in the low two bits: 1, 2 or 4 bytes */
	count = 1<<(instruction&3);

	ret = -EFAULT;
	switch (instruction>>12) {
	case 0: /* mov.[bwl] to/from memory via r0+rn */
		if (instruction & 8) {
			/* from memory */
			srcu = (unsigned char __user *)*rm;
			srcu += regs->regs[0];
			dst = (unsigned char *)rn;
			/* zero the register image before the partial store */
			*(unsigned long *)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
			dst += 4-count;
#endif
			if (ma->from(dst, srcu, count))
				goto fetch_fault;

			sign_extend(count, dst);
		} else {
			/* to memory */
			src = (unsigned char *)rm;
#if !defined(__LITTLE_ENDIAN__)
			src += 4-count;
#endif
			dstu = (unsigned char __user *)*rn;
			dstu += regs->regs[0];

			if (ma->to(dstu, src, count))
				goto fetch_fault;
		}
		ret = 0;
		break;

	case 1: /* mov.l Rm,@(disp,Rn) */
		src = (unsigned char*) rm;
		dstu = (unsigned char __user *)*rn;
		dstu += (instruction&0x000F)<<2;

		if (ma->to(dstu, src, 4))
			goto fetch_fault;
		ret = 0;
		break;

	case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
		if (instruction & 4)
			*rn -= count;
		src = (unsigned char*) rm;
		dstu = (unsigned char __user *)*rn;
#if !defined(__LITTLE_ENDIAN__)
		src += 4-count;
#endif
		if (ma->to(dstu, src, count))
			goto fetch_fault;
		ret = 0;
		break;

	case 5: /* mov.l @(disp,Rm),Rn */
		srcu = (unsigned char __user *)*rm;
		srcu += (instruction & 0x000F) << 2;
		dst = (unsigned char *)rn;
		*(unsigned long *)dst = 0;

		if (ma->from(dst, srcu, 4))
			goto fetch_fault;
		ret = 0;
		break;

	case 6: /* mov.[bwl] from memory, possibly with post-increment */
		srcu = (unsigned char __user *)*rm;
		if (instruction & 4)
			*rm += count;
		dst = (unsigned char*) rn;
		*(unsigned long*)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
		dst += 4-count;
#endif
		if (ma->from(dst, srcu, count))
			goto fetch_fault;
		sign_extend(count, dst);
		ret = 0;
		break;

	case 8:
		switch ((instruction&0xFF00)>>8) {
		case 0x81: /* mov.w R0,@(disp,Rn) */
			src = (unsigned char *) &regs->regs[0];
#if !defined(__LITTLE_ENDIAN__)
			src += 2;
#endif
			dstu = (unsigned char __user *)*rm; /* called Rn in the spec */
			dstu += (instruction & 0x000F) << 1;

			if (ma->to(dstu, src, 2))
				goto fetch_fault;
			ret = 0;
			break;

		case 0x85: /* mov.w @(disp,Rm),R0 */
			srcu = (unsigned char __user *)*rm;
			srcu += (instruction & 0x000F) << 1;
			dst = (unsigned char *) &regs->regs[0];
			*(unsigned long *)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
			dst += 2;
#endif
			if (ma->from(dst, srcu, 2))
				goto fetch_fault;
			sign_extend(2, dst);
			ret = 0;
			break;
		}
		break;
	}
	return ret;

 fetch_fault:
	/* Argh. Address not only misaligned but also non-existent.
	 * Raise an EFAULT and see if it's trapped
	 */
	die_if_no_fixup("Fault in unaligned fixup", regs, 0);
	return -EFAULT;
}

/*
 * emulate the instruction in the delay slot
 * - fetches the instruction from PC+2
 */
static inline int handle_delayslot(struct pt_regs *regs,
				   insn_size_t old_instruction,
				   struct mem_access *ma)
{
	insn_size_t instruction;
	void __user *addr = (void __user *)(regs->pc +
		instruction_size(old_instruction));

	if (copy_from_user(&instruction, addr, sizeof(instruction))) {
		/* the instruction-fetch faulted */
		if (user_mode(regs))
			return -EFAULT;

		/* kernel */
		die("delay-slot-insn faulting in handle_unaligned_delayslot",
		    regs, 0);
	}

	return handle_unaligned_ins(instruction, regs, ma);
}

/*
 * handle an instruction that does an unaligned memory access
 * - have to be careful of branch delay-slot instructions that fault
 *  SH3:
 *   - if the branch would be taken PC points to the branch
 *   - if the branch would not be taken, PC points to delay-slot
 *  SH4:
 *   - PC always points to delayed branch
 * - return 0 if handled, -EFAULT if failed (may not return if in kernel)
 */

/* Macros to determine offset from current PC for branch instructions */
/* Explicit type coercion is used to force sign extension where needed */
#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)

/*
 * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit
 * opcodes..
 */

/* Remaining number of unaligned-userspace-access warnings to print. */
static int handle_unaligned_notify_count = 10;

/*
 * Dispatch an unaligned access: branch instructions have the faulting
 * access emulated out of their delay slot (and the branch itself is
 * then retired by adjusting PC/PR here); plain mov forms fall through
 * to handle_unaligned_ins() via the 'simple' path.
 */
int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
			    struct mem_access *ma)
{
	u_int rm;
	int ret, index;

	index = (instruction>>8)&15;	/* 0x0F00 */
	rm = regs->regs[index];

	/* shout about the first ten userspace fixups */
	if (user_mode(regs) && handle_unaligned_notify_count>0) {
		handle_unaligned_notify_count--;

		printk(KERN_NOTICE "Fixing up unaligned userspace access "
		       "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
		       current->comm, task_pid_nr(current),
		       (void *)regs->pc, instruction);
	}

	ret = -EFAULT;
	switch (instruction&0xF000) {
	case 0x0000:
		if (instruction==0x000B) {
			/* rts */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret==0)
				regs->pc = regs->pr;
		}
		else if ((instruction&0x00FF)==0x0023) {
			/* braf @Rm */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret==0)
				regs->pc += rm + 4;
		}
		else if ((instruction&0x00FF)==0x0003) {
			/* bsrf @Rm */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret==0) {
				regs->pr = regs->pc + 4;
				regs->pc += rm + 4;
			}
		}
		else {
			/* mov.[bwl] to/from memory via r0+rn */
			goto simple;
		}
		break;

	case 0x1000: /* mov.l Rm,@(disp,Rn) */
		goto simple;

	case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
		goto simple;

	case 0x4000:
		if ((instruction&0x00FF)==0x002B) {
			/* jmp @Rm */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret==0)
				regs->pc = rm;
		}
		else if ((instruction&0x00FF)==0x000B) {
			/* jsr @Rm */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret==0) {
				regs->pr = regs->pc + 4;
				regs->pc = rm;
			}
		}
		else {
			/* mov.[bwl] to/from memory via r0+rn */
			goto simple;
		}
		break;

	case 0x5000: /* mov.l @(disp,Rm),Rn */
		goto simple;

	case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
		goto simple;

	case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
		switch (instruction&0x0F00) {
		case 0x0100: /* mov.w R0,@(disp,Rm) */
			goto simple;
		case 0x0500: /* mov.w @(disp,Rm),R0 */
			goto simple;
		case 0x0B00: /* bf   lab - no delayslot*/
			break;
		case 0x0F00: /* bf/s lab */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret==0) {
				/* T bit decides whether the branch is taken */
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
				if ((regs->sr & 0x00000001) != 0)
					regs->pc += 4; /* next after slot */
				else
#endif
					regs->pc += SH_PC_8BIT_OFFSET(instruction);
			}
			break;
		case 0x0900: /* bt   lab - no delayslot */
			break;
		case 0x0D00: /* bt/s lab */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
				if ((regs->sr & 0x00000001) == 0)
					regs->pc += 4; /* next after slot */
				else
#endif
					regs->pc += SH_PC_8BIT_OFFSET(instruction);
			}
			break;
		}
		break;

	case 0xA000: /* bra label */
		ret = handle_delayslot(regs, instruction, ma);
		if (ret==0)
			regs->pc += SH_PC_12BIT_OFFSET(instruction);
		break;

	case 0xB000: /* bsr label */
		ret = handle_delayslot(regs, instruction, ma);
		if (ret==0) {
			regs->pr = regs->pc + 4;
			regs->pc += SH_PC_12BIT_OFFSET(instruction);
		}
		break;
	}
	return ret;

	/* handle non-delay-slot instruction */
 simple:
	ret = handle_unaligned_ins(instruction, regs, ma);
	if (ret==0)
		regs->pc += instruction_size(instruction);
	return ret;
}

/*
 * Handle various address error exceptions:
 *  - instruction address error:
 *       misaligned PC
 *       PC >= 0x80000000 in user mode
 *  - data address error (read and write)
 *       misaligned data access
 *       access to >= 0x80000000 in user mode
 * Unfortunately we can't distinguish between
instruction address error 517 * and data address errors caused by read accesses. 518 */ 519 asmlinkage void do_address_error(struct pt_regs *regs, 520 unsigned long writeaccess, 521 unsigned long address) 522 { 523 unsigned long error_code = 0; 524 mm_segment_t oldfs; 525 siginfo_t info; 526 insn_size_t instruction; 527 int tmp; 528 529 /* Intentional ifdef */ 530 #ifdef CONFIG_CPU_HAS_SR_RB 531 error_code = lookup_exception_vector(); 532 #endif 533 534 oldfs = get_fs(); 535 536 if (user_mode(regs)) { 537 int si_code = BUS_ADRERR; 538 539 local_irq_enable(); 540 541 /* bad PC is not something we can fix */ 542 if (regs->pc & 1) { 543 si_code = BUS_ADRALN; 544 goto uspace_segv; 545 } 546 547 set_fs(USER_DS); 548 if (copy_from_user(&instruction, (void __user *)(regs->pc), 549 sizeof(instruction))) { 550 /* Argh. Fault on the instruction itself. 551 This should never happen non-SMP 552 */ 553 set_fs(oldfs); 554 goto uspace_segv; 555 } 556 557 tmp = handle_unaligned_access(instruction, regs, 558 &user_mem_access); 559 set_fs(oldfs); 560 561 if (tmp==0) 562 return; /* sorted */ 563 uspace_segv: 564 printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned " 565 "access (PC %lx PR %lx)\n", current->comm, regs->pc, 566 regs->pr); 567 568 info.si_signo = SIGBUS; 569 info.si_errno = 0; 570 info.si_code = si_code; 571 info.si_addr = (void __user *)address; 572 force_sig_info(SIGBUS, &info, current); 573 } else { 574 if (regs->pc & 1) 575 die("unaligned program counter", regs, error_code); 576 577 set_fs(KERNEL_DS); 578 if (copy_from_user(&instruction, (void __user *)(regs->pc), 579 sizeof(instruction))) { 580 /* Argh. Fault on the instruction itself. 581 This should never happen non-SMP 582 */ 583 set_fs(oldfs); 584 die("insn faulting in do_address_error", regs, 0); 585 } 586 587 handle_unaligned_access(instruction, regs, &user_mem_access); 588 set_fs(oldfs); 589 } 590 } 591 592 #ifdef CONFIG_SH_DSP 593 /* 594 * SH-DSP support gerg@snapgear.com. 
595 */ 596 int is_dsp_inst(struct pt_regs *regs) 597 { 598 unsigned short inst = 0; 599 600 /* 601 * Safe guard if DSP mode is already enabled or we're lacking 602 * the DSP altogether. 603 */ 604 if (!(current_cpu_data.flags & CPU_HAS_DSP) || (regs->sr & SR_DSP)) 605 return 0; 606 607 get_user(inst, ((unsigned short *) regs->pc)); 608 609 inst &= 0xf000; 610 611 /* Check for any type of DSP or support instruction */ 612 if ((inst == 0xf000) || (inst == 0x4000)) 613 return 1; 614 615 return 0; 616 } 617 #else 618 #define is_dsp_inst(regs) (0) 619 #endif /* CONFIG_SH_DSP */ 620 621 #ifdef CONFIG_CPU_SH2A 622 asmlinkage void do_divide_error(unsigned long r4, unsigned long r5, 623 unsigned long r6, unsigned long r7, 624 struct pt_regs __regs) 625 { 626 siginfo_t info; 627 628 switch (r4) { 629 case TRAP_DIVZERO_ERROR: 630 info.si_code = FPE_INTDIV; 631 break; 632 case TRAP_DIVOVF_ERROR: 633 info.si_code = FPE_INTOVF; 634 break; 635 } 636 637 force_sig_info(SIGFPE, &info, current); 638 } 639 #endif 640 641 asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5, 642 unsigned long r6, unsigned long r7, 643 struct pt_regs __regs) 644 { 645 struct pt_regs *regs = RELOC_HIDE(&__regs, 0); 646 unsigned long error_code; 647 struct task_struct *tsk = current; 648 649 #ifdef CONFIG_SH_FPU_EMU 650 unsigned short inst = 0; 651 int err; 652 653 get_user(inst, (unsigned short*)regs->pc); 654 655 err = do_fpu_inst(inst, regs); 656 if (!err) { 657 regs->pc += instruction_size(inst); 658 return; 659 } 660 /* not a FPU inst. */ 661 #endif 662 663 #ifdef CONFIG_SH_DSP 664 /* Check if it's a DSP instruction */ 665 if (is_dsp_inst(regs)) { 666 /* Enable DSP mode, and restart instruction. 
*/ 667 regs->sr |= SR_DSP; 668 /* Save DSP mode */ 669 tsk->thread.dsp_status.status |= SR_DSP; 670 return; 671 } 672 #endif 673 674 error_code = lookup_exception_vector(); 675 676 local_irq_enable(); 677 force_sig(SIGILL, tsk); 678 die_if_no_fixup("reserved instruction", regs, error_code); 679 } 680 681 #ifdef CONFIG_SH_FPU_EMU 682 static int emulate_branch(unsigned short inst, struct pt_regs *regs) 683 { 684 /* 685 * bfs: 8fxx: PC+=d*2+4; 686 * bts: 8dxx: PC+=d*2+4; 687 * bra: axxx: PC+=D*2+4; 688 * bsr: bxxx: PC+=D*2+4 after PR=PC+4; 689 * braf:0x23: PC+=Rn*2+4; 690 * bsrf:0x03: PC+=Rn*2+4 after PR=PC+4; 691 * jmp: 4x2b: PC=Rn; 692 * jsr: 4x0b: PC=Rn after PR=PC+4; 693 * rts: 000b: PC=PR; 694 */ 695 if (((inst & 0xf000) == 0xb000) || /* bsr */ 696 ((inst & 0xf0ff) == 0x0003) || /* bsrf */ 697 ((inst & 0xf0ff) == 0x400b)) /* jsr */ 698 regs->pr = regs->pc + 4; 699 700 if ((inst & 0xfd00) == 0x8d00) { /* bfs, bts */ 701 regs->pc += SH_PC_8BIT_OFFSET(inst); 702 return 0; 703 } 704 705 if ((inst & 0xe000) == 0xa000) { /* bra, bsr */ 706 regs->pc += SH_PC_12BIT_OFFSET(inst); 707 return 0; 708 } 709 710 if ((inst & 0xf0df) == 0x0003) { /* braf, bsrf */ 711 regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4; 712 return 0; 713 } 714 715 if ((inst & 0xf0df) == 0x400b) { /* jmp, jsr */ 716 regs->pc = regs->regs[(inst & 0x0f00) >> 8]; 717 return 0; 718 } 719 720 if ((inst & 0xffff) == 0x000b) { /* rts */ 721 regs->pc = regs->pr; 722 return 0; 723 } 724 725 return 1; 726 } 727 #endif 728 729 asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5, 730 unsigned long r6, unsigned long r7, 731 struct pt_regs __regs) 732 { 733 struct pt_regs *regs = RELOC_HIDE(&__regs, 0); 734 unsigned long inst; 735 struct task_struct *tsk = current; 736 737 if (kprobe_handle_illslot(regs->pc) == 0) 738 return; 739 740 #ifdef CONFIG_SH_FPU_EMU 741 get_user(inst, (unsigned short *)regs->pc + 1); 742 if (!do_fpu_inst(inst, regs)) { 743 get_user(inst, (unsigned short *)regs->pc); 
		if (!emulate_branch(inst, regs))
			return;
		/* fault in branch.*/
	}
	/* not a FPU inst. */
#endif

	inst = lookup_exception_vector();

	local_irq_enable();
	force_sig(SIGILL, tsk);
	die_if_no_fixup("illegal slot instruction", regs, inst);
}

/*
 * Catch-all handler for exception vectors with no dedicated handler:
 * fatal in kernel mode, otherwise silently ignored.
 */
asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
				   unsigned long r6, unsigned long r7,
				   struct pt_regs __regs)
{
	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
	long ex;

	ex = lookup_exception_vector();
	die_if_kernel("exception", regs, ex);
}

#if defined(CONFIG_SH_STANDARD_BIOS)
void *gdb_vbr_vector;

static inline void __init gdb_vbr_init(void)
{
	register unsigned long vbr;

	/*
	 * Read the old value of the VBR register to initialise
	 * the vector through which debug and BIOS traps are
	 * delegated by the Linux trap handler.
	 */
	asm volatile("stc vbr, %0" : "=r" (vbr));

	gdb_vbr_vector = (void *)(vbr + 0x100);
	printk("Setting GDB trap vector to 0x%08lx\n",
	       (unsigned long)gdb_vbr_vector);
}
#endif

/* Point this CPU's VBR at our exception vector table. */
void __cpuinit per_cpu_trap_init(void)
{
	extern void *vbr_base;

#ifdef CONFIG_SH_STANDARD_BIOS
	if (raw_smp_processor_id() == 0)
		gdb_vbr_init();
#endif

	/* NOTE: The VBR value should be at P1
	   (or P2, virtual "fixed" address space).
	   It definitely should not be a physical address.  */

	asm volatile("ldc %0, vbr"
		     : /* no output */
		     : "r" (&vbr_base)
		     : "memory");
}

/*
 * Install 'handler' for exception vector 'vec', returning the handler
 * that was previously installed.
 */
void *set_exception_table_vec(unsigned int vec, void *handler)
{
	extern void *exception_handling_table[];
	void *old_handler;

	old_handler = exception_handling_table[vec];
	exception_handling_table[vec] = handler;
	return old_handler;
}

void __init trap_init(void)
{
	set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
	set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst);

#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \
    defined(CONFIG_SH_FPU_EMU)
	/*
	 * For SH-4 lacking an FPU, treat floating point instructions as
	 * reserved. They'll be handled in the math-emu case, or faulted on
	 * otherwise.
	 */
	set_exception_table_evt(0x800, do_reserved_inst);
	set_exception_table_evt(0x820, do_illegal_slot_inst);
#elif defined(CONFIG_SH_FPU)
#ifdef CONFIG_CPU_SUBTYPE_SHX3
	set_exception_table_evt(0xd80, fpu_state_restore_trap_handler);
	set_exception_table_evt(0xda0, fpu_state_restore_trap_handler);
#else
	set_exception_table_evt(0x800, fpu_state_restore_trap_handler);
	set_exception_table_evt(0x820, fpu_state_restore_trap_handler);
#endif
#endif

#ifdef CONFIG_CPU_SH2
	set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_trap_handler);
#endif
#ifdef CONFIG_CPU_SH2A
	set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
	set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
#ifdef CONFIG_SH_FPU
	set_exception_table_vec(TRAP_FPU_ERROR, fpu_error_trap_handler);
#endif
#endif

#ifdef TRAP_UBC
	set_exception_table_vec(TRAP_UBC, break_point_trap);
#endif

	/* Setup VBR for boot cpu */
	per_cpu_trap_init();
}

/*
 * Print a call trace by scanning the stack upwards from 'sp' and
 * resolving every word that lies within kernel text.  Skipped entirely
 * when the register set belongs to userspace.
 */
void show_trace(struct task_struct *tsk, unsigned long *sp,
		struct pt_regs *regs)
{
	unsigned long addr;

	if (regs && user_mode(regs))
		return;

	printk("\nCall trace:\n");

	while (!kstack_end(sp)) {
		addr = *sp++;
		if (kernel_text_address(addr))
			print_ip_sym(addr);
	}

	printk("\n");

	if (!tsk)
		tsk = current;

	debug_show_held_locks(tsk);
}

/*
 * Dump the raw stack and a call trace for 'tsk' (current if NULL).
 * NOTE: the incoming 'sp' argument is always overwritten; the stack
 * pointer is derived from the task itself.
 */
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	unsigned long stack;

	if (!tsk)
		tsk = current;
	if (tsk == current)
		sp = (unsigned long *)current_stack_pointer;
	else
		sp = (unsigned long *)tsk->thread.sp;

	stack = (unsigned long)sp;
	dump_mem("Stack: ", stack, THREAD_SIZE +
		 (unsigned long)task_stack_page(tsk));
	show_trace(tsk, sp, NULL);
}

/* Convenience wrapper: dump the current task's stack and call trace. */
void dump_stack(void)
{
	show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);