/*
 *  linux/arch/arm/kernel/traps.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *  Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  'traps.c' handles hardware exceptions after we have saved some state in
 *  'linux/arch/arm/lib/traps.S'.  Mostly a debugging aid, but will probably
 *  kill the offending process.
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/spinlock.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/delay.h>
#include <linux/init.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/traps.h>
#include <asm/io.h>

#include "ptrace.h"
#include "signal.h"

static const char *handler[] = { "prefetch abort", "data abort", "address exception", "interrupt" };

#ifdef CONFIG_DEBUG_USER
unsigned int user_debug;

static int __init user_debug_setup(char *str)
{
	get_option(&str, &user_debug);
	return 1;
}
__setup("user_debug=", user_debug_setup);
#endif

static void dump_mem(const char *str, unsigned long bottom, unsigned long top);

static inline int in_exception_text(unsigned long ptr)
{
	extern char __exception_text_start[];
	extern char __exception_text_end[];

	return ptr >= (unsigned long)&__exception_text_start &&
	       ptr < (unsigned long)&__exception_text_end;
}

void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
{
#ifdef CONFIG_KALLSYMS
	printk("[<%08lx>] ", where);
	print_symbol("(%s) ", where);
	printk("from [<%08lx>] ", from);
	print_symbol("(%s)\n", from);
#else
	printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
#endif

	if (in_exception_text(where))
		dump_mem("Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
}

/*
 * Stack pointers should always be within the kernel's view of
 * physical memory.  If it is not there, then we can't dump
 * out any information relating to the stack.
 */
static int verify_stack(unsigned long sp)
{
	if (sp < PAGE_OFFSET || (sp > (unsigned long)high_memory && high_memory != 0))
		return -EFAULT;

	return 0;
}

/*
 * Dump out the contents of some memory nicely...
 */
static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
{
	unsigned long p = bottom & ~31;
	mm_segment_t fs;
	int i;

	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space.  Note that we now dump the
	 * code first, just in case the backtrace kills us.
	 */
	fs = get_fs();
	set_fs(KERNEL_DS);

	printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);

	for (p = bottom & ~31; p < top;) {
		printk("%04lx: ", p & 0xffff);

		for (i = 0; i < 8; i++, p += 4) {
			unsigned int val;

			if (p < bottom || p >= top)
				printk("         ");
			else {
				__get_user(val, (unsigned long *)p);
				printk("%08x ", val);
			}
		}
		printk("\n");
	}

	set_fs(fs);
}

static void dump_instr(struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	const int thumb = thumb_mode(regs);
	const int width = thumb ? 4 : 8;
	mm_segment_t fs;
	int i;

	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space.  Note that we now dump the
	 * code first, just in case the backtrace kills us.
	 */
	fs = get_fs();
	set_fs(KERNEL_DS);

	printk("Code: ");
	for (i = -4; i < 1; i++) {
		unsigned int val, bad;

		if (thumb)
			bad = __get_user(val, &((u16 *)addr)[i]);
		else
			bad = __get_user(val, &((u32 *)addr)[i]);

		if (!bad)
			printk(i == 0 ? "(%0*x) " : "%0*x ", width, val);
		else {
			printk("bad PC value.");
			break;
		}
	}
	printk("\n");

	set_fs(fs);
}

static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
	unsigned int fp;
	int ok = 1;

	printk("Backtrace: ");
	fp = regs->ARM_fp;
	if (!fp) {
		printk("no frame pointer");
		ok = 0;
	} else if (verify_stack(fp)) {
		printk("invalid frame pointer 0x%08x", fp);
		ok = 0;
	} else if (fp < (unsigned long)end_of_stack(tsk))
		printk("frame pointer underflow");
	printk("\n");

	if (ok)
		c_backtrace(fp, processor_mode(regs));
}

void dump_stack(void)
{
#ifdef CONFIG_DEBUG_ERRORS
	__backtrace();
#endif
}

EXPORT_SYMBOL(dump_stack);

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	unsigned long fp;

	if (!tsk)
		tsk = current;

	if (tsk != current)
		fp = thread_saved_fp(tsk);
	else
		asm("mov %0, fp" : "=r" (fp) : : "cc");

	c_backtrace(fp, 0x10);
	barrier();
}

static void __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs)
{
	struct task_struct *tsk = thread->task;
	static int die_counter;

	printk("Internal error: %s: %x [#%d]\n", str, err, ++die_counter);
	print_modules();
	__show_regs(regs);
	printk("Process %s (pid: %d, stack limit = 0x%p)\n",
		tsk->comm, tsk->pid, thread + 1);

	if (!user_mode(regs) || in_interrupt()) {
		dump_mem("Stack: ", regs->ARM_sp,
			 THREAD_SIZE + (unsigned long)task_stack_page(tsk));
		dump_backtrace(regs, tsk);
		dump_instr(regs);
	}
}

DEFINE_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
{
	struct thread_info *thread = current_thread_info();

	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	__die(str, err, thread, regs);
	bust_spinlocks(0);
	spin_unlock_irq(&die_lock);

	if (panic_on_oops)
		panic("Fatal exception");

	do_exit(SIGSEGV);
}

void notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		unsigned long err, unsigned long trap)
{
	if (user_mode(regs)) {
		current->thread.error_code = err;
		current->thread.trap_no = trap;

		force_sig_info(info->si_signo, info, current);
	} else {
		die(str, regs, err);
	}
}

static LIST_HEAD(undef_hook);
static DEFINE_SPINLOCK(undef_lock);

void register_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	spin_lock_irqsave(&undef_lock, flags);
	list_add(&hook->node, &undef_hook);
	spin_unlock_irqrestore(&undef_lock, flags);
}

void unregister_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	spin_lock_irqsave(&undef_lock, flags);
	list_del(&hook->node);
	spin_unlock_irqrestore(&undef_lock, flags);
}

asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
{
	unsigned int correction = thumb_mode(regs) ? 2 : 4;
	unsigned int instr;
	struct undef_hook *hook;
	siginfo_t info;
	void __user *pc;
	unsigned long flags;

	/*
	 * According to the ARM ARM, PC is 2 or 4 bytes ahead,
	 * depending on whether we're in Thumb mode or not.
	 * Correct this offset.
	 */
	regs->ARM_pc -= correction;

	pc = (void __user *)instruction_pointer(regs);

	if (processor_mode(regs) == SVC_MODE) {
		instr = *(u32 *) pc;
	} else if (thumb_mode(regs)) {
		get_user(instr, (u16 __user *)pc);
	} else {
		get_user(instr, (u32 __user *)pc);
	}

	spin_lock_irqsave(&undef_lock, flags);
	list_for_each_entry(hook, &undef_hook, node) {
		if ((instr & hook->instr_mask) == hook->instr_val &&
		    (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val) {
			if (hook->fn(regs, instr) == 0) {
				spin_unlock_irqrestore(&undef_lock, flags);
				return;
			}
		}
	}
	spin_unlock_irqrestore(&undef_lock, flags);

#ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_UNDEFINED) {
		printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
			current->comm, current->pid, pc);
		dump_instr(regs);
	}
#endif

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPC;
	info.si_addr = pc;

	notify_die("Oops - undefined instruction", regs, &info, 0, 6);
}

asmlinkage void do_unexp_fiq (struct pt_regs *regs)
{
#ifndef CONFIG_IGNORE_FIQ
	printk("Hmm.  Unexpected FIQ received, but trying to continue\n");
	printk("You may have a hardware problem...\n");
#endif
}

/*
 * bad_mode handles the impossible case in the vectors.  If you see one of
 * these, then it's extremely serious, and could mean you have buggy hardware.
 * It never returns, and never tries to sync.  We hope that we can at least
 * dump out some state information...
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason)
{
	console_verbose();

	printk(KERN_CRIT "Bad mode in %s handler detected\n", handler[reason]);

	die("Oops - bad mode", regs, 0);
	local_irq_disable();
	panic("bad mode");
}

static int bad_syscall(int n, struct pt_regs *regs)
{
	struct thread_info *thread = current_thread_info();
	siginfo_t info;

	if (current->personality != PER_LINUX &&
	    current->personality != PER_LINUX_32BIT &&
	    thread->exec_domain->handler) {
		thread->exec_domain->handler(n, regs);
		return regs->ARM_r0;
	}

#ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_SYSCALL) {
		printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n",
			current->pid, current->comm, n);
		dump_instr(regs);
	}
#endif

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLTRP;
	info.si_addr = (void __user *)instruction_pointer(regs) -
			(thumb_mode(regs) ? 2 : 4);

	notify_die("Oops - bad syscall", regs, &info, n, 0);

	return regs->ARM_r0;
}

static inline void
do_cache_op(unsigned long start, unsigned long end, int flags)
{
	struct vm_area_struct *vma;

	if (end < start || flags)
		return;

	vma = find_vma(current->active_mm, start);
	if (vma && vma->vm_start < end) {
		if (start < vma->vm_start)
			start = vma->vm_start;
		if (end > vma->vm_end)
			end = vma->vm_end;

		flush_cache_user_range(vma, start, end);
	}
}

/*
 * Handle all unrecognised system calls.
 *  0x9f0000 - 0x9fffff are some more esoteric system calls
 */
#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
asmlinkage int arm_syscall(int no, struct pt_regs *regs)
{
	struct thread_info *thread = current_thread_info();
	siginfo_t info;

	if ((no >> 16) != (__ARM_NR_BASE >> 16))
		return bad_syscall(no, regs);

	switch (no & 0xffff) {
	case 0: /* branch through 0 */
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = SEGV_MAPERR;
		info.si_addr = NULL;

		notify_die("branch through zero", regs, &info, 0, 0);
		return 0;

	case NR(breakpoint): /* SWI BREAK_POINT */
		regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
		ptrace_break(current, regs);
		return regs->ARM_r0;

	/*
	 * Flush a region from virtual address 'r0' to virtual address 'r1'
	 * _exclusive_.  There is no alignment requirement on either address;
	 * user space does not need to know the hardware cache layout.
	 *
	 * r2 contains flags.  It should ALWAYS be passed as ZERO until it
	 * is defined to be something else.  For now we ignore it, but may
	 * the fires of hell burn in your belly if you break this rule. ;)
	 *
	 * (at a later date, we may want to allow this call to not flush
	 * various aspects of the cache.  Passing '0' will guarantee that
	 * everything necessary gets flushed to maintain consistency in
	 * the specified region).
	 */
	case NR(cacheflush):
		do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);
		return 0;

	case NR(usr26):
		if (!(elf_hwcap & HWCAP_26BIT))
			break;
		regs->ARM_cpsr &= ~MODE32_BIT;
		return regs->ARM_r0;

	case NR(usr32):
		if (!(elf_hwcap & HWCAP_26BIT))
			break;
		regs->ARM_cpsr |= MODE32_BIT;
		return regs->ARM_r0;

	case NR(set_tls):
		thread->tp_value = regs->ARM_r0;
#if defined(CONFIG_HAS_TLS_REG)
		asm ("mcr p15, 0, %0, c13, c0, 3" : : "r" (regs->ARM_r0));
#elif !defined(CONFIG_TLS_REG_EMUL)
		/*
		 * User space must never try to access this directly.
		 * Expect your app to break eventually if you do so.
		 * The user helper at 0xffff0fe0 must be used instead.
		 * (see entry-armv.S for details)
		 */
		*((unsigned int *)0xffff0ff0) = regs->ARM_r0;
#endif
		return 0;

#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
	/*
	 * Atomically store r1 in *r2 if *r2 is equal to r0 for user space.
	 * Return zero in r0 if *MEM was changed or non-zero if no exchange
	 * happened.  Also set the user C flag accordingly.
	 * If access permissions have to be fixed up then non-zero is
	 * returned and the operation has to be re-attempted.
	 *
	 * *NOTE*: This is a ghost syscall private to the kernel.  Only the
	 * __kuser_cmpxchg code in entry-armv.S should be aware of its
	 * existence.  Don't ever use this from user code.
	 */
	case 0xfff0:
	{
		extern void do_DataAbort(unsigned long addr, unsigned int fsr,
					 struct pt_regs *regs);
		unsigned long val;
		unsigned long addr = regs->ARM_r2;
		struct mm_struct *mm = current->mm;
		pgd_t *pgd; pmd_t *pmd; pte_t *pte;
		spinlock_t *ptl;

		regs->ARM_cpsr &= ~PSR_C_BIT;
		down_read(&mm->mmap_sem);
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd))
			goto bad_access;
		pmd = pmd_offset(pgd, addr);
		if (!pmd_present(*pmd))
			goto bad_access;
		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
		if (!pte_present(*pte) || !pte_dirty(*pte)) {
			pte_unmap_unlock(pte, ptl);
			goto bad_access;
		}
		val = *(unsigned long *)addr;
		val -= regs->ARM_r0;
		if (val == 0) {
			*(unsigned long *)addr = regs->ARM_r1;
			regs->ARM_cpsr |= PSR_C_BIT;
		}
		pte_unmap_unlock(pte, ptl);
		up_read(&mm->mmap_sem);
		return val;

	bad_access:
		up_read(&mm->mmap_sem);
		/* simulate a write access fault */
		do_DataAbort(addr, 15 + (1 << 11), regs);
		return -1;
	}
#endif

	default:
		/* Calls 9f00xx..9f07ff are defined to return -ENOSYS
		   if not implemented, rather than raising SIGILL.  This
		   way the calling program can gracefully determine whether
		   a feature is supported.  */
		if ((no & 0xffff) <= 0x7ff)
			return -ENOSYS;
		break;
	}
#ifdef CONFIG_DEBUG_USER
	/*
	 * experience shows that these seem to indicate that
	 * something catastrophic has happened
	 */
	if (user_debug & UDBG_SYSCALL) {
		printk("[%d] %s: arm syscall %d\n",
		       current->pid, current->comm, no);
		dump_instr(regs);
		if (user_mode(regs)) {
			__show_regs(regs);
			c_backtrace(regs->ARM_fp, processor_mode(regs));
		}
	}
#endif
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLTRP;
	info.si_addr = (void __user *)instruction_pointer(regs) -
			(thumb_mode(regs) ? 2 : 4);

	notify_die("Oops - bad syscall(2)", regs, &info, no, 0);
	return 0;
}

#ifdef CONFIG_TLS_REG_EMUL

/*
 * We might be running on an ARMv6+ processor which should have the TLS
 * register but for some reason we can't use it, or maybe an SMP system
 * using a pre-ARMv6 processor (there are apparently a few prototypes like
 * that in existence) and therefore access to that register must be
 * emulated.
 */

static int get_tp_trap(struct pt_regs *regs, unsigned int instr)
{
	int reg = (instr >> 12) & 15;
	if (reg == 15)
		return 1;
	regs->uregs[reg] = current_thread_info()->tp_value;
	regs->ARM_pc += 4;
	return 0;
}

static struct undef_hook arm_mrc_hook = {
	.instr_mask	= 0x0fff0fff,
	.instr_val	= 0x0e1d0f70,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= 0,
	.fn		= get_tp_trap,
};

static int __init arm_mrc_hook_init(void)
{
	register_undef_hook(&arm_mrc_hook);
	return 0;
}

late_initcall(arm_mrc_hook_init);

#endif

void __bad_xchg(volatile void *ptr, int size)
{
	printk("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
		__builtin_return_address(0), ptr, size);
	BUG();
}
EXPORT_SYMBOL(__bad_xchg);

/*
 * A data abort trap was taken, but we did not handle the instruction.
 * Try to abort the user program, or panic if it was the kernel.
 */
asmlinkage void
baddataabort(int code, unsigned long instr, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	siginfo_t info;

#ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_BADABORT) {
		printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n",
			current->pid, current->comm, code, instr);
		dump_instr(regs);
		show_pte(current->mm, addr);
	}
#endif

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPC;
	info.si_addr = (void __user *)addr;

	notify_die("unknown data abort code", regs, &info, instr, 0);
}

void __attribute__((noreturn)) __bug(const char *file, int line)
{
	printk(KERN_CRIT "kernel BUG at %s:%d!\n", file, line);
	*(int *)0 = 0;

	/* Avoid "noreturn function does return" */
	for (;;);
}
EXPORT_SYMBOL(__bug);

void __readwrite_bug(const char *fn)
{
	printk("%s called, but not implemented\n", fn);
	BUG();
}
EXPORT_SYMBOL(__readwrite_bug);

void __pte_error(const char *file, int line, unsigned long val)
{
	printk("%s:%d: bad pte %08lx.\n", file, line, val);
}

void __pmd_error(const char *file, int line, unsigned long val)
{
	printk("%s:%d: bad pmd %08lx.\n", file, line, val);
}

void __pgd_error(const char *file, int line, unsigned long val)
{
	printk("%s:%d: bad pgd %08lx.\n", file, line, val);
}

asmlinkage void __div0(void)
{
	printk("Division by zero in kernel.\n");
	dump_stack();
}
EXPORT_SYMBOL(__div0);

void abort(void)
{
	BUG();

	/* if that doesn't kill us, halt */
	panic("Oops failed to kill thread");
}
EXPORT_SYMBOL(abort);

void __init trap_init(void)
{
	unsigned long vectors = CONFIG_VECTORS_BASE;
	extern char __stubs_start[], __stubs_end[];
	extern char __vectors_start[], __vectors_end[];
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;

	/*
	 * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
	 * into the vector page, mapped at 0xffff0000, and ensure these
	 * are visible to the instruction stream.
	 */
	memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
	memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
	memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);

	/*
	 * Copy signal return handlers into the vector page, and
	 * set sigreturn to be a pointer to these.
	 */
	memcpy((void *)KERN_SIGRETURN_CODE, sigreturn_codes,
	       sizeof(sigreturn_codes));

	flush_icache_range(vectors, vectors + PAGE_SIZE);
	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
}