/*
 * arch/xtensa/kernel/traps.c
 *
 * Exception handling.
 *
 * Derived from code with the following copyrights:
 * Copyright (C) 1994 - 1999 by Ralf Baechle
 * Modified for R3000 by Paul M. Antoine, 1995, 1996
 * Complete output from die() by Ulf Carlsson, 1998
 * Copyright (C) 1999 Silicon Graphics, Inc.
 *
 * Essentially rewritten for the Xtensa architecture port.
 *
 * Copyright (C) 2001 - 2013 Tensilica Inc.
 *
 * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
 * Chris Zankel	<chris@zankel.net>
 * Marc Gauthier<marc@tensilica.com, marc@alumni.uwaterloo.ca>
 * Kevin Chea
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/kallsyms.h>
#include <linux/delay.h>
#include <linux/hardirq.h>

#include <asm/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/timex.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/traps.h>
#include <asm/hw_breakpoint.h>

/*
 * Machine specific interrupt handlers
 */

extern void kernel_exception(void);
extern void user_exception(void);

extern void fast_syscall_kernel(void);
extern void fast_syscall_user(void);
extern void fast_alloca(void);
extern void fast_unaligned(void);
extern void fast_second_level_miss(void);
extern void fast_store_prohibited(void);
extern void fast_coprocessor(void);

extern void do_illegal_instruction (struct pt_regs*);
extern void do_interrupt (struct pt_regs*);
extern void do_nmi(struct pt_regs *);
extern void do_unaligned_user (struct pt_regs*);
extern void do_multihit (struct pt_regs*, unsigned long);
extern void do_page_fault (struct pt_regs*, unsigned long);
extern void do_debug (struct pt_regs*);
extern void system_call (struct pt_regs*);

/*
 * The vector table must be preceded by a save area (which
 * implies it must be in RAM, unless one places RAM immediately
 * before a ROM and puts the vector at the start of the ROM (!))
 */

#define KRNL		0x01
#define USER		0x02

#define COPROCESSOR(x)							\
{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER, fast_coprocessor }

typedef struct {
	int cause;
	int fast;
	void *handler;
} dispatch_init_table_t;

static dispatch_init_table_t __initdata dispatch_init_table[] = {

{ EXCCAUSE_ILLEGAL_INSTRUCTION,	0,	   do_illegal_instruction},
{ EXCCAUSE_SYSTEM_CALL,		KRNL,	   fast_syscall_kernel },
{ EXCCAUSE_SYSTEM_CALL,		USER,	   fast_syscall_user },
{ EXCCAUSE_SYSTEM_CALL,		0,	   system_call },
/* EXCCAUSE_INSTRUCTION_FETCH unhandled */
/* EXCCAUSE_LOAD_STORE_ERROR unhandled */
{ EXCCAUSE_LEVEL1_INTERRUPT,	0,	   do_interrupt },
{ EXCCAUSE_ALLOCA,		USER|KRNL, fast_alloca },
/* EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */
/* EXCCAUSE_PRIVILEGED unhandled */
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
#ifdef CONFIG_XTENSA_UNALIGNED_USER
{ EXCCAUSE_UNALIGNED,		USER,	   fast_unaligned },
#endif
{ EXCCAUSE_UNALIGNED,		0,	   do_unaligned_user },
{ EXCCAUSE_UNALIGNED,		KRNL,	   fast_unaligned },
#endif
#ifdef CONFIG_MMU
{ EXCCAUSE_ITLB_MISS,		0,	   do_page_fault },
{ EXCCAUSE_ITLB_MISS,		USER|KRNL, fast_second_level_miss},
{ EXCCAUSE_ITLB_MULTIHIT,	0,	   do_multihit },
{ EXCCAUSE_ITLB_PRIVILEGE,	0,	   do_page_fault },
/* EXCCAUSE_SIZE_RESTRICTION unhandled */
{ EXCCAUSE_FETCH_CACHE_ATTRIBUTE, 0,	   do_page_fault },
{ EXCCAUSE_DTLB_MISS,		USER|KRNL, fast_second_level_miss},
{ EXCCAUSE_DTLB_MISS,		0,	   do_page_fault },
{ EXCCAUSE_DTLB_MULTIHIT,	0,	   do_multihit },
{ EXCCAUSE_DTLB_PRIVILEGE,	0,	   do_page_fault },
/* EXCCAUSE_DTLB_SIZE_RESTRICTION unhandled */
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, USER|KRNL, fast_store_prohibited },
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0,	   do_page_fault },
{ EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0,	   do_page_fault },
#endif /* CONFIG_MMU */
/* XCHAL_EXCCAUSE_FLOATING_POINT unhandled */
#if XTENSA_HAVE_COPROCESSOR(0)
COPROCESSOR(0),
#endif
#if XTENSA_HAVE_COPROCESSOR(1)
COPROCESSOR(1),
#endif
#if XTENSA_HAVE_COPROCESSOR(2)
COPROCESSOR(2),
#endif
#if XTENSA_HAVE_COPROCESSOR(3)
COPROCESSOR(3),
#endif
#if XTENSA_HAVE_COPROCESSOR(4)
COPROCESSOR(4),
#endif
#if XTENSA_HAVE_COPROCESSOR(5)
COPROCESSOR(5),
#endif
#if XTENSA_HAVE_COPROCESSOR(6)
COPROCESSOR(6),
#endif
#if XTENSA_HAVE_COPROCESSOR(7)
COPROCESSOR(7),
#endif
#if XTENSA_FAKE_NMI
{ EXCCAUSE_MAPPED_NMI,		0,	   do_nmi },
#endif
{ EXCCAUSE_MAPPED_DEBUG,	0,	   do_debug },
{ -1, -1, 0 }

};
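
/*
 * Note: the 'fast' field selects which dispatch tables an entry is
 * installed into by trap_init(): 0 installs the C handler into the
 * default table, while USER and/or KRNL install a fast (assembly)
 * handler into the fast-user and/or fast-kernel first-level dispatch
 * tables. The same cause may therefore appear several times above,
 * once per table.
 */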

/* The exception table <exc_table> serves two functions:
 * 1. it contains three dispatch tables (fast_user, fast_kernel, default-c)
 * 2. it is a temporary memory buffer for the exception handlers.
 */

DEFINE_PER_CPU(unsigned long, exc_table[EXC_TABLE_SIZE/4]);

DEFINE_PER_CPU(struct debug_table, debug_table);

void die(const char*, struct pt_regs*, long);

static inline void
__die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
	if (!user_mode(regs))
		die(str, regs, err);
}

/*
 * Unhandled Exceptions. Kill user task or panic if in kernel space.
 */

void do_unhandled(struct pt_regs *regs, unsigned long exccause)
{
	__die_if_kernel("Caught unhandled exception - should not happen",
			regs, SIGKILL);

	/* If in user mode, send SIGILL signal to current process */
	printk("Caught unhandled exception in '%s' "
	       "(pid = %d, pc = %#010lx) - should not happen\n"
	       "\tEXCCAUSE is %ld\n",
	       current->comm, task_pid_nr(current), regs->pc, exccause);
	force_sig(SIGILL, current);
}

/*
 * Multi-hit exception. This is fatal!
 */

void do_multihit(struct pt_regs *regs, unsigned long exccause)
{
	die("Caught multihit exception", regs, SIGKILL);
}

/*
 * IRQ handler.
 */

extern void do_IRQ(int, struct pt_regs *);

#if XTENSA_FAKE_NMI

#define IS_POW2(v) (((v) & ((v) - 1)) == 0)

#if !(PROFILING_INTLEVEL == XCHAL_EXCM_LEVEL && \
      IS_POW2(XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL)))
#warning "Fake NMI is requested for PMM, but there are other IRQs at or above its level."
#warning "Fake NMI will be used, but there will be a bugcheck if one of those IRQs fires."
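
/*
 * Sanity check for the fake NMI: BUG if any interrupt other than the
 * profiling interrupt itself, or one strictly below PROFILING_INTLEVEL,
 * is both pending and enabled - this is the bugcheck the #warnings
 * above refer to.
 */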
static inline void check_valid_nmi(void)
{
	unsigned intread = get_sr(interrupt);
	unsigned intenable = get_sr(intenable);

	BUG_ON(intread & intenable &
	       ~(XTENSA_INTLEVEL_ANDBELOW_MASK(PROFILING_INTLEVEL) ^
		 XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL) ^
		 BIT(XCHAL_PROFILING_INTERRUPT)));
}

#else

static inline void check_valid_nmi(void)
{
}

#endif

irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id);

DEFINE_PER_CPU(unsigned long, nmi_count);

void do_nmi(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	if ((regs->ps & PS_INTLEVEL_MASK) < LOCKLEVEL)
		trace_hardirqs_off();

	old_regs = set_irq_regs(regs);
	nmi_enter();
	++*this_cpu_ptr(&nmi_count);
	check_valid_nmi();
	xtensa_pmu_irq_handler(0, NULL);
	nmi_exit();
	set_irq_regs(old_regs);
}
#endif
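
/*
 * Generic interrupt dispatcher: repeatedly pick the highest interrupt
 * level (up to LOCKLEVEL) that is both pending (INTERRUPT) and enabled
 * (INTENABLE), hand the lowest-numbered IRQ at that level to do_IRQ(),
 * and loop until nothing enabled is left pending.
 */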
void do_interrupt(struct pt_regs *regs)
{
	static const unsigned int_level_mask[] = {
		0,
		XCHAL_INTLEVEL1_MASK,
		XCHAL_INTLEVEL2_MASK,
		XCHAL_INTLEVEL3_MASK,
		XCHAL_INTLEVEL4_MASK,
		XCHAL_INTLEVEL5_MASK,
		XCHAL_INTLEVEL6_MASK,
		XCHAL_INTLEVEL7_MASK,
	};
	struct pt_regs *old_regs;

	trace_hardirqs_off();

	old_regs = set_irq_regs(regs);
	irq_enter();

	for (;;) {
		unsigned intread = get_sr(interrupt);
		unsigned intenable = get_sr(intenable);
		unsigned int_at_level = intread & intenable;
		unsigned level;

		for (level = LOCKLEVEL; level > 0; --level) {
			if (int_at_level & int_level_mask[level]) {
				int_at_level &= int_level_mask[level];
				break;
			}
		}

		if (level == 0)
			break;

		do_IRQ(__ffs(int_at_level), regs);
	}

	irq_exit();
	set_irq_regs(old_regs);
}

/*
 * Illegal instruction. Fatal if in kernel space.
 */

void
do_illegal_instruction(struct pt_regs *regs)
{
	__die_if_kernel("Illegal instruction in kernel", regs, SIGKILL);

	/* If in user mode, send SIGILL signal to current process. */

	printk("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n",
	       current->comm, task_pid_nr(current), regs->pc);
	force_sig(SIGILL, current);
}


/*
 * Handle unaligned memory accesses from user space. Kill task.
 *
 * If CONFIG_XTENSA_UNALIGNED_USER is not set, we don't allow unaligned
 * memory accesses from user space.
 */

#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
void
do_unaligned_user (struct pt_regs *regs)
{
	siginfo_t info;

	__die_if_kernel("Unhandled unaligned exception in kernel",
			regs, SIGKILL);

	current->thread.bad_vaddr = regs->excvaddr;
	current->thread.error_code = -3;
	printk("Unaligned memory access to %08lx in '%s' "
	       "(pid = %d, pc = %#010lx)\n",
	       regs->excvaddr, current->comm, task_pid_nr(current), regs->pc);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = (void *) regs->excvaddr;
	force_sig_info(SIGBUS, &info, current);
}
#endif

/* Handle debug events.
 * When CONFIG_HAVE_HW_BREAKPOINT is on this handler is called with
 * preemption disabled to avoid rescheduling and keep mapping of hardware
 * breakpoint structures to debug registers intact, so that
 * DEBUGCAUSE.DBNUM could be used in case of data breakpoint hit.
 */
void
do_debug(struct pt_regs *regs)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int ret = check_hw_breakpoint(regs);

	preempt_enable();
	if (ret == 0)
		return;
#endif
	__die_if_kernel("Breakpoint in kernel", regs, SIGKILL);

	/* If in user mode, send SIGTRAP signal to current process */

	force_sig(SIGTRAP, current);
}


static void set_handler(int idx, void *handler)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(exc_table, cpu)[idx] = (unsigned long)handler;
}

/* Set exception C handler - for temporary use when probing exceptions */

void * __init trap_set_handler(int cause, void *handler)
{
	void *previous = (void *)per_cpu(exc_table, 0)[
		EXC_TABLE_DEFAULT / 4 + cause];
	set_handler(EXC_TABLE_DEFAULT / 4 + cause, handler);
	return previous;
}


static void trap_init_excsave(void)
{
	unsigned long excsave1 = (unsigned long)this_cpu_ptr(exc_table);
	__asm__ __volatile__("wsr  %0, excsave1\n" : : "a" (excsave1));
}

static void trap_init_debug(void)
{
	unsigned long debugsave = (unsigned long)this_cpu_ptr(&debug_table);

	this_cpu_ptr(&debug_table)->debug_exception = debug_exception;
	__asm__ __volatile__("wsr %0, excsave" __stringify(XCHAL_DEBUGLEVEL)
			     :: "a"(debugsave));
}

/*
 * Initialize dispatch tables.
 *
 * The exception vectors are stored compressed in the __init section in
 * the dispatch_init_table. This function initializes the following three
 * tables from that compressed table:
 * - fast user:		first dispatch table for user exceptions
 * - fast kernel:	first dispatch table for kernel exceptions
 * - default C handler:	C handler called by the default fast handler
 *
 * See vectors.S for more details.
 */

void __init trap_init(void)
{
	int i;

	/* Setup default vectors. */

	for (i = 0; i < 64; i++) {
		set_handler(EXC_TABLE_FAST_USER/4 + i, user_exception);
		set_handler(EXC_TABLE_FAST_KERNEL/4 + i, kernel_exception);
		set_handler(EXC_TABLE_DEFAULT/4 + i, do_unhandled);
	}

	/* Setup specific handlers. */

	for (i = 0; dispatch_init_table[i].cause >= 0; i++) {
		int fast = dispatch_init_table[i].fast;
		int cause = dispatch_init_table[i].cause;
		void *handler = dispatch_init_table[i].handler;

		if (fast == 0)
			set_handler(EXC_TABLE_DEFAULT/4 + cause, handler);
		if (fast & USER)
			set_handler(EXC_TABLE_FAST_USER/4 + cause, handler);
		if (fast & KRNL)
			set_handler(EXC_TABLE_FAST_KERNEL/4 + cause, handler);
	}

	/* Initialize EXCSAVE_1 to hold the address of the exception table. */
	trap_init_excsave();
	trap_init_debug();
}
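
/*
 * trap_init() runs on the boot CPU but fills the per-cpu exception
 * tables of all possible CPUs (set_handler() iterates over every CPU),
 * so secondary CPUs only need to point their EXCSAVE registers at
 * their own per-cpu tables.
 */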
#ifdef CONFIG_SMP
void secondary_trap_init(void)
{
	trap_init_excsave();
	trap_init_debug();
}
#endif

/*
 * This function dumps the current valid window frame and other base registers.
 */

void show_regs(struct pt_regs * regs)
{
	int i, wmask;

	show_regs_print_info(KERN_DEFAULT);

	wmask = regs->wmask & ~1;

	for (i = 0; i < 16; i++) {
		if ((i % 8) == 0)
			printk(KERN_INFO "a%02d:", i);
		printk(KERN_CONT " %08lx", regs->areg[i]);
	}
	printk(KERN_CONT "\n");

	printk("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n",
	       regs->pc, regs->ps, regs->depc, regs->excvaddr);
	printk("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n",
	       regs->lbeg, regs->lend, regs->lcount, regs->sar);
	if (user_mode(regs))
		printk("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n",
		       regs->windowbase, regs->windowstart, regs->wmask,
		       regs->syscall);
}

static int show_trace_cb(struct stackframe *frame, void *data)
{
	if (kernel_text_address(frame->pc)) {
		printk(" [<%08lx>] ", frame->pc);
		print_symbol("%s\n", frame->pc);
	}
	return 0;
}

void show_trace(struct task_struct *task, unsigned long *sp)
{
	if (!sp)
		sp = stack_pointer(task);

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif
	walk_stackframe(sp, show_trace_cb, NULL);
	printk("\n");
}

/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */

static int kstack_depth_to_print = 24;

void show_stack(struct task_struct *task, unsigned long *sp)
{
	int i = 0;
	unsigned long *stack;

	if (!sp)
		sp = stack_pointer(task);
	stack = sp;

	printk("\nStack: ");

	for (i = 0; i < kstack_depth_to_print; i++) {
		if (kstack_end(sp))
			break;
		if (i && ((i % 8) == 0))
			printk("\n       ");
		printk("%08lx ", *sp++);
	}
	printk("\n");
	show_trace(task, stack);
}

void show_code(unsigned int *pc)
{
	long i;

	printk("\nCode:");

	for (i = -3; i < 6; i++) {
		unsigned long insn;
		if (__get_user(insn, pc + i)) {
			printk(" (Bad address in pc)\n");
			break;
		}
		printk("%c%08lx%c", (i ? ' ' : '<'), insn, (i ? ' ' : '>'));
	}
}

DEFINE_SPINLOCK(die_lock);

void die(const char * str, struct pt_regs * regs, long err)
{
	static int die_counter;
	int nl = 0;

	console_verbose();
	spin_lock_irq(&die_lock);

	printk("%s: sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
	nl = 1;
#endif
	if (nl)
		printk("\n");
	show_regs(regs);
	if (!user_mode(regs))
		show_stack(NULL, (unsigned long *)regs->areg[1]);

	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irq(&die_lock);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	do_exit(err);
}