1 /* 2 * This file is subject to the terms and conditions of the GNU General Public 3 * License. See the file "COPYING" in the main directory of this archive 4 * for more details. 5 * 6 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle 7 * Copyright (C) 1995, 1996 Paul M. Antoine 8 * Copyright (C) 1998 Ulf Carlsson 9 * Copyright (C) 1999 Silicon Graphics, Inc. 10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com 11 * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki 12 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved. 13 * Copyright (C) 2014, Imagination Technologies Ltd. 14 */ 15 #include <linux/bug.h> 16 #include <linux/compiler.h> 17 #include <linux/context_tracking.h> 18 #include <linux/cpu_pm.h> 19 #include <linux/kexec.h> 20 #include <linux/init.h> 21 #include <linux/kernel.h> 22 #include <linux/module.h> 23 #include <linux/mm.h> 24 #include <linux/sched.h> 25 #include <linux/smp.h> 26 #include <linux/spinlock.h> 27 #include <linux/kallsyms.h> 28 #include <linux/bootmem.h> 29 #include <linux/interrupt.h> 30 #include <linux/ptrace.h> 31 #include <linux/kgdb.h> 32 #include <linux/kdebug.h> 33 #include <linux/kprobes.h> 34 #include <linux/notifier.h> 35 #include <linux/kdb.h> 36 #include <linux/irq.h> 37 #include <linux/perf_event.h> 38 39 #include <asm/bootinfo.h> 40 #include <asm/branch.h> 41 #include <asm/break.h> 42 #include <asm/cop2.h> 43 #include <asm/cpu.h> 44 #include <asm/cpu-type.h> 45 #include <asm/dsp.h> 46 #include <asm/fpu.h> 47 #include <asm/fpu_emulator.h> 48 #include <asm/idle.h> 49 #include <asm/mipsregs.h> 50 #include <asm/mipsmtregs.h> 51 #include <asm/module.h> 52 #include <asm/msa.h> 53 #include <asm/pgtable.h> 54 #include <asm/ptrace.h> 55 #include <asm/sections.h> 56 #include <asm/tlbdebug.h> 57 #include <asm/traps.h> 58 #include <asm/uaccess.h> 59 #include <asm/watch.h> 60 #include <asm/mmu_context.h> 61 #include <asm/types.h> 62 #include <asm/stacktrace.h> 63 #include <asm/uasm.h> 64 65 extern void check_wait(void); 66 extern asmlinkage void rollback_handle_int(void); 67 extern asmlinkage void handle_int(void); 68 extern u32 handle_tlbl[]; 69 extern u32 handle_tlbs[]; 70 extern u32 handle_tlbm[]; 71 extern asmlinkage void handle_adel(void); 72 extern asmlinkage void handle_ades(void); 73 extern asmlinkage void handle_ibe(void); 74 extern asmlinkage void handle_dbe(void); 75 extern asmlinkage void handle_sys(void); 76 extern asmlinkage void handle_bp(void); 77 extern asmlinkage void handle_ri(void); 78 extern asmlinkage void handle_ri_rdhwr_vivt(void); 79 extern asmlinkage void handle_ri_rdhwr(void); 80 extern asmlinkage void handle_cpu(void); 81 extern asmlinkage void handle_ov(void); 82 extern asmlinkage void handle_tr(void); 83 extern asmlinkage void handle_msa_fpe(void); 84 extern asmlinkage void handle_fpe(void); 85 extern asmlinkage void handle_ftlb(void); 86 extern asmlinkage void handle_msa(void); 87 extern asmlinkage void handle_mdmx(void); 88 extern asmlinkage void handle_watch(void); 89 extern asmlinkage void handle_mt(void); 90 extern asmlinkage void handle_dsp(void); 91 extern asmlinkage void handle_mcheck(void); 92 extern asmlinkage void handle_reserved(void); 93 extern void tlb_do_page_fault_0(void); 94 95 void (*board_be_init)(void); 96 int (*board_be_handler)(struct pt_regs *regs, int is_fixup); 97 void (*board_nmi_handler_setup)(void); 98 void (*board_ejtag_handler_setup)(void); 99 void (*board_bind_eic_interrupt)(int irq, int regset); 100 void (*board_ebase_setup)(void); 101 void(*board_cache_error_setup)(void); 102 103 static void show_raw_backtrace(unsigned long reg29) 104 { 105 unsigned long *sp = (unsigned long *)(reg29 & ~3); 106 unsigned long addr; 107 108 printk("Call Trace:"); 109 #ifdef CONFIG_KALLSYMS 110 printk("\n"); 111 #endif 112 while (!kstack_end(sp)) { 113 unsigned long __user *p = 114 (unsigned long __user *)(unsigned long)sp++; 115 if (__get_user(addr, p)) { 116 printk(" (Bad stack address)"); 117 break; 118 } 119 if (__kernel_text_address(addr)) 120 print_ip_sym(addr); 121 } 122 printk("\n"); 123 } 124 125 #ifdef CONFIG_KALLSYMS 126 int raw_show_trace; 127 static int __init set_raw_show_trace(char *str) 128 { 129 raw_show_trace = 1; 130 return 1; 131 } 132 __setup("raw_show_trace", set_raw_show_trace); 133 #endif 134 135 static void show_backtrace(struct task_struct *task, const struct pt_regs *regs) 136 { 137 unsigned long sp = regs->regs[29]; 138 unsigned long ra = regs->regs[31]; 139 unsigned long pc = regs->cp0_epc; 140 141 if (!task) 142 task = current; 143 144 if (raw_show_trace || !__kernel_text_address(pc)) { 145 show_raw_backtrace(sp); 146 return; 147 } 148 printk("Call Trace:\n"); 149 do { 150 print_ip_sym(pc); 151 pc = unwind_stack(task, &sp, pc, &ra); 152 } while (pc); 153 printk("\n"); 154 } 155 156 /* 157 * This routine abuses get_user()/put_user() to reference pointers 158 * with at least a bit of error checking ... 159 */ 160 static void show_stacktrace(struct task_struct *task, 161 const struct pt_regs *regs) 162 { 163 const int field = 2 * sizeof(unsigned long); 164 long stackdata; 165 int i; 166 unsigned long __user *sp = (unsigned long __user *)regs->regs[29]; 167 168 printk("Stack :"); 169 i = 0; 170 while ((unsigned long) sp & (PAGE_SIZE - 1)) { 171 if (i && ((i % (64 / field)) == 0)) 172 printk("\n "); 173 if (i > 39) { 174 printk(" ..."); 175 break; 176 } 177 178 if (__get_user(stackdata, sp++)) { 179 printk(" (Bad stack address)"); 180 break; 181 } 182 183 printk(" %0*lx", field, stackdata); 184 i++; 185 } 186 printk("\n"); 187 show_backtrace(task, regs); 188 } 189 190 void show_stack(struct task_struct *task, unsigned long *sp) 191 { 192 struct pt_regs regs; 193 if (sp) { 194 regs.regs[29] = (unsigned long)sp; 195 regs.regs[31] = 0; 196 regs.cp0_epc = 0; 197 } else { 198 if (task && task != current) { 199 regs.regs[29] = task->thread.reg29; 200 regs.regs[31] = 0; 201 regs.cp0_epc = task->thread.reg31; 202 #ifdef CONFIG_KGDB_KDB 203 } else if (atomic_read(&kgdb_active) != -1 && 204 kdb_current_regs) { 205 memcpy(®s, kdb_current_regs, sizeof(regs)); 206 #endif /* CONFIG_KGDB_KDB */ 207 } else { 208 prepare_frametrace(®s); 209 } 210 } 211 show_stacktrace(task, ®s); 212 } 213 214 static void show_code(unsigned int __user *pc) 215 { 216 long i; 217 unsigned short __user *pc16 = NULL; 218 219 printk("\nCode:"); 220 221 if ((unsigned long)pc & 1) 222 pc16 = (unsigned short __user *)((unsigned long)pc & ~1); 223 for(i = -3 ; i < 6 ; i++) { 224 unsigned int insn; 225 if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) { 226 printk(" (Bad address in epc)\n"); 227 break; 228 } 229 printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>')); 230 } 231 } 232 233 static void __show_regs(const struct pt_regs *regs) 234 { 235 const int field = 2 * sizeof(unsigned long); 236 unsigned int cause = regs->cp0_cause; 237 int i; 238 239 show_regs_print_info(KERN_DEFAULT); 240 241 /* 242 * Saved main processor registers 243 */ 244 for (i = 0; i < 32; ) { 245 if ((i % 4) == 0) 246 printk("$%2d :", i); 247 if (i == 0) 248 printk(" %0*lx", field, 0UL); 249 else if (i == 26 || i == 27) 250 printk(" %*s", field, ""); 251 else 252 printk(" %0*lx", field, regs->regs[i]); 253 254 i++; 255 if ((i % 4) == 0) 256 printk("\n"); 257 } 258 259 #ifdef CONFIG_CPU_HAS_SMARTMIPS 260 printk("Acx : %0*lx\n", field, regs->acx); 261 #endif 262 printk("Hi : %0*lx\n", field, regs->hi); 263 printk("Lo : %0*lx\n", field, regs->lo); 264 265 /* 266 * Saved cp0 registers 267 */ 268 printk("epc : %0*lx %pS\n", field, regs->cp0_epc, 269 (void *) regs->cp0_epc); 270 printk(" %s\n", print_tainted()); 271 printk("ra : %0*lx %pS\n", field, regs->regs[31], 272 (void *) regs->regs[31]); 273 274 printk("Status: %08x ", (uint32_t) regs->cp0_status); 275 276 if (cpu_has_3kex) { 277 if (regs->cp0_status & ST0_KUO) 278 printk("KUo "); 279 if (regs->cp0_status & ST0_IEO) 280 printk("IEo "); 281 if (regs->cp0_status & ST0_KUP) 282 printk("KUp "); 283 if (regs->cp0_status & ST0_IEP) 284 printk("IEp "); 285 if (regs->cp0_status & ST0_KUC) 286 printk("KUc "); 287 if (regs->cp0_status & ST0_IEC) 288 printk("IEc "); 289 } else if (cpu_has_4kex) { 290 if (regs->cp0_status & ST0_KX) 291 printk("KX "); 292 if (regs->cp0_status & ST0_SX) 293 printk("SX "); 294 if (regs->cp0_status & ST0_UX) 295 printk("UX "); 296 switch (regs->cp0_status & ST0_KSU) { 297 case KSU_USER: 298 printk("USER "); 299 break; 300 case KSU_SUPERVISOR: 301 printk("SUPERVISOR "); 302 break; 303 case KSU_KERNEL: 304 printk("KERNEL "); 305 break; 306 default: 307 printk("BAD_MODE "); 308 break; 309 } 310 if (regs->cp0_status & ST0_ERL) 311 printk("ERL "); 312 if (regs->cp0_status & ST0_EXL) 313 printk("EXL "); 314 if (regs->cp0_status & ST0_IE) 315 printk("IE "); 316 } 317 printk("\n"); 318 319 printk("Cause : %08x\n", cause); 320 321 cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE; 322 if (1 <= cause && cause <= 5) 323 printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr); 324 325 printk("PrId : %08x (%s)\n", read_c0_prid(), 326 cpu_name_string()); 327 } 328 329 /* 330 * FIXME: really the generic show_regs should take a const pointer argument. 331 */ 332 void show_regs(struct pt_regs *regs) 333 { 334 __show_regs((struct pt_regs *)regs); 335 } 336 337 void show_registers(struct pt_regs *regs) 338 { 339 const int field = 2 * sizeof(unsigned long); 340 mm_segment_t old_fs = get_fs(); 341 342 __show_regs(regs); 343 print_modules(); 344 printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n", 345 current->comm, current->pid, current_thread_info(), current, 346 field, current_thread_info()->tp_value); 347 if (cpu_has_userlocal) { 348 unsigned long tls; 349 350 tls = read_c0_userlocal(); 351 if (tls != current_thread_info()->tp_value) 352 printk("*HwTLS: %0*lx\n", field, tls); 353 } 354 355 if (!user_mode(regs)) 356 /* Necessary for getting the correct stack content */ 357 set_fs(KERNEL_DS); 358 show_stacktrace(current, regs); 359 show_code((unsigned int __user *) regs->cp0_epc); 360 printk("\n"); 361 set_fs(old_fs); 362 } 363 364 static int regs_to_trapnr(struct pt_regs *regs) 365 { 366 return (regs->cp0_cause >> 2) & 0x1f; 367 } 368 369 static DEFINE_RAW_SPINLOCK(die_lock); 370 371 void __noreturn die(const char *str, struct pt_regs *regs) 372 { 373 static int die_counter; 374 int sig = SIGSEGV; 375 376 oops_enter(); 377 378 if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), 379 SIGSEGV) == NOTIFY_STOP) 380 sig = 0; 381 382 console_verbose(); 383 raw_spin_lock_irq(&die_lock); 384 bust_spinlocks(1); 385 386 printk("%s[#%d]:\n", str, ++die_counter); 387 show_registers(regs); 388 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); 389 raw_spin_unlock_irq(&die_lock); 390 391 oops_exit(); 392 393 if (in_interrupt()) 394 panic("Fatal exception in interrupt"); 395 396 if (panic_on_oops) { 397 printk(KERN_EMERG "Fatal exception: panic in 5 seconds"); 398 ssleep(5); 399 panic("Fatal exception"); 400 } 401 402 if (regs && kexec_should_crash(current)) 403 crash_kexec(regs); 404 405 do_exit(sig); 406 } 407 408 extern struct exception_table_entry __start___dbe_table[]; 409 extern struct exception_table_entry __stop___dbe_table[]; 410 411 __asm__( 412 " .section __dbe_table, \"a\"\n" 413 " .previous \n"); 414 415 /* Given an address, look for it in the exception tables. */ 416 static const struct exception_table_entry *search_dbe_tables(unsigned long addr) 417 { 418 const struct exception_table_entry *e; 419 420 e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr); 421 if (!e) 422 e = search_module_dbetables(addr); 423 return e; 424 } 425 426 asmlinkage void do_be(struct pt_regs *regs) 427 { 428 const int field = 2 * sizeof(unsigned long); 429 const struct exception_table_entry *fixup = NULL; 430 int data = regs->cp0_cause & 4; 431 int action = MIPS_BE_FATAL; 432 enum ctx_state prev_state; 433 434 prev_state = exception_enter(); 435 /* XXX For now. Fixme, this searches the wrong table ... */ 436 if (data && !user_mode(regs)) 437 fixup = search_dbe_tables(exception_epc(regs)); 438 439 if (fixup) 440 action = MIPS_BE_FIXUP; 441 442 if (board_be_handler) 443 action = board_be_handler(regs, fixup != NULL); 444 445 switch (action) { 446 case MIPS_BE_DISCARD: 447 goto out; 448 case MIPS_BE_FIXUP: 449 if (fixup) { 450 regs->cp0_epc = fixup->nextinsn; 451 goto out; 452 } 453 break; 454 default: 455 break; 456 } 457 458 /* 459 * Assume it would be too dangerous to continue ... 460 */ 461 printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n", 462 data ? "Data" : "Instruction", 463 field, regs->cp0_epc, field, regs->regs[31]); 464 if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs), 465 SIGBUS) == NOTIFY_STOP) 466 goto out; 467 468 die_if_kernel("Oops", regs); 469 force_sig(SIGBUS, current); 470 471 out: 472 exception_exit(prev_state); 473 } 474 475 /* 476 * ll/sc, rdhwr, sync emulation 477 */ 478 479 #define OPCODE 0xfc000000 480 #define BASE 0x03e00000 481 #define RT 0x001f0000 482 #define OFFSET 0x0000ffff 483 #define LL 0xc0000000 484 #define SC 0xe0000000 485 #define SPEC0 0x00000000 486 #define SPEC3 0x7c000000 487 #define RD 0x0000f800 488 #define FUNC 0x0000003f 489 #define SYNC 0x0000000f 490 #define RDHWR 0x0000003b 491 492 /* microMIPS definitions */ 493 #define MM_POOL32A_FUNC 0xfc00ffff 494 #define MM_RDHWR 0x00006b3c 495 #define MM_RS 0x001f0000 496 #define MM_RT 0x03e00000 497 498 /* 499 * The ll_bit is cleared by r*_switch.S 500 */ 501 502 unsigned int ll_bit; 503 struct task_struct *ll_task; 504 505 static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode) 506 { 507 unsigned long value, __user *vaddr; 508 long offset; 509 510 /* 511 * analyse the ll instruction that just caused a ri exception 512 * and put the referenced address to addr. 513 */ 514 515 /* sign extend offset */ 516 offset = opcode & OFFSET; 517 offset <<= 16; 518 offset >>= 16; 519 520 vaddr = (unsigned long __user *) 521 ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset); 522 523 if ((unsigned long)vaddr & 3) 524 return SIGBUS; 525 if (get_user(value, vaddr)) 526 return SIGSEGV; 527 528 preempt_disable(); 529 530 if (ll_task == NULL || ll_task == current) { 531 ll_bit = 1; 532 } else { 533 ll_bit = 0; 534 } 535 ll_task = current; 536 537 preempt_enable(); 538 539 regs->regs[(opcode & RT) >> 16] = value; 540 541 return 0; 542 } 543 544 static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode) 545 { 546 unsigned long __user *vaddr; 547 unsigned long reg; 548 long offset; 549 550 /* 551 * analyse the sc instruction that just caused a ri exception 552 * and put the referenced address to addr. 553 */ 554 555 /* sign extend offset */ 556 offset = opcode & OFFSET; 557 offset <<= 16; 558 offset >>= 16; 559 560 vaddr = (unsigned long __user *) 561 ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset); 562 reg = (opcode & RT) >> 16; 563 564 if ((unsigned long)vaddr & 3) 565 return SIGBUS; 566 567 preempt_disable(); 568 569 if (ll_bit == 0 || ll_task != current) { 570 regs->regs[reg] = 0; 571 preempt_enable(); 572 return 0; 573 } 574 575 preempt_enable(); 576 577 if (put_user(regs->regs[reg], vaddr)) 578 return SIGSEGV; 579 580 regs->regs[reg] = 1; 581 582 return 0; 583 } 584 585 /* 586 * ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is both 587 * opcodes are supposed to result in coprocessor unusable exceptions if 588 * executed on ll/sc-less processors. That's the theory. In practice a 589 * few processors such as NEC's VR4100 throw reserved instruction exceptions 590 * instead, so we're doing the emulation thing in both exception handlers. 591 */ 592 static int simulate_llsc(struct pt_regs *regs, unsigned int opcode) 593 { 594 if ((opcode & OPCODE) == LL) { 595 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 596 1, regs, 0); 597 return simulate_ll(regs, opcode); 598 } 599 if ((opcode & OPCODE) == SC) { 600 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 601 1, regs, 0); 602 return simulate_sc(regs, opcode); 603 } 604 605 return -1; /* Must be something else ... */ 606 } 607 608 /* 609 * Simulate trapping 'rdhwr' instructions to provide user accessible 610 * registers not implemented in hardware. 611 */ 612 static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt) 613 { 614 struct thread_info *ti = task_thread_info(current); 615 616 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 617 1, regs, 0); 618 switch (rd) { 619 case 0: /* CPU number */ 620 regs->regs[rt] = smp_processor_id(); 621 return 0; 622 case 1: /* SYNCI length */ 623 regs->regs[rt] = min(current_cpu_data.dcache.linesz, 624 current_cpu_data.icache.linesz); 625 return 0; 626 case 2: /* Read count register */ 627 regs->regs[rt] = read_c0_count(); 628 return 0; 629 case 3: /* Count register resolution */ 630 switch (current_cpu_type()) { 631 case CPU_20KC: 632 case CPU_25KF: 633 regs->regs[rt] = 1; 634 break; 635 default: 636 regs->regs[rt] = 2; 637 } 638 return 0; 639 case 29: 640 regs->regs[rt] = ti->tp_value; 641 return 0; 642 default: 643 return -1; 644 } 645 } 646 647 static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode) 648 { 649 if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) { 650 int rd = (opcode & RD) >> 11; 651 int rt = (opcode & RT) >> 16; 652 653 simulate_rdhwr(regs, rd, rt); 654 return 0; 655 } 656 657 /* Not ours. */ 658 return -1; 659 } 660 661 static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode) 662 { 663 if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) { 664 int rd = (opcode & MM_RS) >> 16; 665 int rt = (opcode & MM_RT) >> 21; 666 simulate_rdhwr(regs, rd, rt); 667 return 0; 668 } 669 670 /* Not ours. */ 671 return -1; 672 } 673 674 static int simulate_sync(struct pt_regs *regs, unsigned int opcode) 675 { 676 if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) { 677 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 678 1, regs, 0); 679 return 0; 680 } 681 682 return -1; /* Must be something else ... */ 683 } 684 685 asmlinkage void do_ov(struct pt_regs *regs) 686 { 687 enum ctx_state prev_state; 688 siginfo_t info; 689 690 prev_state = exception_enter(); 691 die_if_kernel("Integer overflow", regs); 692 693 info.si_code = FPE_INTOVF; 694 info.si_signo = SIGFPE; 695 info.si_errno = 0; 696 info.si_addr = (void __user *) regs->cp0_epc; 697 force_sig_info(SIGFPE, &info, current); 698 exception_exit(prev_state); 699 } 700 701 int process_fpemu_return(int sig, void __user *fault_addr) 702 { 703 if (sig == SIGSEGV || sig == SIGBUS) { 704 struct siginfo si = {0}; 705 si.si_addr = fault_addr; 706 si.si_signo = sig; 707 if (sig == SIGSEGV) { 708 down_read(¤t->mm->mmap_sem); 709 if (find_vma(current->mm, (unsigned long)fault_addr)) 710 si.si_code = SEGV_ACCERR; 711 else 712 si.si_code = SEGV_MAPERR; 713 up_read(¤t->mm->mmap_sem); 714 } else { 715 si.si_code = BUS_ADRERR; 716 } 717 force_sig_info(sig, &si, current); 718 return 1; 719 } else if (sig) { 720 force_sig(sig, current); 721 return 1; 722 } else { 723 return 0; 724 } 725 } 726 727 /* 728 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX 729 */ 730 asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) 731 { 732 enum ctx_state prev_state; 733 siginfo_t info = {0}; 734 735 prev_state = exception_enter(); 736 if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), 737 SIGFPE) == NOTIFY_STOP) 738 goto out; 739 die_if_kernel("FP exception in kernel code", regs); 740 741 if (fcr31 & FPU_CSR_UNI_X) { 742 int sig; 743 void __user *fault_addr = NULL; 744 745 /* 746 * Unimplemented operation exception. If we've got the full 747 * software emulator on-board, let's use it... 748 * 749 * Force FPU to dump state into task/thread context. We're 750 * moving a lot of data here for what is probably a single 751 * instruction, but the alternative is to pre-decode the FP 752 * register operands before invoking the emulator, which seems 753 * a bit extreme for what should be an infrequent event. 754 */ 755 /* Ensure 'resume' not overwrite saved fp context again. */ 756 lose_fpu(1); 757 758 /* Run the emulator */ 759 sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, 760 &fault_addr); 761 762 /* 763 * We can't allow the emulated instruction to leave any of 764 * the cause bit set in $fcr31. 765 */ 766 current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; 767 768 /* Restore the hardware register state */ 769 own_fpu(1); /* Using the FPU again. */ 770 771 /* If something went wrong, signal */ 772 process_fpemu_return(sig, fault_addr); 773 774 goto out; 775 } else if (fcr31 & FPU_CSR_INV_X) 776 info.si_code = FPE_FLTINV; 777 else if (fcr31 & FPU_CSR_DIV_X) 778 info.si_code = FPE_FLTDIV; 779 else if (fcr31 & FPU_CSR_OVF_X) 780 info.si_code = FPE_FLTOVF; 781 else if (fcr31 & FPU_CSR_UDF_X) 782 info.si_code = FPE_FLTUND; 783 else if (fcr31 & FPU_CSR_INE_X) 784 info.si_code = FPE_FLTRES; 785 else 786 info.si_code = __SI_FAULT; 787 info.si_signo = SIGFPE; 788 info.si_errno = 0; 789 info.si_addr = (void __user *) regs->cp0_epc; 790 force_sig_info(SIGFPE, &info, current); 791 792 out: 793 exception_exit(prev_state); 794 } 795 796 static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, 797 const char *str) 798 { 799 siginfo_t info; 800 char b[40]; 801 802 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP 803 if (kgdb_ll_trap(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) 804 return; 805 #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ 806 807 if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), 808 SIGTRAP) == NOTIFY_STOP) 809 return; 810 811 /* 812 * A short test says that IRIX 5.3 sends SIGTRAP for all trap 813 * insns, even for trap and break codes that indicate arithmetic 814 * failures. Weird ... 815 * But should we continue the brokenness??? --macro 816 */ 817 switch (code) { 818 case BRK_OVERFLOW: 819 case BRK_DIVZERO: 820 scnprintf(b, sizeof(b), "%s instruction in kernel code", str); 821 die_if_kernel(b, regs); 822 if (code == BRK_DIVZERO) 823 info.si_code = FPE_INTDIV; 824 else 825 info.si_code = FPE_INTOVF; 826 info.si_signo = SIGFPE; 827 info.si_errno = 0; 828 info.si_addr = (void __user *) regs->cp0_epc; 829 force_sig_info(SIGFPE, &info, current); 830 break; 831 case BRK_BUG: 832 die_if_kernel("Kernel bug detected", regs); 833 force_sig(SIGTRAP, current); 834 break; 835 case BRK_MEMU: 836 /* 837 * Address errors may be deliberately induced by the FPU 838 * emulator to retake control of the CPU after executing the 839 * instruction in the delay slot of an emulated branch. 840 * 841 * Terminate if exception was recognized as a delay slot return 842 * otherwise handle as normal. 843 */ 844 if (do_dsemulret(regs)) 845 return; 846 847 die_if_kernel("Math emu break/trap", regs); 848 force_sig(SIGTRAP, current); 849 break; 850 default: 851 scnprintf(b, sizeof(b), "%s instruction in kernel code", str); 852 die_if_kernel(b, regs); 853 force_sig(SIGTRAP, current); 854 } 855 } 856 857 asmlinkage void do_bp(struct pt_regs *regs) 858 { 859 unsigned int opcode, bcode; 860 enum ctx_state prev_state; 861 unsigned long epc; 862 u16 instr[2]; 863 mm_segment_t seg; 864 865 seg = get_fs(); 866 if (!user_mode(regs)) 867 set_fs(KERNEL_DS); 868 869 prev_state = exception_enter(); 870 if (get_isa16_mode(regs->cp0_epc)) { 871 /* Calculate EPC. */ 872 epc = exception_epc(regs); 873 if (cpu_has_mmips) { 874 if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) || 875 (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2))))) 876 goto out_sigsegv; 877 opcode = (instr[0] << 16) | instr[1]; 878 } else { 879 /* MIPS16e mode */ 880 if (__get_user(instr[0], 881 (u16 __user *)msk_isa16_mode(epc))) 882 goto out_sigsegv; 883 bcode = (instr[0] >> 6) & 0x3f; 884 do_trap_or_bp(regs, bcode, "Break"); 885 goto out; 886 } 887 } else { 888 if (__get_user(opcode, 889 (unsigned int __user *) exception_epc(regs))) 890 goto out_sigsegv; 891 } 892 893 /* 894 * There is the ancient bug in the MIPS assemblers that the break 895 * code starts left to bit 16 instead to bit 6 in the opcode. 896 * Gas is bug-compatible, but not always, grrr... 897 * We handle both cases with a simple heuristics. --macro 898 */ 899 bcode = ((opcode >> 6) & ((1 << 20) - 1)); 900 if (bcode >= (1 << 10)) 901 bcode >>= 10; 902 903 /* 904 * notify the kprobe handlers, if instruction is likely to 905 * pertain to them. 906 */ 907 switch (bcode) { 908 case BRK_KPROBE_BP: 909 if (notify_die(DIE_BREAK, "debug", regs, bcode, 910 regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) 911 goto out; 912 else 913 break; 914 case BRK_KPROBE_SSTEPBP: 915 if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, 916 regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) 917 goto out; 918 else 919 break; 920 default: 921 break; 922 } 923 924 do_trap_or_bp(regs, bcode, "Break"); 925 926 out: 927 set_fs(seg); 928 exception_exit(prev_state); 929 return; 930 931 out_sigsegv: 932 force_sig(SIGSEGV, current); 933 goto out; 934 } 935 936 asmlinkage void do_tr(struct pt_regs *regs) 937 { 938 u32 opcode, tcode = 0; 939 enum ctx_state prev_state; 940 u16 instr[2]; 941 mm_segment_t seg; 942 unsigned long epc = msk_isa16_mode(exception_epc(regs)); 943 944 seg = get_fs(); 945 if (!user_mode(regs)) 946 set_fs(get_ds()); 947 948 prev_state = exception_enter(); 949 if (get_isa16_mode(regs->cp0_epc)) { 950 if (__get_user(instr[0], (u16 __user *)(epc + 0)) || 951 __get_user(instr[1], (u16 __user *)(epc + 2))) 952 goto out_sigsegv; 953 opcode = (instr[0] << 16) | instr[1]; 954 /* Immediate versions don't provide a code. */ 955 if (!(opcode & OPCODE)) 956 tcode = (opcode >> 12) & ((1 << 4) - 1); 957 } else { 958 if (__get_user(opcode, (u32 __user *)epc)) 959 goto out_sigsegv; 960 /* Immediate versions don't provide a code. */ 961 if (!(opcode & OPCODE)) 962 tcode = (opcode >> 6) & ((1 << 10) - 1); 963 } 964 965 do_trap_or_bp(regs, tcode, "Trap"); 966 967 out: 968 set_fs(seg); 969 exception_exit(prev_state); 970 return; 971 972 out_sigsegv: 973 force_sig(SIGSEGV, current); 974 goto out; 975 } 976 977 asmlinkage void do_ri(struct pt_regs *regs) 978 { 979 unsigned int __user *epc = (unsigned int __user *)exception_epc(regs); 980 unsigned long old_epc = regs->cp0_epc; 981 unsigned long old31 = regs->regs[31]; 982 enum ctx_state prev_state; 983 unsigned int opcode = 0; 984 int status = -1; 985 986 prev_state = exception_enter(); 987 if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), 988 SIGILL) == NOTIFY_STOP) 989 goto out; 990 991 die_if_kernel("Reserved instruction in kernel code", regs); 992 993 if (unlikely(compute_return_epc(regs) < 0)) 994 goto out; 995 996 if (get_isa16_mode(regs->cp0_epc)) { 997 unsigned short mmop[2] = { 0 }; 998 999 if (unlikely(get_user(mmop[0], epc) < 0)) 1000 status = SIGSEGV; 1001 if (unlikely(get_user(mmop[1], epc) < 0)) 1002 status = SIGSEGV; 1003 opcode = (mmop[0] << 16) | mmop[1]; 1004 1005 if (status < 0) 1006 status = simulate_rdhwr_mm(regs, opcode); 1007 } else { 1008 if (unlikely(get_user(opcode, epc) < 0)) 1009 status = SIGSEGV; 1010 1011 if (!cpu_has_llsc && status < 0) 1012 status = simulate_llsc(regs, opcode); 1013 1014 if (status < 0) 1015 status = simulate_rdhwr_normal(regs, opcode); 1016 1017 if (status < 0) 1018 status = simulate_sync(regs, opcode); 1019 } 1020 1021 if (status < 0) 1022 status = SIGILL; 1023 1024 if (unlikely(status > 0)) { 1025 regs->cp0_epc = old_epc; /* Undo skip-over. */ 1026 regs->regs[31] = old31; 1027 force_sig(status, current); 1028 } 1029 1030 out: 1031 exception_exit(prev_state); 1032 } 1033 1034 /* 1035 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've 1036 * emulated more than some threshold number of instructions, force migration to 1037 * a "CPU" that has FP support. 1038 */ 1039 static void mt_ase_fp_affinity(void) 1040 { 1041 #ifdef CONFIG_MIPS_MT_FPAFF 1042 if (mt_fpemul_threshold > 0 && 1043 ((current->thread.emulated_fp++ > mt_fpemul_threshold))) { 1044 /* 1045 * If there's no FPU present, or if the application has already 1046 * restricted the allowed set to exclude any CPUs with FPUs, 1047 * we'll skip the procedure. 1048 */ 1049 if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) { 1050 cpumask_t tmask; 1051 1052 current->thread.user_cpus_allowed 1053 = current->cpus_allowed; 1054 cpus_and(tmask, current->cpus_allowed, 1055 mt_fpu_cpumask); 1056 set_cpus_allowed_ptr(current, &tmask); 1057 set_thread_flag(TIF_FPUBOUND); 1058 } 1059 } 1060 #endif /* CONFIG_MIPS_MT_FPAFF */ 1061 } 1062 1063 /* 1064 * No lock; only written during early bootup by CPU 0. 1065 */ 1066 static RAW_NOTIFIER_HEAD(cu2_chain); 1067 1068 int __ref register_cu2_notifier(struct notifier_block *nb) 1069 { 1070 return raw_notifier_chain_register(&cu2_chain, nb); 1071 } 1072 1073 int cu2_notifier_call_chain(unsigned long val, void *v) 1074 { 1075 return raw_notifier_call_chain(&cu2_chain, val, v); 1076 } 1077 1078 static int default_cu2_call(struct notifier_block *nfb, unsigned long action, 1079 void *data) 1080 { 1081 struct pt_regs *regs = data; 1082 1083 die_if_kernel("COP2: Unhandled kernel unaligned access or invalid " 1084 "instruction", regs); 1085 force_sig(SIGILL, current); 1086 1087 return NOTIFY_OK; 1088 } 1089 1090 static int enable_restore_fp_context(int msa) 1091 { 1092 int err, was_fpu_owner, prior_msa; 1093 1094 if (!used_math()) { 1095 /* First time FP context user. */ 1096 preempt_disable(); 1097 err = init_fpu(); 1098 if (msa && !err) { 1099 enable_msa(); 1100 _init_msa_upper(); 1101 set_thread_flag(TIF_USEDMSA); 1102 set_thread_flag(TIF_MSA_CTX_LIVE); 1103 } 1104 preempt_enable(); 1105 if (!err) 1106 set_used_math(); 1107 return err; 1108 } 1109 1110 /* 1111 * This task has formerly used the FP context. 1112 * 1113 * If this thread has no live MSA vector context then we can simply 1114 * restore the scalar FP context. If it has live MSA vector context 1115 * (that is, it has or may have used MSA since last performing a 1116 * function call) then we'll need to restore the vector context. This 1117 * applies even if we're currently only executing a scalar FP 1118 * instruction. This is because if we were to later execute an MSA 1119 * instruction then we'd either have to: 1120 * 1121 * - Restore the vector context & clobber any registers modified by 1122 * scalar FP instructions between now & then. 1123 * 1124 * or 1125 * 1126 * - Not restore the vector context & lose the most significant bits 1127 * of all vector registers. 1128 * 1129 * Neither of those options is acceptable. We cannot restore the least 1130 * significant bits of the registers now & only restore the most 1131 * significant bits later because the most significant bits of any 1132 * vector registers whose aliased FP register is modified now will have 1133 * been zeroed. We'd have no way to know that when restoring the vector 1134 * context & thus may load an outdated value for the most significant 1135 * bits of a vector register. 1136 */ 1137 if (!msa && !thread_msa_context_live()) 1138 return own_fpu(1); 1139 1140 /* 1141 * This task is using or has previously used MSA. Thus we require 1142 * that Status.FR == 1. 1143 */ 1144 preempt_disable(); 1145 was_fpu_owner = is_fpu_owner(); 1146 err = own_fpu_inatomic(0); 1147 if (err) 1148 goto out; 1149 1150 enable_msa(); 1151 write_msa_csr(current->thread.fpu.msacsr); 1152 set_thread_flag(TIF_USEDMSA); 1153 1154 /* 1155 * If this is the first time that the task is using MSA and it has 1156 * previously used scalar FP in this time slice then we already nave 1157 * FP context which we shouldn't clobber. We do however need to clear 1158 * the upper 64b of each vector register so that this task has no 1159 * opportunity to see data left behind by another. 1160 */ 1161 prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE); 1162 if (!prior_msa && was_fpu_owner) { 1163 _init_msa_upper(); 1164 1165 goto out; 1166 } 1167 1168 if (!prior_msa) { 1169 /* 1170 * Restore the least significant 64b of each vector register 1171 * from the existing scalar FP context. 1172 */ 1173 _restore_fp(current); 1174 1175 /* 1176 * The task has not formerly used MSA, so clear the upper 64b 1177 * of each vector register such that it cannot see data left 1178 * behind by another task. 1179 */ 1180 _init_msa_upper(); 1181 } else { 1182 /* We need to restore the vector context. */ 1183 restore_msa(current); 1184 1185 /* Restore the scalar FP control & status register */ 1186 if (!was_fpu_owner) 1187 asm volatile("ctc1 %0, $31" : : "r"(current->thread.fpu.fcr31)); 1188 } 1189 1190 out: 1191 preempt_enable(); 1192 1193 return 0; 1194 } 1195 1196 asmlinkage void do_cpu(struct pt_regs *regs) 1197 { 1198 enum ctx_state prev_state; 1199 unsigned int __user *epc; 1200 unsigned long old_epc, old31; 1201 unsigned int opcode; 1202 unsigned int cpid; 1203 int status, err; 1204 unsigned long __maybe_unused flags; 1205 1206 prev_state = exception_enter(); 1207 cpid = (regs->cp0_cause >> CAUSEB_CE) & 3; 1208 1209 if (cpid != 2) 1210 die_if_kernel("do_cpu invoked from kernel context!", regs); 1211 1212 switch (cpid) { 1213 case 0: 1214 epc = (unsigned int __user *)exception_epc(regs); 1215 old_epc = regs->cp0_epc; 1216 old31 = regs->regs[31]; 1217 opcode = 0; 1218 status = -1; 1219 1220 if (unlikely(compute_return_epc(regs) < 0)) 1221 goto out; 1222 1223 if (get_isa16_mode(regs->cp0_epc)) { 1224 unsigned short mmop[2] = { 0 }; 1225 1226 if (unlikely(get_user(mmop[0], epc) < 0)) 1227 status = SIGSEGV; 1228 if (unlikely(get_user(mmop[1], epc) < 0)) 1229 status = SIGSEGV; 1230 opcode = (mmop[0] << 16) | mmop[1]; 1231 1232 if (status < 0) 1233 status = simulate_rdhwr_mm(regs, opcode); 1234 } else { 1235 if (unlikely(get_user(opcode, epc) < 0)) 1236 status = SIGSEGV; 1237 1238 if (!cpu_has_llsc && status < 0) 1239 status = simulate_llsc(regs, opcode); 1240 1241 if (status < 0) 1242 status = simulate_rdhwr_normal(regs, opcode); 1243 } 1244 1245 if (status < 0) 1246 status = SIGILL; 1247 1248 if (unlikely(status > 0)) { 1249 regs->cp0_epc = old_epc; /* Undo skip-over. */ 1250 regs->regs[31] = old31; 1251 force_sig(status, current); 1252 } 1253 1254 goto out; 1255 1256 case 3: 1257 /* 1258 * Old (MIPS I and MIPS II) processors will set this code 1259 * for COP1X opcode instructions that replaced the original 1260 * COP3 space. We don't limit COP1 space instructions in 1261 * the emulator according to the CPU ISA, so we want to 1262 * treat COP1X instructions consistently regardless of which 1263 * code the CPU chose. Therefore we redirect this trap to 1264 * the FP emulator too. 1265 * 1266 * Then some newer FPU-less processors use this code 1267 * erroneously too, so they are covered by this choice 1268 * as well. 1269 */ 1270 if (raw_cpu_has_fpu) 1271 break; 1272 /* Fall through. */ 1273 1274 case 1: 1275 err = enable_restore_fp_context(0); 1276 1277 if (!raw_cpu_has_fpu || err) { 1278 int sig; 1279 void __user *fault_addr = NULL; 1280 sig = fpu_emulator_cop1Handler(regs, 1281 ¤t->thread.fpu, 1282 0, &fault_addr); 1283 if (!process_fpemu_return(sig, fault_addr) && !err) 1284 mt_ase_fp_affinity(); 1285 } 1286 1287 goto out; 1288 1289 case 2: 1290 raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs); 1291 goto out; 1292 } 1293 1294 force_sig(SIGILL, current); 1295 1296 out: 1297 exception_exit(prev_state); 1298 } 1299 1300 asmlinkage void do_msa_fpe(struct pt_regs *regs) 1301 { 1302 enum ctx_state prev_state; 1303 1304 prev_state = exception_enter(); 1305 die_if_kernel("do_msa_fpe invoked from kernel context!", regs); 1306 force_sig(SIGFPE, current); 1307 exception_exit(prev_state); 1308 } 1309 1310 asmlinkage void do_msa(struct pt_regs *regs) 1311 { 1312 enum ctx_state prev_state; 1313 int err; 1314 1315 prev_state = exception_enter(); 1316 1317 if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) { 1318 force_sig(SIGILL, current); 1319 goto out; 1320 } 1321 1322 die_if_kernel("do_msa invoked from kernel context!", regs); 1323 1324 err = enable_restore_fp_context(1); 1325 if (err) 1326 force_sig(SIGILL, current); 1327 out: 1328 exception_exit(prev_state); 1329 } 1330 1331 asmlinkage void do_mdmx(struct pt_regs *regs) 1332 { 1333 enum ctx_state prev_state; 1334 1335 prev_state = exception_enter(); 1336 force_sig(SIGILL, current); 1337 exception_exit(prev_state); 1338 } 1339 1340 /* 1341 * Called with interrupts disabled. 1342 */ 1343 asmlinkage void do_watch(struct pt_regs *regs) 1344 { 1345 enum ctx_state prev_state; 1346 u32 cause; 1347 1348 prev_state = exception_enter(); 1349 /* 1350 * Clear WP (bit 22) bit of cause register so we don't loop 1351 * forever. 1352 */ 1353 cause = read_c0_cause(); 1354 cause &= ~(1 << 22); 1355 write_c0_cause(cause); 1356 1357 /* 1358 * If the current thread has the watch registers loaded, save 1359 * their values and send SIGTRAP. Otherwise another thread 1360 * left the registers set, clear them and continue. 1361 */ 1362 if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) { 1363 mips_read_watch_registers(); 1364 local_irq_enable(); 1365 force_sig(SIGTRAP, current); 1366 } else { 1367 mips_clear_watch_registers(); 1368 local_irq_enable(); 1369 } 1370 exception_exit(prev_state); 1371 } 1372 1373 asmlinkage void do_mcheck(struct pt_regs *regs) 1374 { 1375 const int field = 2 * sizeof(unsigned long); 1376 int multi_match = regs->cp0_status & ST0_TS; 1377 enum ctx_state prev_state; 1378 1379 prev_state = exception_enter(); 1380 show_regs(regs); 1381 1382 if (multi_match) { 1383 printk("Index : %0x\n", read_c0_index()); 1384 printk("Pagemask: %0x\n", read_c0_pagemask()); 1385 printk("EntryHi : %0*lx\n", field, read_c0_entryhi()); 1386 printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0()); 1387 printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1()); 1388 printk("\n"); 1389 dump_tlb_all(); 1390 } 1391 1392 show_code((unsigned int __user *) regs->cp0_epc); 1393 1394 /* 1395 * Some chips may have other causes of machine check (e.g. SB1 1396 * graduation timer) 1397 */ 1398 panic("Caught Machine Check exception - %scaused by multiple " 1399 "matching entries in the TLB.", 1400 (multi_match) ? "" : "not "); 1401 } 1402 1403 asmlinkage void do_mt(struct pt_regs *regs) 1404 { 1405 int subcode; 1406 1407 subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT) 1408 >> VPECONTROL_EXCPT_SHIFT; 1409 switch (subcode) { 1410 case 0: 1411 printk(KERN_DEBUG "Thread Underflow\n"); 1412 break; 1413 case 1: 1414 printk(KERN_DEBUG "Thread Overflow\n"); 1415 break; 1416 case 2: 1417 printk(KERN_DEBUG "Invalid YIELD Qualifier\n"); 1418 break; 1419 case 3: 1420 printk(KERN_DEBUG "Gating Storage Exception\n"); 1421 break; 1422 case 4: 1423 printk(KERN_DEBUG "YIELD Scheduler Exception\n"); 1424 break; 1425 case 5: 1426 printk(KERN_DEBUG "Gating Storage Scheduler Exception\n"); 1427 break; 1428 default: 1429 printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n", 1430 subcode); 1431 break; 1432 } 1433 die_if_kernel("MIPS MT Thread exception in kernel", regs); 1434 1435 force_sig(SIGILL, current); 1436 } 1437 1438 1439 asmlinkage void do_dsp(struct pt_regs *regs) 1440 { 1441 if (cpu_has_dsp) 1442 panic("Unexpected DSP exception"); 1443 1444 force_sig(SIGILL, current); 1445 } 1446 1447 asmlinkage void do_reserved(struct pt_regs *regs) 1448 { 1449 /* 1450 * Game over - no way to handle this if it ever occurs. Most probably 1451 * caused by a new unknown cpu type or after another deadly 1452 * hard/software error. 1453 */ 1454 show_regs(regs); 1455 panic("Caught reserved exception %ld - should not happen.", 1456 (regs->cp0_cause & 0x7f) >> 2); 1457 } 1458 1459 static int __initdata l1parity = 1; 1460 static int __init nol1parity(char *s) 1461 { 1462 l1parity = 0; 1463 return 1; 1464 } 1465 __setup("nol1par", nol1parity); 1466 static int __initdata l2parity = 1; 1467 static int __init nol2parity(char *s) 1468 { 1469 l2parity = 0; 1470 return 1; 1471 } 1472 __setup("nol2par", nol2parity); 1473 1474 /* 1475 * Some MIPS CPUs can enable/disable for cache parity detection, but do 1476 * it different ways. 1477 */ 1478 static inline void parity_protection_init(void) 1479 { 1480 switch (current_cpu_type()) { 1481 case CPU_24K: 1482 case CPU_34K: 1483 case CPU_74K: 1484 case CPU_1004K: 1485 case CPU_1074K: 1486 case CPU_INTERAPTIV: 1487 case CPU_PROAPTIV: 1488 case CPU_P5600: 1489 { 1490 #define ERRCTL_PE 0x80000000 1491 #define ERRCTL_L2P 0x00800000 1492 unsigned long errctl; 1493 unsigned int l1parity_present, l2parity_present; 1494 1495 errctl = read_c0_ecc(); 1496 errctl &= ~(ERRCTL_PE|ERRCTL_L2P); 1497 1498 /* probe L1 parity support */ 1499 write_c0_ecc(errctl | ERRCTL_PE); 1500 back_to_back_c0_hazard(); 1501 l1parity_present = (read_c0_ecc() & ERRCTL_PE); 1502 1503 /* probe L2 parity support */ 1504 write_c0_ecc(errctl|ERRCTL_L2P); 1505 back_to_back_c0_hazard(); 1506 l2parity_present = (read_c0_ecc() & ERRCTL_L2P); 1507 1508 if (l1parity_present && l2parity_present) { 1509 if (l1parity) 1510 errctl |= ERRCTL_PE; 1511 if (l1parity ^ l2parity) 1512 errctl |= ERRCTL_L2P; 1513 } else if (l1parity_present) { 1514 if (l1parity) 1515 errctl |= ERRCTL_PE; 1516 } else if (l2parity_present) { 1517 if (l2parity) 1518 errctl |= ERRCTL_L2P; 1519 } else { 1520 /* No parity available */ 1521 } 1522 1523 printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl); 1524 1525 write_c0_ecc(errctl); 1526 back_to_back_c0_hazard(); 1527 errctl = read_c0_ecc(); 1528 printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl); 1529 1530 if (l1parity_present) 1531 printk(KERN_INFO "Cache parity protection %sabled\n", 1532 (errctl & ERRCTL_PE) ? "en" : "dis"); 1533 1534 if (l2parity_present) { 1535 if (l1parity_present && l1parity) 1536 errctl ^= ERRCTL_L2P; 1537 printk(KERN_INFO "L2 cache parity protection %sabled\n", 1538 (errctl & ERRCTL_L2P) ? "en" : "dis"); 1539 } 1540 } 1541 break; 1542 1543 case CPU_5KC: 1544 case CPU_5KE: 1545 case CPU_LOONGSON1: 1546 write_c0_ecc(0x80000000); 1547 back_to_back_c0_hazard(); 1548 /* Set the PE bit (bit 31) in the c0_errctl register. */ 1549 printk(KERN_INFO "Cache parity protection %sabled\n", 1550 (read_c0_ecc() & 0x80000000) ? "en" : "dis"); 1551 break; 1552 case CPU_20KC: 1553 case CPU_25KF: 1554 /* Clear the DE bit (bit 16) in the c0_status register. */ 1555 printk(KERN_INFO "Enable cache parity protection for " 1556 "MIPS 20KC/25KF CPUs.\n"); 1557 clear_c0_status(ST0_DE); 1558 break; 1559 default: 1560 break; 1561 } 1562 } 1563 1564 asmlinkage void cache_parity_error(void) 1565 { 1566 const int field = 2 * sizeof(unsigned long); 1567 unsigned int reg_val; 1568 1569 /* For the moment, report the problem and hang. */ 1570 printk("Cache error exception:\n"); 1571 printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc()); 1572 reg_val = read_c0_cacheerr(); 1573 printk("c0_cacheerr == %08x\n", reg_val); 1574 1575 printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n", 1576 reg_val & (1<<30) ? "secondary" : "primary", 1577 reg_val & (1<<31) ? "data" : "insn"); 1578 if (cpu_has_mips_r2 && 1579 ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { 1580 pr_err("Error bits: %s%s%s%s%s%s%s%s\n", 1581 reg_val & (1<<29) ? "ED " : "", 1582 reg_val & (1<<28) ? "ET " : "", 1583 reg_val & (1<<27) ? "ES " : "", 1584 reg_val & (1<<26) ? "EE " : "", 1585 reg_val & (1<<25) ? "EB " : "", 1586 reg_val & (1<<24) ? "EI " : "", 1587 reg_val & (1<<23) ? "E1 " : "", 1588 reg_val & (1<<22) ? "E0 " : ""); 1589 } else { 1590 pr_err("Error bits: %s%s%s%s%s%s%s\n", 1591 reg_val & (1<<29) ? "ED " : "", 1592 reg_val & (1<<28) ? "ET " : "", 1593 reg_val & (1<<26) ? "EE " : "", 1594 reg_val & (1<<25) ? "EB " : "", 1595 reg_val & (1<<24) ? "EI " : "", 1596 reg_val & (1<<23) ? "E1 " : "", 1597 reg_val & (1<<22) ? "E0 " : ""); 1598 } 1599 printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1)); 1600 1601 #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) 1602 if (reg_val & (1<<22)) 1603 printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0()); 1604 1605 if (reg_val & (1<<23)) 1606 printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1()); 1607 #endif 1608 1609 panic("Can't handle the cache error!"); 1610 } 1611 1612 asmlinkage void do_ftlb(void) 1613 { 1614 const int field = 2 * sizeof(unsigned long); 1615 unsigned int reg_val; 1616 1617 /* For the moment, report the problem and hang. */ 1618 if (cpu_has_mips_r2 && 1619 ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { 1620 pr_err("FTLB error exception, cp0_ecc=0x%08x:\n", 1621 read_c0_ecc()); 1622 pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc()); 1623 reg_val = read_c0_cacheerr(); 1624 pr_err("c0_cacheerr == %08x\n", reg_val); 1625 1626 if ((reg_val & 0xc0000000) == 0xc0000000) { 1627 pr_err("Decoded c0_cacheerr: FTLB parity error\n"); 1628 } else { 1629 pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n", 1630 reg_val & (1<<30) ? "secondary" : "primary", 1631 reg_val & (1<<31) ? "data" : "insn"); 1632 } 1633 } else { 1634 pr_err("FTLB error exception\n"); 1635 } 1636 /* Just print the cacheerr bits for now */ 1637 cache_parity_error(); 1638 } 1639 1640 /* 1641 * SDBBP EJTAG debug exception handler. 1642 * We skip the instruction and return to the next instruction. 1643 */ 1644 void ejtag_exception_handler(struct pt_regs *regs) 1645 { 1646 const int field = 2 * sizeof(unsigned long); 1647 unsigned long depc, old_epc, old_ra; 1648 unsigned int debug; 1649 1650 printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n"); 1651 depc = read_c0_depc(); 1652 debug = read_c0_debug(); 1653 printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug); 1654 if (debug & 0x80000000) { 1655 /* 1656 * In branch delay slot. 1657 * We cheat a little bit here and use EPC to calculate the 1658 * debug return address (DEPC). EPC is restored after the 1659 * calculation. 1660 */ 1661 old_epc = regs->cp0_epc; 1662 old_ra = regs->regs[31]; 1663 regs->cp0_epc = depc; 1664 compute_return_epc(regs); 1665 depc = regs->cp0_epc; 1666 regs->cp0_epc = old_epc; 1667 regs->regs[31] = old_ra; 1668 } else 1669 depc += 4; 1670 write_c0_depc(depc); 1671 1672 #if 0 1673 printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n"); 1674 write_c0_debug(debug | 0x100); 1675 #endif 1676 } 1677 1678 /* 1679 * NMI exception handler. 1680 * No lock; only written during early bootup by CPU 0. 1681 */ 1682 static RAW_NOTIFIER_HEAD(nmi_chain); 1683 1684 int register_nmi_notifier(struct notifier_block *nb) 1685 { 1686 return raw_notifier_chain_register(&nmi_chain, nb); 1687 } 1688 1689 void __noreturn nmi_exception_handler(struct pt_regs *regs) 1690 { 1691 char str[100]; 1692 1693 raw_notifier_call_chain(&nmi_chain, 0, regs); 1694 bust_spinlocks(1); 1695 snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n", 1696 smp_processor_id(), regs->cp0_epc); 1697 regs->cp0_epc = read_c0_errorepc(); 1698 die(str, regs); 1699 } 1700 1701 #define VECTORSPACING 0x100 /* for EI/VI mode */ 1702 1703 unsigned long ebase; 1704 unsigned long exception_handlers[32]; 1705 unsigned long vi_handlers[64]; 1706 1707 void __init *set_except_vector(int n, void *addr) 1708 { 1709 unsigned long handler = (unsigned long) addr; 1710 unsigned long old_handler; 1711 1712 #ifdef CONFIG_CPU_MICROMIPS 1713 /* 1714 * Only the TLB handlers are cache aligned with an even 1715 * address. All other handlers are on an odd address and 1716 * require no modification. Otherwise, MIPS32 mode will 1717 * be entered when handling any TLB exceptions. That 1718 * would be bad...since we must stay in microMIPS mode. 1719 */ 1720 if (!(handler & 0x1)) 1721 handler |= 1; 1722 #endif 1723 old_handler = xchg(&exception_handlers[n], handler); 1724 1725 if (n == 0 && cpu_has_divec) { 1726 #ifdef CONFIG_CPU_MICROMIPS 1727 unsigned long jump_mask = ~((1 << 27) - 1); 1728 #else 1729 unsigned long jump_mask = ~((1 << 28) - 1); 1730 #endif 1731 u32 *buf = (u32 *)(ebase + 0x200); 1732 unsigned int k0 = 26; 1733 if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) { 1734 uasm_i_j(&buf, handler & ~jump_mask); 1735 uasm_i_nop(&buf); 1736 } else { 1737 UASM_i_LA(&buf, k0, handler); 1738 uasm_i_jr(&buf, k0); 1739 uasm_i_nop(&buf); 1740 } 1741 local_flush_icache_range(ebase + 0x200, (unsigned long)buf); 1742 } 1743 return (void *)old_handler; 1744 } 1745 1746 static void do_default_vi(void) 1747 { 1748 show_regs(get_irq_regs()); 1749 panic("Caught unexpected vectored interrupt."); 1750 } 1751 1752 static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) 1753 { 1754 unsigned long handler; 1755 unsigned long old_handler = vi_handlers[n]; 1756 int srssets = current_cpu_data.srsets; 1757 u16 *h; 1758 unsigned char *b; 1759 1760 BUG_ON(!cpu_has_veic && !cpu_has_vint); 1761 1762 if (addr == NULL) { 1763 handler = (unsigned long) do_default_vi; 1764 srs = 0; 1765 } else 1766 handler = (unsigned long) addr; 1767 vi_handlers[n] = handler; 1768 1769 b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING); 1770 1771 if (srs >= srssets) 1772 panic("Shadow register set %d not supported", srs); 1773 1774 if (cpu_has_veic) { 1775 if (board_bind_eic_interrupt) 1776 board_bind_eic_interrupt(n, srs); 1777 } else if (cpu_has_vint) { 1778 /* SRSMap is only defined if shadow sets are implemented */ 1779 if (srssets > 1) 1780 change_c0_srsmap(0xf << n*4, srs << n*4); 1781 } 1782 1783 if (srs == 0) { 1784 /* 1785 * If no shadow set is selected then use the default handler 1786 * that does normal register saving and standard interrupt exit 1787 */ 1788 extern char except_vec_vi, except_vec_vi_lui; 1789 extern char except_vec_vi_ori, except_vec_vi_end; 1790 extern char rollback_except_vec_vi; 1791 char *vec_start = using_rollback_handler() ? 1792 &rollback_except_vec_vi : &except_vec_vi; 1793 #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) 1794 const int lui_offset = &except_vec_vi_lui - vec_start + 2; 1795 const int ori_offset = &except_vec_vi_ori - vec_start + 2; 1796 #else 1797 const int lui_offset = &except_vec_vi_lui - vec_start; 1798 const int ori_offset = &except_vec_vi_ori - vec_start; 1799 #endif 1800 const int handler_len = &except_vec_vi_end - vec_start; 1801 1802 if (handler_len > VECTORSPACING) { 1803 /* 1804 * Sigh... panicing won't help as the console 1805 * is probably not configured :( 1806 */ 1807 panic("VECTORSPACING too small"); 1808 } 1809 1810 set_handler(((unsigned long)b - ebase), vec_start, 1811 #ifdef CONFIG_CPU_MICROMIPS 1812 (handler_len - 1)); 1813 #else 1814 handler_len); 1815 #endif 1816 h = (u16 *)(b + lui_offset); 1817 *h = (handler >> 16) & 0xffff; 1818 h = (u16 *)(b + ori_offset); 1819 *h = (handler & 0xffff); 1820 local_flush_icache_range((unsigned long)b, 1821 (unsigned long)(b+handler_len)); 1822 } 1823 else { 1824 /* 1825 * In other cases jump directly to the interrupt handler. It 1826 * is the handler's responsibility to save registers if required 1827 * (eg hi/lo) and return from the exception using "eret". 1828 */ 1829 u32 insn; 1830 1831 h = (u16 *)b; 1832 /* j handler */ 1833 #ifdef CONFIG_CPU_MICROMIPS 1834 insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1); 1835 #else 1836 insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2); 1837 #endif 1838 h[0] = (insn >> 16) & 0xffff; 1839 h[1] = insn & 0xffff; 1840 h[2] = 0; 1841 h[3] = 0; 1842 local_flush_icache_range((unsigned long)b, 1843 (unsigned long)(b+8)); 1844 } 1845 1846 return (void *)old_handler; 1847 } 1848 1849 void *set_vi_handler(int n, vi_handler_t addr) 1850 { 1851 return set_vi_srs_handler(n, addr, 0); 1852 } 1853 1854 extern void tlb_init(void); 1855 1856 /* 1857 * Timer interrupt 1858 */ 1859 int cp0_compare_irq; 1860 EXPORT_SYMBOL_GPL(cp0_compare_irq); 1861 int cp0_compare_irq_shift; 1862 1863 /* 1864 * Performance counter IRQ or -1 if shared with timer 1865 */ 1866 int cp0_perfcount_irq; 1867 EXPORT_SYMBOL_GPL(cp0_perfcount_irq); 1868 1869 static int noulri; 1870 1871 static int __init ulri_disable(char *s) 1872 { 1873 pr_info("Disabling ulri\n"); 1874 noulri = 1; 1875 1876 return 1; 1877 } 1878 __setup("noulri", ulri_disable); 1879 1880 /* configure STATUS register */ 1881 static void configure_status(void) 1882 { 1883 /* 1884 * Disable coprocessors and select 32-bit or 64-bit addressing 1885 * and the 16/32 or 32/32 FPR register model. Reset the BEV 1886 * flag that some firmware may have left set and the TS bit (for 1887 * IP27). Set XX for ISA IV code to work. 1888 */ 1889 unsigned int status_set = ST0_CU0; 1890 #ifdef CONFIG_64BIT 1891 status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; 1892 #endif 1893 if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV) 1894 status_set |= ST0_XX; 1895 if (cpu_has_dsp) 1896 status_set |= ST0_MX; 1897 1898 change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX, 1899 status_set); 1900 } 1901 1902 /* configure HWRENA register */ 1903 static void configure_hwrena(void) 1904 { 1905 unsigned int hwrena = cpu_hwrena_impl_bits; 1906 1907 if (cpu_has_mips_r2) 1908 hwrena |= 0x0000000f; 1909 1910 if (!noulri && cpu_has_userlocal) 1911 hwrena |= (1 << 29); 1912 1913 if (hwrena) 1914 write_c0_hwrena(hwrena); 1915 } 1916 1917 static void configure_exception_vector(void) 1918 { 1919 if (cpu_has_veic || cpu_has_vint) { 1920 unsigned long sr = set_c0_status(ST0_BEV); 1921 write_c0_ebase(ebase); 1922 write_c0_status(sr); 1923 /* Setting vector spacing enables EI/VI mode */ 1924 change_c0_intctl(0x3e0, VECTORSPACING); 1925 } 1926 if (cpu_has_divec) { 1927 if (cpu_has_mipsmt) { 1928 unsigned int vpflags = dvpe(); 1929 set_c0_cause(CAUSEF_IV); 1930 evpe(vpflags); 1931 } else 1932 set_c0_cause(CAUSEF_IV); 1933 } 1934 } 1935 1936 void per_cpu_trap_init(bool is_boot_cpu) 1937 { 1938 unsigned int cpu = smp_processor_id(); 1939 1940 configure_status(); 1941 configure_hwrena(); 1942 1943 configure_exception_vector(); 1944 1945 /* 1946 * Before R2 both interrupt numbers were fixed to 7, so on R2 only: 1947 * 1948 * o read IntCtl.IPTI to determine the timer interrupt 1949 * o read IntCtl.IPPCI to determine the performance counter interrupt 1950 */ 1951 if (cpu_has_mips_r2) { 1952 cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP; 1953 cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7; 1954 cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7; 1955 if (cp0_perfcount_irq == cp0_compare_irq) 1956 cp0_perfcount_irq = -1; 1957 } else { 1958 cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ; 1959 cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ; 1960 cp0_perfcount_irq = -1; 1961 } 1962 1963 if (!cpu_data[cpu].asid_cache) 1964 cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; 1965 1966 atomic_inc(&init_mm.mm_count); 1967 current->active_mm = &init_mm; 1968 BUG_ON(current->mm); 1969 enter_lazy_tlb(&init_mm, current); 1970 1971 /* Boot CPU's cache setup in setup_arch(). */ 1972 if (!is_boot_cpu) 1973 cpu_cache_init(); 1974 tlb_init(); 1975 TLBMISS_HANDLER_SETUP(); 1976 } 1977 1978 /* Install CPU exception handler */ 1979 void set_handler(unsigned long offset, void *addr, unsigned long size) 1980 { 1981 #ifdef CONFIG_CPU_MICROMIPS 1982 memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size); 1983 #else 1984 memcpy((void *)(ebase + offset), addr, size); 1985 #endif 1986 local_flush_icache_range(ebase + offset, ebase + offset + size); 1987 } 1988 1989 static char panic_null_cerr[] = 1990 "Trying to set NULL cache error exception handler"; 1991 1992 /* 1993 * Install uncached CPU exception handler. 1994 * This is suitable only for the cache error exception which is the only 1995 * exception handler that is being run uncached. 1996 */ 1997 void set_uncached_handler(unsigned long offset, void *addr, 1998 unsigned long size) 1999 { 2000 unsigned long uncached_ebase = CKSEG1ADDR(ebase); 2001 2002 if (!addr) 2003 panic(panic_null_cerr); 2004 2005 memcpy((void *)(uncached_ebase + offset), addr, size); 2006 } 2007 2008 static int __initdata rdhwr_noopt; 2009 static int __init set_rdhwr_noopt(char *str) 2010 { 2011 rdhwr_noopt = 1; 2012 return 1; 2013 } 2014 2015 __setup("rdhwr_noopt", set_rdhwr_noopt); 2016 2017 void __init trap_init(void) 2018 { 2019 extern char except_vec3_generic; 2020 extern char except_vec4; 2021 extern char except_vec3_r4000; 2022 unsigned long i; 2023 2024 check_wait(); 2025 2026 #if defined(CONFIG_KGDB) 2027 if (kgdb_early_setup) 2028 return; /* Already done */ 2029 #endif 2030 2031 if (cpu_has_veic || cpu_has_vint) { 2032 unsigned long size = 0x200 + VECTORSPACING*64; 2033 ebase = (unsigned long) 2034 __alloc_bootmem(size, 1 << fls(size), 0); 2035 } else { 2036 #ifdef CONFIG_KVM_GUEST 2037 #define KVM_GUEST_KSEG0 0x40000000 2038 ebase = KVM_GUEST_KSEG0; 2039 #else 2040 ebase = CKSEG0; 2041 #endif 2042 if (cpu_has_mips_r2) 2043 ebase += (read_c0_ebase() & 0x3ffff000); 2044 } 2045 2046 if (cpu_has_mmips) { 2047 unsigned int config3 = read_c0_config3(); 2048 2049 if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) 2050 write_c0_config3(config3 | MIPS_CONF3_ISA_OE); 2051 else 2052 write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE); 2053 } 2054 2055 if (board_ebase_setup) 2056 board_ebase_setup(); 2057 per_cpu_trap_init(true); 2058 2059 /* 2060 * Copy the generic exception handlers to their final destination. 2061 * This will be overriden later as suitable for a particular 2062 * configuration. 2063 */ 2064 set_handler(0x180, &except_vec3_generic, 0x80); 2065 2066 /* 2067 * Setup default vectors 2068 */ 2069 for (i = 0; i <= 31; i++) 2070 set_except_vector(i, handle_reserved); 2071 2072 /* 2073 * Copy the EJTAG debug exception vector handler code to it's final 2074 * destination. 2075 */ 2076 if (cpu_has_ejtag && board_ejtag_handler_setup) 2077 board_ejtag_handler_setup(); 2078 2079 /* 2080 * Only some CPUs have the watch exceptions. 2081 */ 2082 if (cpu_has_watch) 2083 set_except_vector(23, handle_watch); 2084 2085 /* 2086 * Initialise interrupt handlers 2087 */ 2088 if (cpu_has_veic || cpu_has_vint) { 2089 int nvec = cpu_has_veic ? 64 : 8; 2090 for (i = 0; i < nvec; i++) 2091 set_vi_handler(i, NULL); 2092 } 2093 else if (cpu_has_divec) 2094 set_handler(0x200, &except_vec4, 0x8); 2095 2096 /* 2097 * Some CPUs can enable/disable for cache parity detection, but does 2098 * it different ways. 2099 */ 2100 parity_protection_init(); 2101 2102 /* 2103 * The Data Bus Errors / Instruction Bus Errors are signaled 2104 * by external hardware. Therefore these two exceptions 2105 * may have board specific handlers. 2106 */ 2107 if (board_be_init) 2108 board_be_init(); 2109 2110 set_except_vector(0, using_rollback_handler() ? rollback_handle_int 2111 : handle_int); 2112 set_except_vector(1, handle_tlbm); 2113 set_except_vector(2, handle_tlbl); 2114 set_except_vector(3, handle_tlbs); 2115 2116 set_except_vector(4, handle_adel); 2117 set_except_vector(5, handle_ades); 2118 2119 set_except_vector(6, handle_ibe); 2120 set_except_vector(7, handle_dbe); 2121 2122 set_except_vector(8, handle_sys); 2123 set_except_vector(9, handle_bp); 2124 set_except_vector(10, rdhwr_noopt ? handle_ri : 2125 (cpu_has_vtag_icache ? 2126 handle_ri_rdhwr_vivt : handle_ri_rdhwr)); 2127 set_except_vector(11, handle_cpu); 2128 set_except_vector(12, handle_ov); 2129 set_except_vector(13, handle_tr); 2130 set_except_vector(14, handle_msa_fpe); 2131 2132 if (current_cpu_type() == CPU_R6000 || 2133 current_cpu_type() == CPU_R6000A) { 2134 /* 2135 * The R6000 is the only R-series CPU that features a machine 2136 * check exception (similar to the R4000 cache error) and 2137 * unaligned ldc1/sdc1 exception. The handlers have not been 2138 * written yet. Well, anyway there is no R6000 machine on the 2139 * current list of targets for Linux/MIPS. 2140 * (Duh, crap, there is someone with a triple R6k machine) 2141 */ 2142 //set_except_vector(14, handle_mc); 2143 //set_except_vector(15, handle_ndc); 2144 } 2145 2146 2147 if (board_nmi_handler_setup) 2148 board_nmi_handler_setup(); 2149 2150 if (cpu_has_fpu && !cpu_has_nofpuex) 2151 set_except_vector(15, handle_fpe); 2152 2153 set_except_vector(16, handle_ftlb); 2154 2155 if (cpu_has_rixiex) { 2156 set_except_vector(19, tlb_do_page_fault_0); 2157 set_except_vector(20, tlb_do_page_fault_0); 2158 } 2159 2160 set_except_vector(21, handle_msa); 2161 set_except_vector(22, handle_mdmx); 2162 2163 if (cpu_has_mcheck) 2164 set_except_vector(24, handle_mcheck); 2165 2166 if (cpu_has_mipsmt) 2167 set_except_vector(25, handle_mt); 2168 2169 set_except_vector(26, handle_dsp); 2170 2171 if (board_cache_error_setup) 2172 board_cache_error_setup(); 2173 2174 if (cpu_has_vce) 2175 /* Special exception: R4[04]00 uses also the divec space. */ 2176 set_handler(0x180, &except_vec3_r4000, 0x100); 2177 else if (cpu_has_4kex) 2178 set_handler(0x180, &except_vec3_generic, 0x80); 2179 else 2180 set_handler(0x080, &except_vec3_generic, 0x80); 2181 2182 local_flush_icache_range(ebase, ebase + 0x400); 2183 2184 sort_extable(__start___dbe_table, __stop___dbe_table); 2185 2186 cu2_notifier(default_cu2_call, 0x80000000); /* Run last */ 2187 } 2188 2189 static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd, 2190 void *v) 2191 { 2192 switch (cmd) { 2193 case CPU_PM_ENTER_FAILED: 2194 case CPU_PM_EXIT: 2195 configure_status(); 2196 configure_hwrena(); 2197 configure_exception_vector(); 2198 2199 /* Restore register with CPU number for TLB handlers */ 2200 TLBMISS_HANDLER_RESTORE(); 2201 2202 break; 2203 } 2204 2205 return NOTIFY_OK; 2206 } 2207 2208 static struct notifier_block trap_pm_notifier_block = { 2209 .notifier_call = trap_pm_notifier, 2210 }; 2211 2212 static int __init trap_pm_init(void) 2213 { 2214 return cpu_pm_register_notifier(&trap_pm_notifier_block); 2215 } 2216 arch_initcall(trap_pm_init); 2217