/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
 * Copyright (C) 1995, 1996 Paul M. Antoine
 * Copyright (C) 1998 Ulf Carlsson
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/cpu_pm.h>
#include <linux/kexec.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/extable.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/kdb.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cop2.h>
#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/idle.h>
#include <asm/mips-r2-to-r6-emul.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/watch.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
#include <asm/uasm.h>

extern void check_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern u32 handle_tlbl[];
extern u32 handle_tlbs[];
extern u32 handle_tlbm[];
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_vivt(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_msa_fpe(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_ftlb(void);
extern asmlinkage void handle_msa(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
extern void tlb_do_page_fault_0(void);

void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
void (*board_ebase_setup)(void);
void (*board_cache_error_setup)(void);
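/*
 * Illustrative sketch (not part of this file): platform code typically
 * installs these hooks from its own setup path, e.g.
 *
 *	static int my_be_handler(struct pt_regs *regs, int is_fixup)
 *	{
 *		return is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL;
 *	}
 *	...
 *	board_be_handler = my_be_handler;
 *
 * "my_be_handler" is a hypothetical name used only for this example.
 */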
static void show_raw_backtrace(unsigned long reg29)
{
	unsigned long *sp = (unsigned long *)(reg29 & ~3);
	unsigned long addr;

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif
	while (!kstack_end(sp)) {
		unsigned long __user *p =
			(unsigned long __user *)(unsigned long)sp++;
		if (__get_user(addr, p)) {
			printk(" (Bad stack address)");
			break;
		}
		if (__kernel_text_address(addr))
			print_ip_sym(addr);
	}
	printk("\n");
}

#ifdef CONFIG_KALLSYMS
int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
	raw_show_trace = 1;
	return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
#endif

static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
{
	unsigned long sp = regs->regs[29];
	unsigned long ra = regs->regs[31];
	unsigned long pc = regs->cp0_epc;

	if (!task)
		task = current;

	if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
		show_raw_backtrace(sp);
		return;
	}
	printk("Call Trace:\n");
	do {
		print_ip_sym(pc);
		pc = unwind_stack(task, &sp, pc, &ra);
	} while (pc);
	printk("\n");
}

/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */
static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	long stackdata;
	int i;
	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];

	printk("Stack :");
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0))
			printk("\n       ");
		if (i > 39) {
			printk(" ...");
			break;
		}

		if (__get_user(stackdata, sp++)) {
			printk(" (Bad stack address)");
			break;
		}

		printk(" %0*lx", field, stackdata);
		i++;
	}
	printk("\n");
	show_backtrace(task, regs);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	struct pt_regs regs;
	mm_segment_t old_fs = get_fs();
	if (sp) {
		regs.regs[29] = (unsigned long)sp;
		regs.regs[31] = 0;
		regs.cp0_epc = 0;
	} else {
		if (task && task != current) {
			regs.regs[29] = task->thread.reg29;
			regs.regs[31] = 0;
			regs.cp0_epc = task->thread.reg31;
#ifdef CONFIG_KGDB_KDB
		} else if (atomic_read(&kgdb_active) != -1 &&
			   kdb_current_regs) {
			memcpy(&regs, kdb_current_regs, sizeof(regs));
#endif /* CONFIG_KGDB_KDB */
		} else {
			prepare_frametrace(&regs);
		}
	}
	/*
	 * show_stack() deals exclusively with kernel mode, so be sure to
	 * access the stack in the kernel (not user) address space.
	 */
	set_fs(KERNEL_DS);
	show_stacktrace(task, &regs);
	set_fs(old_fs);
}
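/*
 * Usage note (from the __setup() hook above): booting with "raw_show_trace"
 * on the kernel command line disables the frame unwinder in
 * show_backtrace() and falls back to the raw text-scan backtrace of
 * show_raw_backtrace().
 */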
static void show_code(unsigned int __user *pc)
{
	long i;
	unsigned short __user *pc16 = NULL;

	printk("\nCode:");

	if ((unsigned long)pc & 1)
		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
	for(i = -3 ; i < 6 ; i++) {
		unsigned int insn;
		if (pc16 ? __get_user(insn, pc16 + i) :
		    __get_user(insn, pc + i)) {
			printk(" (Bad address in epc)\n");
			break;
		}
		printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
	}
}

static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int cause = regs->cp0_cause;
	unsigned int exccode;
	int i;

	show_regs_print_info(KERN_DEFAULT);

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d   :", i);
		if (i == 0)
			printk(" %0*lx", field, 0UL);
		else if (i == 26 || i == 27)
			printk(" %*s", field, "");
		else
			printk(" %0*lx", field, regs->regs[i]);

		i++;
		if ((i % 4) == 0)
			printk("\n");
	}

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	printk("Acx   : %0*lx\n", field, regs->acx);
#endif
	printk("Hi    : %0*lx\n", field, regs->hi);
	printk("Lo    : %0*lx\n", field, regs->lo);

	/*
	 * Saved cp0 registers
	 */
	printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
	       (void *) regs->cp0_epc);
	printk("ra    : %0*lx %pS\n", field, regs->regs[31],
	       (void *) regs->regs[31]);

	printk("Status: %08x	", (uint32_t) regs->cp0_status);

	if (cpu_has_3kex) {
		if (regs->cp0_status & ST0_KUO)
			printk("KUo ");
		if (regs->cp0_status & ST0_IEO)
			printk("IEo ");
		if (regs->cp0_status & ST0_KUP)
			printk("KUp ");
		if (regs->cp0_status & ST0_IEP)
			printk("IEp ");
		if (regs->cp0_status & ST0_KUC)
			printk("KUc ");
		if (regs->cp0_status & ST0_IEC)
			printk("IEc ");
	} else if (cpu_has_4kex) {
		if (regs->cp0_status & ST0_KX)
			printk("KX ");
		if (regs->cp0_status & ST0_SX)
			printk("SX ");
		if (regs->cp0_status & ST0_UX)
			printk("UX ");
		switch (regs->cp0_status & ST0_KSU) {
		case KSU_USER:
			printk("USER ");
			break;
		case KSU_SUPERVISOR:
			printk("SUPERVISOR ");
			break;
		case KSU_KERNEL:
			printk("KERNEL ");
			break;
		default:
			printk("BAD_MODE ");
			break;
		}
		if (regs->cp0_status & ST0_ERL)
			printk("ERL ");
		if (regs->cp0_status & ST0_EXL)
			printk("EXL ");
		if (regs->cp0_status & ST0_IE)
			printk("IE ");
	}
	printk("\n");

	exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);

	if (1 <= exccode && exccode <= 5)
		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

	printk("PrId  : %08x (%s)\n", read_c0_prid(),
	       cpu_name_string());
}
/*
 * FIXME: really the generic show_regs should take a const pointer argument.
 */
void show_regs(struct pt_regs *regs)
{
	__show_regs((struct pt_regs *)regs);
}

void show_registers(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	mm_segment_t old_fs = get_fs();

	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
	       current->comm, current->pid, current_thread_info(), current,
	       field, current_thread_info()->tp_value);
	if (cpu_has_userlocal) {
		unsigned long tls;

		tls = read_c0_userlocal();
		if (tls != current_thread_info()->tp_value)
			printk("*HwTLS: %0*lx\n", field, tls);
	}

	if (!user_mode(regs))
		/* Necessary for getting the correct stack content */
		set_fs(KERNEL_DS);
	show_stacktrace(current, regs);
	show_code((unsigned int __user *) regs->cp0_epc);
	printk("\n");
	set_fs(old_fs);
}

static DEFINE_RAW_SPINLOCK(die_lock);

void __noreturn die(const char *str, struct pt_regs *regs)
{
	static int die_counter;
	int sig = SIGSEGV;

	oops_enter();

	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
		       SIGSEGV) == NOTIFY_STOP)
		sig = 0;

	console_verbose();
	raw_spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	do_exit(sig);
}

extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];

__asm__(
"	.section	__dbe_table, \"a\"\n"
"	.previous			\n");

/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
	if (!e)
		e = search_module_dbetables(addr);
	return e;
}
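/*
 * Note on the board_be_handler protocol (derived from the switch in do_be()
 * below): the handler returns MIPS_BE_DISCARD to silently ignore the bus
 * error, MIPS_BE_FIXUP to resume at the exception-table fixup (if any), or
 * MIPS_BE_FATAL to treat it as unrecoverable.
 */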
"Data" : "Instruction", 467 field, regs->cp0_epc, field, regs->regs[31]); 468 if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr, 469 SIGBUS) == NOTIFY_STOP) 470 goto out; 471 472 die_if_kernel("Oops", regs); 473 force_sig(SIGBUS, current); 474 475 out: 476 exception_exit(prev_state); 477 } 478 479 /* 480 * ll/sc, rdhwr, sync emulation 481 */ 482 483 #define OPCODE 0xfc000000 484 #define BASE 0x03e00000 485 #define RT 0x001f0000 486 #define OFFSET 0x0000ffff 487 #define LL 0xc0000000 488 #define SC 0xe0000000 489 #define SPEC0 0x00000000 490 #define SPEC3 0x7c000000 491 #define RD 0x0000f800 492 #define FUNC 0x0000003f 493 #define SYNC 0x0000000f 494 #define RDHWR 0x0000003b 495 496 /* microMIPS definitions */ 497 #define MM_POOL32A_FUNC 0xfc00ffff 498 #define MM_RDHWR 0x00006b3c 499 #define MM_RS 0x001f0000 500 #define MM_RT 0x03e00000 501 502 /* 503 * The ll_bit is cleared by r*_switch.S 504 */ 505 506 unsigned int ll_bit; 507 struct task_struct *ll_task; 508 509 static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode) 510 { 511 unsigned long value, __user *vaddr; 512 long offset; 513 514 /* 515 * analyse the ll instruction that just caused a ri exception 516 * and put the referenced address to addr. 517 */ 518 519 /* sign extend offset */ 520 offset = opcode & OFFSET; 521 offset <<= 16; 522 offset >>= 16; 523 524 vaddr = (unsigned long __user *) 525 ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset); 526 527 if ((unsigned long)vaddr & 3) 528 return SIGBUS; 529 if (get_user(value, vaddr)) 530 return SIGSEGV; 531 532 preempt_disable(); 533 534 if (ll_task == NULL || ll_task == current) { 535 ll_bit = 1; 536 } else { 537 ll_bit = 0; 538 } 539 ll_task = current; 540 541 preempt_enable(); 542 543 regs->regs[(opcode & RT) >> 16] = value; 544 545 return 0; 546 } 547 548 static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode) 549 { 550 unsigned long __user *vaddr; 551 unsigned long reg; 552 long offset; 553 554 /* 555 * analyse the sc instruction that just caused a ri exception 556 * and put the referenced address to addr. 557 */ 558 559 /* sign extend offset */ 560 offset = opcode & OFFSET; 561 offset <<= 16; 562 offset >>= 16; 563 564 vaddr = (unsigned long __user *) 565 ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset); 566 reg = (opcode & RT) >> 16; 567 568 if ((unsigned long)vaddr & 3) 569 return SIGBUS; 570 571 preempt_disable(); 572 573 if (ll_bit == 0 || ll_task != current) { 574 regs->regs[reg] = 0; 575 preempt_enable(); 576 return 0; 577 } 578 579 preempt_enable(); 580 581 if (put_user(regs->regs[reg], vaddr)) 582 return SIGSEGV; 583 584 regs->regs[reg] = 1; 585 586 return 0; 587 } 588 589 /* 590 * ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is both 591 * opcodes are supposed to result in coprocessor unusable exceptions if 592 * executed on ll/sc-less processors. That's the theory. In practice a 593 * few processors such as NEC's VR4100 throw reserved instruction exceptions 594 * instead, so we're doing the emulation thing in both exception handlers. 595 */ 596 static int simulate_llsc(struct pt_regs *regs, unsigned int opcode) 597 { 598 if ((opcode & OPCODE) == LL) { 599 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 600 1, regs, 0); 601 return simulate_ll(regs, opcode); 602 } 603 if ((opcode & OPCODE) == SC) { 604 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 605 1, regs, 0); 606 return simulate_sc(regs, opcode); 607 } 608 609 return -1; /* Must be something else ... 
/*
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware.
 */
static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
{
	struct thread_info *ti = task_thread_info(current);

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
			1, regs, 0);
	switch (rd) {
	case MIPS_HWR_CPUNUM:		/* CPU number */
		regs->regs[rt] = smp_processor_id();
		return 0;
	case MIPS_HWR_SYNCISTEP:	/* SYNCI length */
		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
				     current_cpu_data.icache.linesz);
		return 0;
	case MIPS_HWR_CC:		/* Read count register */
		regs->regs[rt] = read_c0_count();
		return 0;
	case MIPS_HWR_CCRES:		/* Count register resolution */
		switch (current_cpu_type()) {
		case CPU_20KC:
		case CPU_25KF:
			regs->regs[rt] = 1;
			break;
		default:
			regs->regs[rt] = 2;
		}
		return 0;
	case MIPS_HWR_ULR:		/* Read UserLocal register */
		regs->regs[rt] = ti->tp_value;
		return 0;
	default:
		return -1;
	}
}

static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
		int rd = (opcode & RD) >> 11;
		int rt = (opcode & RT) >> 16;

		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	/* Not ours.  */
	return -1;
}

static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
		int rd = (opcode & MM_RS) >> 16;
		int rt = (opcode & MM_RT) >> 21;

		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	/* Not ours.  */
	return -1;
}

static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return 0;
	}

	return -1;			/* Must be something else ... */
}

asmlinkage void do_ov(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	siginfo_t info = {
		.si_signo = SIGFPE,
		.si_code = FPE_INTOVF,
		.si_addr = (void __user *)regs->cp0_epc,
	};

	prev_state = exception_enter();
	die_if_kernel("Integer overflow", regs);

	force_sig_info(SIGFPE, &info, current);
	exception_exit(prev_state);
}
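/*
 * Typical consumer of the rdhwr emulation above (illustrative): userland
 * TLS access reads hardware register 29 (ULR), e.g. "rdhwr $3, $29".  On
 * CPUs without HWREna.ULR support the instruction traps as RI and ends up
 * in simulate_rdhwr() via do_ri(), with rd = MIPS_HWR_ULR.
 */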
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
	struct siginfo si = { 0 };
	struct vm_area_struct *vma;

	switch (sig) {
	case 0:
		return 0;

	case SIGFPE:
		si.si_addr = fault_addr;
		si.si_signo = sig;
		/*
		 * Inexact can happen together with Overflow or Underflow.
		 * Respect the mask to deliver the correct exception.
		 */
		fcr31 &= (fcr31 & FPU_CSR_ALL_E) <<
			 (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E));
		if (fcr31 & FPU_CSR_INV_X)
			si.si_code = FPE_FLTINV;
		else if (fcr31 & FPU_CSR_DIV_X)
			si.si_code = FPE_FLTDIV;
		else if (fcr31 & FPU_CSR_OVF_X)
			si.si_code = FPE_FLTOVF;
		else if (fcr31 & FPU_CSR_UDF_X)
			si.si_code = FPE_FLTUND;
		else if (fcr31 & FPU_CSR_INE_X)
			si.si_code = FPE_FLTRES;
		else
			si.si_code = __SI_FAULT;
		force_sig_info(sig, &si, current);
		return 1;

	case SIGBUS:
		si.si_addr = fault_addr;
		si.si_signo = sig;
		si.si_code = BUS_ADRERR;
		force_sig_info(sig, &si, current);
		return 1;

	case SIGSEGV:
		si.si_addr = fault_addr;
		si.si_signo = sig;
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)fault_addr);
		if (vma && (vma->vm_start <= (unsigned long)fault_addr))
			si.si_code = SEGV_ACCERR;
		else
			si.si_code = SEGV_MAPERR;
		up_read(&current->mm->mmap_sem);
		force_sig_info(sig, &si, current);
		return 1;

	default:
		force_sig(sig, current);
		return 1;
	}
}

static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
		       unsigned long old_epc, unsigned long old_ra)
{
	union mips_instruction inst = { .word = opcode };
	void __user *fault_addr;
	unsigned long fcr31;
	int sig;

	/* If it's obviously not an FP instruction, skip it */
	switch (inst.i_format.opcode) {
	case cop1_op:
	case cop1x_op:
	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
		break;

	default:
		return -1;
	}

	/*
	 * do_ri skipped over the instruction via compute_return_epc, undo
	 * that for the FPU emulator.
	 */
	regs->cp0_epc = old_epc;
	regs->regs[31] = old_ra;

	/* Save the FP context to struct thread_struct */
	lose_fpu(1);

	/* Run the emulator */
	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
				       &fault_addr);
	fcr31 = current->thread.fpu.fcr31;

	/*
	 * We can't allow the emulated instruction to leave any of
	 * the cause bits set in $fcr31.
	 */
	current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

	/* Restore the hardware register state */
	own_fpu(1);

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcr31);

	return 0;
}
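/*
 * Worked example for the enable/cause masking in the SIGFPE case of
 * process_fpemu_return() above (illustrative): the FCSR enable bits sit
 * five positions below their cause counterparts, which is what the ffs()
 * difference computes.  If only Overflow is enabled but the emulator
 * raised Overflow together with Inexact, the shifted mask clears the
 * Inexact cause bit and the signal is delivered with si_code ==
 * FPE_FLTOVF rather than FPE_FLTRES.
 */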
/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
	enum ctx_state prev_state;
	void __user *fault_addr;
	int sig;

	prev_state = exception_enter();
	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
		       SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear FCSR.Cause before enabling interrupts */
	write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X);
	local_irq_enable();

	die_if_kernel("FP exception in kernel code", regs);

	if (fcr31 & FPU_CSR_UNI_X) {
		/*
		 * Unimplemented operation exception.  If we've got the full
		 * software emulator on-board, let's use it...
		 *
		 * Force FPU to dump state into task/thread context.  We're
		 * moving a lot of data here for what is probably a single
		 * instruction, but the alternative is to pre-decode the FP
		 * register operands before invoking the emulator, which seems
		 * a bit extreme for what should be an infrequent event.
		 */
		/* Ensure that 'resume' does not overwrite the saved FP
		   context again.  */
		lose_fpu(1);

		/* Run the emulator */
		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);
		fcr31 = current->thread.fpu.fcr31;

		/*
		 * We can't allow the emulated instruction to leave any of
		 * the cause bits set in $fcr31.
		 */
		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

		/* Restore the hardware register state */
		own_fpu(1);	/* Using the FPU again.	 */
	} else {
		sig = SIGFPE;
		fault_addr = (void __user *) regs->cp0_epc;
	}

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcr31);

out:
	exception_exit(prev_state);
}
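/*
 * Background for the BRK_* codes handled below (illustrative): compilers
 * conventionally mark integer divide-by-zero checks with code 7
 * (BRK_DIVZERO) and arithmetic overflow checks with code 6 (BRK_OVERFLOW),
 * e.g. a "break 7" or "teq rs, rt, 7" emitted after a division, which is
 * why those two codes are turned into SIGFPE rather than SIGTRAP.
 */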
void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
	const char *str)
{
	siginfo_t info = { 0 };
	char b[40];

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
			 SIGTRAP) == NOTIFY_STOP)
		return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

	if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
		       SIGTRAP) == NOTIFY_STOP)
		return;

	/*
	 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
	 * insns, even for trap and break codes that indicate arithmetic
	 * failures.  Weird ...
	 * But should we continue the brokenness???  --macro
	 */
	switch (code) {
	case BRK_OVERFLOW:
	case BRK_DIVZERO:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		if (code == BRK_DIVZERO)
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_addr = (void __user *) regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		break;
	case BRK_BUG:
		die_if_kernel("Kernel bug detected", regs);
		force_sig(SIGTRAP, current);
		break;
	case BRK_MEMU:
		/*
		 * This breakpoint code is used by the FPU emulator to retake
		 * control of the CPU after executing the instruction from the
		 * delay slot of an emulated branch.
		 *
		 * Terminate if exception was recognized as a delay slot return
		 * otherwise handle as normal.
		 */
		if (do_dsemulret(regs))
			return;

		die_if_kernel("Math emu break/trap", regs);
		force_sig(SIGTRAP, current);
		break;
	default:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		if (si_code) {
			info.si_signo = SIGTRAP;
			info.si_code = si_code;
			force_sig_info(SIGTRAP, &info, current);
		} else {
			force_sig(SIGTRAP, current);
		}
	}
}

asmlinkage void do_bp(struct pt_regs *regs)
{
	unsigned long epc = msk_isa16_mode(exception_epc(regs));
	unsigned int opcode, bcode;
	enum ctx_state prev_state;
	mm_segment_t seg;

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		u16 instr[2];

		if (__get_user(instr[0], (u16 __user *)epc))
			goto out_sigsegv;

		if (!cpu_has_mmips) {
			/* MIPS16e mode */
			bcode = (instr[0] >> 5) & 0x3f;
		} else if (mm_insn_16bit(instr[0])) {
			/* 16-bit microMIPS BREAK */
			bcode = instr[0] & 0xf;
		} else {
			/* 32-bit microMIPS BREAK */
			if (__get_user(instr[1], (u16 __user *)(epc + 2)))
				goto out_sigsegv;
			opcode = (instr[0] << 16) | instr[1];
			bcode = (opcode >> 6) & ((1 << 20) - 1);
		}
	} else {
		if (__get_user(opcode, (unsigned int __user *)epc))
			goto out_sigsegv;
		bcode = (opcode >> 6) & ((1 << 20) - 1);
	}

	/*
	 * There is an ancient bug in the MIPS assemblers that the break
	 * code is placed at bit 16 instead of bit 6 in the opcode.
	 * Gas is bug-compatible, but not always, grrr...
	 * We handle both cases with a simple heuristic.  --macro
	 */
	if (bcode >= (1 << 10))
		bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);
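	/*
	 * Worked example of the heuristic above (illustrative): a "break 5"
	 * assembled with the buggy convention puts 5 at bit 16, so the
	 * 20-bit extraction yields 5 << 10 = 5120.  That is >= 1 << 10, so
	 * the halves are swapped and bcode becomes 5 again.  Codes below
	 * 1024 assembled with the correct convention pass through
	 * unchanged; correctly encoded codes >= 1024 are the ambiguous
	 * case the comment grumbles about.
	 */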
	/*
	 * notify the kprobe handlers, if instruction is likely to
	 * pertain to them.
	 */
	switch (bcode) {
	case BRK_UPROBE:
		if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_UPROBE_XOL:
		if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_BP:
		if (notify_die(DIE_BREAK, "debug", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	default:
		break;
	}

	do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");

out:
	set_fs(seg);
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
	goto out;
}

asmlinkage void do_tr(struct pt_regs *regs)
{
	u32 opcode, tcode = 0;
	enum ctx_state prev_state;
	u16 instr[2];
	mm_segment_t seg;
	unsigned long epc = msk_isa16_mode(exception_epc(regs));

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(get_ds());

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
		    __get_user(instr[1], (u16 __user *)(epc + 2)))
			goto out_sigsegv;
		opcode = (instr[0] << 16) | instr[1];
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 12) & ((1 << 4) - 1);
	} else {
		if (__get_user(opcode, (u32 __user *)epc))
			goto out_sigsegv;
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 6) & ((1 << 10) - 1);
	}

	do_trap_or_bp(regs, tcode, 0, "Trap");

out:
	set_fs(seg);
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
	goto out;
}
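/*
 * Encoding note for do_tr() above (illustrative): register trap
 * instructions such as "teq rs, rt, code" carry a 10-bit code in bits
 * 15..6, which is what the (opcode >> 6) extraction recovers; the
 * immediate forms (teqi and friends) use those bits for the immediate,
 * hence "Immediate versions don't provide a code".
 */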
asmlinkage void do_ri(struct pt_regs *regs)
{
	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
	unsigned long old_epc = regs->cp0_epc;
	unsigned long old31 = regs->regs[31];
	enum ctx_state prev_state;
	unsigned int opcode = 0;
	int status = -1;

	/*
	 * Avoid any kernel code.  Just emulate the R2 instruction
	 * as quickly as possible.
	 */
	if (mipsr2_emulation && cpu_has_mips_r6 &&
	    likely(user_mode(regs)) &&
	    likely(get_user(opcode, epc) >= 0)) {
		unsigned long fcr31 = 0;

		status = mipsr2_decoder(regs, opcode, &fcr31);
		switch (status) {
		case 0:
		case SIGEMT:
			task_thread_info(current)->r2_emul_return = 1;
			return;
		case SIGILL:
			goto no_r2_instr;
		default:
			process_fpemu_return(status,
					     &current->thread.cp0_baduaddr,
					     fcr31);
			task_thread_info(current)->r2_emul_return = 1;
			return;
		}
	}

no_r2_instr:

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;

	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
		       SIGILL) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(compute_return_epc(regs) < 0))
		goto out;

	if (!get_isa16_mode(regs->cp0_epc)) {
		if (unlikely(get_user(opcode, epc) < 0))
			status = SIGSEGV;

		if (!cpu_has_llsc && status < 0)
			status = simulate_llsc(regs, opcode);

		if (status < 0)
			status = simulate_rdhwr_normal(regs, opcode);

		if (status < 0)
			status = simulate_sync(regs, opcode);

		if (status < 0)
			status = simulate_fp(regs, opcode, old_epc, old31);
	} else if (cpu_has_mmips) {
		unsigned short mmop[2] = { 0 };

		if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
			status = SIGSEGV;
		if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
			status = SIGSEGV;
		opcode = mmop[0];
		opcode = (opcode << 16) | mmop[1];

		if (status < 0)
			status = simulate_rdhwr_mm(regs, opcode);
	}

	if (status < 0)
		status = SIGILL;

	if (unlikely(status > 0)) {
		regs->cp0_epc = old_epc;	/* Undo skip-over.  */
		regs->regs[31] = old31;
		force_sig(status, current);
	}

out:
	exception_exit(prev_state);
}

/*
 * MIPS MT processors may have fewer FPU contexts than CPU threads.  If we've
 * emulated more than some threshold number of instructions, force migration to
 * a "CPU" that has FP support.
 */
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
	if (mt_fpemul_threshold > 0 &&
	    ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
		/*
		 * If there's no FPU present, or if the application has already
		 * restricted the allowed set to exclude any CPUs with FPUs,
		 * we'll skip the procedure.
		 */
		if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
			cpumask_t tmask;

			current->thread.user_cpus_allowed
				= current->cpus_allowed;
			cpumask_and(&tmask, &current->cpus_allowed,
				    &mt_fpu_cpumask);
			set_cpus_allowed_ptr(current, &tmask);
			set_thread_flag(TIF_FPUBOUND);
		}
	}
#endif /* CONFIG_MIPS_MT_FPAFF */
}
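/*
 * Usage note for the notifier chain below (illustrative): platforms with a
 * real coprocessor 2, such as Cavium Octeon's crypto engine, hook the CU2
 * exception by registering a handler on this chain (see the cu2_notifier()
 * helper in <asm/cop2.h>) so they can enable COP2 and restore its context
 * lazily instead of taking the default SIGILL path.
 */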
/*
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(cu2_chain);

int __ref register_cu2_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cu2_chain, nb);
}

int cu2_notifier_call_chain(unsigned long val, void *v)
{
	return raw_notifier_call_chain(&cu2_chain, val, v);
}

static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
	void *data)
{
	struct pt_regs *regs = data;

	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
			      "instruction", regs);
	force_sig(SIGILL, current);

	return NOTIFY_OK;
}

static int wait_on_fp_mode_switch(atomic_t *p)
{
	/*
	 * The FP mode for this task is currently being switched.  That may
	 * involve modifications to the format of this task's FP context which
	 * make it unsafe to proceed with execution for the moment.  Instead,
	 * schedule some other task.
	 */
	schedule();
	return 0;
}

static int enable_restore_fp_context(int msa)
{
	int err, was_fpu_owner, prior_msa;

	/*
	 * If an FP mode switch is currently underway, wait for it to
	 * complete before proceeding.
	 */
	wait_on_atomic_t(&current->mm->context.fp_mode_switching,
			 wait_on_fp_mode_switch, TASK_KILLABLE);

	if (!used_math()) {
		/* First time FP context user.  */
		preempt_disable();
		err = init_fpu();
		if (msa && !err) {
			enable_msa();
			init_msa_upper();
			set_thread_flag(TIF_USEDMSA);
			set_thread_flag(TIF_MSA_CTX_LIVE);
		}
		preempt_enable();
		if (!err)
			set_used_math();
		return err;
	}

	/*
	 * This task has formerly used the FP context.
	 *
	 * If this thread has no live MSA vector context then we can simply
	 * restore the scalar FP context.  If it has live MSA vector context
	 * (that is, it has or may have used MSA since last performing a
	 * function call) then we'll need to restore the vector context.  This
	 * applies even if we're currently only executing a scalar FP
	 * instruction.  This is because if we were to later execute an MSA
	 * instruction then we'd either have to:
	 *
	 *  - Restore the vector context & clobber any registers modified by
	 *    scalar FP instructions between now & then.
	 *
	 * or
	 *
	 *  - Not restore the vector context & lose the most significant bits
	 *    of all vector registers.
	 *
	 * Neither of those options is acceptable.  We cannot restore the least
	 * significant bits of the registers now & only restore the most
	 * significant bits later because the most significant bits of any
	 * vector registers whose aliased FP register is modified now will have
	 * been zeroed.  We'd have no way to know that when restoring the
	 * vector context & thus may load an outdated value for the most
	 * significant bits of a vector register.
	 */
	if (!msa && !thread_msa_context_live())
		return own_fpu(1);

	/*
	 * This task is using or has previously used MSA.  Thus we require
	 * that Status.FR == 1.
	 */
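	/*
	 * (Background, illustrative: each 128-bit MSA vector register
	 * aliases a 64-bit FPR in its least significant bits, so MSA is
	 * only usable with the FR=1 register model; the FR=0 paired-32-bit
	 * model is incompatible, which is also why do_msa() below refuses
	 * tasks running with TIF_32BIT_FPREGS.)
	 */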
	preempt_disable();
	was_fpu_owner = is_fpu_owner();
	err = own_fpu_inatomic(0);
	if (err)
		goto out;

	enable_msa();
	write_msa_csr(current->thread.fpu.msacsr);
	set_thread_flag(TIF_USEDMSA);

	/*
	 * If this is the first time that the task is using MSA and it has
	 * previously used scalar FP in this time slice then we already have
	 * FP context which we shouldn't clobber.  We do however need to clear
	 * the upper 64b of each vector register so that this task has no
	 * opportunity to see data left behind by another.
	 */
	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
	if (!prior_msa && was_fpu_owner) {
		init_msa_upper();

		goto out;
	}

	if (!prior_msa) {
		/*
		 * Restore the least significant 64b of each vector register
		 * from the existing scalar FP context.
		 */
		_restore_fp(current);

		/*
		 * The task has not formerly used MSA, so clear the upper 64b
		 * of each vector register such that it cannot see data left
		 * behind by another task.
		 */
		init_msa_upper();
	} else {
		/* We need to restore the vector context.  */
		restore_msa(current);

		/* Restore the scalar FP control & status register */
		if (!was_fpu_owner)
			write_32bit_cp1_register(CP1_STATUS,
						 current->thread.fpu.fcr31);
	}

out:
	preempt_enable();

	return 0;
}

asmlinkage void do_cpu(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	unsigned int __user *epc;
	unsigned long old_epc, old31;
	void __user *fault_addr;
	unsigned int opcode;
	unsigned long fcr31;
	unsigned int cpid;
	int status, err;
	int sig;

	prev_state = exception_enter();
	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

	if (cpid != 2)
		die_if_kernel("do_cpu invoked from kernel context!", regs);

	switch (cpid) {
	case 0:
		epc = (unsigned int __user *)exception_epc(regs);
		old_epc = regs->cp0_epc;
		old31 = regs->regs[31];
		opcode = 0;
		status = -1;

		if (unlikely(compute_return_epc(regs) < 0))
			break;

		if (!get_isa16_mode(regs->cp0_epc)) {
			if (unlikely(get_user(opcode, epc) < 0))
				status = SIGSEGV;

			if (!cpu_has_llsc && status < 0)
				status = simulate_llsc(regs, opcode);
		}

		if (status < 0)
			status = SIGILL;

		if (unlikely(status > 0)) {
			regs->cp0_epc = old_epc;	/* Undo skip-over.  */
			regs->regs[31] = old31;
			force_sig(status, current);
		}

		break;

	case 3:
		/*
		 * The COP3 opcode space and consequently the CP0.Status.CU3
		 * bit and the CP0.Cause.CE=3 encoding have been removed as
		 * of the MIPS III ISA.	 From the MIPS IV and MIPS32r2 ISAs
		 * up the space has been reused for COP1X instructions, that
		 * are enabled by the CP0.Status.CU1 bit and consequently
		 * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable
		 * exceptions.	Some FPU-less processors that implement one
		 * of these ISAs however use this code erroneously for COP1X
		 * instructions.  Therefore we redirect this trap to the FP
		 * emulator too.
		 */
		if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
			force_sig(SIGILL, current);
			break;
		}
		/* Fall through.  */
	case 1:
		err = enable_restore_fp_context(0);

		if (raw_cpu_has_fpu && !err)
			break;

		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
					       &fault_addr);
		fcr31 = current->thread.fpu.fcr31;

		/*
		 * We can't allow the emulated instruction to leave
		 * any of the cause bits set in $fcr31.
		 */
		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

		/* Send a signal if required.  */
		if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
			mt_ase_fp_affinity();

		break;

	case 2:
		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
		break;
	}

	exception_exit(prev_state);
}

asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
		       current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear MSACSR.Cause before enabling interrupts */
	write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
	local_irq_enable();

	die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
	force_sig(SIGFPE, current);
out:
	exception_exit(prev_state);
}

asmlinkage void do_msa(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	int err;

	prev_state = exception_enter();

	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
		force_sig(SIGILL, current);
		goto out;
	}

	die_if_kernel("do_msa invoked from kernel context!", regs);

	err = enable_restore_fp_context(1);
	if (err)
		force_sig(SIGILL, current);
out:
	exception_exit(prev_state);
}

asmlinkage void do_mdmx(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	force_sig(SIGILL, current);
	exception_exit(prev_state);
}

/*
 * Called with interrupts disabled.
 */
asmlinkage void do_watch(struct pt_regs *regs)
{
	siginfo_t info = { .si_signo = SIGTRAP, .si_code = TRAP_HWBKPT };
	enum ctx_state prev_state;

	prev_state = exception_enter();
	/*
	 * Clear WP (bit 22) bit of cause register so we don't loop
	 * forever.
	 */
	clear_c0_cause(CAUSEF_WP);

	/*
	 * If the current thread has the watch registers loaded, save
	 * their values and send SIGTRAP.  Otherwise another thread
	 * left the registers set, clear them and continue.
	 */
	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
		mips_read_watch_registers();
		local_irq_enable();
		force_sig_info(SIGTRAP, &info, current);
	} else {
		mips_clear_watch_registers();
		local_irq_enable();
	}
	exception_exit(prev_state);
}
asmlinkage void do_mcheck(struct pt_regs *regs)
{
	int multi_match = regs->cp0_status & ST0_TS;
	enum ctx_state prev_state;
	mm_segment_t old_fs = get_fs();

	prev_state = exception_enter();
	show_regs(regs);

	if (multi_match) {
		dump_tlb_regs();
		pr_info("\n");
		dump_tlb_all();
	}

	if (!user_mode(regs))
		set_fs(KERNEL_DS);

	show_code((unsigned int __user *) regs->cp0_epc);

	set_fs(old_fs);

	/*
	 * Some chips may have other causes of machine check (e.g. the SB1
	 * graduation timer).
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (multi_match) ? "" : "not ");
}

asmlinkage void do_mt(struct pt_regs *regs)
{
	int subcode;

	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
			>> VPECONTROL_EXCPT_SHIFT;
	switch (subcode) {
	case 0:
		printk(KERN_DEBUG "Thread Underflow\n");
		break;
	case 1:
		printk(KERN_DEBUG "Thread Overflow\n");
		break;
	case 2:
		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
		break;
	case 3:
		printk(KERN_DEBUG "Gating Storage Exception\n");
		break;
	case 4:
		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
		break;
	case 5:
		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
		break;
	default:
		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
			subcode);
		break;
	}
	die_if_kernel("MIPS MT Thread exception in kernel", regs);

	force_sig(SIGILL, current);
}


asmlinkage void do_dsp(struct pt_regs *regs)
{
	if (cpu_has_dsp)
		panic("Unexpected DSP exception");

	force_sig(SIGILL, current);
}

asmlinkage void do_reserved(struct pt_regs *regs)
{
	/*
	 * Game over - no way to handle this if it ever occurs.	 Most probably
	 * caused by a new unknown cpu type or after another deadly
	 * hard/software error.
	 */
	show_regs(regs);
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
}

static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
	l1parity = 0;
	return 1;
}
__setup("nol1par", nol1parity);
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
	l2parity = 0;
	return 1;
}
__setup("nol2par", nol2parity);
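/*
 * Usage note (from the __setup() hooks above): booting with "nol1par" or
 * "nol2par" on the kernel command line forces the corresponding parity
 * protection off even on CPUs where parity_protection_init() below would
 * probe it as available.
 */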
/*
 * Some MIPS CPUs can enable/disable cache parity detection, but they
 * do it in different ways.
 */
static inline void parity_protection_init(void)
{
	switch (current_cpu_type()) {
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
	case CPU_1074K:
	case CPU_INTERAPTIV:
	case CPU_PROAPTIV:
	case CPU_P5600:
	case CPU_QEMU_GENERIC:
	case CPU_I6400:
	case CPU_P6600:
		{
#define ERRCTL_PE	0x80000000
#define ERRCTL_L2P	0x00800000
			unsigned long errctl;
			unsigned int l1parity_present, l2parity_present;

			errctl = read_c0_ecc();
			errctl &= ~(ERRCTL_PE|ERRCTL_L2P);

			/* probe L1 parity support */
			write_c0_ecc(errctl | ERRCTL_PE);
			back_to_back_c0_hazard();
			l1parity_present = (read_c0_ecc() & ERRCTL_PE);

			/* probe L2 parity support */
			write_c0_ecc(errctl|ERRCTL_L2P);
			back_to_back_c0_hazard();
			l2parity_present = (read_c0_ecc() & ERRCTL_L2P);

			if (l1parity_present && l2parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
				if (l1parity ^ l2parity)
					errctl |= ERRCTL_L2P;
			} else if (l1parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
			} else if (l2parity_present) {
				if (l2parity)
					errctl |= ERRCTL_L2P;
			} else {
				/* No parity available */
			}

			printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);

			write_c0_ecc(errctl);
			back_to_back_c0_hazard();
			errctl = read_c0_ecc();
			printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);

			if (l1parity_present)
				printk(KERN_INFO "Cache parity protection %sabled\n",
				       (errctl & ERRCTL_PE) ? "en" : "dis");

			if (l2parity_present) {
				if (l1parity_present && l1parity)
					errctl ^= ERRCTL_L2P;
				printk(KERN_INFO "L2 cache parity protection %sabled\n",
				       (errctl & ERRCTL_L2P) ? "en" : "dis");
			}
		}
		break;

	case CPU_5KC:
	case CPU_5KE:
	case CPU_LOONGSON1:
		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		/* Set the PE bit (bit 31) in the c0_errctl register.  */
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
		break;
	case CPU_20KC:
	case CPU_25KF:
		/* Clear the DE bit (bit 16) in the c0_status register.  */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
		break;
	default:
		break;
	}
}
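/*
 * Note on the probe technique in parity_protection_init() (illustrative):
 * ErrCtl.PE and ErrCtl.L2P are written as 1 and then read back; on cores
 * where the corresponding parity logic is not implemented the bit reads
 * back as 0, so a bit that sticks is taken as "support present".
 */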
"ED " : "", 1750 reg_val & (1<<28) ? "ET " : "", 1751 reg_val & (1<<26) ? "EE " : "", 1752 reg_val & (1<<25) ? "EB " : "", 1753 reg_val & (1<<24) ? "EI " : "", 1754 reg_val & (1<<23) ? "E1 " : "", 1755 reg_val & (1<<22) ? "E0 " : ""); 1756 } 1757 printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1)); 1758 1759 #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) 1760 if (reg_val & (1<<22)) 1761 printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0()); 1762 1763 if (reg_val & (1<<23)) 1764 printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1()); 1765 #endif 1766 1767 panic("Can't handle the cache error!"); 1768 } 1769 1770 asmlinkage void do_ftlb(void) 1771 { 1772 const int field = 2 * sizeof(unsigned long); 1773 unsigned int reg_val; 1774 1775 /* For the moment, report the problem and hang. */ 1776 if ((cpu_has_mips_r2_r6) && 1777 (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) || 1778 ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) { 1779 pr_err("FTLB error exception, cp0_ecc=0x%08x:\n", 1780 read_c0_ecc()); 1781 pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc()); 1782 reg_val = read_c0_cacheerr(); 1783 pr_err("c0_cacheerr == %08x\n", reg_val); 1784 1785 if ((reg_val & 0xc0000000) == 0xc0000000) { 1786 pr_err("Decoded c0_cacheerr: FTLB parity error\n"); 1787 } else { 1788 pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n", 1789 reg_val & (1<<30) ? "secondary" : "primary", 1790 reg_val & (1<<31) ? "data" : "insn"); 1791 } 1792 } else { 1793 pr_err("FTLB error exception\n"); 1794 } 1795 /* Just print the cacheerr bits for now */ 1796 cache_parity_error(); 1797 } 1798 1799 /* 1800 * SDBBP EJTAG debug exception handler. 1801 * We skip the instruction and return to the next instruction. 1802 */ 1803 void ejtag_exception_handler(struct pt_regs *regs) 1804 { 1805 const int field = 2 * sizeof(unsigned long); 1806 unsigned long depc, old_epc, old_ra; 1807 unsigned int debug; 1808 1809 printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n"); 1810 depc = read_c0_depc(); 1811 debug = read_c0_debug(); 1812 printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug); 1813 if (debug & 0x80000000) { 1814 /* 1815 * In branch delay slot. 1816 * We cheat a little bit here and use EPC to calculate the 1817 * debug return address (DEPC). EPC is restored after the 1818 * calculation. 1819 */ 1820 old_epc = regs->cp0_epc; 1821 old_ra = regs->regs[31]; 1822 regs->cp0_epc = depc; 1823 compute_return_epc(regs); 1824 depc = regs->cp0_epc; 1825 regs->cp0_epc = old_epc; 1826 regs->regs[31] = old_ra; 1827 } else 1828 depc += 4; 1829 write_c0_depc(depc); 1830 1831 #if 0 1832 printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n"); 1833 write_c0_debug(debug | 0x100); 1834 #endif 1835 } 1836 1837 /* 1838 * NMI exception handler. 1839 * No lock; only written during early bootup by CPU 0. 
/*
 * NMI exception handler.
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(nmi_chain);

int register_nmi_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&nmi_chain, nb);
}

void __noreturn nmi_exception_handler(struct pt_regs *regs)
{
	char str[100];

	nmi_enter();
	raw_notifier_call_chain(&nmi_chain, 0, regs);
	bust_spinlocks(1);
	snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
		 smp_processor_id(), regs->cp0_epc);
	regs->cp0_epc = read_c0_errorepc();
	die(str, regs);
	nmi_exit();
}

#define VECTORSPACING 0x100	/* for EI/VI mode */

unsigned long ebase;
EXPORT_SYMBOL_GPL(ebase);
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];

void __init *set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler;

#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * Only the TLB handlers are cache aligned with an even
	 * address.  All other handlers are on an odd address and
	 * require no modification.  Otherwise, MIPS32 mode will
	 * be entered when handling any TLB exceptions.	 That
	 * would be bad...since we must stay in microMIPS mode.
	 */
	if (!(handler & 0x1))
		handler |= 1;
#endif
	old_handler = xchg(&exception_handlers[n], handler);

	if (n == 0 && cpu_has_divec) {
#ifdef CONFIG_CPU_MICROMIPS
		unsigned long jump_mask = ~((1 << 27) - 1);
#else
		unsigned long jump_mask = ~((1 << 28) - 1);
#endif
		u32 *buf = (u32 *)(ebase + 0x200);
		unsigned int k0 = 26;
		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
			uasm_i_j(&buf, handler & ~jump_mask);
			uasm_i_nop(&buf);
		} else {
			UASM_i_LA(&buf, k0, handler);
			uasm_i_jr(&buf, k0);
			uasm_i_nop(&buf);
		}
		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
	}
	return (void *)old_handler;
}
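/*
 * Note on the patching above (illustrative): a MIPS "j" instruction can
 * only reach targets within the current 256MB (28-bit) segment, or 128MB
 * in microMIPS, which is what jump_mask tests.  When the handler lies in
 * the same segment as ebase + 0x200 a direct jump is emitted; otherwise
 * the address is loaded into k0 ($26) and reached via "jr".
 */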
static void do_default_vi(void)
{
	show_regs(get_irq_regs());
	panic("Caught unexpected vectored interrupt.");
}

static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
	unsigned long handler;
	unsigned long old_handler = vi_handlers[n];
	int srssets = current_cpu_data.srsets;
	u16 *h;
	unsigned char *b;

	BUG_ON(!cpu_has_veic && !cpu_has_vint);

	if (addr == NULL) {
		handler = (unsigned long) do_default_vi;
		srs = 0;
	} else
		handler = (unsigned long) addr;
	vi_handlers[n] = handler;

	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

	if (srs >= srssets)
		panic("Shadow register set %d not supported", srs);

	if (cpu_has_veic) {
		if (board_bind_eic_interrupt)
			board_bind_eic_interrupt(n, srs);
	} else if (cpu_has_vint) {
		/* SRSMap is only defined if shadow sets are implemented */
		if (srssets > 1)
			change_c0_srsmap(0xf << n*4, srs << n*4);
	}

	if (srs == 0) {
		/*
		 * If no shadow set is selected then use the default handler
		 * that does normal register saving and standard interrupt exit
		 */
		extern char except_vec_vi, except_vec_vi_lui;
		extern char except_vec_vi_ori, except_vec_vi_end;
		extern char rollback_except_vec_vi;
		char *vec_start = using_rollback_handler() ?
			&rollback_except_vec_vi : &except_vec_vi;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
		const int lui_offset = &except_vec_vi_lui - vec_start + 2;
		const int ori_offset = &except_vec_vi_ori - vec_start + 2;
#else
		const int lui_offset = &except_vec_vi_lui - vec_start;
		const int ori_offset = &except_vec_vi_ori - vec_start;
#endif
		const int handler_len = &except_vec_vi_end - vec_start;

		if (handler_len > VECTORSPACING) {
			/*
			 * Sigh... panicking won't help as the console
			 * is probably not configured :(
			 */
			panic("VECTORSPACING too small");
		}

		set_handler(((unsigned long)b - ebase), vec_start,
#ifdef CONFIG_CPU_MICROMIPS
				(handler_len - 1));
#else
				handler_len);
#endif
		h = (u16 *)(b + lui_offset);
		*h = (handler >> 16) & 0xffff;
		h = (u16 *)(b + ori_offset);
		*h = (handler & 0xffff);
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+handler_len));
	}
	else {
		/*
		 * In other cases jump directly to the interrupt handler. It
		 * is the handler's responsibility to save registers if required
		 * (eg hi/lo) and return from the exception using "eret".
		 */
		u32 insn;

		h = (u16 *)b;
		/* j handler */
#ifdef CONFIG_CPU_MICROMIPS
		insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
#else
		insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
#endif
		h[0] = (insn >> 16) & 0xffff;
		h[1] = insn & 0xffff;
		h[2] = 0;
		h[3] = 0;
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+8));
	}

	return (void *)old_handler;
}

void *set_vi_handler(int n, vi_handler_t addr)
{
	return set_vi_srs_handler(n, addr, 0);
}

extern void tlb_init(void);

/*
 * Timer interrupt
 */
int cp0_compare_irq;
EXPORT_SYMBOL_GPL(cp0_compare_irq);
int cp0_compare_irq_shift;

/*
 * Performance counter IRQ or -1 if shared with timer
 */
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);

/*
 * Fast debug channel IRQ or -1 if not present
 */
int cp0_fdc_irq;
EXPORT_SYMBOL_GPL(cp0_fdc_irq);

static int noulri;

static int __init ulri_disable(char *s)
{
	pr_info("Disabling ulri\n");
	noulri = 1;

	return 1;
}
__setup("noulri", ulri_disable);
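/*
 * Usage note (from the __setup() above): booting with "noulri" keeps
 * HWREna.ULR disabled in configure_hwrena() below, so userland rdhwr
 * reads of UserLocal trap and are emulated in simulate_rdhwr() instead
 * of being satisfied by hardware.
 */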
extern void tlb_init(void);

/*
 * Timer interrupt
 */
int cp0_compare_irq;
EXPORT_SYMBOL_GPL(cp0_compare_irq);
int cp0_compare_irq_shift;

/*
 * Performance counter IRQ or -1 if shared with timer
 */
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);

/*
 * Fast debug channel IRQ or -1 if not present
 */
int cp0_fdc_irq;
EXPORT_SYMBOL_GPL(cp0_fdc_irq);

static int noulri;

static int __init ulri_disable(char *s)
{
	pr_info("Disabling ulri\n");
	noulri = 1;

	return 1;
}
__setup("noulri", ulri_disable);

/* configure STATUS register */
static void configure_status(void)
{
	/*
	 * Disable coprocessors and select 32-bit or 64-bit addressing
	 * and the 16/32 or 32/32 FPR register model. Reset the BEV
	 * flag that some firmware may have left set and the TS bit (for
	 * IP27). Set XX for ISA IV code to work.
	 */
	unsigned int status_set = ST0_CU0;
#ifdef CONFIG_64BIT
	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
	if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
		status_set |= ST0_XX;
	if (cpu_has_dsp)
		status_set |= ST0_MX;

	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
			 status_set);
}

unsigned int hwrena;
EXPORT_SYMBOL_GPL(hwrena);

/* configure HWRENA register */
static void configure_hwrena(void)
{
	hwrena = cpu_hwrena_impl_bits;

	if (cpu_has_mips_r2_r6)
		hwrena |= MIPS_HWRENA_CPUNUM |
			  MIPS_HWRENA_SYNCISTEP |
			  MIPS_HWRENA_CC |
			  MIPS_HWRENA_CCRES;

	if (!noulri && cpu_has_userlocal)
		hwrena |= MIPS_HWRENA_ULR;

	if (hwrena)
		write_c0_hwrena(hwrena);
}
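/*
 * With MIPS_HWRENA_ULR set, user space may read the UserLocal register
 * (the TLS pointer) directly instead of trapping into the Reserved
 * Instruction handler for emulation. Illustrative user-land assembly:
 *
 *	rdhwr	$3, $29		# $3 = UserLocal / TLS pointer
 *
 * The "noulri" command line option above forces the trap-and-emulate
 * path even on CPUs that implement UserLocal.
 */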
static void configure_exception_vector(void)
{
	if (cpu_has_veic || cpu_has_vint) {
		unsigned long sr = set_c0_status(ST0_BEV);
		write_c0_ebase(ebase);
		write_c0_status(sr);
		/* Setting vector spacing enables EI/VI mode */
		change_c0_intctl(0x3e0, VECTORSPACING);
	}
	if (cpu_has_divec) {
		if (cpu_has_mipsmt) {
			unsigned int vpflags = dvpe();
			set_c0_cause(CAUSEF_IV);
			evpe(vpflags);
		} else
			set_c0_cause(CAUSEF_IV);
	}
}

void per_cpu_trap_init(bool is_boot_cpu)
{
	unsigned int cpu = smp_processor_id();

	configure_status();
	configure_hwrena();

	configure_exception_vector();

	/*
	 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
	 *
	 *  o read IntCtl.IPTI to determine the timer interrupt
	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
	 *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
	 */
	if (cpu_has_mips_r2_r6) {
		/*
		 * We shouldn't trust that a secondary core has a sane EBASE
		 * register, so use the one calculated by the boot CPU.
		 */
		if (!is_boot_cpu)
			write_c0_ebase(ebase);

		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
		cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
		if (!cp0_fdc_irq)
			cp0_fdc_irq = -1;

	} else {
		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
		cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
		cp0_perfcount_irq = -1;
		cp0_fdc_irq = -1;
	}

	if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = asid_first_version(cpu);

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	/* Boot CPU's cache setup in setup_arch(). */
	if (!is_boot_cpu)
		cpu_cache_init();
	tlb_init();
	TLBMISS_HANDLER_SETUP();
}

/* Install CPU exception handler */
void set_handler(unsigned long offset, void *addr, unsigned long size)
{
#ifdef CONFIG_CPU_MICROMIPS
	memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
#else
	memcpy((void *)(ebase + offset), addr, size);
#endif
	local_flush_icache_range(ebase + offset, ebase + offset + size);
}

static char panic_null_cerr[] =
	"Trying to set NULL cache error exception handler";

/*
 * Install uncached CPU exception handler.
 * This is suitable only for the cache error exception, which is the only
 * exception handler that is run uncached.
 */
void set_uncached_handler(unsigned long offset, void *addr,
	unsigned long size)
{
	unsigned long uncached_ebase = CKSEG1ADDR(ebase);

	if (!addr)
		panic(panic_null_cerr);

	memcpy((void *)(uncached_ebase + offset), addr, size);
}

static int __initdata rdhwr_noopt;
static int __init set_rdhwr_noopt(char *str)
{
	rdhwr_noopt = 1;
	return 1;
}

__setup("rdhwr_noopt", set_rdhwr_noopt);
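/*
 * Like "noulri" above, "rdhwr_noopt" is a plain kernel command line
 * flag, e.g. appended by the boot loader as:
 *
 *	console=ttyS0 rdhwr_noopt
 *
 * It makes trap_init() below install the generic handle_ri handler for
 * the Reserved Instruction exception instead of the optimized
 * handle_ri_rdhwr* variants that fast-path RDHWR emulation.
 */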
void __init trap_init(void)
{
	extern char except_vec3_generic;
	extern char except_vec4;
	extern char except_vec3_r4000;
	unsigned long i;

	check_wait();

	if (cpu_has_veic || cpu_has_vint) {
		unsigned long size = 0x200 + VECTORSPACING*64;
		ebase = (unsigned long)
			__alloc_bootmem(size, 1 << fls(size), 0);
	} else {
		ebase = CAC_BASE;

		if (cpu_has_mips_r2_r6)
			ebase += (read_c0_ebase() & 0x3ffff000);
	}

	if (cpu_has_mmips) {
		unsigned int config3 = read_c0_config3();

		if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
			write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
		else
			write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
	}

	if (board_ebase_setup)
		board_ebase_setup();
	per_cpu_trap_init(true);

	/*
	 * Copy the generic exception handlers to their final destination.
	 * This will be overridden later as suitable for a particular
	 * configuration.
	 */
	set_handler(0x180, &except_vec3_generic, 0x80);

	/*
	 * Setup default vectors
	 */
	for (i = 0; i <= 31; i++)
		set_except_vector(i, handle_reserved);

	/*
	 * Copy the EJTAG debug exception vector handler code to its final
	 * destination.
	 */
	if (cpu_has_ejtag && board_ejtag_handler_setup)
		board_ejtag_handler_setup();

	/*
	 * Only some CPUs have the watch exceptions.
	 */
	if (cpu_has_watch)
		set_except_vector(EXCCODE_WATCH, handle_watch);

	/*
	 * Initialise interrupt handlers
	 */
	if (cpu_has_veic || cpu_has_vint) {
		int nvec = cpu_has_veic ? 64 : 8;
		for (i = 0; i < nvec; i++)
			set_vi_handler(i, NULL);
	} else if (cpu_has_divec)
		set_handler(0x200, &except_vec4, 0x8);

	/*
	 * Some CPUs can enable/disable cache parity detection, but do
	 * it in different ways.
	 */
	parity_protection_init();

	/*
	 * The Data Bus Errors / Instruction Bus Errors are signaled
	 * by external hardware. Therefore these two exceptions
	 * may have board specific handlers.
	 */
	if (board_be_init)
		board_be_init();

	set_except_vector(EXCCODE_INT, using_rollback_handler() ?
					rollback_handle_int : handle_int);
	set_except_vector(EXCCODE_MOD, handle_tlbm);
	set_except_vector(EXCCODE_TLBL, handle_tlbl);
	set_except_vector(EXCCODE_TLBS, handle_tlbs);

	set_except_vector(EXCCODE_ADEL, handle_adel);
	set_except_vector(EXCCODE_ADES, handle_ades);

	set_except_vector(EXCCODE_IBE, handle_ibe);
	set_except_vector(EXCCODE_DBE, handle_dbe);

	set_except_vector(EXCCODE_SYS, handle_sys);
	set_except_vector(EXCCODE_BP, handle_bp);
	set_except_vector(EXCCODE_RI, rdhwr_noopt ? handle_ri :
			  (cpu_has_vtag_icache ?
			   handle_ri_rdhwr_vivt : handle_ri_rdhwr));
	set_except_vector(EXCCODE_CPU, handle_cpu);
	set_except_vector(EXCCODE_OV, handle_ov);
	set_except_vector(EXCCODE_TR, handle_tr);
	set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);

	if (current_cpu_type() == CPU_R6000 ||
	    current_cpu_type() == CPU_R6000A) {
		/*
		 * The R6000 is the only R-series CPU that features a machine
		 * check exception (similar to the R4000 cache error) and
		 * unaligned ldc1/sdc1 exception. The handlers have not been
		 * written yet. Well, anyway there is no R6000 machine on the
		 * current list of targets for Linux/MIPS.
		 * (Duh, crap, there is someone with a triple R6k machine)
		 */
		/* set_except_vector(14, handle_mc); */
		/* set_except_vector(15, handle_ndc); */
	}

	if (board_nmi_handler_setup)
		board_nmi_handler_setup();

	if (cpu_has_fpu && !cpu_has_nofpuex)
		set_except_vector(EXCCODE_FPE, handle_fpe);

	set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);

	if (cpu_has_rixiex) {
		set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
		set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0);
	}

	set_except_vector(EXCCODE_MSADIS, handle_msa);
	set_except_vector(EXCCODE_MDMX, handle_mdmx);

	if (cpu_has_mcheck)
		set_except_vector(EXCCODE_MCHECK, handle_mcheck);

	if (cpu_has_mipsmt)
		set_except_vector(EXCCODE_THREAD, handle_mt);

	set_except_vector(EXCCODE_DSPDIS, handle_dsp);

	if (board_cache_error_setup)
		board_cache_error_setup();

	if (cpu_has_vce)
		/* Special exception: R4[04]00 uses also the divec space. */
		set_handler(0x180, &except_vec3_r4000, 0x100);
	else if (cpu_has_4kex)
		set_handler(0x180, &except_vec3_generic, 0x80);
	else
		set_handler(0x080, &except_vec3_generic, 0x80);

	local_flush_icache_range(ebase, ebase + 0x400);

	sort_extable(__start___dbe_table, __stop___dbe_table);

	cu2_notifier(default_cu2_call, 0x80000000);	/* Run last */
}

static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
			    void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		configure_status();
		configure_hwrena();
		configure_exception_vector();

		/* Restore register with CPU number for TLB handlers */
		TLBMISS_HANDLER_RESTORE();

		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block trap_pm_notifier_block = {
	.notifier_call = trap_pm_notifier,
};

static int __init trap_pm_init(void)
{
	return cpu_pm_register_notifier(&trap_pm_notifier_block);
}
arch_initcall(trap_pm_init);
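/*
 * Rough CPU PM flow served by the notifier above (illustrative only):
 *
 *	cpu_pm_enter();		CPU_PM_ENTER - CP0 state may be lost in
 *				the low power state that follows
 *	cpu_pm_exit();		CPU_PM_EXIT - trap_pm_notifier() reprograms
 *				Status, HWREna and the exception vector base
 */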