1 /* 2 * This file is subject to the terms and conditions of the GNU General Public 3 * License. See the file "COPYING" in the main directory of this archive 4 * for more details. 5 * 6 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle 7 * Copyright (C) 1995, 1996 Paul M. Antoine 8 * Copyright (C) 1998 Ulf Carlsson 9 * Copyright (C) 1999 Silicon Graphics, Inc. 10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com 11 * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki 12 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved. 13 * Copyright (C) 2014, Imagination Technologies Ltd. 14 */ 15 #include <linux/bitops.h> 16 #include <linux/bug.h> 17 #include <linux/compiler.h> 18 #include <linux/context_tracking.h> 19 #include <linux/cpu_pm.h> 20 #include <linux/kexec.h> 21 #include <linux/init.h> 22 #include <linux/kernel.h> 23 #include <linux/module.h> 24 #include <linux/mm.h> 25 #include <linux/sched.h> 26 #include <linux/smp.h> 27 #include <linux/spinlock.h> 28 #include <linux/kallsyms.h> 29 #include <linux/bootmem.h> 30 #include <linux/interrupt.h> 31 #include <linux/ptrace.h> 32 #include <linux/kgdb.h> 33 #include <linux/kdebug.h> 34 #include <linux/kprobes.h> 35 #include <linux/notifier.h> 36 #include <linux/kdb.h> 37 #include <linux/irq.h> 38 #include <linux/perf_event.h> 39 40 #include <asm/bootinfo.h> 41 #include <asm/branch.h> 42 #include <asm/break.h> 43 #include <asm/cop2.h> 44 #include <asm/cpu.h> 45 #include <asm/cpu-type.h> 46 #include <asm/dsp.h> 47 #include <asm/fpu.h> 48 #include <asm/fpu_emulator.h> 49 #include <asm/idle.h> 50 #include <asm/mips-r2-to-r6-emul.h> 51 #include <asm/mipsregs.h> 52 #include <asm/mipsmtregs.h> 53 #include <asm/module.h> 54 #include <asm/msa.h> 55 #include <asm/pgtable.h> 56 #include <asm/ptrace.h> 57 #include <asm/sections.h> 58 #include <asm/tlbdebug.h> 59 #include <asm/traps.h> 60 #include <asm/uaccess.h> 61 #include <asm/watch.h> 62 #include <asm/mmu_context.h> 63 #include <asm/types.h> 64 #include <asm/stacktrace.h> 65 #include <asm/uasm.h> 66 67 extern void check_wait(void); 68 extern asmlinkage void rollback_handle_int(void); 69 extern asmlinkage void handle_int(void); 70 extern u32 handle_tlbl[]; 71 extern u32 handle_tlbs[]; 72 extern u32 handle_tlbm[]; 73 extern asmlinkage void handle_adel(void); 74 extern asmlinkage void handle_ades(void); 75 extern asmlinkage void handle_ibe(void); 76 extern asmlinkage void handle_dbe(void); 77 extern asmlinkage void handle_sys(void); 78 extern asmlinkage void handle_bp(void); 79 extern asmlinkage void handle_ri(void); 80 extern asmlinkage void handle_ri_rdhwr_vivt(void); 81 extern asmlinkage void handle_ri_rdhwr(void); 82 extern asmlinkage void handle_cpu(void); 83 extern asmlinkage void handle_ov(void); 84 extern asmlinkage void handle_tr(void); 85 extern asmlinkage void handle_msa_fpe(void); 86 extern asmlinkage void handle_fpe(void); 87 extern asmlinkage void handle_ftlb(void); 88 extern asmlinkage void handle_msa(void); 89 extern asmlinkage void handle_mdmx(void); 90 extern asmlinkage void handle_watch(void); 91 extern asmlinkage void handle_mt(void); 92 extern asmlinkage void handle_dsp(void); 93 extern asmlinkage void handle_mcheck(void); 94 extern asmlinkage void handle_reserved(void); 95 extern void tlb_do_page_fault_0(void); 96 97 void (*board_be_init)(void); 98 int (*board_be_handler)(struct pt_regs *regs, int is_fixup); 99 void (*board_nmi_handler_setup)(void); 100 void (*board_ejtag_handler_setup)(void); 101 void (*board_bind_eic_interrupt)(int irq, int regset); 102 void (*board_ebase_setup)(void); 103 void(*board_cache_error_setup)(void); 104 105 static void show_raw_backtrace(unsigned long reg29) 106 { 107 unsigned long *sp = (unsigned long *)(reg29 & ~3); 108 unsigned long addr; 109 110 printk("Call Trace:"); 111 #ifdef CONFIG_KALLSYMS 112 printk("\n"); 113 #endif 114 while (!kstack_end(sp)) { 115 unsigned long __user *p = 116 (unsigned long __user *)(unsigned long)sp++; 117 if (__get_user(addr, p)) { 118 printk(" (Bad stack address)"); 119 break; 120 } 121 if (__kernel_text_address(addr)) 122 print_ip_sym(addr); 123 } 124 printk("\n"); 125 } 126 127 #ifdef CONFIG_KALLSYMS 128 int raw_show_trace; 129 static int __init set_raw_show_trace(char *str) 130 { 131 raw_show_trace = 1; 132 return 1; 133 } 134 __setup("raw_show_trace", set_raw_show_trace); 135 #endif 136 137 static void show_backtrace(struct task_struct *task, const struct pt_regs *regs) 138 { 139 unsigned long sp = regs->regs[29]; 140 unsigned long ra = regs->regs[31]; 141 unsigned long pc = regs->cp0_epc; 142 143 if (!task) 144 task = current; 145 146 if (raw_show_trace || !__kernel_text_address(pc)) { 147 show_raw_backtrace(sp); 148 return; 149 } 150 printk("Call Trace:\n"); 151 do { 152 print_ip_sym(pc); 153 pc = unwind_stack(task, &sp, pc, &ra); 154 } while (pc); 155 printk("\n"); 156 } 157 158 /* 159 * This routine abuses get_user()/put_user() to reference pointers 160 * with at least a bit of error checking ... 161 */ 162 static void show_stacktrace(struct task_struct *task, 163 const struct pt_regs *regs) 164 { 165 const int field = 2 * sizeof(unsigned long); 166 long stackdata; 167 int i; 168 unsigned long __user *sp = (unsigned long __user *)regs->regs[29]; 169 170 printk("Stack :"); 171 i = 0; 172 while ((unsigned long) sp & (PAGE_SIZE - 1)) { 173 if (i && ((i % (64 / field)) == 0)) 174 printk("\n "); 175 if (i > 39) { 176 printk(" ..."); 177 break; 178 } 179 180 if (__get_user(stackdata, sp++)) { 181 printk(" (Bad stack address)"); 182 break; 183 } 184 185 printk(" %0*lx", field, stackdata); 186 i++; 187 } 188 printk("\n"); 189 show_backtrace(task, regs); 190 } 191 192 void show_stack(struct task_struct *task, unsigned long *sp) 193 { 194 struct pt_regs regs; 195 if (sp) { 196 regs.regs[29] = (unsigned long)sp; 197 regs.regs[31] = 0; 198 regs.cp0_epc = 0; 199 } else { 200 if (task && task != current) { 201 regs.regs[29] = task->thread.reg29; 202 regs.regs[31] = 0; 203 regs.cp0_epc = task->thread.reg31; 204 #ifdef CONFIG_KGDB_KDB 205 } else if (atomic_read(&kgdb_active) != -1 && 206 kdb_current_regs) { 207 memcpy(®s, kdb_current_regs, sizeof(regs)); 208 #endif /* CONFIG_KGDB_KDB */ 209 } else { 210 prepare_frametrace(®s); 211 } 212 } 213 show_stacktrace(task, ®s); 214 } 215 216 static void show_code(unsigned int __user *pc) 217 { 218 long i; 219 unsigned short __user *pc16 = NULL; 220 221 printk("\nCode:"); 222 223 if ((unsigned long)pc & 1) 224 pc16 = (unsigned short __user *)((unsigned long)pc & ~1); 225 for(i = -3 ; i < 6 ; i++) { 226 unsigned int insn; 227 if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) { 228 printk(" (Bad address in epc)\n"); 229 break; 230 } 231 printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>')); 232 } 233 } 234 235 static void __show_regs(const struct pt_regs *regs) 236 { 237 const int field = 2 * sizeof(unsigned long); 238 unsigned int cause = regs->cp0_cause; 239 int i; 240 241 show_regs_print_info(KERN_DEFAULT); 242 243 /* 244 * Saved main processor registers 245 */ 246 for (i = 0; i < 32; ) { 247 if ((i % 4) == 0) 248 printk("$%2d :", i); 249 if (i == 0) 250 printk(" %0*lx", field, 0UL); 251 else if (i == 26 || i == 27) 252 printk(" %*s", field, ""); 253 else 254 printk(" %0*lx", field, regs->regs[i]); 255 256 i++; 257 if ((i % 4) == 0) 258 printk("\n"); 259 } 260 261 #ifdef CONFIG_CPU_HAS_SMARTMIPS 262 printk("Acx : %0*lx\n", field, regs->acx); 263 #endif 264 printk("Hi : %0*lx\n", field, regs->hi); 265 printk("Lo : %0*lx\n", field, regs->lo); 266 267 /* 268 * Saved cp0 registers 269 */ 270 printk("epc : %0*lx %pS\n", field, regs->cp0_epc, 271 (void *) regs->cp0_epc); 272 printk("ra : %0*lx %pS\n", field, regs->regs[31], 273 (void *) regs->regs[31]); 274 275 printk("Status: %08x ", (uint32_t) regs->cp0_status); 276 277 if (cpu_has_3kex) { 278 if (regs->cp0_status & ST0_KUO) 279 printk("KUo "); 280 if (regs->cp0_status & ST0_IEO) 281 printk("IEo "); 282 if (regs->cp0_status & ST0_KUP) 283 printk("KUp "); 284 if (regs->cp0_status & ST0_IEP) 285 printk("IEp "); 286 if (regs->cp0_status & ST0_KUC) 287 printk("KUc "); 288 if (regs->cp0_status & ST0_IEC) 289 printk("IEc "); 290 } else if (cpu_has_4kex) { 291 if (regs->cp0_status & ST0_KX) 292 printk("KX "); 293 if (regs->cp0_status & ST0_SX) 294 printk("SX "); 295 if (regs->cp0_status & ST0_UX) 296 printk("UX "); 297 switch (regs->cp0_status & ST0_KSU) { 298 case KSU_USER: 299 printk("USER "); 300 break; 301 case KSU_SUPERVISOR: 302 printk("SUPERVISOR "); 303 break; 304 case KSU_KERNEL: 305 printk("KERNEL "); 306 break; 307 default: 308 printk("BAD_MODE "); 309 break; 310 } 311 if (regs->cp0_status & ST0_ERL) 312 printk("ERL "); 313 if (regs->cp0_status & ST0_EXL) 314 printk("EXL "); 315 if (regs->cp0_status & ST0_IE) 316 printk("IE "); 317 } 318 printk("\n"); 319 320 printk("Cause : %08x\n", cause); 321 322 cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE; 323 if (1 <= cause && cause <= 5) 324 printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr); 325 326 printk("PrId : %08x (%s)\n", read_c0_prid(), 327 cpu_name_string()); 328 } 329 330 /* 331 * FIXME: really the generic show_regs should take a const pointer argument. 332 */ 333 void show_regs(struct pt_regs *regs) 334 { 335 __show_regs((struct pt_regs *)regs); 336 } 337 338 void show_registers(struct pt_regs *regs) 339 { 340 const int field = 2 * sizeof(unsigned long); 341 mm_segment_t old_fs = get_fs(); 342 343 __show_regs(regs); 344 print_modules(); 345 printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n", 346 current->comm, current->pid, current_thread_info(), current, 347 field, current_thread_info()->tp_value); 348 if (cpu_has_userlocal) { 349 unsigned long tls; 350 351 tls = read_c0_userlocal(); 352 if (tls != current_thread_info()->tp_value) 353 printk("*HwTLS: %0*lx\n", field, tls); 354 } 355 356 if (!user_mode(regs)) 357 /* Necessary for getting the correct stack content */ 358 set_fs(KERNEL_DS); 359 show_stacktrace(current, regs); 360 show_code((unsigned int __user *) regs->cp0_epc); 361 printk("\n"); 362 set_fs(old_fs); 363 } 364 365 static int regs_to_trapnr(struct pt_regs *regs) 366 { 367 return (regs->cp0_cause >> 2) & 0x1f; 368 } 369 370 static DEFINE_RAW_SPINLOCK(die_lock); 371 372 void __noreturn die(const char *str, struct pt_regs *regs) 373 { 374 static int die_counter; 375 int sig = SIGSEGV; 376 377 oops_enter(); 378 379 if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), 380 SIGSEGV) == NOTIFY_STOP) 381 sig = 0; 382 383 console_verbose(); 384 raw_spin_lock_irq(&die_lock); 385 bust_spinlocks(1); 386 387 printk("%s[#%d]:\n", str, ++die_counter); 388 show_registers(regs); 389 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); 390 raw_spin_unlock_irq(&die_lock); 391 392 oops_exit(); 393 394 if (in_interrupt()) 395 panic("Fatal exception in interrupt"); 396 397 if (panic_on_oops) { 398 printk(KERN_EMERG "Fatal exception: panic in 5 seconds"); 399 ssleep(5); 400 panic("Fatal exception"); 401 } 402 403 if (regs && kexec_should_crash(current)) 404 crash_kexec(regs); 405 406 do_exit(sig); 407 } 408 409 extern struct exception_table_entry __start___dbe_table[]; 410 extern struct exception_table_entry __stop___dbe_table[]; 411 412 __asm__( 413 " .section __dbe_table, \"a\"\n" 414 " .previous \n"); 415 416 /* Given an address, look for it in the exception tables. */ 417 static const struct exception_table_entry *search_dbe_tables(unsigned long addr) 418 { 419 const struct exception_table_entry *e; 420 421 e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr); 422 if (!e) 423 e = search_module_dbetables(addr); 424 return e; 425 } 426 427 asmlinkage void do_be(struct pt_regs *regs) 428 { 429 const int field = 2 * sizeof(unsigned long); 430 const struct exception_table_entry *fixup = NULL; 431 int data = regs->cp0_cause & 4; 432 int action = MIPS_BE_FATAL; 433 enum ctx_state prev_state; 434 435 prev_state = exception_enter(); 436 /* XXX For now. Fixme, this searches the wrong table ... */ 437 if (data && !user_mode(regs)) 438 fixup = search_dbe_tables(exception_epc(regs)); 439 440 if (fixup) 441 action = MIPS_BE_FIXUP; 442 443 if (board_be_handler) 444 action = board_be_handler(regs, fixup != NULL); 445 446 switch (action) { 447 case MIPS_BE_DISCARD: 448 goto out; 449 case MIPS_BE_FIXUP: 450 if (fixup) { 451 regs->cp0_epc = fixup->nextinsn; 452 goto out; 453 } 454 break; 455 default: 456 break; 457 } 458 459 /* 460 * Assume it would be too dangerous to continue ... 461 */ 462 printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n", 463 data ? "Data" : "Instruction", 464 field, regs->cp0_epc, field, regs->regs[31]); 465 if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs), 466 SIGBUS) == NOTIFY_STOP) 467 goto out; 468 469 die_if_kernel("Oops", regs); 470 force_sig(SIGBUS, current); 471 472 out: 473 exception_exit(prev_state); 474 } 475 476 /* 477 * ll/sc, rdhwr, sync emulation 478 */ 479 480 #define OPCODE 0xfc000000 481 #define BASE 0x03e00000 482 #define RT 0x001f0000 483 #define OFFSET 0x0000ffff 484 #define LL 0xc0000000 485 #define SC 0xe0000000 486 #define SPEC0 0x00000000 487 #define SPEC3 0x7c000000 488 #define RD 0x0000f800 489 #define FUNC 0x0000003f 490 #define SYNC 0x0000000f 491 #define RDHWR 0x0000003b 492 493 /* microMIPS definitions */ 494 #define MM_POOL32A_FUNC 0xfc00ffff 495 #define MM_RDHWR 0x00006b3c 496 #define MM_RS 0x001f0000 497 #define MM_RT 0x03e00000 498 499 /* 500 * The ll_bit is cleared by r*_switch.S 501 */ 502 503 unsigned int ll_bit; 504 struct task_struct *ll_task; 505 506 static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode) 507 { 508 unsigned long value, __user *vaddr; 509 long offset; 510 511 /* 512 * analyse the ll instruction that just caused a ri exception 513 * and put the referenced address to addr. 514 */ 515 516 /* sign extend offset */ 517 offset = opcode & OFFSET; 518 offset <<= 16; 519 offset >>= 16; 520 521 vaddr = (unsigned long __user *) 522 ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset); 523 524 if ((unsigned long)vaddr & 3) 525 return SIGBUS; 526 if (get_user(value, vaddr)) 527 return SIGSEGV; 528 529 preempt_disable(); 530 531 if (ll_task == NULL || ll_task == current) { 532 ll_bit = 1; 533 } else { 534 ll_bit = 0; 535 } 536 ll_task = current; 537 538 preempt_enable(); 539 540 regs->regs[(opcode & RT) >> 16] = value; 541 542 return 0; 543 } 544 545 static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode) 546 { 547 unsigned long __user *vaddr; 548 unsigned long reg; 549 long offset; 550 551 /* 552 * analyse the sc instruction that just caused a ri exception 553 * and put the referenced address to addr. 554 */ 555 556 /* sign extend offset */ 557 offset = opcode & OFFSET; 558 offset <<= 16; 559 offset >>= 16; 560 561 vaddr = (unsigned long __user *) 562 ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset); 563 reg = (opcode & RT) >> 16; 564 565 if ((unsigned long)vaddr & 3) 566 return SIGBUS; 567 568 preempt_disable(); 569 570 if (ll_bit == 0 || ll_task != current) { 571 regs->regs[reg] = 0; 572 preempt_enable(); 573 return 0; 574 } 575 576 preempt_enable(); 577 578 if (put_user(regs->regs[reg], vaddr)) 579 return SIGSEGV; 580 581 regs->regs[reg] = 1; 582 583 return 0; 584 } 585 586 /* 587 * ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is both 588 * opcodes are supposed to result in coprocessor unusable exceptions if 589 * executed on ll/sc-less processors. That's the theory. In practice a 590 * few processors such as NEC's VR4100 throw reserved instruction exceptions 591 * instead, so we're doing the emulation thing in both exception handlers. 592 */ 593 static int simulate_llsc(struct pt_regs *regs, unsigned int opcode) 594 { 595 if ((opcode & OPCODE) == LL) { 596 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 597 1, regs, 0); 598 return simulate_ll(regs, opcode); 599 } 600 if ((opcode & OPCODE) == SC) { 601 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 602 1, regs, 0); 603 return simulate_sc(regs, opcode); 604 } 605 606 return -1; /* Must be something else ... */ 607 } 608 609 /* 610 * Simulate trapping 'rdhwr' instructions to provide user accessible 611 * registers not implemented in hardware. 612 */ 613 static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt) 614 { 615 struct thread_info *ti = task_thread_info(current); 616 617 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 618 1, regs, 0); 619 switch (rd) { 620 case 0: /* CPU number */ 621 regs->regs[rt] = smp_processor_id(); 622 return 0; 623 case 1: /* SYNCI length */ 624 regs->regs[rt] = min(current_cpu_data.dcache.linesz, 625 current_cpu_data.icache.linesz); 626 return 0; 627 case 2: /* Read count register */ 628 regs->regs[rt] = read_c0_count(); 629 return 0; 630 case 3: /* Count register resolution */ 631 switch (current_cpu_type()) { 632 case CPU_20KC: 633 case CPU_25KF: 634 regs->regs[rt] = 1; 635 break; 636 default: 637 regs->regs[rt] = 2; 638 } 639 return 0; 640 case 29: 641 regs->regs[rt] = ti->tp_value; 642 return 0; 643 default: 644 return -1; 645 } 646 } 647 648 static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode) 649 { 650 if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) { 651 int rd = (opcode & RD) >> 11; 652 int rt = (opcode & RT) >> 16; 653 654 simulate_rdhwr(regs, rd, rt); 655 return 0; 656 } 657 658 /* Not ours. */ 659 return -1; 660 } 661 662 static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode) 663 { 664 if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) { 665 int rd = (opcode & MM_RS) >> 16; 666 int rt = (opcode & MM_RT) >> 21; 667 simulate_rdhwr(regs, rd, rt); 668 return 0; 669 } 670 671 /* Not ours. */ 672 return -1; 673 } 674 675 static int simulate_sync(struct pt_regs *regs, unsigned int opcode) 676 { 677 if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) { 678 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 679 1, regs, 0); 680 return 0; 681 } 682 683 return -1; /* Must be something else ... */ 684 } 685 686 asmlinkage void do_ov(struct pt_regs *regs) 687 { 688 enum ctx_state prev_state; 689 siginfo_t info; 690 691 prev_state = exception_enter(); 692 die_if_kernel("Integer overflow", regs); 693 694 info.si_code = FPE_INTOVF; 695 info.si_signo = SIGFPE; 696 info.si_errno = 0; 697 info.si_addr = (void __user *) regs->cp0_epc; 698 force_sig_info(SIGFPE, &info, current); 699 exception_exit(prev_state); 700 } 701 702 int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31) 703 { 704 struct siginfo si = { 0 }; 705 706 switch (sig) { 707 case 0: 708 return 0; 709 710 case SIGFPE: 711 si.si_addr = fault_addr; 712 si.si_signo = sig; 713 /* 714 * Inexact can happen together with Overflow or Underflow. 715 * Respect the mask to deliver the correct exception. 716 */ 717 fcr31 &= (fcr31 & FPU_CSR_ALL_E) << 718 (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E)); 719 if (fcr31 & FPU_CSR_INV_X) 720 si.si_code = FPE_FLTINV; 721 else if (fcr31 & FPU_CSR_DIV_X) 722 si.si_code = FPE_FLTDIV; 723 else if (fcr31 & FPU_CSR_OVF_X) 724 si.si_code = FPE_FLTOVF; 725 else if (fcr31 & FPU_CSR_UDF_X) 726 si.si_code = FPE_FLTUND; 727 else if (fcr31 & FPU_CSR_INE_X) 728 si.si_code = FPE_FLTRES; 729 else 730 si.si_code = __SI_FAULT; 731 force_sig_info(sig, &si, current); 732 return 1; 733 734 case SIGBUS: 735 si.si_addr = fault_addr; 736 si.si_signo = sig; 737 si.si_code = BUS_ADRERR; 738 force_sig_info(sig, &si, current); 739 return 1; 740 741 case SIGSEGV: 742 si.si_addr = fault_addr; 743 si.si_signo = sig; 744 down_read(¤t->mm->mmap_sem); 745 if (find_vma(current->mm, (unsigned long)fault_addr)) 746 si.si_code = SEGV_ACCERR; 747 else 748 si.si_code = SEGV_MAPERR; 749 up_read(¤t->mm->mmap_sem); 750 force_sig_info(sig, &si, current); 751 return 1; 752 753 default: 754 force_sig(sig, current); 755 return 1; 756 } 757 } 758 759 static int simulate_fp(struct pt_regs *regs, unsigned int opcode, 760 unsigned long old_epc, unsigned long old_ra) 761 { 762 union mips_instruction inst = { .word = opcode }; 763 void __user *fault_addr; 764 unsigned long fcr31; 765 int sig; 766 767 /* If it's obviously not an FP instruction, skip it */ 768 switch (inst.i_format.opcode) { 769 case cop1_op: 770 case cop1x_op: 771 case lwc1_op: 772 case ldc1_op: 773 case swc1_op: 774 case sdc1_op: 775 break; 776 777 default: 778 return -1; 779 } 780 781 /* 782 * do_ri skipped over the instruction via compute_return_epc, undo 783 * that for the FPU emulator. 784 */ 785 regs->cp0_epc = old_epc; 786 regs->regs[31] = old_ra; 787 788 /* Save the FP context to struct thread_struct */ 789 lose_fpu(1); 790 791 /* Run the emulator */ 792 sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, 793 &fault_addr); 794 fcr31 = current->thread.fpu.fcr31; 795 796 /* 797 * We can't allow the emulated instruction to leave any of 798 * the cause bits set in $fcr31. 799 */ 800 current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; 801 802 /* Restore the hardware register state */ 803 own_fpu(1); 804 805 /* Send a signal if required. */ 806 process_fpemu_return(sig, fault_addr, fcr31); 807 808 return 0; 809 } 810 811 /* 812 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX 813 */ 814 asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) 815 { 816 enum ctx_state prev_state; 817 void __user *fault_addr; 818 int sig; 819 820 prev_state = exception_enter(); 821 if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), 822 SIGFPE) == NOTIFY_STOP) 823 goto out; 824 825 /* Clear FCSR.Cause before enabling interrupts */ 826 write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X); 827 local_irq_enable(); 828 829 die_if_kernel("FP exception in kernel code", regs); 830 831 if (fcr31 & FPU_CSR_UNI_X) { 832 /* 833 * Unimplemented operation exception. If we've got the full 834 * software emulator on-board, let's use it... 835 * 836 * Force FPU to dump state into task/thread context. We're 837 * moving a lot of data here for what is probably a single 838 * instruction, but the alternative is to pre-decode the FP 839 * register operands before invoking the emulator, which seems 840 * a bit extreme for what should be an infrequent event. 841 */ 842 /* Ensure 'resume' not overwrite saved fp context again. */ 843 lose_fpu(1); 844 845 /* Run the emulator */ 846 sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, 847 &fault_addr); 848 fcr31 = current->thread.fpu.fcr31; 849 850 /* 851 * We can't allow the emulated instruction to leave any of 852 * the cause bits set in $fcr31. 853 */ 854 current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; 855 856 /* Restore the hardware register state */ 857 own_fpu(1); /* Using the FPU again. */ 858 } else { 859 sig = SIGFPE; 860 fault_addr = (void __user *) regs->cp0_epc; 861 } 862 863 /* Send a signal if required. */ 864 process_fpemu_return(sig, fault_addr, fcr31); 865 866 out: 867 exception_exit(prev_state); 868 } 869 870 void do_trap_or_bp(struct pt_regs *regs, unsigned int code, 871 const char *str) 872 { 873 siginfo_t info; 874 char b[40]; 875 876 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP 877 if (kgdb_ll_trap(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) 878 return; 879 #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ 880 881 if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), 882 SIGTRAP) == NOTIFY_STOP) 883 return; 884 885 /* 886 * A short test says that IRIX 5.3 sends SIGTRAP for all trap 887 * insns, even for trap and break codes that indicate arithmetic 888 * failures. Weird ... 889 * But should we continue the brokenness??? --macro 890 */ 891 switch (code) { 892 case BRK_OVERFLOW: 893 case BRK_DIVZERO: 894 scnprintf(b, sizeof(b), "%s instruction in kernel code", str); 895 die_if_kernel(b, regs); 896 if (code == BRK_DIVZERO) 897 info.si_code = FPE_INTDIV; 898 else 899 info.si_code = FPE_INTOVF; 900 info.si_signo = SIGFPE; 901 info.si_errno = 0; 902 info.si_addr = (void __user *) regs->cp0_epc; 903 force_sig_info(SIGFPE, &info, current); 904 break; 905 case BRK_BUG: 906 die_if_kernel("Kernel bug detected", regs); 907 force_sig(SIGTRAP, current); 908 break; 909 case BRK_MEMU: 910 /* 911 * This breakpoint code is used by the FPU emulator to retake 912 * control of the CPU after executing the instruction from the 913 * delay slot of an emulated branch. 914 * 915 * Terminate if exception was recognized as a delay slot return 916 * otherwise handle as normal. 917 */ 918 if (do_dsemulret(regs)) 919 return; 920 921 die_if_kernel("Math emu break/trap", regs); 922 force_sig(SIGTRAP, current); 923 break; 924 default: 925 scnprintf(b, sizeof(b), "%s instruction in kernel code", str); 926 die_if_kernel(b, regs); 927 force_sig(SIGTRAP, current); 928 } 929 } 930 931 asmlinkage void do_bp(struct pt_regs *regs) 932 { 933 unsigned long epc = msk_isa16_mode(exception_epc(regs)); 934 unsigned int opcode, bcode; 935 enum ctx_state prev_state; 936 mm_segment_t seg; 937 938 seg = get_fs(); 939 if (!user_mode(regs)) 940 set_fs(KERNEL_DS); 941 942 prev_state = exception_enter(); 943 if (get_isa16_mode(regs->cp0_epc)) { 944 u16 instr[2]; 945 946 if (__get_user(instr[0], (u16 __user *)epc)) 947 goto out_sigsegv; 948 949 if (!cpu_has_mmips) { 950 /* MIPS16e mode */ 951 bcode = (instr[0] >> 5) & 0x3f; 952 } else if (mm_insn_16bit(instr[0])) { 953 /* 16-bit microMIPS BREAK */ 954 bcode = instr[0] & 0xf; 955 } else { 956 /* 32-bit microMIPS BREAK */ 957 if (__get_user(instr[1], (u16 __user *)(epc + 2))) 958 goto out_sigsegv; 959 opcode = (instr[0] << 16) | instr[1]; 960 bcode = (opcode >> 6) & ((1 << 20) - 1); 961 } 962 } else { 963 if (__get_user(opcode, (unsigned int __user *)epc)) 964 goto out_sigsegv; 965 bcode = (opcode >> 6) & ((1 << 20) - 1); 966 } 967 968 /* 969 * There is the ancient bug in the MIPS assemblers that the break 970 * code starts left to bit 16 instead to bit 6 in the opcode. 971 * Gas is bug-compatible, but not always, grrr... 972 * We handle both cases with a simple heuristics. --macro 973 */ 974 if (bcode >= (1 << 10)) 975 bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10); 976 977 /* 978 * notify the kprobe handlers, if instruction is likely to 979 * pertain to them. 980 */ 981 switch (bcode) { 982 case BRK_KPROBE_BP: 983 if (notify_die(DIE_BREAK, "debug", regs, bcode, 984 regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) 985 goto out; 986 else 987 break; 988 case BRK_KPROBE_SSTEPBP: 989 if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, 990 regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) 991 goto out; 992 else 993 break; 994 default: 995 break; 996 } 997 998 do_trap_or_bp(regs, bcode, "Break"); 999 1000 out: 1001 set_fs(seg); 1002 exception_exit(prev_state); 1003 return; 1004 1005 out_sigsegv: 1006 force_sig(SIGSEGV, current); 1007 goto out; 1008 } 1009 1010 asmlinkage void do_tr(struct pt_regs *regs) 1011 { 1012 u32 opcode, tcode = 0; 1013 enum ctx_state prev_state; 1014 u16 instr[2]; 1015 mm_segment_t seg; 1016 unsigned long epc = msk_isa16_mode(exception_epc(regs)); 1017 1018 seg = get_fs(); 1019 if (!user_mode(regs)) 1020 set_fs(get_ds()); 1021 1022 prev_state = exception_enter(); 1023 if (get_isa16_mode(regs->cp0_epc)) { 1024 if (__get_user(instr[0], (u16 __user *)(epc + 0)) || 1025 __get_user(instr[1], (u16 __user *)(epc + 2))) 1026 goto out_sigsegv; 1027 opcode = (instr[0] << 16) | instr[1]; 1028 /* Immediate versions don't provide a code. */ 1029 if (!(opcode & OPCODE)) 1030 tcode = (opcode >> 12) & ((1 << 4) - 1); 1031 } else { 1032 if (__get_user(opcode, (u32 __user *)epc)) 1033 goto out_sigsegv; 1034 /* Immediate versions don't provide a code. */ 1035 if (!(opcode & OPCODE)) 1036 tcode = (opcode >> 6) & ((1 << 10) - 1); 1037 } 1038 1039 do_trap_or_bp(regs, tcode, "Trap"); 1040 1041 out: 1042 set_fs(seg); 1043 exception_exit(prev_state); 1044 return; 1045 1046 out_sigsegv: 1047 force_sig(SIGSEGV, current); 1048 goto out; 1049 } 1050 1051 asmlinkage void do_ri(struct pt_regs *regs) 1052 { 1053 unsigned int __user *epc = (unsigned int __user *)exception_epc(regs); 1054 unsigned long old_epc = regs->cp0_epc; 1055 unsigned long old31 = regs->regs[31]; 1056 enum ctx_state prev_state; 1057 unsigned int opcode = 0; 1058 int status = -1; 1059 1060 /* 1061 * Avoid any kernel code. Just emulate the R2 instruction 1062 * as quickly as possible. 1063 */ 1064 if (mipsr2_emulation && cpu_has_mips_r6 && 1065 likely(user_mode(regs)) && 1066 likely(get_user(opcode, epc) >= 0)) { 1067 unsigned long fcr31 = 0; 1068 1069 status = mipsr2_decoder(regs, opcode, &fcr31); 1070 switch (status) { 1071 case 0: 1072 case SIGEMT: 1073 task_thread_info(current)->r2_emul_return = 1; 1074 return; 1075 case SIGILL: 1076 goto no_r2_instr; 1077 default: 1078 process_fpemu_return(status, 1079 ¤t->thread.cp0_baduaddr, 1080 fcr31); 1081 task_thread_info(current)->r2_emul_return = 1; 1082 return; 1083 } 1084 } 1085 1086 no_r2_instr: 1087 1088 prev_state = exception_enter(); 1089 1090 if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), 1091 SIGILL) == NOTIFY_STOP) 1092 goto out; 1093 1094 die_if_kernel("Reserved instruction in kernel code", regs); 1095 1096 if (unlikely(compute_return_epc(regs) < 0)) 1097 goto out; 1098 1099 if (get_isa16_mode(regs->cp0_epc)) { 1100 unsigned short mmop[2] = { 0 }; 1101 1102 if (unlikely(get_user(mmop[0], epc) < 0)) 1103 status = SIGSEGV; 1104 if (unlikely(get_user(mmop[1], epc) < 0)) 1105 status = SIGSEGV; 1106 opcode = (mmop[0] << 16) | mmop[1]; 1107 1108 if (status < 0) 1109 status = simulate_rdhwr_mm(regs, opcode); 1110 } else { 1111 if (unlikely(get_user(opcode, epc) < 0)) 1112 status = SIGSEGV; 1113 1114 if (!cpu_has_llsc && status < 0) 1115 status = simulate_llsc(regs, opcode); 1116 1117 if (status < 0) 1118 status = simulate_rdhwr_normal(regs, opcode); 1119 1120 if (status < 0) 1121 status = simulate_sync(regs, opcode); 1122 1123 if (status < 0) 1124 status = simulate_fp(regs, opcode, old_epc, old31); 1125 } 1126 1127 if (status < 0) 1128 status = SIGILL; 1129 1130 if (unlikely(status > 0)) { 1131 regs->cp0_epc = old_epc; /* Undo skip-over. */ 1132 regs->regs[31] = old31; 1133 force_sig(status, current); 1134 } 1135 1136 out: 1137 exception_exit(prev_state); 1138 } 1139 1140 /* 1141 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've 1142 * emulated more than some threshold number of instructions, force migration to 1143 * a "CPU" that has FP support. 1144 */ 1145 static void mt_ase_fp_affinity(void) 1146 { 1147 #ifdef CONFIG_MIPS_MT_FPAFF 1148 if (mt_fpemul_threshold > 0 && 1149 ((current->thread.emulated_fp++ > mt_fpemul_threshold))) { 1150 /* 1151 * If there's no FPU present, or if the application has already 1152 * restricted the allowed set to exclude any CPUs with FPUs, 1153 * we'll skip the procedure. 1154 */ 1155 if (cpumask_intersects(¤t->cpus_allowed, &mt_fpu_cpumask)) { 1156 cpumask_t tmask; 1157 1158 current->thread.user_cpus_allowed 1159 = current->cpus_allowed; 1160 cpumask_and(&tmask, ¤t->cpus_allowed, 1161 &mt_fpu_cpumask); 1162 set_cpus_allowed_ptr(current, &tmask); 1163 set_thread_flag(TIF_FPUBOUND); 1164 } 1165 } 1166 #endif /* CONFIG_MIPS_MT_FPAFF */ 1167 } 1168 1169 /* 1170 * No lock; only written during early bootup by CPU 0. 1171 */ 1172 static RAW_NOTIFIER_HEAD(cu2_chain); 1173 1174 int __ref register_cu2_notifier(struct notifier_block *nb) 1175 { 1176 return raw_notifier_chain_register(&cu2_chain, nb); 1177 } 1178 1179 int cu2_notifier_call_chain(unsigned long val, void *v) 1180 { 1181 return raw_notifier_call_chain(&cu2_chain, val, v); 1182 } 1183 1184 static int default_cu2_call(struct notifier_block *nfb, unsigned long action, 1185 void *data) 1186 { 1187 struct pt_regs *regs = data; 1188 1189 die_if_kernel("COP2: Unhandled kernel unaligned access or invalid " 1190 "instruction", regs); 1191 force_sig(SIGILL, current); 1192 1193 return NOTIFY_OK; 1194 } 1195 1196 static int wait_on_fp_mode_switch(atomic_t *p) 1197 { 1198 /* 1199 * The FP mode for this task is currently being switched. That may 1200 * involve modifications to the format of this tasks FP context which 1201 * make it unsafe to proceed with execution for the moment. Instead, 1202 * schedule some other task. 1203 */ 1204 schedule(); 1205 return 0; 1206 } 1207 1208 static int enable_restore_fp_context(int msa) 1209 { 1210 int err, was_fpu_owner, prior_msa; 1211 1212 /* 1213 * If an FP mode switch is currently underway, wait for it to 1214 * complete before proceeding. 1215 */ 1216 wait_on_atomic_t(¤t->mm->context.fp_mode_switching, 1217 wait_on_fp_mode_switch, TASK_KILLABLE); 1218 1219 if (!used_math()) { 1220 /* First time FP context user. */ 1221 preempt_disable(); 1222 err = init_fpu(); 1223 if (msa && !err) { 1224 enable_msa(); 1225 _init_msa_upper(); 1226 set_thread_flag(TIF_USEDMSA); 1227 set_thread_flag(TIF_MSA_CTX_LIVE); 1228 } 1229 preempt_enable(); 1230 if (!err) 1231 set_used_math(); 1232 return err; 1233 } 1234 1235 /* 1236 * This task has formerly used the FP context. 1237 * 1238 * If this thread has no live MSA vector context then we can simply 1239 * restore the scalar FP context. If it has live MSA vector context 1240 * (that is, it has or may have used MSA since last performing a 1241 * function call) then we'll need to restore the vector context. This 1242 * applies even if we're currently only executing a scalar FP 1243 * instruction. This is because if we were to later execute an MSA 1244 * instruction then we'd either have to: 1245 * 1246 * - Restore the vector context & clobber any registers modified by 1247 * scalar FP instructions between now & then. 1248 * 1249 * or 1250 * 1251 * - Not restore the vector context & lose the most significant bits 1252 * of all vector registers. 1253 * 1254 * Neither of those options is acceptable. We cannot restore the least 1255 * significant bits of the registers now & only restore the most 1256 * significant bits later because the most significant bits of any 1257 * vector registers whose aliased FP register is modified now will have 1258 * been zeroed. We'd have no way to know that when restoring the vector 1259 * context & thus may load an outdated value for the most significant 1260 * bits of a vector register. 1261 */ 1262 if (!msa && !thread_msa_context_live()) 1263 return own_fpu(1); 1264 1265 /* 1266 * This task is using or has previously used MSA. Thus we require 1267 * that Status.FR == 1. 1268 */ 1269 preempt_disable(); 1270 was_fpu_owner = is_fpu_owner(); 1271 err = own_fpu_inatomic(0); 1272 if (err) 1273 goto out; 1274 1275 enable_msa(); 1276 write_msa_csr(current->thread.fpu.msacsr); 1277 set_thread_flag(TIF_USEDMSA); 1278 1279 /* 1280 * If this is the first time that the task is using MSA and it has 1281 * previously used scalar FP in this time slice then we already nave 1282 * FP context which we shouldn't clobber. We do however need to clear 1283 * the upper 64b of each vector register so that this task has no 1284 * opportunity to see data left behind by another. 1285 */ 1286 prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE); 1287 if (!prior_msa && was_fpu_owner) { 1288 _init_msa_upper(); 1289 1290 goto out; 1291 } 1292 1293 if (!prior_msa) { 1294 /* 1295 * Restore the least significant 64b of each vector register 1296 * from the existing scalar FP context. 1297 */ 1298 _restore_fp(current); 1299 1300 /* 1301 * The task has not formerly used MSA, so clear the upper 64b 1302 * of each vector register such that it cannot see data left 1303 * behind by another task. 1304 */ 1305 _init_msa_upper(); 1306 } else { 1307 /* We need to restore the vector context. */ 1308 restore_msa(current); 1309 1310 /* Restore the scalar FP control & status register */ 1311 if (!was_fpu_owner) 1312 write_32bit_cp1_register(CP1_STATUS, 1313 current->thread.fpu.fcr31); 1314 } 1315 1316 out: 1317 preempt_enable(); 1318 1319 return 0; 1320 } 1321 1322 asmlinkage void do_cpu(struct pt_regs *regs) 1323 { 1324 enum ctx_state prev_state; 1325 unsigned int __user *epc; 1326 unsigned long old_epc, old31; 1327 void __user *fault_addr; 1328 unsigned int opcode; 1329 unsigned long fcr31; 1330 unsigned int cpid; 1331 int status, err; 1332 unsigned long __maybe_unused flags; 1333 int sig; 1334 1335 prev_state = exception_enter(); 1336 cpid = (regs->cp0_cause >> CAUSEB_CE) & 3; 1337 1338 if (cpid != 2) 1339 die_if_kernel("do_cpu invoked from kernel context!", regs); 1340 1341 switch (cpid) { 1342 case 0: 1343 epc = (unsigned int __user *)exception_epc(regs); 1344 old_epc = regs->cp0_epc; 1345 old31 = regs->regs[31]; 1346 opcode = 0; 1347 status = -1; 1348 1349 if (unlikely(compute_return_epc(regs) < 0)) 1350 break; 1351 1352 if (get_isa16_mode(regs->cp0_epc)) { 1353 unsigned short mmop[2] = { 0 }; 1354 1355 if (unlikely(get_user(mmop[0], epc) < 0)) 1356 status = SIGSEGV; 1357 if (unlikely(get_user(mmop[1], epc) < 0)) 1358 status = SIGSEGV; 1359 opcode = (mmop[0] << 16) | mmop[1]; 1360 1361 if (status < 0) 1362 status = simulate_rdhwr_mm(regs, opcode); 1363 } else { 1364 if (unlikely(get_user(opcode, epc) < 0)) 1365 status = SIGSEGV; 1366 1367 if (!cpu_has_llsc && status < 0) 1368 status = simulate_llsc(regs, opcode); 1369 1370 if (status < 0) 1371 status = simulate_rdhwr_normal(regs, opcode); 1372 } 1373 1374 if (status < 0) 1375 status = SIGILL; 1376 1377 if (unlikely(status > 0)) { 1378 regs->cp0_epc = old_epc; /* Undo skip-over. */ 1379 regs->regs[31] = old31; 1380 force_sig(status, current); 1381 } 1382 1383 break; 1384 1385 case 3: 1386 /* 1387 * The COP3 opcode space and consequently the CP0.Status.CU3 1388 * bit and the CP0.Cause.CE=3 encoding have been removed as 1389 * of the MIPS III ISA. From the MIPS IV and MIPS32r2 ISAs 1390 * up the space has been reused for COP1X instructions, that 1391 * are enabled by the CP0.Status.CU1 bit and consequently 1392 * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable 1393 * exceptions. Some FPU-less processors that implement one 1394 * of these ISAs however use this code erroneously for COP1X 1395 * instructions. Therefore we redirect this trap to the FP 1396 * emulator too. 1397 */ 1398 if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) { 1399 force_sig(SIGILL, current); 1400 break; 1401 } 1402 /* Fall through. */ 1403 1404 case 1: 1405 err = enable_restore_fp_context(0); 1406 1407 if (raw_cpu_has_fpu && !err) 1408 break; 1409 1410 sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 0, 1411 &fault_addr); 1412 fcr31 = current->thread.fpu.fcr31; 1413 1414 /* 1415 * We can't allow the emulated instruction to leave 1416 * any of the cause bits set in $fcr31. 1417 */ 1418 current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; 1419 1420 /* Send a signal if required. */ 1421 if (!process_fpemu_return(sig, fault_addr, fcr31) && !err) 1422 mt_ase_fp_affinity(); 1423 1424 break; 1425 1426 case 2: 1427 raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs); 1428 break; 1429 } 1430 1431 exception_exit(prev_state); 1432 } 1433 1434 asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr) 1435 { 1436 enum ctx_state prev_state; 1437 1438 prev_state = exception_enter(); 1439 if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0, 1440 regs_to_trapnr(regs), SIGFPE) == NOTIFY_STOP) 1441 goto out; 1442 1443 /* Clear MSACSR.Cause before enabling interrupts */ 1444 write_msa_csr(msacsr & ~MSA_CSR_CAUSEF); 1445 local_irq_enable(); 1446 1447 die_if_kernel("do_msa_fpe invoked from kernel context!", regs); 1448 force_sig(SIGFPE, current); 1449 out: 1450 exception_exit(prev_state); 1451 } 1452 1453 asmlinkage void do_msa(struct pt_regs *regs) 1454 { 1455 enum ctx_state prev_state; 1456 int err; 1457 1458 prev_state = exception_enter(); 1459 1460 if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) { 1461 force_sig(SIGILL, current); 1462 goto out; 1463 } 1464 1465 die_if_kernel("do_msa invoked from kernel context!", regs); 1466 1467 err = enable_restore_fp_context(1); 1468 if (err) 1469 force_sig(SIGILL, current); 1470 out: 1471 exception_exit(prev_state); 1472 } 1473 1474 asmlinkage void do_mdmx(struct pt_regs *regs) 1475 { 1476 enum ctx_state prev_state; 1477 1478 prev_state = exception_enter(); 1479 force_sig(SIGILL, current); 1480 exception_exit(prev_state); 1481 } 1482 1483 /* 1484 * Called with interrupts disabled. 1485 */ 1486 asmlinkage void do_watch(struct pt_regs *regs) 1487 { 1488 enum ctx_state prev_state; 1489 u32 cause; 1490 1491 prev_state = exception_enter(); 1492 /* 1493 * Clear WP (bit 22) bit of cause register so we don't loop 1494 * forever. 1495 */ 1496 cause = read_c0_cause(); 1497 cause &= ~(1 << 22); 1498 write_c0_cause(cause); 1499 1500 /* 1501 * If the current thread has the watch registers loaded, save 1502 * their values and send SIGTRAP. Otherwise another thread 1503 * left the registers set, clear them and continue. 1504 */ 1505 if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) { 1506 mips_read_watch_registers(); 1507 local_irq_enable(); 1508 force_sig(SIGTRAP, current); 1509 } else { 1510 mips_clear_watch_registers(); 1511 local_irq_enable(); 1512 } 1513 exception_exit(prev_state); 1514 } 1515 1516 asmlinkage void do_mcheck(struct pt_regs *regs) 1517 { 1518 const int field = 2 * sizeof(unsigned long); 1519 int multi_match = regs->cp0_status & ST0_TS; 1520 enum ctx_state prev_state; 1521 1522 prev_state = exception_enter(); 1523 show_regs(regs); 1524 1525 if (multi_match) { 1526 pr_err("Index : %0x\n", read_c0_index()); 1527 pr_err("Pagemask: %0x\n", read_c0_pagemask()); 1528 pr_err("EntryHi : %0*lx\n", field, read_c0_entryhi()); 1529 pr_err("EntryLo0: %0*lx\n", field, read_c0_entrylo0()); 1530 pr_err("EntryLo1: %0*lx\n", field, read_c0_entrylo1()); 1531 pr_err("Wired : %0x\n", read_c0_wired()); 1532 pr_err("Pagegrain: %0x\n", read_c0_pagegrain()); 1533 if (cpu_has_htw) { 1534 pr_err("PWField : %0*lx\n", field, read_c0_pwfield()); 1535 pr_err("PWSize : %0*lx\n", field, read_c0_pwsize()); 1536 pr_err("PWCtl : %0x\n", read_c0_pwctl()); 1537 } 1538 pr_err("\n"); 1539 dump_tlb_all(); 1540 } 1541 1542 show_code((unsigned int __user *) regs->cp0_epc); 1543 1544 /* 1545 * Some chips may have other causes of machine check (e.g. SB1 1546 * graduation timer) 1547 */ 1548 panic("Caught Machine Check exception - %scaused by multiple " 1549 "matching entries in the TLB.", 1550 (multi_match) ? "" : "not "); 1551 } 1552 1553 asmlinkage void do_mt(struct pt_regs *regs) 1554 { 1555 int subcode; 1556 1557 subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT) 1558 >> VPECONTROL_EXCPT_SHIFT; 1559 switch (subcode) { 1560 case 0: 1561 printk(KERN_DEBUG "Thread Underflow\n"); 1562 break; 1563 case 1: 1564 printk(KERN_DEBUG "Thread Overflow\n"); 1565 break; 1566 case 2: 1567 printk(KERN_DEBUG "Invalid YIELD Qualifier\n"); 1568 break; 1569 case 3: 1570 printk(KERN_DEBUG "Gating Storage Exception\n"); 1571 break; 1572 case 4: 1573 printk(KERN_DEBUG "YIELD Scheduler Exception\n"); 1574 break; 1575 case 5: 1576 printk(KERN_DEBUG "Gating Storage Scheduler Exception\n"); 1577 break; 1578 default: 1579 printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n", 1580 subcode); 1581 break; 1582 } 1583 die_if_kernel("MIPS MT Thread exception in kernel", regs); 1584 1585 force_sig(SIGILL, current); 1586 } 1587 1588 1589 asmlinkage void do_dsp(struct pt_regs *regs) 1590 { 1591 if (cpu_has_dsp) 1592 panic("Unexpected DSP exception"); 1593 1594 force_sig(SIGILL, current); 1595 } 1596 1597 asmlinkage void do_reserved(struct pt_regs *regs) 1598 { 1599 /* 1600 * Game over - no way to handle this if it ever occurs. Most probably 1601 * caused by a new unknown cpu type or after another deadly 1602 * hard/software error. 1603 */ 1604 show_regs(regs); 1605 panic("Caught reserved exception %ld - should not happen.", 1606 (regs->cp0_cause & 0x7f) >> 2); 1607 } 1608 1609 static int __initdata l1parity = 1; 1610 static int __init nol1parity(char *s) 1611 { 1612 l1parity = 0; 1613 return 1; 1614 } 1615 __setup("nol1par", nol1parity); 1616 static int __initdata l2parity = 1; 1617 static int __init nol2parity(char *s) 1618 { 1619 l2parity = 0; 1620 return 1; 1621 } 1622 __setup("nol2par", nol2parity); 1623 1624 /* 1625 * Some MIPS CPUs can enable/disable for cache parity detection, but do 1626 * it different ways. 1627 */ 1628 static inline void parity_protection_init(void) 1629 { 1630 switch (current_cpu_type()) { 1631 case CPU_24K: 1632 case CPU_34K: 1633 case CPU_74K: 1634 case CPU_1004K: 1635 case CPU_1074K: 1636 case CPU_INTERAPTIV: 1637 case CPU_PROAPTIV: 1638 case CPU_P5600: 1639 case CPU_QEMU_GENERIC: 1640 { 1641 #define ERRCTL_PE 0x80000000 1642 #define ERRCTL_L2P 0x00800000 1643 unsigned long errctl; 1644 unsigned int l1parity_present, l2parity_present; 1645 1646 errctl = read_c0_ecc(); 1647 errctl &= ~(ERRCTL_PE|ERRCTL_L2P); 1648 1649 /* probe L1 parity support */ 1650 write_c0_ecc(errctl | ERRCTL_PE); 1651 back_to_back_c0_hazard(); 1652 l1parity_present = (read_c0_ecc() & ERRCTL_PE); 1653 1654 /* probe L2 parity support */ 1655 write_c0_ecc(errctl|ERRCTL_L2P); 1656 back_to_back_c0_hazard(); 1657 l2parity_present = (read_c0_ecc() & ERRCTL_L2P); 1658 1659 if (l1parity_present && l2parity_present) { 1660 if (l1parity) 1661 errctl |= ERRCTL_PE; 1662 if (l1parity ^ l2parity) 1663 errctl |= ERRCTL_L2P; 1664 } else if (l1parity_present) { 1665 if (l1parity) 1666 errctl |= ERRCTL_PE; 1667 } else if (l2parity_present) { 1668 if (l2parity) 1669 errctl |= ERRCTL_L2P; 1670 } else { 1671 /* No parity available */ 1672 } 1673 1674 printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl); 1675 1676 write_c0_ecc(errctl); 1677 back_to_back_c0_hazard(); 1678 errctl = read_c0_ecc(); 1679 printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl); 1680 1681 if (l1parity_present) 1682 printk(KERN_INFO "Cache parity protection %sabled\n", 1683 (errctl & ERRCTL_PE) ? "en" : "dis"); 1684 1685 if (l2parity_present) { 1686 if (l1parity_present && l1parity) 1687 errctl ^= ERRCTL_L2P; 1688 printk(KERN_INFO "L2 cache parity protection %sabled\n", 1689 (errctl & ERRCTL_L2P) ? "en" : "dis"); 1690 } 1691 } 1692 break; 1693 1694 case CPU_5KC: 1695 case CPU_5KE: 1696 case CPU_LOONGSON1: 1697 write_c0_ecc(0x80000000); 1698 back_to_back_c0_hazard(); 1699 /* Set the PE bit (bit 31) in the c0_errctl register. */ 1700 printk(KERN_INFO "Cache parity protection %sabled\n", 1701 (read_c0_ecc() & 0x80000000) ? "en" : "dis"); 1702 break; 1703 case CPU_20KC: 1704 case CPU_25KF: 1705 /* Clear the DE bit (bit 16) in the c0_status register. */ 1706 printk(KERN_INFO "Enable cache parity protection for " 1707 "MIPS 20KC/25KF CPUs.\n"); 1708 clear_c0_status(ST0_DE); 1709 break; 1710 default: 1711 break; 1712 } 1713 } 1714 1715 asmlinkage void cache_parity_error(void) 1716 { 1717 const int field = 2 * sizeof(unsigned long); 1718 unsigned int reg_val; 1719 1720 /* For the moment, report the problem and hang. */ 1721 printk("Cache error exception:\n"); 1722 printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc()); 1723 reg_val = read_c0_cacheerr(); 1724 printk("c0_cacheerr == %08x\n", reg_val); 1725 1726 printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n", 1727 reg_val & (1<<30) ? "secondary" : "primary", 1728 reg_val & (1<<31) ? "data" : "insn"); 1729 if ((cpu_has_mips_r2_r6) && 1730 ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { 1731 pr_err("Error bits: %s%s%s%s%s%s%s%s\n", 1732 reg_val & (1<<29) ? "ED " : "", 1733 reg_val & (1<<28) ? "ET " : "", 1734 reg_val & (1<<27) ? "ES " : "", 1735 reg_val & (1<<26) ? "EE " : "", 1736 reg_val & (1<<25) ? "EB " : "", 1737 reg_val & (1<<24) ? "EI " : "", 1738 reg_val & (1<<23) ? "E1 " : "", 1739 reg_val & (1<<22) ? "E0 " : ""); 1740 } else { 1741 pr_err("Error bits: %s%s%s%s%s%s%s\n", 1742 reg_val & (1<<29) ? "ED " : "", 1743 reg_val & (1<<28) ? "ET " : "", 1744 reg_val & (1<<26) ? "EE " : "", 1745 reg_val & (1<<25) ? "EB " : "", 1746 reg_val & (1<<24) ? "EI " : "", 1747 reg_val & (1<<23) ? "E1 " : "", 1748 reg_val & (1<<22) ? "E0 " : ""); 1749 } 1750 printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1)); 1751 1752 #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) 1753 if (reg_val & (1<<22)) 1754 printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0()); 1755 1756 if (reg_val & (1<<23)) 1757 printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1()); 1758 #endif 1759 1760 panic("Can't handle the cache error!"); 1761 } 1762 1763 asmlinkage void do_ftlb(void) 1764 { 1765 const int field = 2 * sizeof(unsigned long); 1766 unsigned int reg_val; 1767 1768 /* For the moment, report the problem and hang. */ 1769 if ((cpu_has_mips_r2_r6) && 1770 ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { 1771 pr_err("FTLB error exception, cp0_ecc=0x%08x:\n", 1772 read_c0_ecc()); 1773 pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc()); 1774 reg_val = read_c0_cacheerr(); 1775 pr_err("c0_cacheerr == %08x\n", reg_val); 1776 1777 if ((reg_val & 0xc0000000) == 0xc0000000) { 1778 pr_err("Decoded c0_cacheerr: FTLB parity error\n"); 1779 } else { 1780 pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n", 1781 reg_val & (1<<30) ? "secondary" : "primary", 1782 reg_val & (1<<31) ? "data" : "insn"); 1783 } 1784 } else { 1785 pr_err("FTLB error exception\n"); 1786 } 1787 /* Just print the cacheerr bits for now */ 1788 cache_parity_error(); 1789 } 1790 1791 /* 1792 * SDBBP EJTAG debug exception handler. 1793 * We skip the instruction and return to the next instruction. 1794 */ 1795 void ejtag_exception_handler(struct pt_regs *regs) 1796 { 1797 const int field = 2 * sizeof(unsigned long); 1798 unsigned long depc, old_epc, old_ra; 1799 unsigned int debug; 1800 1801 printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n"); 1802 depc = read_c0_depc(); 1803 debug = read_c0_debug(); 1804 printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug); 1805 if (debug & 0x80000000) { 1806 /* 1807 * In branch delay slot. 1808 * We cheat a little bit here and use EPC to calculate the 1809 * debug return address (DEPC). EPC is restored after the 1810 * calculation. 1811 */ 1812 old_epc = regs->cp0_epc; 1813 old_ra = regs->regs[31]; 1814 regs->cp0_epc = depc; 1815 compute_return_epc(regs); 1816 depc = regs->cp0_epc; 1817 regs->cp0_epc = old_epc; 1818 regs->regs[31] = old_ra; 1819 } else 1820 depc += 4; 1821 write_c0_depc(depc); 1822 1823 #if 0 1824 printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n"); 1825 write_c0_debug(debug | 0x100); 1826 #endif 1827 } 1828 1829 /* 1830 * NMI exception handler. 1831 * No lock; only written during early bootup by CPU 0. 1832 */ 1833 static RAW_NOTIFIER_HEAD(nmi_chain); 1834 1835 int register_nmi_notifier(struct notifier_block *nb) 1836 { 1837 return raw_notifier_chain_register(&nmi_chain, nb); 1838 } 1839 1840 void __noreturn nmi_exception_handler(struct pt_regs *regs) 1841 { 1842 char str[100]; 1843 1844 raw_notifier_call_chain(&nmi_chain, 0, regs); 1845 bust_spinlocks(1); 1846 snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n", 1847 smp_processor_id(), regs->cp0_epc); 1848 regs->cp0_epc = read_c0_errorepc(); 1849 die(str, regs); 1850 } 1851 1852 #define VECTORSPACING 0x100 /* for EI/VI mode */ 1853 1854 unsigned long ebase; 1855 unsigned long exception_handlers[32]; 1856 unsigned long vi_handlers[64]; 1857 1858 void __init *set_except_vector(int n, void *addr) 1859 { 1860 unsigned long handler = (unsigned long) addr; 1861 unsigned long old_handler; 1862 1863 #ifdef CONFIG_CPU_MICROMIPS 1864 /* 1865 * Only the TLB handlers are cache aligned with an even 1866 * address. All other handlers are on an odd address and 1867 * require no modification. Otherwise, MIPS32 mode will 1868 * be entered when handling any TLB exceptions. That 1869 * would be bad...since we must stay in microMIPS mode. 1870 */ 1871 if (!(handler & 0x1)) 1872 handler |= 1; 1873 #endif 1874 old_handler = xchg(&exception_handlers[n], handler); 1875 1876 if (n == 0 && cpu_has_divec) { 1877 #ifdef CONFIG_CPU_MICROMIPS 1878 unsigned long jump_mask = ~((1 << 27) - 1); 1879 #else 1880 unsigned long jump_mask = ~((1 << 28) - 1); 1881 #endif 1882 u32 *buf = (u32 *)(ebase + 0x200); 1883 unsigned int k0 = 26; 1884 if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) { 1885 uasm_i_j(&buf, handler & ~jump_mask); 1886 uasm_i_nop(&buf); 1887 } else { 1888 UASM_i_LA(&buf, k0, handler); 1889 uasm_i_jr(&buf, k0); 1890 uasm_i_nop(&buf); 1891 } 1892 local_flush_icache_range(ebase + 0x200, (unsigned long)buf); 1893 } 1894 return (void *)old_handler; 1895 } 1896 1897 static void do_default_vi(void) 1898 { 1899 show_regs(get_irq_regs()); 1900 panic("Caught unexpected vectored interrupt."); 1901 } 1902 1903 static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) 1904 { 1905 unsigned long handler; 1906 unsigned long old_handler = vi_handlers[n]; 1907 int srssets = current_cpu_data.srsets; 1908 u16 *h; 1909 unsigned char *b; 1910 1911 BUG_ON(!cpu_has_veic && !cpu_has_vint); 1912 1913 if (addr == NULL) { 1914 handler = (unsigned long) do_default_vi; 1915 srs = 0; 1916 } else 1917 handler = (unsigned long) addr; 1918 vi_handlers[n] = handler; 1919 1920 b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING); 1921 1922 if (srs >= srssets) 1923 panic("Shadow register set %d not supported", srs); 1924 1925 if (cpu_has_veic) { 1926 if (board_bind_eic_interrupt) 1927 board_bind_eic_interrupt(n, srs); 1928 } else if (cpu_has_vint) { 1929 /* SRSMap is only defined if shadow sets are implemented */ 1930 if (srssets > 1) 1931 change_c0_srsmap(0xf << n*4, srs << n*4); 1932 } 1933 1934 if (srs == 0) { 1935 /* 1936 * If no shadow set is selected then use the default handler 1937 * that does normal register saving and standard interrupt exit 1938 */ 1939 extern char except_vec_vi, except_vec_vi_lui; 1940 extern char except_vec_vi_ori, except_vec_vi_end; 1941 extern char rollback_except_vec_vi; 1942 char *vec_start = using_rollback_handler() ? 1943 &rollback_except_vec_vi : &except_vec_vi; 1944 #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) 1945 const int lui_offset = &except_vec_vi_lui - vec_start + 2; 1946 const int ori_offset = &except_vec_vi_ori - vec_start + 2; 1947 #else 1948 const int lui_offset = &except_vec_vi_lui - vec_start; 1949 const int ori_offset = &except_vec_vi_ori - vec_start; 1950 #endif 1951 const int handler_len = &except_vec_vi_end - vec_start; 1952 1953 if (handler_len > VECTORSPACING) { 1954 /* 1955 * Sigh... panicing won't help as the console 1956 * is probably not configured :( 1957 */ 1958 panic("VECTORSPACING too small"); 1959 } 1960 1961 set_handler(((unsigned long)b - ebase), vec_start, 1962 #ifdef CONFIG_CPU_MICROMIPS 1963 (handler_len - 1)); 1964 #else 1965 handler_len); 1966 #endif 1967 h = (u16 *)(b + lui_offset); 1968 *h = (handler >> 16) & 0xffff; 1969 h = (u16 *)(b + ori_offset); 1970 *h = (handler & 0xffff); 1971 local_flush_icache_range((unsigned long)b, 1972 (unsigned long)(b+handler_len)); 1973 } 1974 else { 1975 /* 1976 * In other cases jump directly to the interrupt handler. It 1977 * is the handler's responsibility to save registers if required 1978 * (eg hi/lo) and return from the exception using "eret". 1979 */ 1980 u32 insn; 1981 1982 h = (u16 *)b; 1983 /* j handler */ 1984 #ifdef CONFIG_CPU_MICROMIPS 1985 insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1); 1986 #else 1987 insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2); 1988 #endif 1989 h[0] = (insn >> 16) & 0xffff; 1990 h[1] = insn & 0xffff; 1991 h[2] = 0; 1992 h[3] = 0; 1993 local_flush_icache_range((unsigned long)b, 1994 (unsigned long)(b+8)); 1995 } 1996 1997 return (void *)old_handler; 1998 } 1999 2000 void *set_vi_handler(int n, vi_handler_t addr) 2001 { 2002 return set_vi_srs_handler(n, addr, 0); 2003 } 2004 2005 extern void tlb_init(void); 2006 2007 /* 2008 * Timer interrupt 2009 */ 2010 int cp0_compare_irq; 2011 EXPORT_SYMBOL_GPL(cp0_compare_irq); 2012 int cp0_compare_irq_shift; 2013 2014 /* 2015 * Performance counter IRQ or -1 if shared with timer 2016 */ 2017 int cp0_perfcount_irq; 2018 EXPORT_SYMBOL_GPL(cp0_perfcount_irq); 2019 2020 /* 2021 * Fast debug channel IRQ or -1 if not present 2022 */ 2023 int cp0_fdc_irq; 2024 EXPORT_SYMBOL_GPL(cp0_fdc_irq); 2025 2026 static int noulri; 2027 2028 static int __init ulri_disable(char *s) 2029 { 2030 pr_info("Disabling ulri\n"); 2031 noulri = 1; 2032 2033 return 1; 2034 } 2035 __setup("noulri", ulri_disable); 2036 2037 /* configure STATUS register */ 2038 static void configure_status(void) 2039 { 2040 /* 2041 * Disable coprocessors and select 32-bit or 64-bit addressing 2042 * and the 16/32 or 32/32 FPR register model. Reset the BEV 2043 * flag that some firmware may have left set and the TS bit (for 2044 * IP27). Set XX for ISA IV code to work. 2045 */ 2046 unsigned int status_set = ST0_CU0; 2047 #ifdef CONFIG_64BIT 2048 status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; 2049 #endif 2050 if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV) 2051 status_set |= ST0_XX; 2052 if (cpu_has_dsp) 2053 status_set |= ST0_MX; 2054 2055 change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX, 2056 status_set); 2057 } 2058 2059 /* configure HWRENA register */ 2060 static void configure_hwrena(void) 2061 { 2062 unsigned int hwrena = cpu_hwrena_impl_bits; 2063 2064 if (cpu_has_mips_r2_r6) 2065 hwrena |= 0x0000000f; 2066 2067 if (!noulri && cpu_has_userlocal) 2068 hwrena |= (1 << 29); 2069 2070 if (hwrena) 2071 write_c0_hwrena(hwrena); 2072 } 2073 2074 static void configure_exception_vector(void) 2075 { 2076 if (cpu_has_veic || cpu_has_vint) { 2077 unsigned long sr = set_c0_status(ST0_BEV); 2078 write_c0_ebase(ebase); 2079 write_c0_status(sr); 2080 /* Setting vector spacing enables EI/VI mode */ 2081 change_c0_intctl(0x3e0, VECTORSPACING); 2082 } 2083 if (cpu_has_divec) { 2084 if (cpu_has_mipsmt) { 2085 unsigned int vpflags = dvpe(); 2086 set_c0_cause(CAUSEF_IV); 2087 evpe(vpflags); 2088 } else 2089 set_c0_cause(CAUSEF_IV); 2090 } 2091 } 2092 2093 void per_cpu_trap_init(bool is_boot_cpu) 2094 { 2095 unsigned int cpu = smp_processor_id(); 2096 2097 configure_status(); 2098 configure_hwrena(); 2099 2100 configure_exception_vector(); 2101 2102 /* 2103 * Before R2 both interrupt numbers were fixed to 7, so on R2 only: 2104 * 2105 * o read IntCtl.IPTI to determine the timer interrupt 2106 * o read IntCtl.IPPCI to determine the performance counter interrupt 2107 * o read IntCtl.IPFDC to determine the fast debug channel interrupt 2108 */ 2109 if (cpu_has_mips_r2_r6) { 2110 cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP; 2111 cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7; 2112 cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7; 2113 cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7; 2114 if (!cp0_fdc_irq) 2115 cp0_fdc_irq = -1; 2116 2117 } else { 2118 cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ; 2119 cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ; 2120 cp0_perfcount_irq = -1; 2121 cp0_fdc_irq = -1; 2122 } 2123 2124 if (!cpu_data[cpu].asid_cache) 2125 cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; 2126 2127 atomic_inc(&init_mm.mm_count); 2128 current->active_mm = &init_mm; 2129 BUG_ON(current->mm); 2130 enter_lazy_tlb(&init_mm, current); 2131 2132 /* Boot CPU's cache setup in setup_arch(). */ 2133 if (!is_boot_cpu) 2134 cpu_cache_init(); 2135 tlb_init(); 2136 TLBMISS_HANDLER_SETUP(); 2137 } 2138 2139 /* Install CPU exception handler */ 2140 void set_handler(unsigned long offset, void *addr, unsigned long size) 2141 { 2142 #ifdef CONFIG_CPU_MICROMIPS 2143 memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size); 2144 #else 2145 memcpy((void *)(ebase + offset), addr, size); 2146 #endif 2147 local_flush_icache_range(ebase + offset, ebase + offset + size); 2148 } 2149 2150 static char panic_null_cerr[] = 2151 "Trying to set NULL cache error exception handler"; 2152 2153 /* 2154 * Install uncached CPU exception handler. 2155 * This is suitable only for the cache error exception which is the only 2156 * exception handler that is being run uncached. 2157 */ 2158 void set_uncached_handler(unsigned long offset, void *addr, 2159 unsigned long size) 2160 { 2161 unsigned long uncached_ebase = CKSEG1ADDR(ebase); 2162 2163 if (!addr) 2164 panic(panic_null_cerr); 2165 2166 memcpy((void *)(uncached_ebase + offset), addr, size); 2167 } 2168 2169 static int __initdata rdhwr_noopt; 2170 static int __init set_rdhwr_noopt(char *str) 2171 { 2172 rdhwr_noopt = 1; 2173 return 1; 2174 } 2175 2176 __setup("rdhwr_noopt", set_rdhwr_noopt); 2177 2178 void __init trap_init(void) 2179 { 2180 extern char except_vec3_generic; 2181 extern char except_vec4; 2182 extern char except_vec3_r4000; 2183 unsigned long i; 2184 2185 check_wait(); 2186 2187 #if defined(CONFIG_KGDB) 2188 if (kgdb_early_setup) 2189 return; /* Already done */ 2190 #endif 2191 2192 if (cpu_has_veic || cpu_has_vint) { 2193 unsigned long size = 0x200 + VECTORSPACING*64; 2194 ebase = (unsigned long) 2195 __alloc_bootmem(size, 1 << fls(size), 0); 2196 } else { 2197 #ifdef CONFIG_KVM_GUEST 2198 #define KVM_GUEST_KSEG0 0x40000000 2199 ebase = KVM_GUEST_KSEG0; 2200 #else 2201 ebase = CKSEG0; 2202 #endif 2203 if (cpu_has_mips_r2_r6) 2204 ebase += (read_c0_ebase() & 0x3ffff000); 2205 } 2206 2207 if (cpu_has_mmips) { 2208 unsigned int config3 = read_c0_config3(); 2209 2210 if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) 2211 write_c0_config3(config3 | MIPS_CONF3_ISA_OE); 2212 else 2213 write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE); 2214 } 2215 2216 if (board_ebase_setup) 2217 board_ebase_setup(); 2218 per_cpu_trap_init(true); 2219 2220 /* 2221 * Copy the generic exception handlers to their final destination. 2222 * This will be overriden later as suitable for a particular 2223 * configuration. 2224 */ 2225 set_handler(0x180, &except_vec3_generic, 0x80); 2226 2227 /* 2228 * Setup default vectors 2229 */ 2230 for (i = 0; i <= 31; i++) 2231 set_except_vector(i, handle_reserved); 2232 2233 /* 2234 * Copy the EJTAG debug exception vector handler code to it's final 2235 * destination. 2236 */ 2237 if (cpu_has_ejtag && board_ejtag_handler_setup) 2238 board_ejtag_handler_setup(); 2239 2240 /* 2241 * Only some CPUs have the watch exceptions. 2242 */ 2243 if (cpu_has_watch) 2244 set_except_vector(23, handle_watch); 2245 2246 /* 2247 * Initialise interrupt handlers 2248 */ 2249 if (cpu_has_veic || cpu_has_vint) { 2250 int nvec = cpu_has_veic ? 64 : 8; 2251 for (i = 0; i < nvec; i++) 2252 set_vi_handler(i, NULL); 2253 } 2254 else if (cpu_has_divec) 2255 set_handler(0x200, &except_vec4, 0x8); 2256 2257 /* 2258 * Some CPUs can enable/disable for cache parity detection, but does 2259 * it different ways. 2260 */ 2261 parity_protection_init(); 2262 2263 /* 2264 * The Data Bus Errors / Instruction Bus Errors are signaled 2265 * by external hardware. Therefore these two exceptions 2266 * may have board specific handlers. 2267 */ 2268 if (board_be_init) 2269 board_be_init(); 2270 2271 set_except_vector(0, using_rollback_handler() ? rollback_handle_int 2272 : handle_int); 2273 set_except_vector(1, handle_tlbm); 2274 set_except_vector(2, handle_tlbl); 2275 set_except_vector(3, handle_tlbs); 2276 2277 set_except_vector(4, handle_adel); 2278 set_except_vector(5, handle_ades); 2279 2280 set_except_vector(6, handle_ibe); 2281 set_except_vector(7, handle_dbe); 2282 2283 set_except_vector(8, handle_sys); 2284 set_except_vector(9, handle_bp); 2285 set_except_vector(10, rdhwr_noopt ? handle_ri : 2286 (cpu_has_vtag_icache ? 2287 handle_ri_rdhwr_vivt : handle_ri_rdhwr)); 2288 set_except_vector(11, handle_cpu); 2289 set_except_vector(12, handle_ov); 2290 set_except_vector(13, handle_tr); 2291 set_except_vector(14, handle_msa_fpe); 2292 2293 if (current_cpu_type() == CPU_R6000 || 2294 current_cpu_type() == CPU_R6000A) { 2295 /* 2296 * The R6000 is the only R-series CPU that features a machine 2297 * check exception (similar to the R4000 cache error) and 2298 * unaligned ldc1/sdc1 exception. The handlers have not been 2299 * written yet. Well, anyway there is no R6000 machine on the 2300 * current list of targets for Linux/MIPS. 2301 * (Duh, crap, there is someone with a triple R6k machine) 2302 */ 2303 //set_except_vector(14, handle_mc); 2304 //set_except_vector(15, handle_ndc); 2305 } 2306 2307 2308 if (board_nmi_handler_setup) 2309 board_nmi_handler_setup(); 2310 2311 if (cpu_has_fpu && !cpu_has_nofpuex) 2312 set_except_vector(15, handle_fpe); 2313 2314 set_except_vector(16, handle_ftlb); 2315 2316 if (cpu_has_rixiex) { 2317 set_except_vector(19, tlb_do_page_fault_0); 2318 set_except_vector(20, tlb_do_page_fault_0); 2319 } 2320 2321 set_except_vector(21, handle_msa); 2322 set_except_vector(22, handle_mdmx); 2323 2324 if (cpu_has_mcheck) 2325 set_except_vector(24, handle_mcheck); 2326 2327 if (cpu_has_mipsmt) 2328 set_except_vector(25, handle_mt); 2329 2330 set_except_vector(26, handle_dsp); 2331 2332 if (board_cache_error_setup) 2333 board_cache_error_setup(); 2334 2335 if (cpu_has_vce) 2336 /* Special exception: R4[04]00 uses also the divec space. */ 2337 set_handler(0x180, &except_vec3_r4000, 0x100); 2338 else if (cpu_has_4kex) 2339 set_handler(0x180, &except_vec3_generic, 0x80); 2340 else 2341 set_handler(0x080, &except_vec3_generic, 0x80); 2342 2343 local_flush_icache_range(ebase, ebase + 0x400); 2344 2345 sort_extable(__start___dbe_table, __stop___dbe_table); 2346 2347 cu2_notifier(default_cu2_call, 0x80000000); /* Run last */ 2348 } 2349 2350 static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd, 2351 void *v) 2352 { 2353 switch (cmd) { 2354 case CPU_PM_ENTER_FAILED: 2355 case CPU_PM_EXIT: 2356 configure_status(); 2357 configure_hwrena(); 2358 configure_exception_vector(); 2359 2360 /* Restore register with CPU number for TLB handlers */ 2361 TLBMISS_HANDLER_RESTORE(); 2362 2363 break; 2364 } 2365 2366 return NOTIFY_OK; 2367 } 2368 2369 static struct notifier_block trap_pm_notifier_block = { 2370 .notifier_call = trap_pm_notifier, 2371 }; 2372 2373 static int __init trap_pm_init(void) 2374 { 2375 return cpu_pm_register_notifier(&trap_pm_notifier_block); 2376 } 2377 arch_initcall(trap_pm_init); 2378