/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2002, 2007 Maciej W. Rozycki
 * Copyright (C) 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/sync.h>
#include <asm/war.h>
#include <asm/thread_info.h>

	__INIT

/*
 * General exception vector for all other CPUs.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_generic, 0, sp)
	.set	push
	.set	noat
	mfc0	k1, CP0_CAUSE
	andi	k1, k1, 0x7c
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1
#endif
	PTR_L	k0, exception_handlers(k1)
	jr	k0
	.set	pop
	END(except_vec3_generic)

/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this, it has to be at most 256 (as a special
 * exception) bytes to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	arch=r4000
	.set	noat
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2
	andi	k1, k1, 0x7c
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced
	li	k0, 14<<2
	beq	k1, k0, handle_vcei
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)
	jr	k0

	/*
	 * Big shit, we now may have two dirty primary cache lines for the same
	 * physical address.  We can safely invalidate the line pointed to by
	 * c0_badvaddr because after return from this exception handler the
	 * load / store will be re-executed.
	 */
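	/*
	 * What the two cache ops below achieve: with TagLo zeroed,
	 * Index_Store_Tag_D marks the primary dcache line selected by
	 * c0_badvaddr invalid without writing it back, and
	 * Hit_Writeback_Inv_SD then writes back and invalidates the
	 * matching secondary line.  Both ops pick their line from the
	 * address alone and ignore the sub-line offset bits, which is
	 * why the word-alignment of c0_badvaddr below looks redundant.
	 */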
handle_vced:
	MFC0	k0, CP0_BADVADDR
	li	k1, -4					# Is this ...
	and	k0, k1					# ... really needed?
	mtc0	zero, CP0_TAGLO
	cache	Index_Store_Tag_D, (k0)
	cache	Hit_Writeback_Inv_SD, (k0)
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vced_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)		# also cleans pi
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vcei_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret
	.set	pop
	END(except_vec3_r4000)

	__FINIT

	.align	5	/* 32 byte rollback region */
LEAF(__r4k_wait)
	.set	push
	.set	noreorder
	/* start of rollback region */
	LONG_L	t0, TI_FLAGS($28)
	nop
	andi	t0, _TIF_NEED_RESCHED
	bnez	t0, 1f
	nop
	nop
	nop
#ifdef CONFIG_CPU_MICROMIPS
	nop
	nop
	nop
	nop
#endif
	.set	MIPS_ISA_ARCH_LEVEL_RAW
	wait
	/* end of rollback region (the region size must be a power of two) */
1:
	jr	ra
	nop
	.set	pop
	END(__r4k_wait)
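
	/*
	 * How the rollback works: BUILD_ROLLBACK_PROLOGUE below is
	 * planted in front of an interrupt handler.  If the interrupt
	 * was taken anywhere inside the 32-byte region above, EPC is
	 * rounded down to the start of the region before the handler
	 * runs, so the _TIF_NEED_RESCHED test is replayed before the
	 * wait can be re-entered.  The ori/xori pair does the rounding
	 * without needing a register to hold the mask:
	 *
	 *	(EPC | 0x1f) ^ 0x1f  ==  EPC & ~0x1f
	 *
	 * e.g. an EPC of 0x...c8 inside a region based at 0x...c0
	 * becomes 0x...df after the ori and 0x...c0 after the xori.
	 */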
	.macro	BUILD_ROLLBACK_PROLOGUE handler
	FEXPORT(rollback_\handler)
	.set	push
	.set	noat
	MFC0	k0, CP0_EPC
	PTR_LA	k1, __r4k_wait
	ori	k0, 0x1f	/* 32 byte rollback region */
	xori	k0, 0x1f
	bne	k0, k1, \handler
	MTC0	k0, CP0_EPC
	.set	pop
	.endm

	.align	5
BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
	.cfi_signal_frame
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Check to see if the interrupted code has just disabled
	 * interrupts and ignore this interrupt for now if so.
	 *
	 * local_irq_disable() disables interrupts and then calls
	 * trace_hardirqs_off() to track the state.  If an interrupt is taken
	 * after interrupts are disabled but before the state is updated
	 * it will appear to restore_all that it is incorrectly returning with
	 * interrupts disabled.
	 */
	.set	push
	.set	noat
	mfc0	k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	and	k0, ST0_IEP
	bnez	k0, 1f

	mfc0	k0, CP0_EPC
	.set	noreorder
	j	k0
	rfe
#else
	and	k0, ST0_IE
	bnez	k0, 1f

	eret
#endif
1:
	.set	pop
#endif
	SAVE_ALL docfi=1
	CLI
	TRACE_IRQS_OFF

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp		# Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD	sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	jal	plat_irq_dispatch

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(handle_int)

	__INIT

/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces
 * interrupt processing overhead.  The jump instruction will be replaced
 * at initialization time.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:	j	1b			/* Dummy, will be replaced */
	END(except_vec4)

/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_ejtag_debug)

	__FINIT

/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler.
 */
BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
	SAVE_SOME docfi=1
	SAVE_AT docfi=1
	.set	push
	.set	noreorder
	PTR_LA	v1, except_vec_vi_handler
FEXPORT(except_vec_vi_lui)
	lui	v0, 0		/* Patched */
	jr	v1
FEXPORT(except_vec_vi_ori)
	ori	v0, 0		/* Patched */
	.set	pop
	END(except_vec_vi)
EXPORT(except_vec_vi_end)
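
	/*
	 * The lui/ori immediates above start out as zero and are
	 * rewritten through the except_vec_vi_lui/except_vec_vi_ori
	 * labels when the prototype is copied for a vector, filling
	 * in the %hi/%lo halves of that vector's dispatch routine.
	 * v0 therefore arrives at except_vec_vi_handler holding the
	 * address that the jalr below will call.
	 */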
/*
 * Common Vectored Interrupt code
 * Complete the register saves and invoke the handler, which is passed in $v0.
 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
	CLI
#ifdef CONFIG_TRACE_IRQFLAGS
	move	s0, v0
	TRACE_IRQS_OFF
	move	v0, s0
#endif

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * The register saves above ensure we are using a valid kernel
	 * stack for the thread.  Check if we are already using the IRQ stack.
	 */
	move	s1, sp		# Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD	sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	jalr	v0

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
	END(except_vec_vi_handler)

/*
 * EJTAG debug exception handler.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE
	mfc0	k0, CP0_DEBUG

	sll	k0, k0, 30	# Check for SDBBP.
	bgez	k0, ejtag_return

#ifdef CONFIG_SMP
1:	PTR_LA	k0, ejtag_debug_buffer_spinlock
	__SYNC(full, loongson3_war)
2:	ll	k0, 0(k0)
	bnez	k0, 2b
	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sc	k0, 0(k0)
	beqz	k0, 1b
# ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
	sync
# endif

	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)

	ASM_CPUID_MFC0	k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU	k0, k1

	PTR_LA	k1, ejtag_debug_buffer
	LONG_L	k1, 0(k1)
	LONG_S	k1, 0(k0)

	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sw	zero, 0(k0)
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)
#endif

	SAVE_ALL
	move	a0, sp
	jal	ejtag_exception_handler
	RESTORE_ALL

#ifdef CONFIG_SMP
	ASM_CPUID_MFC0	k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU	k0, k1
	LONG_L	k1, 0(k0)
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_L	k1, 0(k0)
#endif

ejtag_return:
	back_to_back_c0_hazard
	MFC0	k0, CP0_DESAVE
	.set	mips32
	deret
	.set	pop
	END(ejtag_debug_handler)
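
	/*
	 * Only k0 and k1 may be touched before the registers are
	 * saved, so the SMP path above builds its lock from those two
	 * alone: ll spins until ejtag_debug_buffer_spinlock reads
	 * zero, then sc stores the (non-zero) address of the lock
	 * word itself as the "taken" token, retrying from 1b if the
	 * store fails.  The lock serializes use of the single staging
	 * word ejtag_debug_buffer, whose contents are copied into this
	 * CPU's ejtag_debug_buffer_per_cpu slot; sw zero releases it.
	 */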
/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler.
 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE
#ifdef CONFIG_SMP
EXPORT(ejtag_debug_buffer_spinlock)
	.fill	LONGSIZE
EXPORT(ejtag_debug_buffer_per_cpu)
	.fill	LONGSIZE * NR_CPUS
#endif
	.previous

	__INIT

/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
	j	nmi_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_nmi)

	__FINIT

NESTED(nmi_handler, PT_SIZE, sp)
	.cfi_signal_frame
	.set	push
	.set	noat
	/*
	 * Clear ERL - restore segment mapping
	 * Clear BEV - required for page fault exception handler to work
	 */
	mfc0	k0, CP0_STATUS
	ori	k0, k0, ST0_EXL
	li	k1, ~(ST0_BEV | ST0_ERL)
	and	k0, k0, k1
	mtc0	k0, CP0_STATUS
	_ehb
	SAVE_ALL
	move	a0, sp
	jal	nmi_exception_handler
	/* nmi_exception_handler never returns */
	.set	pop
	END(nmi_handler)

	.macro	__build_clear_none
	.endm

	.macro	__build_clear_sti
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__build_clear_cli
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_fpe
	CLI
	TRACE_IRQS_OFF
	.set	push
	/* gas fails to assemble cfc1 for some archs (octeon). */
	.set	mips1
	SET_HARDFLOAT
	cfc1	a1, fcr31
	.set	pop
	.endm

	.macro	__build_clear_msa_fpe
	CLI
	TRACE_IRQS_OFF
	_cfcmsa	a1, MSA_CSR
	.endm

	.macro	__build_clear_ade
	MFC0	t0, CP0_BADVADDR
	PTR_S	t0, PT_BVADDR(sp)
	KMODE
	.endm

	.macro	__build_clear_gsexc
	.set	push
	/*
	 * We need to specify a selector to access the CP0.Diag1 (GSCause)
	 * register.  All GSExc-equipped processors have MIPS32.
	 */
	.set	mips32
	mfc0	a1, CP0_DIAGNOSTIC1
	.set	pop
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__BUILD_silent exception
	.endm

	/* Gas tries to parse the ASM_PRINT argument as a string containing
	   string escapes and emits bogus warnings if it believes it
	   recognizes an unknown escape code.  So make the arguments
	   start with an n and gas will believe \n is ok ...  */
	.macro	__BUILD_verbose nexception
	LONG_L	a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
	ASM_PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
	ASM_PRINT("Got \nexception at %016lx\012")
#endif
	.endm

	.macro	__BUILD_count exception
	LONG_L	t0, exception_count_\exception
	LONG_ADDIU	t0, 1
	LONG_S	t0, exception_count_\exception
	.comm	exception_count_\exception, 8, 8
	.endm

	.macro	__BUILD_HANDLER exception handler clear verbose ext
	.align	5
	NESTED(handle_\exception, PT_SIZE, sp)
	.cfi_signal_frame
	.set	noat
	SAVE_ALL
	FEXPORT(handle_\exception\ext)
	__build_clear_\clear
	.set	at
	__BUILD_\verbose \exception
	move	a0, sp
	jal	do_\handler
	j	ret_from_exception
	END(handle_\exception)
	.endm

	.macro	BUILD_HANDLER exception handler clear verbose
	__BUILD_HANDLER	\exception \handler \clear \verbose _int
	.endm
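
	/*
	 * For example, "BUILD_HANDLER bp bp sti silent" below expands
	 * to a handle_bp that saves the full register set, re-enables
	 * interrupts (__build_clear_sti), points a0 at the saved
	 * pt_regs, calls do_bp() and finishes through
	 * ret_from_exception.  The comments give the CP0 ExcCode each
	 * handler serves.
	 */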
	BUILD_HANDLER adel ade ade silent		/* #4  */
	BUILD_HANDLER ades ade ade silent		/* #5  */
	BUILD_HANDLER ibe be cli silent			/* #6  */
	BUILD_HANDLER dbe be cli silent			/* #7  */
	BUILD_HANDLER bp bp sti silent			/* #9  */
	BUILD_HANDLER ri ri sti silent			/* #10 */
	BUILD_HANDLER cpu cpu sti silent		/* #11 */
	BUILD_HANDLER ov ov sti silent			/* #12 */
	BUILD_HANDLER tr tr sti silent			/* #13 */
	BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent	/* #14 */
#ifdef CONFIG_MIPS_FP_SUPPORT
	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
#endif
	BUILD_HANDLER ftlb ftlb none silent		/* #16 */
	BUILD_HANDLER gsexc gsexc gsexc silent		/* #16 */
	BUILD_HANDLER msa msa sti silent		/* #21 */
	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
#ifdef CONFIG_HARDWARE_WATCHPOINTS
	/*
	 * For watch, interrupts will be enabled after the watch
	 * registers are read.
	 */
	BUILD_HANDLER watch watch cli silent		/* #23 */
#else
	BUILD_HANDLER watch watch sti verbose		/* #23 */
#endif
	BUILD_HANDLER mcheck mcheck cli verbose		/* #24 */
	BUILD_HANDLER mt mt sti silent			/* #25 */
	BUILD_HANDLER dsp dsp sti silent		/* #26 */
	BUILD_HANDLER reserved reserved sti verbose	/* others */

	.align	5
	LEAF(handle_ri_rdhwr_tlbp)
	.set	push
	.set	noat
	.set	noreorder
	/* check if TLB contains an entry for EPC */
	MFC0	k1, CP0_ENTRYHI
	andi	k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX
	MFC0	k0, CP0_EPC
	PTR_SRL	k0, _PAGE_SHIFT + 1
	PTR_SLL	k0, _PAGE_SHIFT + 1
	or	k1, k0
	MTC0	k1, CP0_ENTRYHI
	mtc0_tlbw_hazard
	tlbp
	tlb_probe_hazard
	mfc0	k1, CP0_INDEX
	.set	pop
	bltz	k1, handle_ri	/* slow path */
	/* fall thru */
	END(handle_ri_rdhwr_tlbp)

	LEAF(handle_ri_rdhwr)
	.set	push
	.set	noat
	.set	noreorder
	/* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
	/* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
	MFC0	k1, CP0_EPC
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
	and	k0, k1, 1
	beqz	k0, 1f
	xor	k1, k0			# strip the ISA bit off EPC
	lhu	k0, (k1)		# fetch the two microMIPS halfwords
	lhu	k1, 2(k1)
	ins	k1, k0, 16, 16		# reassemble the 32-bit instruction
	lui	k0, 0x007d
	b	docheck
	ori	k0, 0x6b3c
1:
	lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#else
	andi	k0, k1, 1
	bnez	k0, handle_ri
	lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#endif
	.set	reorder
docheck:
	bne	k0, k1, handle_ri	/* if not ours */

isrdhwr:
	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
	get_saved_sp	/* k1 := current_thread_info */
	.set	noreorder
	MFC0	k0, CP0_EPC
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	LONG_ADDIU	k0, 4
	jr	k0
	rfe
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	LONG_ADDIU	k0, 4		/* stall on $k0 */
#else
	.set	at=v1
	LONG_ADDIU	k0, 4
	.set	noat
#endif
	MTC0	k0, CP0_EPC
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	.set	push
	.set	arch=r4000
	eret
	.set	pop
#endif
	.set	pop
	END(handle_ri_rdhwr)

#ifdef CONFIG_CPU_R4X00_BUGS64
/* A temporary overflow handler used by check_daddi(). */

	__INIT

	BUILD_HANDLER daddi_ov daddi_ov none silent	/* #12 */
#endif