/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2002, 2007 Maciej W. Rozycki
 * Copyright (C) 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/war.h>
#include <asm/thread_info.h>

	__INIT

/*
 * General exception vector for all other CPUs.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_generic, 0, sp)
	.set	push
	.set	noat
#if R5432_CP0_INTERRUPT_WAR
	mfc0	k0, CP0_INDEX
#endif
	mfc0	k1, CP0_CAUSE
	andi	k1, k1, 0x7c
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1
#endif
	PTR_L	k0, exception_handlers(k1)
	jr	k0
	.set	pop
	END(except_vec3_generic)

/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this, it has to be at most 256 (as a special
 * exception) bytes to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	arch=r4000
	.set	noat
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2
	andi	k1, k1, 0x7c
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced
	 li	k0, 14<<2
	beq	k1, k0, handle_vcei
#ifdef CONFIG_64BIT
	 dsll	k1, k1, 1
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)
	jr	k0

	/*
	 * Big shit, we now may have two dirty primary cache lines for the
	 * same physical address.  We can safely invalidate the line pointed
	 * to by c0_badvaddr because after return from this exception handler
	 * the load / store will be re-executed.
	 */
handle_vced:
	MFC0	k0, CP0_BADVADDR
	li	k1, -4					# Is this ...
	and	k0, k1					# ... really needed?
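	/*
	 * Note: storing a zero tag via Index_Store_Tag_D below discards the
	 * primary dcache line selected by the (masked) bad address without
	 * writing it back; the Hit op then flushes the matching secondary
	 * line, so the replayed load / store sees consistent data.
	 */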
	mtc0	zero, CP0_TAGLO
	cache	Index_Store_Tag_D, (k0)
	cache	Hit_Writeback_Inv_SD, (k0)
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vced_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)		# also cleans pi
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vcei_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret
	.set	pop
	END(except_vec3_r4000)

	__FINIT

	.align	5	/* 32 byte rollback region */
LEAF(__r4k_wait)
	.set	push
	.set	noreorder
	/* start of rollback region */
	LONG_L	t0, TI_FLAGS($28)
	nop
	andi	t0, _TIF_NEED_RESCHED
	bnez	t0, 1f
	 nop
	nop
	nop
#ifdef CONFIG_CPU_MICROMIPS
	nop
	nop
	nop
	nop
#endif
	.set	MIPS_ISA_ARCH_LEVEL_RAW
	wait
	/* end of rollback region (the region size must be power of two) */
1:
	jr	ra
	 nop
	.set	pop
	END(__r4k_wait)

	.macro	BUILD_ROLLBACK_PROLOGUE handler
	FEXPORT(rollback_\handler)
	.set	push
	.set	noat
	MFC0	k0, CP0_EPC
	PTR_LA	k1, __r4k_wait
	ori	k0, 0x1f	/* 32 byte rollback region */
	xori	k0, 0x1f
	bne	k0, k1, \handler
	 MTC0	k0, CP0_EPC
	.set	pop
	.endm

	.align	5
BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
	.cfi_signal_frame
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Check to see if the interrupted code has just disabled
	 * interrupts and ignore this interrupt for now if so.
	 *
	 * local_irq_disable() disables interrupts and then calls
	 * trace_hardirqs_off() to track the state.  If an interrupt is taken
	 * after interrupts are disabled but before the state is updated
	 * it will appear to restore_all that it is incorrectly returning with
	 * interrupts disabled.
	 */
	.set	push
	.set	noat
	mfc0	k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	and	k0, ST0_IEP
	bnez	k0, 1f

	mfc0	k0, CP0_EPC
	.set	noreorder
	j	k0
	 rfe
#else
	and	k0, ST0_IE
	bnez	k0, 1f

	eret
#endif
1:
	.set	pop
#endif
	SAVE_ALL docfi=1
	CLI
	TRACE_IRQS_OFF

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp		# Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD	sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	jal	plat_irq_dispatch

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(handle_int)
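
/*
 * The IRQ stack switch above (the same sequence appears again in
 * except_vec_vi_handler below) is roughly equivalent to the following C.
 * This is a sketch for reference only, using the symbols the assembly
 * uses, with "cpu" standing for the value read from ASM_SMP_CPUID_REG:
 *
 *	void *base = irq_stack[cpu];
 *	if ((old_sp & ~(_THREAD_SIZE - 1)) != (unsigned long)base) {
 *		sp = (unsigned long)base + _IRQ_STACK_START;
 *		*(unsigned long *)sp = old_sp;	// lets the unwinder follow
 *	}
 */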

	__INIT

/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces the
 * interrupt processing overhead.  The jump instruction will be replaced
 * at initialization time.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:	j	1b			/* Dummy, will be replaced */
	END(except_vec4)

/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler
#ifdef CONFIG_CPU_MICROMIPS
	 nop
#endif
	END(except_vec_ejtag_debug)

	__FINIT

/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler.
 */
BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
	SAVE_SOME docfi=1
	SAVE_AT docfi=1
	.set	push
	.set	noreorder
	PTR_LA	v1, except_vec_vi_handler
FEXPORT(except_vec_vi_lui)
	lui	v0, 0		/* Patched */
	jr	v1
FEXPORT(except_vec_vi_ori)
	 ori	v0, 0		/* Patched */
	.set	pop
	END(except_vec_vi)
EXPORT(except_vec_vi_end)

/*
 * Common Vectored Interrupt code
 * Complete the register saves and invoke the handler, which is passed in $v0.
 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
	CLI
#ifdef CONFIG_TRACE_IRQFLAGS
	move	s0, v0
	TRACE_IRQS_OFF
	move	v0, s0
#endif

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp		# Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD	sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	jalr	v0

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
	END(except_vec_vi_handler)

/*
 * EJTAG debug exception handler.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE
	mfc0	k0, CP0_DEBUG

	sll	k0, k0, 30	# Check for SDBBP.
	bgez	k0, ejtag_return
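
	/*
	 * Preserve the interrupted context's k1 across the C handler:
	 * it is stashed in ejtag_debug_buffer (guarded by a small ll/sc
	 * lock on SMP) and then copied into this CPU's slot of
	 * ejtag_debug_buffer_per_cpu, from which it is reloaded after
	 * RESTORE_ALL.
	 */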

#ifdef CONFIG_SMP
1:	PTR_LA	k0, ejtag_debug_buffer_spinlock
	ll	k0, 0(k0)
	bnez	k0, 1b
	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sc	k0, 0(k0)
	beqz	k0, 1b
# ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
	sync
# endif

	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)

	ASM_CPUID_MFC0	k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU k0, k1

	PTR_LA	k1, ejtag_debug_buffer
	LONG_L	k1, 0(k1)
	LONG_S	k1, 0(k0)

	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sw	zero, 0(k0)
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)
#endif

	SAVE_ALL
	move	a0, sp
	jal	ejtag_exception_handler
	RESTORE_ALL

#ifdef CONFIG_SMP
	ASM_CPUID_MFC0	k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU k0, k1
	LONG_L	k1, 0(k0)
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_L	k1, 0(k0)
#endif

ejtag_return:
	back_to_back_c0_hazard
	MFC0	k0, CP0_DESAVE
	.set	mips32
	deret
	.set	pop
	END(ejtag_debug_handler)

/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler.
 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE
#ifdef CONFIG_SMP
EXPORT(ejtag_debug_buffer_spinlock)
	.fill	LONGSIZE
EXPORT(ejtag_debug_buffer_per_cpu)
	.fill	LONGSIZE * NR_CPUS
#endif
	.previous

	__INIT

/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
	j	nmi_handler
#ifdef CONFIG_CPU_MICROMIPS
	 nop
#endif
	END(except_vec_nmi)

	__FINIT

NESTED(nmi_handler, PT_SIZE, sp)
	.cfi_signal_frame
	.set	push
	.set	noat
	/*
	 * Clear ERL - restore segment mapping
	 * Clear BEV - required for page fault exception handler to work
	 */
	mfc0	k0, CP0_STATUS
	ori	k0, k0, ST0_EXL
	li	k1, ~(ST0_BEV | ST0_ERL)
	and	k0, k0, k1
	mtc0	k0, CP0_STATUS
	_ehb
	SAVE_ALL
	move	a0, sp
	jal	nmi_exception_handler
	/* nmi_exception_handler never returns */
	.set	pop
	END(nmi_handler)

	.macro	__build_clear_none
	.endm

	.macro	__build_clear_sti
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__build_clear_cli
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_fpe
	.set	push
	/* gas fails to assemble cfc1 for some archs (octeon). */
	.set	mips1
	SET_HARDFLOAT
	cfc1	a1, fcr31
	.set	pop
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_msa_fpe
	_cfcmsa	a1, MSA_CSR
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_ade
	MFC0	t0, CP0_BADVADDR
	PTR_S	t0, PT_BVADDR(sp)
	KMODE
	.endm

	.macro	__BUILD_silent exception
	.endm
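
/*
 * The BUILD_HANDLER lines further down stitch the fragments above
 * together.  Roughly, "BUILD_HANDLER ov ov sti silent" expands to
 * something like this (sketch only; details such as CFI annotations
 * and .set noat are omitted):
 *
 *	NESTED(handle_ov, PT_SIZE, sp)
 *		SAVE_ALL
 *		TRACE_IRQS_ON			# __build_clear_sti
 *		STI
 *		move	a0, sp
 *		jal	do_ov
 *		j	ret_from_exception
 *	END(handle_ov)
 */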

	/*
	 * Gas tries to parse the PRINT argument as a string containing
	 * string escapes and emits bogus warnings when it thinks it has
	 * found an unknown escape code.  So make the arguments start with
	 * an n so that gas believes \n is ok ...
	 */
	.macro	__BUILD_verbose nexception
	LONG_L	a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
	PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
	PRINT("Got \nexception at %016lx\012")
#endif
	.endm

	.macro	__BUILD_count exception
	LONG_L	t0, exception_count_\exception
	LONG_ADDIU	t0, 1
	LONG_S	t0, exception_count_\exception
	.comm	exception_count_\exception, 8, 8
	.endm

	.macro	__BUILD_HANDLER exception handler clear verbose ext
	.align	5
	NESTED(handle_\exception, PT_SIZE, sp)
	.cfi_signal_frame
	.set	noat
	SAVE_ALL
	FEXPORT(handle_\exception\ext)
	__build_clear_\clear
	.set	at
	__BUILD_\verbose \exception
	move	a0, sp
	jal	do_\handler
	j	ret_from_exception
	END(handle_\exception)
	.endm

	.macro	BUILD_HANDLER exception handler clear verbose
	__BUILD_HANDLER	\exception \handler \clear \verbose _int
	.endm

	BUILD_HANDLER adel ade ade silent		/* #4  */
	BUILD_HANDLER ades ade ade silent		/* #5  */
	BUILD_HANDLER ibe be cli silent			/* #6  */
	BUILD_HANDLER dbe be cli silent			/* #7  */
	BUILD_HANDLER bp bp sti silent			/* #9  */
	BUILD_HANDLER ri ri sti silent			/* #10 */
	BUILD_HANDLER cpu cpu sti silent		/* #11 */
	BUILD_HANDLER ov ov sti silent			/* #12 */
	BUILD_HANDLER tr tr sti silent			/* #13 */
	BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent	/* #14 */
	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
	BUILD_HANDLER ftlb ftlb none silent		/* #16 */
	BUILD_HANDLER msa msa sti silent		/* #21 */
	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
#ifdef CONFIG_HARDWARE_WATCHPOINTS
	/*
	 * For watch, interrupts will be enabled after the watch
	 * registers are read.
	 */
	BUILD_HANDLER watch watch cli silent		/* #23 */
#else
	BUILD_HANDLER watch watch sti verbose		/* #23 */
#endif
	BUILD_HANDLER mcheck mcheck cli verbose		/* #24 */
	BUILD_HANDLER mt mt sti silent			/* #25 */
	BUILD_HANDLER dsp dsp sti silent		/* #26 */
	BUILD_HANDLER reserved reserved sti verbose	/* others */

	.align	5
	LEAF(handle_ri_rdhwr_tlbp)
	.set	push
	.set	noat
	.set	noreorder
	/* check if TLB contains an entry for EPC */
	MFC0	k1, CP0_ENTRYHI
	andi	k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX
	MFC0	k0, CP0_EPC
	PTR_SRL	k0, _PAGE_SHIFT + 1
	PTR_SLL	k0, _PAGE_SHIFT + 1
	or	k1, k0
	MTC0	k1, CP0_ENTRYHI
	mtc0_tlbw_hazard
	tlbp
	tlb_probe_hazard
	mfc0	k1, CP0_INDEX
	.set	pop
	bltz	k1, handle_ri	/* slow path */
	/* fall thru */
	END(handle_ri_rdhwr_tlbp)

	LEAF(handle_ri_rdhwr)
	.set	push
	.set	noat
	.set	noreorder
	/* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
	/* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
	MFC0	k1, CP0_EPC
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
	and	k0, k1, 1
	beqz	k0, 1f
	 xor	k1, k0
	lhu	k0, (k1)
	lhu	k1, 2(k1)
	ins	k1, k0, 16, 16
	lui	k0, 0x007d
	b	docheck
	 ori	k0, 0x6b3c
1:
	lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#else
	andi	k0, k1, 1
	bnez	k0, handle_ri
	 lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#endif
	.set	reorder
docheck:
	bne	k0, k1, handle_ri	/* if not ours */

isrdhwr:
	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
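	/*
	 * Emulate "rdhwr v1, $29" by hand: load the thread pointer from
	 * thread_info (TI_TP_VALUE) into v1 and step EPC past the trapping
	 * instruction before returning.
	 */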
	get_saved_sp	/* k1 := current_thread_info */
	.set	noreorder
	MFC0	k0, CP0_EPC
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	LONG_ADDIU	k0, 4
	jr	k0
	 rfe
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	LONG_ADDIU	k0, 4		/* stall on $k0 */
#else
	.set	at=v1
	LONG_ADDIU	k0, 4
	.set	noat
#endif
	MTC0	k0, CP0_EPC
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	.set	arch=r4000
	eret
	.set	mips0
#endif
	.set	pop
	END(handle_ri_rdhwr)

#ifdef CONFIG_64BIT
/* A temporary overflow handler used by check_daddi(). */

	__INIT

	BUILD_HANDLER daddi_ov daddi_ov none silent	/* #12 */
#endif