/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2002, 2007 Maciej W. Rozycki
 * Copyright (C) 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/war.h>
#include <asm/thread_info.h>

	__INIT

/*
 * General exception vector for all other CPUs.
 *
 * Be careful when changing this; it has to be at most 128 bytes
 * to fit into the space reserved for the exception handler.
 */
NESTED(except_vec3_generic, 0, sp)
	.set	push
	.set	noat
#if R5432_CP0_INTERRUPT_WAR
	mfc0	k0, CP0_INDEX
#endif
	mfc0	k1, CP0_CAUSE
	andi	k1, k1, 0x7c
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1
#endif
	PTR_L	k0, exception_handlers(k1)
	jr	k0
	.set	pop
	END(except_vec3_generic)

/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this; it has to be at most 256 (as a special
 * exception) bytes to fit into the space reserved for the exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	arch=r4000
	.set	noat
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2
	andi	k1, k1, 0x7c
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced
	 li	k0, 14<<2
	beq	k1, k0, handle_vcei
#ifdef CONFIG_64BIT
	 dsll	k1, k1, 1
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)
	jr	k0

	/*
	 * Big shit, we may now have two dirty primary cache lines for the
	 * same physical address.  We can safely invalidate the line pointed
	 * to by c0_badvaddr because after return from this exception handler
	 * the load / store will be re-executed.
	 */
handle_vced:
	MFC0	k0, CP0_BADVADDR
	li	k1, -4					# Is this ...
	and	k0, k1					# ... really needed?
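	/*
	 * With TAGLO zeroed, Index_Store_Tag_D writes an invalid tag into
	 * the primary data-cache line indexed by the word-aligned bad
	 * address, discarding that line without a writeback; the
	 * Hit_Writeback_Inv_SD that follows then writes back and
	 * invalidates the matching secondary-cache line, which should
	 * leave memory holding the only valid copy of the data.
	 */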
	mtc0	zero, CP0_TAGLO
	cache	Index_Store_Tag_D, (k0)
	cache	Hit_Writeback_Inv_SD, (k0)
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vced_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)		# also cleans pi
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vcei_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret
	.set	pop
	END(except_vec3_r4000)

	__FINIT

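/*
 * __r4k_wait cooperates with BUILD_ROLLBACK_PROLOGUE below: if an
 * interrupt is taken anywhere inside the 32-byte region between the
 * TIF_NEED_RESCHED test and the WAIT instruction, the prologue rolls
 * EPC back to the start of the region so that the flag is re-tested
 * before the CPU is allowed to sleep.  This closes the race where
 * NEED_RESCHED is set after the test but before WAIT executes.
 */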
	.align	5	/* 32 byte rollback region */
LEAF(__r4k_wait)
	.set	push
	.set	noreorder
	/* start of rollback region */
	LONG_L	t0, TI_FLAGS($28)
	nop
	andi	t0, _TIF_NEED_RESCHED
	bnez	t0, 1f
	 nop
	nop
	nop
#ifdef CONFIG_CPU_MICROMIPS
	nop
	nop
	nop
	nop
#endif
	.set	MIPS_ISA_ARCH_LEVEL_RAW
	wait
	/* end of rollback region (the region size must be a power of two) */
1:
	jr	ra
	 nop
	.set	pop
	END(__r4k_wait)

	.macro	BUILD_ROLLBACK_PROLOGUE handler
	FEXPORT(rollback_\handler)
	.set	push
	.set	noat
	MFC0	k0, CP0_EPC
	PTR_LA	k1, __r4k_wait
	ori	k0, 0x1f	/* 32 byte rollback region */
	xori	k0, 0x1f
	bne	k0, k1, \handler
	 MTC0	k0, CP0_EPC
	.set	pop
	.endm

	.align	5
BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
	.cfi_signal_frame
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Check to see if the interrupted code has just disabled
	 * interrupts and ignore this interrupt for now if so.
	 *
	 * local_irq_disable() disables interrupts and then calls
	 * trace_hardirqs_off() to track the state.  If an interrupt is
	 * taken after interrupts are disabled but before the state is
	 * updated, it will appear to restore_all that it is incorrectly
	 * returning with interrupts disabled.
	 */
	.set	push
	.set	noat
	mfc0	k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	and	k0, ST0_IEP
	bnez	k0, 1f

	mfc0	k0, CP0_EPC
	.set	noreorder
	j	k0
	 rfe
#else
	and	k0, ST0_IE
	bnez	k0, 1f

	eret
#endif
1:
	.set	pop
#endif
	SAVE_ALL docfi=1
	CLI
	TRACE_IRQS_OFF

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp		# Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD	sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	jal	plat_irq_dispatch

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(handle_int)

	__INIT

/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces
 * interrupt processing overhead.  The jump instruction will be
 * replaced at initialization time.
 *
 * Be careful when changing this; it has to be at most 128 bytes
 * to fit into the space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:	j	1b			/* Dummy, will be replaced */
	END(except_vec4)

/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_ejtag_debug)

	__FINIT

/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler.
 */
BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
	SAVE_SOME docfi=1
	SAVE_AT docfi=1
	.set	push
	.set	noreorder
	PTR_LA	v1, except_vec_vi_handler
FEXPORT(except_vec_vi_lui)
	lui	v0, 0		/* Patched */
	jr	v1
FEXPORT(except_vec_vi_ori)
	 ori	v0, 0		/* Patched */
	.set	pop
	END(except_vec_vi)
EXPORT(except_vec_vi_end)

/*
 * Common Vectored Interrupt code.
 * Complete the register saves and invoke the handler, which is passed
 * in $v0.
 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
	CLI
#ifdef CONFIG_TRACE_IRQFLAGS
	move	s0, v0
	TRACE_IRQS_OFF
	move	v0, s0
#endif

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * The register saves above ensure we are using a valid kernel
	 * stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp		# Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD	sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	jalr	v0

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
	END(except_vec_vi_handler)

/*
 * EJTAG debug exception handler.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE
	mfc0	k0, CP0_DEBUG

	sll	k0, k0, 30	# Check for SDBBP.
	bgez	k0, ejtag_return

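	/*
	 * k0 was parked in the DESAVE scratch register above; EJTAG
	 * provides only that one scratch register, so k1 has to be
	 * spilled to a memory buffer before SAVE_ALL can clobber it,
	 * and is reloaded from the same buffer after RESTORE_ALL.
	 */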
	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)
	SAVE_ALL
	move	a0, sp
	jal	ejtag_exception_handler
	RESTORE_ALL
	PTR_LA	k0, ejtag_debug_buffer
	LONG_L	k1, 0(k0)

ejtag_return:
	MFC0	k0, CP0_DESAVE
	.set	mips32
	deret
	.set	pop
	END(ejtag_debug_handler)

/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler.
 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE
	.previous

	__INIT

/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
	j	nmi_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_nmi)

	__FINIT

NESTED(nmi_handler, PT_SIZE, sp)
	.cfi_signal_frame
	.set	push
	.set	noat
	/*
	 * Clear ERL - restore segment mapping
	 * Clear BEV - required for page fault exception handler to work
	 */
	mfc0	k0, CP0_STATUS
	ori	k0, k0, ST0_EXL
	li	k1, ~(ST0_BEV | ST0_ERL)
	and	k0, k0, k1
	mtc0	k0, CP0_STATUS
	_ehb
	SAVE_ALL
	move	a0, sp
	jal	nmi_exception_handler
	/* nmi_exception_handler never returns */
	.set	pop
	END(nmi_handler)

	.macro	__build_clear_none
	.endm

	.macro	__build_clear_sti
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__build_clear_cli
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_fpe
	.set	push
	/* gas fails to assemble cfc1 for some archs (octeon). */
	.set	mips1
	SET_HARDFLOAT
	cfc1	a1, fcr31
	.set	pop
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_msa_fpe
	_cfcmsa	a1, MSA_CSR
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_ade
	MFC0	t0, CP0_BADVADDR
	PTR_S	t0, PT_BVADDR(sp)
	KMODE
	.endm

	.macro	__BUILD_silent exception
	.endm

	/* Gas tries to parse the PRINT argument as a string containing
	   string escapes and emits bogus warnings if it thinks it
	   recognizes an unknown escape code.  So make the arguments
	   start with an n and gas will believe \n is ok ... */
	.macro	__BUILD_verbose nexception
	LONG_L	a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
	PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
	PRINT("Got \nexception at %016lx\012")
#endif
	.endm

	.macro	__BUILD_count exception
	LONG_L	t0, exception_count_\exception
	LONG_ADDIU	t0, 1
	LONG_S	t0, exception_count_\exception
	.comm	exception_count_\exception, 8, 8
	.endm

	.macro	__BUILD_HANDLER exception handler clear verbose ext
	.align	5
	NESTED(handle_\exception, PT_SIZE, sp)
	.cfi_signal_frame
	.set	noat
	SAVE_ALL
	FEXPORT(handle_\exception\ext)
	__build_clear_\clear
	.set	at
	__BUILD_\verbose \exception
	move	a0, sp
	jal	do_\handler
	j	ret_from_exception
	END(handle_\exception)
	.endm

	.macro	BUILD_HANDLER exception handler clear verbose
	__BUILD_HANDLER	\exception \handler \clear \verbose _int
	.endm

	BUILD_HANDLER adel ade ade silent		/* #4  */
	BUILD_HANDLER ades ade ade silent		/* #5  */
	BUILD_HANDLER ibe be cli silent			/* #6  */
	BUILD_HANDLER dbe be cli silent			/* #7  */
	BUILD_HANDLER bp bp sti silent			/* #9  */
	BUILD_HANDLER ri ri sti silent			/* #10 */
	BUILD_HANDLER cpu cpu sti silent		/* #11 */
	BUILD_HANDLER ov ov sti silent			/* #12 */
	BUILD_HANDLER tr tr sti silent			/* #13 */
	BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent	/* #14 */
	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
	BUILD_HANDLER ftlb ftlb none silent		/* #16 */
	BUILD_HANDLER msa msa sti silent		/* #21 */
	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
#ifdef CONFIG_HARDWARE_WATCHPOINTS
	/*
	 * For watch, interrupts will be enabled after the watch
	 * registers are read.
	 */
	BUILD_HANDLER watch watch cli silent		/* #23 */
#else
	BUILD_HANDLER watch watch sti verbose		/* #23 */
#endif
	BUILD_HANDLER mcheck mcheck cli verbose		/* #24 */
	BUILD_HANDLER mt mt sti silent			/* #25 */
	BUILD_HANDLER dsp dsp sti silent		/* #26 */
	BUILD_HANDLER reserved reserved sti verbose	/* others */

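/*
 * The two entry points below are the fast path for emulating
 * "rdhwr v1, $29" (the UserLocal / TLS pointer register) on CPUs that
 * trap it as a reserved instruction.  handle_ri_rdhwr_tlbp first
 * probes the TLB for the page containing EPC and punts to the generic
 * handle_ri slow path if no entry is present, so that the load of the
 * instruction word cannot itself take a TLB miss from exception
 * context.  Anything other than the rdhwr encodings noted below is
 * likewise passed on to handle_ri.
 */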
	.align	5
	LEAF(handle_ri_rdhwr_tlbp)
	.set	push
	.set	noat
	.set	noreorder
	/* check if TLB contains an entry for EPC */
	MFC0	k1, CP0_ENTRYHI
	andi	k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX
	MFC0	k0, CP0_EPC
	PTR_SRL	k0, _PAGE_SHIFT + 1
	PTR_SLL	k0, _PAGE_SHIFT + 1
	or	k1, k0
	MTC0	k1, CP0_ENTRYHI
	mtc0_tlbw_hazard
	tlbp
	tlb_probe_hazard
	mfc0	k1, CP0_INDEX
	.set	pop
	bltz	k1, handle_ri	/* slow path */
	/* fall thru */
	END(handle_ri_rdhwr_tlbp)

	LEAF(handle_ri_rdhwr)
	.set	push
	.set	noat
	.set	noreorder
	/* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
	/* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
	MFC0	k1, CP0_EPC
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
	and	k0, k1, 1
	beqz	k0, 1f
	 xor	k1, k0
	lhu	k0, (k1)
	lhu	k1, 2(k1)
	ins	k1, k0, 16, 16
	lui	k0, 0x007d
	b	docheck
	 ori	k0, 0x6b3c
1:
	lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#else
	andi	k0, k1, 1
	bnez	k0, handle_ri
	 lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#endif
	.set	reorder
docheck:
	bne	k0, k1, handle_ri	/* if not ours */

isrdhwr:
	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
	get_saved_sp	/* k1 := current_thread_info */
	.set	noreorder
	MFC0	k0, CP0_EPC
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	LONG_ADDIU	k0, 4
	jr	k0
	 rfe
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	LONG_ADDIU	k0, 4		/* stall on $k0 */
#else
	.set	at=v1
	LONG_ADDIU	k0, 4
	.set	noat
#endif
	MTC0	k0, CP0_EPC
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	.set	arch=r4000
	eret
	.set	mips0
#endif
	.set	pop
	END(handle_ri_rdhwr)

#ifdef CONFIG_64BIT
/* A temporary overflow handler used by check_daddi(). */

	__INIT

	BUILD_HANDLER daddi_ov daddi_ov none silent	/* #12 */
#endif