/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2002, 2007  Maciej W. Rozycki
 * Copyright (C) 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/war.h>
#include <asm/thread_info.h>

	__INIT

/*
 * General exception vector for all other CPUs.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_generic, 0, sp)
	.set	push
	.set	noat
#if R5432_CP0_INTERRUPT_WAR
	mfc0	k0, CP0_INDEX
#endif
	mfc0	k1, CP0_CAUSE
	andi	k1, k1, 0x7c
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1
#endif
	PTR_L	k0, exception_handlers(k1)
	jr	k0
	.set	pop
	END(except_vec3_generic)

/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this, it has to be at most 256 (as a special
 * exception) bytes to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	arch=r4000
	.set	noat
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2
	andi	k1, k1, 0x7c
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced
	li	k0, 14<<2
	beq	k1, k0, handle_vcei
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)
	jr	k0

	/*
	 * Big trouble: we may now have two dirty primary cache lines for
	 * the same physical address.  We can safely invalidate the line
	 * pointed to by c0_badvaddr because after return from this
	 * exception handler the load / store will be re-executed.
	 */
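	/*
	 * For reference, a rough C sketch of the VCED fixup below; the
	 * cache helper names are hypothetical, since the real code must
	 * work in raw k0/k1 registers with no kernel stack available:
	 *
	 *	unsigned long addr = read_c0_badvaddr() & ~3UL;
	 *
	 *	write_c0_taglo(0);			// invalid tag
	 *	index_store_tag_dcache(addr);		// drop the primary line
	 *	hit_writeback_inv_scache(addr);		// flush the SD line
	 *	vced_count++;				// /proc statistics
	 *	// eret: the faulting load/store is simply re-executed
	 */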
handle_vced:
	MFC0	k0, CP0_BADVADDR
	li	k1, -4					# Is this ...
	and	k0, k1					# ... really needed?
	mtc0	zero, CP0_TAGLO
	cache	Index_Store_Tag_D, (k0)
	cache	Hit_Writeback_Inv_SD, (k0)
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vced_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)		# also cleans pi
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vcei_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret
	.set	pop
	END(except_vec3_r4000)

	__FINIT

	.align	5				/* 32 byte rollback region */
LEAF(__r4k_wait)
	.set	push
	.set	noreorder
	/* start of rollback region */
	LONG_L	t0, TI_FLAGS($28)
	nop
	andi	t0, _TIF_NEED_RESCHED
	bnez	t0, 1f
	nop
	nop
	nop
#ifdef CONFIG_CPU_MICROMIPS
	nop
	nop
	nop
	nop
#endif
	.set	MIPS_ISA_ARCH_LEVEL_RAW
	wait
	/* end of rollback region (the region size must be a power of two) */
1:
	jr	ra
	nop
	.set	pop
	END(__r4k_wait)

	.macro	BUILD_ROLLBACK_PROLOGUE handler
	FEXPORT(rollback_\handler)
	.set	push
	.set	noat
	MFC0	k0, CP0_EPC
	PTR_LA	k1, __r4k_wait
	ori	k0, 0x1f	/* 32 byte rollback region */
	xori	k0, 0x1f
	bne	k0, k1, \handler
	MTC0	k0, CP0_EPC
	.set	pop
	.endm

	.align	5
BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Check to see if the interrupted code has just disabled
	 * interrupts and ignore this interrupt for now if so.
	 *
	 * local_irq_disable() disables interrupts and then calls
	 * trace_hardirqs_off() to track the state.  If an interrupt is
	 * taken after interrupts are disabled but before the state is
	 * updated, it will appear to restore_all that it is incorrectly
	 * returning with interrupts disabled.
	 */
	.set	push
	.set	noat
	mfc0	k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	and	k0, ST0_IEP
	bnez	k0, 1f

	mfc0	k0, CP0_EPC
	.set	noreorder
	j	k0
	rfe
#else
	and	k0, ST0_IE
	bnez	k0, 1f

	eret
#endif
1:
	.set	pop
#endif
	SAVE_ALL
	CLI
	TRACE_IRQS_OFF

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp				# Preserve the sp
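	/*
	 * What the address arithmetic below computes, as a C sketch
	 * (irq_stack is the real per-CPU array from arch/mips/kernel/irq.c;
	 * treating sp and base as unsigned long for illustration):
	 *
	 *	unsigned long base = irq_stack[smp_processor_id()];
	 *
	 *	if ((sp & ~(_THREAD_SIZE - 1)) != base)	// not on it yet?
	 *		sp = base + _IRQ_STACK_SIZE;	// top of IRQ stack
	 *
	 * On 64-bit the %highest/%higher/%hi/%lo sequence assembles the
	 * address of irq_stack 16 bits at a time, since no single
	 * immediate can hold it.
	 */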
	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_SIZE
	PTR_ADD	sp, t0, t1

2:
	jal	plat_irq_dispatch

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(handle_int)

	__INIT

/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces the
 * interrupt processing overhead.  The jump instruction will be replaced
 * at initialization time.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:	j	1b			/* Dummy, will be replaced */
	END(except_vec4)

/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_ejtag_debug)

	__FINIT

/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler.
 */
BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
	SAVE_SOME
	SAVE_AT
	.set	push
	.set	noreorder
	PTR_LA	v1, except_vec_vi_handler
FEXPORT(except_vec_vi_lui)
	lui	v0, 0		/* Patched */
	jr	v1
FEXPORT(except_vec_vi_ori)
	ori	v0, 0		/* Patched */
	.set	pop
	END(except_vec_vi)
EXPORT(except_vec_vi_end)
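/*
 * How the prototype above gets used, as a C sketch: set_vi_srs_handler()
 * in traps.c copies it into the vector slot and rewrites the two "Patched"
 * immediates so that v0 carries the handler address into the common code.
 * patch_lui/patch_ori are illustrative names, not real functions:
 *
 *	memcpy(ebase + VECTORSPACING * n, except_vec_vi, vec_len);
 *	patch_lui(slot_vi_lui, (unsigned long)handler >> 16);	 // hi half
 *	patch_ori(slot_vi_ori, (unsigned long)handler & 0xffff); // lo half
 */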
/*
 * Common Vectored Interrupt code.
 * Complete the register saves and invoke the handler which is passed in $v0.
 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
	CLI
#ifdef CONFIG_TRACE_IRQFLAGS
	move	s0, v0
	TRACE_IRQS_OFF
	move	v0, s0
#endif

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp				# Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_SIZE
	PTR_ADD	sp, t0, t1

2:
	jalr	v0

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
	END(except_vec_vi_handler)

/*
 * EJTAG debug exception handler.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE
	mfc0	k0, CP0_DEBUG

	sll	k0, k0, 30	# Check for SDBBP.
	bgez	k0, ejtag_return

	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)
	SAVE_ALL
	move	a0, sp
	jal	ejtag_exception_handler
	RESTORE_ALL
	PTR_LA	k0, ejtag_debug_buffer
	LONG_L	k1, 0(k0)

ejtag_return:
	MFC0	k0, CP0_DESAVE
	.set	mips32
	deret
	.set	pop
	END(ejtag_debug_handler)

/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler.
 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE
	.previous

	__INIT

/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
	j	nmi_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_nmi)

	__FINIT

NESTED(nmi_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	/*
	 * Clear ERL - restore segment mapping
	 * Clear BEV - required for page fault exception handler to work
	 */
	mfc0	k0, CP0_STATUS
	ori	k0, k0, ST0_EXL
	li	k1, ~(ST0_BEV | ST0_ERL)
	and	k0, k0, k1
	mtc0	k0, CP0_STATUS
	_ehb
	SAVE_ALL
	move	a0, sp
	jal	nmi_exception_handler
	/* nmi_exception_handler never returns */
	.set	pop
	END(nmi_handler)

	.macro	__build_clear_none
	.endm

	.macro	__build_clear_sti
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__build_clear_cli
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_fpe
	.set	push
	/* gas fails to assemble cfc1 for some archs (octeon). */
	.set	mips1
	SET_HARDFLOAT
	cfc1	a1, fcr31
	.set	pop
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_msa_fpe
	_cfcmsa	a1, MSA_CSR
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_ade
	MFC0	t0, CP0_BADVADDR
	PTR_S	t0, PT_BVADDR(sp)
	KMODE
	.endm

	.macro	__BUILD_silent exception
	.endm

	/* Gas tries to parse the PRINT argument as a string containing
	   string escapes and emits bogus warnings if it thinks it
	   recognizes an unknown escape code.  So make the arguments
	   start with an n and gas will believe \n is ok ... */
	.macro	__BUILD_verbose nexception
	LONG_L	a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
	PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
	PRINT("Got \nexception at %016lx\012")
#endif
	.endm

	.macro	__BUILD_count exception
	LONG_L	t0, exception_count_\exception
	LONG_ADDIU	t0, 1
	LONG_S	t0, exception_count_\exception
	.comm	exception_count_\exception, 8, 8
	.endm

	.macro	__BUILD_HANDLER exception handler clear verbose ext
	.align	5
	NESTED(handle_\exception, PT_SIZE, sp)
	.set	noat
	SAVE_ALL
	FEXPORT(handle_\exception\ext)
	__build_clear_\clear
	.set	at
	__BUILD_\verbose \exception
	move	a0, sp
	PTR_LA	ra, ret_from_exception
	j	do_\handler
	END(handle_\exception)
	.endm

	.macro	BUILD_HANDLER exception handler clear verbose
	__BUILD_HANDLER \exception \handler \clear \verbose _int
	.endm
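	/*
	 * What one BUILD_HANDLER line below amounts to, as a C-level
	 * sketch (taking BUILD_HANDLER ri ri sti silent as the example;
	 * the pseudo-calls stand in for the assembler macros):
	 *
	 *	void handle_ri(void)
	 *	{
	 *		SAVE_ALL();		// build struct pt_regs on sp
	 *	handle_ri_int:			// FEXPORT'ed "_int" entry
	 *		local_irq_enable();	// the "sti" clear action
	 *		do_ri(regs);		// C handler in traps.c
	 *		ret_from_exception();	// via the preloaded ra
	 *	}
	 *
	 * "silent" means no __BUILD_verbose printout is emitted.
	 */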
	BUILD_HANDLER adel ade ade silent		/* #4  */
	BUILD_HANDLER ades ade ade silent		/* #5  */
	BUILD_HANDLER ibe be cli silent			/* #6  */
	BUILD_HANDLER dbe be cli silent			/* #7  */
	BUILD_HANDLER bp bp sti silent			/* #9  */
	BUILD_HANDLER ri ri sti silent			/* #10 */
	BUILD_HANDLER cpu cpu sti silent		/* #11 */
	BUILD_HANDLER ov ov sti silent			/* #12 */
	BUILD_HANDLER tr tr sti silent			/* #13 */
	BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent	/* #14 */
	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
	BUILD_HANDLER ftlb ftlb none silent		/* #16 */
	BUILD_HANDLER msa msa sti silent		/* #21 */
	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
#ifdef CONFIG_HARDWARE_WATCHPOINTS
	/*
	 * For watch, interrupts will be enabled after the watch
	 * registers are read.
	 */
	BUILD_HANDLER watch watch cli silent		/* #23 */
#else
	BUILD_HANDLER watch watch sti verbose		/* #23 */
#endif
	BUILD_HANDLER mcheck mcheck cli verbose		/* #24 */
	BUILD_HANDLER mt mt sti silent			/* #25 */
	BUILD_HANDLER dsp dsp sti silent		/* #26 */
	BUILD_HANDLER reserved reserved sti verbose	/* others */

	.align	5
	LEAF(handle_ri_rdhwr_vivt)
	.set	push
	.set	noat
	.set	noreorder
	/* check if TLB contains an entry for EPC */
	MFC0	k1, CP0_ENTRYHI
	andi	k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX
	MFC0	k0, CP0_EPC
	PTR_SRL	k0, _PAGE_SHIFT + 1
	PTR_SLL	k0, _PAGE_SHIFT + 1
	or	k1, k0
	MTC0	k1, CP0_ENTRYHI
	mtc0_tlbw_hazard
	tlbp
	tlb_probe_hazard
	mfc0	k1, CP0_INDEX
	.set	pop
	bltz	k1, handle_ri	/* slow path */
	/* fall thru */
	END(handle_ri_rdhwr_vivt)

	LEAF(handle_ri_rdhwr)
	.set	push
	.set	noat
	.set	noreorder
	/* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
	/* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
	MFC0	k1, CP0_EPC
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
	and	k0, k1, 1
	beqz	k0, 1f
	xor	k1, k0
	lhu	k0, (k1)
	lhu	k1, 2(k1)
	ins	k1, k0, 16, 16
	lui	k0, 0x007d
	b	docheck
	ori	k0, 0x6b3c
1:
	lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#else
	andi	k0, k1, 1
	bnez	k0, handle_ri
	lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#endif
	.set	reorder
docheck:
	bne	k0, k1, handle_ri	/* if not ours */

isrdhwr:
	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
	get_saved_sp	/* k1 := current_thread_info */
	.set	noreorder
	MFC0	k0, CP0_EPC
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	LONG_ADDIU	k0, 4
	jr	k0
	rfe
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	LONG_ADDIU	k0, 4		/* stall on $k0 */
#else
	.set	at=v1
	LONG_ADDIU	k0, 4
	.set	noat
#endif
	MTC0	k0, CP0_EPC
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	.set	arch=r4000
	eret
	.set	mips0
#endif
	.set	pop
	END(handle_ri_rdhwr)

#ifdef CONFIG_64BIT
/* A temporary overflow handler used by check_daddi(). */

	__INIT

	BUILD_HANDLER daddi_ov daddi_ov none silent	/* #12 */
#endif
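/*
 * A sketch of how check_daddi() (arch/mips/kernel/cpu-bugs64.c) uses the
 * temporary handler built above: it installs handle_daddi_ov as the
 * overflow vector, executes a daddi that is architecturally required to
 * overflow, and checks whether the exception was actually taken; CPUs
 * with the DADDI erratum fail to raise it.  Roughly:
 *
 *	handler = set_except_vector(EXCCODE_OV, handle_daddi_ov);
 *	asm volatile("daddi %0, %1, %2" : ...);	// must overflow
 *	set_except_vector(EXCCODE_OV, handler);	// restore old vector
 *	// result decides whether the DADDI workaround is needed
 */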