/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2002, 2007 Maciej W. Rozycki
 * Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved.
 *
 * Generic low-level exception vectors and handlers for MIPS.
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/war.h>
#include <asm/thread_info.h>

#ifdef CONFIG_MIPS_MT_SMTC
/*
 * Position-independent panic: pass a message (emitted by TEXT(msg),
 * which presumably defines local label 8 at the string -- defined
 * outside this file, confirm in asm.h) and tail-jump to panic() via
 * AT.  The "9: b 9b" self-loop catches any unexpected return.
 */
#define PANIC_PIC(msg)					\
		.set	push;				\
		.set	nomicromips;			\
		.set	reorder;			\
		PTR_LA	a0,8f;				\
		.set	noat;				\
		PTR_LA	AT, panic;			\
		jr	AT;				\
9:		b	9b;				\
		.set	pop;				\
		TEXT(msg)
#endif

	__INIT

/*
 * General exception vector for all other CPUs.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_generic, 0, sp)
	.set	push
	.set	noat
#if R5432_CP0_INTERRUPT_WAR
	mfc0	k0, CP0_INDEX		# workaround read; guarded by R5432 WAR
#endif
	mfc0	k1, CP0_CAUSE
	andi	k1, k1, 0x7c		# isolate ExcCode (Cause bits 6..2), already << 2
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1		# scale index for 8-byte pointers
#endif
	PTR_L	k0, exception_handlers(k1)	# fetch per-ExcCode handler
	jr	k0			# dispatch; handler owns k0/k1 from here
	.set	pop
	END(except_vec3_generic)

/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this, it has to be at most 256 (as a special
 * exception) bytes to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	mips3
	.set	noat
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2		# ExcCode 31 (VCED), pre-scaled like the mask below
	andi	k1, k1, 0x7c		# isolate ExcCode (Cause bits 6..2), already << 2
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced
	 li	k0, 14<<2		# delay slot: ExcCode 14 (VCEI)
	beq	k1, k0, handle_vcei
#ifdef CONFIG_64BIT
	 dsll	k1, k1, 1		# scale index for 8-byte pointers
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)	# neither VCED nor VCEI: generic dispatch
	jr	k0

	/*
	 * Big shit, we now may have two dirty primary cache lines for the same
	 * physical address. We can safely invalidate the line pointed to by
	 * c0_badvaddr because after return from this exception handler the
	 * load / store will be re-executed.
	 */
handle_vced:
	MFC0	k0, CP0_BADVADDR
	li	k1, -4					# Is this ...
	and	k0, k1					# ... really needed?
	mtc0	zero, CP0_TAGLO			# zero tag -> Index_Store_Tag invalidates
	cache	Index_Store_Tag_D, (k0)		# kill the primary D-cache line
	cache	Hit_Writeback_Inv_SD, (k0)	# write back + invalidate secondary line
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vced_count			# bump VCED statistics counter
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)	# also cleans pi
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vcei_count			# bump VCEI statistics counter
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret
	.set	pop
	END(except_vec3_r4000)

	__FINIT

	.align	5	/* 32 byte rollback region */
/*
 * Idle loop: if TIF_NEED_RESCHED is already set, skip the wait.
 * Everything from the label to the "end of rollback region" marker
 * must stay inside one 32-byte aligned block -- interrupt entry
 * (BUILD_ROLLBACK_PROLOGUE below) rewinds EPC to the region start.
 */
LEAF(__r4k_wait)
	.set	push
	.set	noreorder
	/* start of rollback region */
	LONG_L	t0, TI_FLAGS($28)	# $28 = current_thread_info
	nop
	andi	t0, _TIF_NEED_RESCHED
	bnez	t0, 1f			# reschedule pending: do not sleep
	nop
	nop
	nop
#ifdef CONFIG_CPU_MICROMIPS
	nop				# extra padding: microMIPS insns are shorter
	nop
	nop
	nop
#endif
	.set	mips3
	wait
	/* end of rollback region (the region size must be power of two) */
1:
	jr	ra
	nop
	.set	pop
	END(__r4k_wait)

/*
 * Emitted in front of an interrupt handler: if the interrupted EPC lies
 * inside __r4k_wait's 32-byte rollback region, rewind EPC to the region
 * start so the need-resched test is re-run after the interrupt.
 */
	.macro	BUILD_ROLLBACK_PROLOGUE handler
	FEXPORT(rollback_\handler)
	.set	push
	.set	noat
	MFC0	k0, CP0_EPC
	PTR_LA	k1, __r4k_wait
	ori	k0, 0x1f	/* 32 byte rollback region */
	xori	k0, 0x1f	# k0 = EPC rounded down to 32-byte boundary
	bne	k0, k1, 9f	# not inside __r4k_wait: leave EPC alone
	MTC0	k0, CP0_EPC	# rewind EPC to start of rollback region
9:
	.set	pop
	.endm

	.align	5
BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Check to see if the interrupted code has just disabled
	 * interrupts and ignore this interrupt for now if so.
	 *
	 * local_irq_disable() disables interrupts and then calls
	 * trace_hardirqs_off() to track the state. If an interrupt is taken
	 * after interrupts are disabled but before the state is updated
	 * it will appear to restore_all that it is incorrectly returning with
	 * interrupts disabled
	 */
	.set	push
	.set	noat
	mfc0	k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	and	k0, ST0_IEP		# R3000-style: "previous" IE bit
	bnez	k0, 1f			# IRQs were enabled: service normally

	mfc0	k0, CP0_EPC
	.set	noreorder
	j	k0			# return to interrupted code ...
	rfe				# ... delay slot: restore previous status
#else
	and	k0, ST0_IE
	bnez	k0, 1f			# IRQs were enabled: service normally

	eret				# ignore: interrupted code just disabled IRQs
#endif
1:
	.set	pop
#endif
	SAVE_ALL
	CLI
	TRACE_IRQS_OFF

	LONG_L	s0, TI_REGS($28)	# save previous pt_regs pointer ...
	LONG_S	sp, TI_REGS($28)	# ... and publish the new one
	PTR_LA	ra, ret_from_irq	# dispatcher "returns" into ret_from_irq
	PTR_LA	v0, plat_irq_dispatch
	jr	v0
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(handle_int)

	__INIT

/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces the
 * interrupt processing overhead. The jump instruction will be replaced
 * at the initialization time.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:	j	1b			/* Dummy, will be replaced */
	END(except_vec4)

/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_ejtag_debug)

	__FINIT

/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler
 */
BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
	SAVE_SOME
	SAVE_AT
	.set	push
	.set	noreorder
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * To keep from blindly blocking *all* interrupts
	 * during service by SMTC kernel, we also want to
	 * pass the IM value to be cleared.
	 */
FEXPORT(except_vec_vi_mori)
	ori	a0, $0, 0		# patched at init: a0 = IM bits to clear
#endif /* CONFIG_MIPS_MT_SMTC */
	PTR_LA	v1, except_vec_vi_handler
FEXPORT(except_vec_vi_lui)
	lui	v0, 0		/* Patched */
	jr	v1		# common code completes the save and calls v0
FEXPORT(except_vec_vi_ori)
	ori	v0, 0		/* Patched */
	.set	pop
	END(except_vec_vi)
EXPORT(except_vec_vi_end)

/*
 * Common Vectored Interrupt code
 * Complete the register saves and invoke the handler which is passed in $v0
 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC has an interesting problem that interrupts are level-triggered,
	 * and the CLI macro will clear EXL, potentially causing a duplicate
	 * interrupt service invocation. So we need to clear the associated
	 * IM bit of Status prior to doing CLI, and restore it after the
	 * service routine has been invoked - we must assume that the
	 * service routine will have cleared the state, and any active
	 * level represents a new or otherwised unserviced event...
	 */
	mfc0	t1, CP0_STATUS
	and	t0, a0, t1		# t0 = IM bits (from a0) currently set
#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
	mfc0	t2, CP0_TCCONTEXT
	or	t2, t0, t2		# remember cleared IM bits in TCContext
	mtc0	t2, CP0_TCCONTEXT
#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
	xor	t1, t1, t0		# clear those IM bits in Status
	mtc0	t1, CP0_STATUS
	_ehb				# hazard barrier before CLI re-reads Status
#endif /* CONFIG_MIPS_MT_SMTC */
	CLI
#ifdef CONFIG_TRACE_IRQFLAGS
	move	s0, v0			# TRACE_IRQS_OFF may clobber v0 (handler ptr)
#ifdef CONFIG_MIPS_MT_SMTC
	move	s1, a0			# ... and a0 (IM bits), preserve both
#endif
	TRACE_IRQS_OFF
#ifdef CONFIG_MIPS_MT_SMTC
	move	a0, s1
#endif
	move	v0, s0
#endif

	LONG_L	s0, TI_REGS($28)	# save previous pt_regs pointer ...
	LONG_S	sp, TI_REGS($28)	# ... and publish the new one
	PTR_LA	ra, ret_from_irq	# handler "returns" into ret_from_irq
	jr	v0			# v0 was patched in by except_vec_vi
	END(except_vec_vi_handler)

/*
 * EJTAG debug exception handler.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE		# stash k0 in the dedicated debug save reg
	mfc0	k0, CP0_DEBUG

	sll	k0, k0, 30	# Check for SDBBP.
	bgez	k0, ejtag_return	# Debug bit 1 clear: not a SDBBP, just deret

	PTR_LA	k0, ejtag_debug_buffer	# stash k1 in the static one-slot buffer
	LONG_S	k1, 0(k0)		# NOTE(review): single shared slot,
					# presumably not reentrant -- confirm
	SAVE_ALL
	move	a0, sp
	jal	ejtag_exception_handler
	RESTORE_ALL
	PTR_LA	k0, ejtag_debug_buffer
	LONG_L	k1, 0(k0)		# recover k1

ejtag_return:
	MFC0	k0, CP0_DESAVE		# recover k0
	.set	mips32
	deret				# return from debug mode
	.set	pop
	END(ejtag_debug_handler)

/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler.
 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE
	.previous

	__INIT

/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do a
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
	j	nmi_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_nmi)

	__FINIT

/* Save state, hand a pt_regs pointer to the C NMI handler, and return. */
NESTED(nmi_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	SAVE_ALL
	move	a0, sp
	jal	nmi_exception_handler
	RESTORE_ALL
	.set	mips3
	eret
	.set	pop
	END(nmi_handler)

/*
 * The __build_clear_* macros below are the per-exception preludes
 * selected by the "clear" argument of __BUILD_HANDLER (gas macro names
 * are case-insensitive, hence the __BUILD_clear_\clear spelling there).
 */

	/* No prelude at all. */
	.macro	__build_clear_none
	.endm

	/* Re-enable interrupts before calling the C handler. */
	.macro	__build_clear_sti
	TRACE_IRQS_ON
	STI
	.endm

	/* Keep interrupts disabled for the C handler. */
	.macro	__build_clear_cli
	CLI
	TRACE_IRQS_OFF
	.endm

	/*
	 * FP exception: clear the FCSR cause bits (mask 0x3f << 12) so the
	 * exception does not immediately re-fire, then enable interrupts.
	 */
	.macro	__build_clear_fpe
	.set	push
	/* gas fails to assemble cfc1 for some archs (octeon).*/ \
	.set	mips1
	cfc1	a1, fcr31		# a1 = FCSR, also passed through to handler
	li	a2, ~(0x3f << 12)
	and	a2, a1
	ctc1	a2, fcr31		# write back FCSR with cause bits cleared
	.set	pop
	TRACE_IRQS_ON
	STI
	.endm

	/* Address error: record the faulting address before KMODE. */
	.macro	__build_clear_ade
	MFC0	t0, CP0_BADVADDR
	PTR_S	t0, PT_BVADDR(sp)
	KMODE
	.endm

	/* "silent" verbosity: print nothing. */
	.macro	__BUILD_silent exception
	.endm

	/* Gas tries to parse the PRINT argument as a string containing
	   string escapes and emits bogus warnings if it believes to
	   recognize an unknown escape code. So make the arguments
	   start with an n and gas will believe \n is ok ...
*/ 425 .macro __BUILD_verbose nexception 426 LONG_L a1, PT_EPC(sp) 427#ifdef CONFIG_32BIT 428 PRINT("Got \nexception at %08lx\012") 429#endif 430#ifdef CONFIG_64BIT 431 PRINT("Got \nexception at %016lx\012") 432#endif 433 .endm 434 435 .macro __BUILD_count exception 436 LONG_L t0,exception_count_\exception 437 LONG_ADDIU t0, 1 438 LONG_S t0,exception_count_\exception 439 .comm exception_count\exception, 8, 8 440 .endm 441 442 .macro __BUILD_HANDLER exception handler clear verbose ext 443 .align 5 444 NESTED(handle_\exception, PT_SIZE, sp) 445 .set noat 446 SAVE_ALL 447 FEXPORT(handle_\exception\ext) 448 __BUILD_clear_\clear 449 .set at 450 __BUILD_\verbose \exception 451 move a0, sp 452 PTR_LA ra, ret_from_exception 453 j do_\handler 454 END(handle_\exception) 455 .endm 456 457 .macro BUILD_HANDLER exception handler clear verbose 458 __BUILD_HANDLER \exception \handler \clear \verbose _int 459 .endm 460 461 BUILD_HANDLER adel ade ade silent /* #4 */ 462 BUILD_HANDLER ades ade ade silent /* #5 */ 463 BUILD_HANDLER ibe be cli silent /* #6 */ 464 BUILD_HANDLER dbe be cli silent /* #7 */ 465 BUILD_HANDLER bp bp sti silent /* #9 */ 466 BUILD_HANDLER ri ri sti silent /* #10 */ 467 BUILD_HANDLER cpu cpu sti silent /* #11 */ 468 BUILD_HANDLER ov ov sti silent /* #12 */ 469 BUILD_HANDLER tr tr sti silent /* #13 */ 470 BUILD_HANDLER fpe fpe fpe silent /* #15 */ 471 BUILD_HANDLER mdmx mdmx sti silent /* #22 */ 472#ifdef CONFIG_HARDWARE_WATCHPOINTS 473 /* 474 * For watch, interrupts will be enabled after the watch 475 * registers are read. 
 */
	BUILD_HANDLER watch watch cli silent		/* #23 */
#else
	BUILD_HANDLER watch watch sti verbose		/* #23 */
#endif
	BUILD_HANDLER mcheck mcheck cli verbose		/* #24 */
	BUILD_HANDLER mt mt sti silent			/* #25 */
	BUILD_HANDLER dsp dsp sti silent		/* #26 */
	BUILD_HANDLER reserved reserved sti verbose	/* others */

	.align	5
/*
 * RI fast path for VIVT caches: before reading the faulting instruction
 * (handle_ri_rdhwr loads from the EPC address) make sure the TLB can
 * translate EPC at all; if not, take the full slow path instead of
 * risking a nested TLB fault on k0/k1.
 */
	LEAF(handle_ri_rdhwr_vivt)
#ifdef CONFIG_MIPS_MT_SMTC
	PANIC_PIC("handle_ri_rdhwr_vivt called")
#else
	.set	push
	.set	noat
	.set	noreorder
	/* check if TLB contains a entry for EPC */
	MFC0	k1, CP0_ENTRYHI
	andi	k1, 0xff /* ASID_MASK */	# NOTE(review): hard-coded 8-bit
						# ASID mask -- confirm for this CPU
	MFC0	k0, CP0_EPC
	PTR_SRL	k0, _PAGE_SHIFT + 1		# round EPC down to an even
	PTR_SLL	k0, _PAGE_SHIFT + 1		# page-pair (VPN2) boundary
	or	k1, k0				# EntryHi = VPN2 | current ASID
	MTC0	k1, CP0_ENTRYHI
	mtc0_tlbw_hazard
	tlbp					# probe for a matching TLB entry
	tlb_probe_hazard
	mfc0	k1, CP0_INDEX			# Index < 0 means no match
	.set	pop
	bltz	k1, handle_ri			/* slow path */
	/* fall thru */
#endif
	END(handle_ri_rdhwr_vivt)

/*
 * RI fast path: if the offending instruction is "rdhwr v1,$29"
 * (userland reading the TLS pointer on CPUs without hardware rdhwr),
 * emulate it inline -- v1 := thread_info->tp_value -- and skip the
 * instruction.  Anything else falls through to the generic handle_ri.
 */
	LEAF(handle_ri_rdhwr)
	.set	push
	.set	noat
	.set	noreorder
	/* MIPS32: 0x7c03e83b: rdhwr v1,$29 */
	/* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
	MFC0	k1, CP0_EPC
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
	and	k0, k1, 1		# EPC bit 0 set => microMIPS mode
	beqz	k0, 1f
	xor	k1, k0			# clear the ISA-mode bit to get the address
	lhu	k0, (k1)		# fetch the 32-bit insn as two halfwords
	lhu	k1, 2(k1)
	ins	k1, k0, 16, 16		# k1 = full instruction word
	lui	k0, 0x007d		# k0 = microMIPS rdhwr v1,$29 encoding
	b	docheck
	ori	k0, 0x6b3c		# (delay slot)
1:
	lui	k0, 0x7c03		# k0 = MIPS32 rdhwr v1,$29 encoding
	lw	k1, (k1)		# k1 = faulting instruction
	ori	k0, 0xe83b
#else
	andi	k0, k1, 1		# microMIPS EPC but no microMIPS support:
	bnez	k0, handle_ri		# punt to the generic RI handler
	lui	k0, 0x7c03		# k0 = MIPS32 rdhwr v1,$29 encoding
	lw	k1, (k1)		# (delay slot) k1 = faulting instruction
	ori	k0, 0xe83b
#endif
	.set	reorder
docheck:
	bne	k0, k1, handle_ri	/* if not ours */

isrdhwr:
	/* The insn is rdhwr. No need to check CAUSE.BD here. */
	get_saved_sp	/* k1 := current_thread_info */
	.set	noreorder
	MFC0	k0, CP0_EPC
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	ori	k1, _THREAD_MASK	# round k1 down to the thread_info base
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)	# v1 = TLS pointer, the rdhwr result
	LONG_ADDIU	k0, 4		# skip the emulated instruction
	jr	k0
	rfe				# (delay slot) restore previous status
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	LONG_ADDIU	k0, 4		/* stall on $k0 */
#else
	.set	at=v1			# DADDI workaround expands via AT; v1 is
	LONG_ADDIU	k0, 4		# free here since it is overwritten below
	.set	noat
#endif
	MTC0	k0, CP0_EPC		# EPC past the emulated instruction
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK	# round k1 down to the thread_info base
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)	# v1 = TLS pointer, the rdhwr result
	.set	mips3
	eret
	.set	mips0
#endif
	.set	pop
	END(handle_ri_rdhwr)

#ifdef CONFIG_64BIT
/* A temporary overflow handler used by check_daddi(). */

	__INIT

	BUILD_HANDLER daddi_ov daddi_ov none silent	/* #12 */
#endif