/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2002, 2007 Maciej W. Rozycki
 * Copyright (C) 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/war.h>
#include <asm/thread_info.h>

#ifdef CONFIG_MIPS_MT_SMTC
#define PANIC_PIC(msg)					\
		.set	push;				\
		.set	nomicromips;			\
		.set	reorder;			\
		PTR_LA	a0, 8f;				\
		.set	noat;				\
		PTR_LA	AT, panic;			\
		jr	AT;				\
9:		b	9b;				\
		.set	pop;				\
		TEXT(msg)
#endif

	__INIT

/*
 * General exception vector for all other CPUs.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_generic, 0, sp)
	.set	push
	.set	noat
#if R5432_CP0_INTERRUPT_WAR
	mfc0	k0, CP0_INDEX
#endif
	mfc0	k1, CP0_CAUSE
	andi	k1, k1, 0x7c		# ExcCode << 2: byte offset into handler table
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1		# handler pointers are 8 bytes on 64-bit
#endif
	PTR_L	k0, exception_handlers(k1)
	jr	k0
	.set	pop
	END(except_vec3_generic)

/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this, it has to be at most 256 (as a special
 * exception) bytes to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	arch=r4000
	.set	noat
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2		# ExcCode 31: VCED
	andi	k1, k1, 0x7c
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced
	 li	k0, 14<<2		# ExcCode 14: VCEI (delay slot)
	beq	k1, k0, handle_vcei
#ifdef CONFIG_64BIT
	 dsll	k1, k1, 1
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)
	jr	k0

	/*
	 * Big shit, we now may have two dirty primary cache lines for the
	 * same physical address.  We can safely invalidate the line pointed
	 * to by c0_badvaddr because after return from this exception handler
	 * the load / store will be re-executed.
	 */
handle_vced:
	MFC0	k0, CP0_BADVADDR
	li	k1, -4			# Is this ...
	and	k0, k1			# ... really needed?
	mtc0	zero, CP0_TAGLO
	cache	Index_Store_Tag_D, (k0)
	cache	Hit_Writeback_Inv_SD, (k0)
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vced_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)	# also cleans pi
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vcei_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret
	.set	pop
	END(except_vec3_r4000)

	__FINIT

	.align	5			/* 32 byte rollback region */
LEAF(__r4k_wait)
	.set	push
	.set	noreorder
	/* start of rollback region */
	LONG_L	t0, TI_FLAGS($28)
	nop
	andi	t0, _TIF_NEED_RESCHED
	bnez	t0, 1f
	 nop
	nop
	nop
#ifdef CONFIG_CPU_MICROMIPS
	nop
	nop
	nop
	nop
#endif
	.set	arch=r4000
	wait
	/* end of rollback region (the region size must be a power of two) */
1:
	jr	ra
	nop
	.set	pop
	END(__r4k_wait)

	.macro	BUILD_ROLLBACK_PROLOGUE handler
	FEXPORT(rollback_\handler)
	.set	push
	.set	noat
	MFC0	k0, CP0_EPC
	PTR_LA	k1, __r4k_wait
	ori	k0, 0x1f		/* 32 byte rollback region */
	xori	k0, 0x1f
	bne	k0, k1, 9f
	 MTC0	k0, CP0_EPC
9:
	.set	pop
	.endm

	.align	5
BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Check to see if the interrupted code has just disabled
	 * interrupts and ignore this interrupt for now if so.
	 *
	 * local_irq_disable() disables interrupts and then calls
	 * trace_hardirqs_off() to track the state.  If an interrupt is taken
	 * after interrupts are disabled but before the state is updated,
	 * it will appear to restore_all that it is incorrectly returning with
	 * interrupts disabled.
	 */
	.set	push
	.set	noat
	mfc0	k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	and	k0, ST0_IEP
	bnez	k0, 1f

	mfc0	k0, CP0_EPC
	.set	noreorder
	j	k0
	 rfe
#else
	and	k0, ST0_IE
	bnez	k0, 1f

	eret
#endif
1:
	.set	pop
#endif
	SAVE_ALL
	CLI
	TRACE_IRQS_OFF

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)
	PTR_LA	ra, ret_from_irq
	PTR_LA	v0, plat_irq_dispatch
	jr	v0
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(handle_int)

	__INIT

/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces the
 * interrupt processing overhead.  The jump instruction will be replaced
 * at initialization time.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:	j	1b			/* Dummy, will be replaced */
	END(except_vec4)

/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_ejtag_debug)

	__FINIT

/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler.
 */
BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
	SAVE_SOME
	SAVE_AT
	.set	push
	.set	noreorder
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * To keep from blindly blocking *all* interrupts
	 * during service by SMTC kernel, we also want to
	 * pass the IM value to be cleared.
	 */
FEXPORT(except_vec_vi_mori)
	ori	a0, $0, 0
#endif /* CONFIG_MIPS_MT_SMTC */
	PTR_LA	v1, except_vec_vi_handler
FEXPORT(except_vec_vi_lui)
	lui	v0, 0			/* Patched */
	jr	v1
FEXPORT(except_vec_vi_ori)
	 ori	v0, 0			/* Patched */
	.set	pop
	END(except_vec_vi)
EXPORT(except_vec_vi_end)

/*
 * Common Vectored Interrupt code
 * Complete the register saves and invoke the handler which is passed in $v0.
 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC has an interesting problem that interrupts are level-triggered,
	 * and the CLI macro will clear EXL, potentially causing a duplicate
	 * interrupt service invocation.  So we need to clear the associated
	 * IM bit of Status prior to doing CLI, and restore it after the
	 * service routine has been invoked - we must assume that the
	 * service routine will have cleared the state, and any active
	 * level represents a new or otherwise unserviced event...
	 */
	mfc0	t1, CP0_STATUS
	and	t0, a0, t1
#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
	mfc0	t2, CP0_TCCONTEXT
	or	t2, t0, t2
	mtc0	t2, CP0_TCCONTEXT
#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
	xor	t1, t1, t0
	mtc0	t1, CP0_STATUS
	_ehb
#endif /* CONFIG_MIPS_MT_SMTC */
	CLI
#ifdef CONFIG_TRACE_IRQFLAGS
	move	s0, v0
#ifdef CONFIG_MIPS_MT_SMTC
	move	s1, a0
#endif
	TRACE_IRQS_OFF
#ifdef CONFIG_MIPS_MT_SMTC
	move	a0, s1
#endif
	move	v0, s0
#endif

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)
	PTR_LA	ra, ret_from_irq
	jr	v0
	END(except_vec_vi_handler)

/*
 * EJTAG debug exception handler.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE
	mfc0	k0, CP0_DEBUG

	sll	k0, k0, 30		# Check for SDBBP.
	bgez	k0, ejtag_return

	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)
	SAVE_ALL
	move	a0, sp
	jal	ejtag_exception_handler
	RESTORE_ALL
	PTR_LA	k0, ejtag_debug_buffer
	LONG_L	k1, 0(k0)

ejtag_return:
	MFC0	k0, CP0_DESAVE
	.set	mips32
	deret
	.set	pop
	END(ejtag_debug_handler)

/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler.
 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE
	.previous

	__INIT

/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
	j	nmi_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_nmi)

	__FINIT

NESTED(nmi_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	/*
	 * Clear ERL - restore segment mapping
	 * Clear BEV - required for page fault exception handler to work
	 */
	mfc0	k0, CP0_STATUS
	ori	k0, k0, ST0_EXL
	li	k1, ~(ST0_BEV | ST0_ERL)
	and	k0, k0, k1
	mtc0	k0, CP0_STATUS
	_ehb
	SAVE_ALL
	move	a0, sp
	jal	nmi_exception_handler
	/* nmi_exception_handler never returns */
	.set	pop
	END(nmi_handler)

	.macro	__build_clear_none
	.endm

	.macro	__build_clear_sti
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__build_clear_cli
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_fpe
	.set	push
	/* gas fails to assemble cfc1 for some archs (octeon). */
	.set	mips1
	cfc1	a1, fcr31
	li	a2, ~(0x3f << 12)
	and	a2, a1
	ctc1	a2, fcr31
	.set	pop
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__build_clear_ade
	MFC0	t0, CP0_BADVADDR
	PTR_S	t0, PT_BVADDR(sp)
	KMODE
	.endm

	.macro	__BUILD_silent exception
	.endm

	/* Gas tries to parse the PRINT argument as a string containing
	   string escapes and emits bogus warnings if it believes it
	   recognizes an unknown escape code.  So make the arguments
	   start with an n and gas will believe \n is ok ... */
	.macro	__BUILD_verbose nexception
	LONG_L	a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
	PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
	PRINT("Got \nexception at %016lx\012")
#endif
	.endm

	.macro	__BUILD_count exception
	LONG_L	t0, exception_count_\exception
	LONG_ADDIU	t0, 1
	LONG_S	t0, exception_count_\exception
	.comm	exception_count_\exception, 8, 8
	.endm

	.macro	__BUILD_HANDLER exception handler clear verbose ext
	.align	5
	NESTED(handle_\exception, PT_SIZE, sp)
	.set	noat
	SAVE_ALL
	FEXPORT(handle_\exception\ext)
	__BUILD_clear_\clear
	.set	at
	__BUILD_\verbose \exception
	move	a0, sp
	PTR_LA	ra, ret_from_exception
	j	do_\handler
	END(handle_\exception)
	.endm

	.macro	BUILD_HANDLER exception handler clear verbose
	__BUILD_HANDLER \exception \handler \clear \verbose _int
	.endm

	BUILD_HANDLER adel ade ade silent		/* #4  */
	BUILD_HANDLER ades ade ade silent		/* #5  */
	BUILD_HANDLER ibe be cli silent			/* #6  */
	BUILD_HANDLER dbe be cli silent			/* #7  */
	BUILD_HANDLER bp bp sti silent			/* #9  */
	BUILD_HANDLER ri ri sti silent			/* #10 */
	BUILD_HANDLER cpu cpu sti silent		/* #11 */
	BUILD_HANDLER ov ov sti silent			/* #12 */
	BUILD_HANDLER tr tr sti silent			/* #13 */
	BUILD_HANDLER msa_fpe msa_fpe sti silent	/* #14 */
	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
	BUILD_HANDLER ftlb ftlb none silent		/* #16 */
	BUILD_HANDLER msa msa sti silent		/* #21 */
	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
#ifdef CONFIG_HARDWARE_WATCHPOINTS
	/*
	 * For watch, interrupts will be enabled after the watch
	 * registers are read.
	 */
	BUILD_HANDLER watch watch cli silent		/* #23 */
#else
	BUILD_HANDLER watch watch sti verbose		/* #23 */
#endif
	BUILD_HANDLER mcheck mcheck cli verbose		/* #24 */
	BUILD_HANDLER mt mt sti silent			/* #25 */
	BUILD_HANDLER dsp dsp sti silent		/* #26 */
	BUILD_HANDLER reserved reserved sti verbose	/* others */

	.align	5
	LEAF(handle_ri_rdhwr_vivt)
#ifdef CONFIG_MIPS_MT_SMTC
	PANIC_PIC("handle_ri_rdhwr_vivt called")
#else
	.set	push
	.set	noat
	.set	noreorder
	/* check if TLB contains an entry for EPC */
	MFC0	k1, CP0_ENTRYHI
	andi	k1, 0xff			/* ASID_MASK */
	MFC0	k0, CP0_EPC
	PTR_SRL	k0, _PAGE_SHIFT + 1
	PTR_SLL	k0, _PAGE_SHIFT + 1
	or	k1, k0
	MTC0	k1, CP0_ENTRYHI
	mtc0_tlbw_hazard
	tlbp
	tlb_probe_hazard
	mfc0	k1, CP0_INDEX
	.set	pop
	bltz	k1, handle_ri			/* slow path */
	/* fall thru */
#endif
	END(handle_ri_rdhwr_vivt)

	LEAF(handle_ri_rdhwr)
	.set	push
	.set	noat
	.set	noreorder
	/* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
	/* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
	MFC0	k1, CP0_EPC
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
	and	k0, k1, 1
	beqz	k0, 1f
	 xor	k1, k0
	lhu	k0, (k1)
	lhu	k1, 2(k1)
	ins	k1, k0, 16, 16
	lui	k0, 0x007d
	b	docheck
	 ori	k0, 0x6b3c
1:
	lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#else
	andi	k0, k1, 1
	bnez	k0, handle_ri
	 lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#endif
	.set	reorder
docheck:
	bne	k0, k1, handle_ri		/* if not ours */

isrdhwr:
	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
	get_saved_sp	/* k1 := current_thread_info */
	.set	noreorder
	MFC0	k0, CP0_EPC
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	LONG_ADDIU	k0, 4
	jr	k0
	 rfe
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	LONG_ADDIU	k0, 4			/* stall on $k0 */
#else
	.set	at=v1
	LONG_ADDIU	k0, 4
	.set	noat
#endif
	MTC0	k0, CP0_EPC
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	.set	arch=r4000
	eret
	.set	mips0
#endif
	.set	pop
	END(handle_ri_rdhwr)

#ifdef CONFIG_64BIT
/* A temporary overflow handler used by check_daddi(). */

	__INIT

	BUILD_HANDLER daddi_ov daddi_ov none silent	/* #12 */
#endif