/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2001 MIPS Technologies, Inc.
 * Copyright (C) 2002 Maciej W. Rozycki
 *
 * Low-level exception vectors and handlers for MIPS.  Throughout this
 * file only k0/k1 ($26/$27) may be used before the full register state
 * has been saved (SAVE_ALL / SAVE_SOME) -- they are the registers
 * reserved for kernel exception use.
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/war.h>
#include <asm/page.h>

/*
 * Position-independent panic: load the address of the message (label 8,
 * emitted by TEXT() below) into a0 and jump to panic() via AT.  The
 * "9: b 9b" catches an impossible return from panic().
 */
#define PANIC_PIC(msg)					\
		.set	push;				\
		.set	reorder;			\
		PTR_LA	a0, 8f;				\
		.set	noat;				\
		PTR_LA	AT, panic;			\
		jr	AT;				\
9:		b	9b;				\
		.set	pop;				\
		TEXT(msg)

	__INIT

NESTED(except_vec0_generic, 0, sp)
	PANIC_PIC("Exception vector 0 called")
	END(except_vec0_generic)

NESTED(except_vec1_generic, 0, sp)
	PANIC_PIC("Exception vector 1 called")
	END(except_vec1_generic)

/*
 * General exception vector for all other CPUs.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_generic, 0, sp)
	.set	push
	.set	noat
#if R5432_CP0_INTERRUPT_WAR
	mfc0	k0, CP0_INDEX
#endif
	mfc0	k1, CP0_CAUSE
	andi	k1, k1, 0x7c			# isolate Cause.ExcCode << 2
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1			# scale for 8-byte pointers
#endif
	PTR_L	k0, exception_handlers(k1)	# dispatch through handler table
	jr	k0
	.set	pop
	END(except_vec3_generic)

/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this, it has to be at most 256 (as a special
 * exception) bytes to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	mips3
	.set	noat
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2			# ExcCode 31: VCED (data)
	andi	k1, k1, 0x7c
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced
	 li	k0, 14<<2			# (delay slot) ExcCode 14: VCEI
	beq	k1, k0, handle_vcei
#ifdef CONFIG_64BIT
	 dsll	k1, k1, 1			# (delay slot) scale for 8-byte ptrs
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)
	jr	k0

	/*
	 * Big shit, we now may have two dirty primary cache lines for the same
	 * physical address.  We can safely invalidate the line pointed to by
	 * c0_badvaddr because after return from this exception handler the
	 * load / store will be re-executed.
	 */
handle_vced:
	MFC0	k0, CP0_BADVADDR
	li	k1, -4				# Is this ...
	and	k0, k1				# ... really needed?
	mtc0	zero, CP0_TAGLO
	cache	Index_Store_Tag_D, (k0)		# invalidate primary D-line
	cache	Hit_Writeback_Inv_SD, (k0)	# flush matching secondary line
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vced_count			# bump /proc statistics counter
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)	# also cleans pi
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vcei_count			# bump /proc statistics counter
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret
	.set	pop
	END(except_vec3_r4000)

	__FINIT

	.align	5
NESTED(handle_int, PT_SIZE, sp)
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Check to see if the interrupted code has just disabled
	 * interrupts and ignore this interrupt for now if so.
	 *
	 * local_irq_disable() disables interrupts and then calls
	 * trace_hardirqs_off() to track the state.  If an interrupt is taken
	 * after interrupts are disabled but before the state is updated
	 * it will appear to restore_all that it is incorrectly returning with
	 * interrupts disabled
	 */
	.set	push
	.set	noat
	mfc0	k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	and	k0, ST0_IEP			# previous IE bit on R3000-class
	bnez	k0, 1f

	/* FIX: was "EP0_EPC", which is not a defined register macro. */
	mfc0	k0, CP0_EPC
	.set	noreorder
	j	k0				# return to interrupted code,
	 rfe					# (delay slot) restoring context
#else
	and	k0, ST0_IE
	bnez	k0, 1f

	eret					# IE clear: pretend it never happened
#endif
1:
	.set	pop
#endif
	SAVE_ALL
	CLI
	TRACE_IRQS_OFF

	LONG_L	s0, TI_REGS($28)		# save previous regs pointer
	LONG_S	sp, TI_REGS($28)		# publish ours for the dispatcher
	PTR_LA	ra, ret_from_irq		# dispatcher "returns" there
	j	plat_irq_dispatch
	END(handle_int)

	__INIT

/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces the
 * interrupt processing overhead.  The jump instruction will be replaced
 * at the initialization time.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:	j	1b			/* Dummy, will be replaced */
	END(except_vec4)

/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do a
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler
	END(except_vec_ejtag_debug)

	__FINIT

/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler
 */
NESTED(except_vec_vi, 0, sp)
	SAVE_SOME
	SAVE_AT
	.set	push
	.set	noreorder
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * To keep from blindly blocking *all* interrupts
	 * during service by SMTC kernel, we also want to
	 * pass the IM value to be cleared.
	 */
FEXPORT(except_vec_vi_mori)
	ori	a0, $0, 0		/* Patched: IM mask to clear */
#endif /* CONFIG_MIPS_MT_SMTC */
	/* lui/ori pair is patched at init to form the handler address in v0 */
FEXPORT(except_vec_vi_lui)
	lui	v0, 0			/* Patched */
	j	except_vec_vi_handler
FEXPORT(except_vec_vi_ori)
	 ori	v0, 0			/* Patched (branch delay slot) */
	.set	pop
	END(except_vec_vi)
EXPORT(except_vec_vi_end)

/*
 * Common Vectored Interrupt code
 * Complete the register saves and invoke the handler which is passed in $v0
 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC has an interesting problem that interrupts are level-triggered,
	 * and the CLI macro will clear EXL, potentially causing a duplicate
	 * interrupt service invocation. So we need to clear the associated
	 * IM bit of Status prior to doing CLI, and restore it after the
	 * service routine has been invoked - we must assume that the
	 * service routine will have cleared the state, and any active
	 * level represents a new or otherwised unserviced event...
	 */
	mfc0	t1, CP0_STATUS
	and	t0, a0, t1		# IM bits to mask off (from patched mori)
	mfc0	t2, CP0_TCCONTEXT
	or	t0, t0, t2		# remember them in TCContext for restore
	mtc0	t0, CP0_TCCONTEXT
	xor	t1, t1, t0		# clear them in Status
	mtc0	t1, CP0_STATUS
	_ehb				# hazard barrier before CLI touches Status
#endif /* CONFIG_MIPS_MT_SMTC */
	CLI
#ifdef CONFIG_TRACE_IRQFLAGS
	move	s0, v0			# TRACE_IRQS_OFF clobbers v0/a0;
#ifdef CONFIG_MIPS_MT_SMTC
	move	s1, a0			# preserve them across the call
#endif
	TRACE_IRQS_OFF
#ifdef CONFIG_MIPS_MT_SMTC
	move	a0, s1
#endif
	move	v0, s0
#endif

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)
	PTR_LA	ra, ret_from_irq
	jr	v0			# jump to the patched-in handler
	END(except_vec_vi_handler)

/*
 * EJTAG debug exception handler.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE		# free k0 using the debug scratch reg
	mfc0	k0, CP0_DEBUG

	sll	k0, k0, 30		# Check for SDBBP.
	bgez	k0, ejtag_return

	PTR_LA	k0, ejtag_debug_buffer	# spill k1 -- no other scratch left
	LONG_S	k1, 0(k0)
	SAVE_ALL
	move	a0, sp
	jal	ejtag_exception_handler
	RESTORE_ALL
	PTR_LA	k0, ejtag_debug_buffer
	LONG_L	k1, 0(k0)

ejtag_return:
	MFC0	k0, CP0_DESAVE
	.set	mips32
	deret				# debug-mode return
	.set	pop
	END(ejtag_debug_handler)

/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler.
 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE
	.previous

	__INIT

/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do a
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
	j	nmi_handler
	END(except_vec_nmi)

	__FINIT

NESTED(nmi_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	SAVE_ALL
	move	a0, sp
	jal	nmi_exception_handler
	RESTORE_ALL
	.set	mips3
	eret
	.set	pop
	END(nmi_handler)

	/*
	 * __build_clear_* macros: per-exception fixup emitted by
	 * __BUILD_HANDLER right after SAVE_ALL, before calling C code.
	 */
	.macro	__build_clear_none
	.endm

	.macro	__build_clear_sti
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__build_clear_cli
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_fpe
	cfc1	a1, fcr31		# pass FCSR to the handler in a1
	li	a2, ~(0x3f << 12)
	and	a2, a1			# clear the FCSR cause bits so the
	ctc1	a2, fcr31		# FPE cannot immediately re-trigger
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__build_clear_ade
	MFC0	t0, CP0_BADVADDR	# record faulting address for C code
	PTR_S	t0, PT_BVADDR(sp)
	KMODE
	.endm

	.macro	__BUILD_silent	exception
	.endm

	/* Gas tries to parse the PRINT argument as a string containing
	   string escapes and emits bogus warnings if it believes to
	   recognize an unknown escape code.  So make the arguments
	   start with an n and gas will believe \n is ok ... */
	.macro	__BUILD_verbose	nexception
	LONG_L	a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
	PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
	PRINT("Got \nexception at %016lx\012")
#endif
	.endm

	.macro	__BUILD_count	exception
	LONG_L	t0, exception_count_\exception
	LONG_ADDIU t0, 1
	LONG_S	t0, exception_count_\exception
	/* FIX: was "exception_count\exception" -- the declared symbol
	   lacked the underscore and never matched the references above. */
	.comm	exception_count_\exception, 8, 8
	.endm

	/*
	 * Build handle_<exception>: save state, run the \clear fixup,
	 * optionally print, then tail into do_<handler>() which returns
	 * via ret_from_exception.
	 */
	.macro	__BUILD_HANDLER exception handler clear verbose ext
	.align	5
	NESTED(handle_\exception, PT_SIZE, sp)
	.set	noat
	SAVE_ALL
	FEXPORT(handle_\exception\ext)
	__BUILD_clear_\clear
	.set	at
	__BUILD_\verbose \exception
	move	a0, sp
	PTR_LA	ra, ret_from_exception
	j	do_\handler
	END(handle_\exception)
	.endm

	.macro	BUILD_HANDLER exception handler clear verbose
	__BUILD_HANDLER \exception \handler \clear \verbose _int
	.endm

	BUILD_HANDLER adel ade ade silent		/* #4  */
	BUILD_HANDLER ades ade ade silent		/* #5  */
	BUILD_HANDLER ibe be cli silent			/* #6  */
	BUILD_HANDLER dbe be cli silent			/* #7  */
	BUILD_HANDLER bp bp sti silent			/* #9  */
	BUILD_HANDLER ri ri sti silent			/* #10 */
	BUILD_HANDLER cpu cpu sti silent		/* #11 */
	BUILD_HANDLER ov ov sti silent			/* #12 */
	BUILD_HANDLER tr tr sti silent			/* #13 */
	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
	BUILD_HANDLER watch watch sti verbose		/* #23 */
	BUILD_HANDLER mcheck mcheck cli verbose		/* #24 */
	BUILD_HANDLER mt mt sti silent			/* #25 */
	BUILD_HANDLER dsp dsp sti silent		/* #26 */
	BUILD_HANDLER reserved reserved sti verbose	/* others */

	/*
	 * Fast path for the RI exception caused by the rdhwr-$29 (TLS
	 * pointer) userland idiom on CPUs lacking the instruction.  The
	 * VIVT variant first probes the TLB for EPC so the instruction
	 * fetch below cannot itself fault.
	 */
	.align	5
	LEAF(handle_ri_rdhwr_vivt)
#ifdef CONFIG_MIPS_MT_SMTC
	PANIC_PIC("handle_ri_rdhwr_vivt called")
#else
	.set	push
	.set	noat
	.set	noreorder
	/* check if TLB contains a entry for EPC */
	MFC0	k1, CP0_ENTRYHI
	andi	k1, 0xff	/* ASID_MASK */
	MFC0	k0, CP0_EPC
	PTR_SRL	k0, PAGE_SHIFT + 1	# keep VPN2, drop page offset
	PTR_SLL	k0, PAGE_SHIFT + 1
	or	k1, k0			# EntryHi = VPN2 | current ASID
	MTC0	k1, CP0_ENTRYHI
	mtc0_tlbw_hazard
	tlbp
	tlb_probe_hazard
	mfc0	k1, CP0_INDEX		# < 0 means no matching TLB entry
	.set	pop
	bltz	k1, handle_ri	/* slow path */
	/* fall thru */
#endif
	END(handle_ri_rdhwr_vivt)

	LEAF(handle_ri_rdhwr)
	.set	push
	.set	noat
	.set	noreorder
	/* 0x7c03e83b: rdhwr v1,$29 */
	MFC0	k1, CP0_EPC
	lui	k0, 0x7c03		# expected opcode, high half
	lw	k1, (k1)		# fetch the offending instruction
	ori	k0, 0xe83b		# expected opcode, low half
	.set	reorder
	bne	k0, k1, handle_ri	/* if not ours */
	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
	get_saved_sp	/* k1 := current_thread_info */
	.set	noreorder
	MFC0	k0, CP0_EPC
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	ori	k1, _THREAD_MASK	# round k1 down to thread_info base
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)	# emulate: v1 = TLS pointer
	LONG_ADDIU	k0, 4		# skip the rdhwr instruction
	jr	k0
	 rfe
#else
	LONG_ADDIU	k0, 4	/* stall on $k0 */
	MTC0	k0, CP0_EPC
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK	# round k1 down to thread_info base
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)	# emulate: v1 = TLS pointer
	.set	mips3
	eret
	.set	mips0
#endif
	.set	pop
	END(handle_ri_rdhwr)

#ifdef CONFIG_64BIT
/* A temporary overflow handler used by check_daddi(). */

	__INIT

	BUILD_HANDLER daddi_ov daddi_ov none silent	/* #12 */
#endif