/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_INTERRUPT_H
#define _ASM_POWERPC_INTERRUPT_H

/* BookE/4xx */
#define INTERRUPT_CRITICAL_INPUT  0x100

/* BookE */
#define INTERRUPT_DEBUG           0xd00
#ifdef CONFIG_BOOKE
#define INTERRUPT_PERFMON         0x260
#define INTERRUPT_DOORBELL        0x280
#endif

/* BookS/4xx/8xx */
#define INTERRUPT_MACHINE_CHECK   0x200

/* BookS/8xx */
#define INTERRUPT_SYSTEM_RESET    0x100

/* BookS */
#define INTERRUPT_DATA_SEGMENT    0x380
#define INTERRUPT_INST_SEGMENT    0x480
#define INTERRUPT_TRACE           0xd00
#define INTERRUPT_H_DATA_STORAGE  0xe00
#define INTERRUPT_HMI             0xe60
#define INTERRUPT_H_FAC_UNAVAIL   0xf80
#ifdef CONFIG_PPC_BOOK3S
#define INTERRUPT_DOORBELL        0xa00
#define INTERRUPT_PERFMON         0xf00
#define INTERRUPT_ALTIVEC_UNAVAIL 0xf20
#endif

/* BookE/BookS/4xx/8xx */
#define INTERRUPT_DATA_STORAGE    0x300
#define INTERRUPT_INST_STORAGE    0x400
#define INTERRUPT_EXTERNAL        0x500
#define INTERRUPT_ALIGNMENT       0x600
#define INTERRUPT_PROGRAM         0x700
#define INTERRUPT_SYSCALL         0xc00
#define INTERRUPT_TRACE           0xd00

/* BookE/BookS/44x */
#define INTERRUPT_FP_UNAVAIL      0x800

/* BookE/BookS/44x/8xx */
#define INTERRUPT_DECREMENTER     0x900

#ifndef INTERRUPT_PERFMON
#define INTERRUPT_PERFMON         0x0
#endif

/* 8xx */
#define INTERRUPT_SOFT_EMU_8xx            0x1000
#define INTERRUPT_INST_TLB_MISS_8xx       0x1100
#define INTERRUPT_DATA_TLB_MISS_8xx       0x1200
#define INTERRUPT_INST_TLB_ERROR_8xx      0x1300
#define INTERRUPT_DATA_TLB_ERROR_8xx      0x1400
#define INTERRUPT_DATA_BREAKPOINT_8xx     0x1c00
#define INTERRUPT_INST_BREAKPOINT_8xx     0x1d00

/* 603 */
#define INTERRUPT_INST_TLB_MISS_603       0x1000
#define INTERRUPT_DATA_LOAD_TLB_MISS_603  0x1100
#define INTERRUPT_DATA_STORE_TLB_MISS_603 0x1200
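
/*
 * These vector numbers are compared against TRAP(regs) (see asm/ptrace.h)
 * by the C handlers and helpers below. For example, nmi_disables_ftrace()
 * later in this header tests:
 *
 *      if (TRAP(regs) == INTERRUPT_DECREMENTER)
 *              return false;
 */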

#ifndef __ASSEMBLY__

#include <linux/context_tracking.h>
#include <linux/hardirq.h>
#include <asm/cputime.h>
#include <asm/ftrace.h>
#include <asm/kprobes.h>
#include <asm/runlatch.h>

#ifdef CONFIG_PPC_BOOK3S_64
extern char __end_soft_masked[];
bool search_kernel_soft_mask_table(unsigned long addr);
unsigned long search_kernel_restart_table(unsigned long addr);

DECLARE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);

static inline bool is_implicit_soft_masked(struct pt_regs *regs)
{
        if (regs->msr & MSR_PR)
                return false;

        if (regs->nip >= (unsigned long)__end_soft_masked)
                return false;

        return search_kernel_soft_mask_table(regs->nip);
}

/*
 * Interrupt exit on 64s may skip re-writing SRR0/1 (and HSRR0/1) when the
 * PACA records them as still matching regs. Anything that may have
 * clobbered those SPRs must call this so the exit path reloads them.
 */
static inline void srr_regs_clobbered(void)
{
        local_paca->srr_valid = 0;
        local_paca->hsrr_valid = 0;
}
#else
static inline unsigned long search_kernel_restart_table(unsigned long addr)
{
        return 0;
}

static inline bool is_implicit_soft_masked(struct pt_regs *regs)
{
        return false;
}

static inline void srr_regs_clobbered(void)
{
}
#endif

static inline void nap_adjust_return(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_970_NAP
        if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
                /* Can avoid a test-and-clear because NMIs do not call this */
                clear_thread_local_flags(_TLF_NAPPING);
                regs_set_return_ip(regs, (unsigned long)power4_idle_nap_return);
        }
#endif
}

struct interrupt_state {
};

static inline void booke_restore_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
        unsigned long dbcr0 = current->thread.debug.dbcr0;

        if (IS_ENABLED(CONFIG_PPC32) && unlikely(dbcr0 & DBCR0_IDM)) {
                mtspr(SPRN_DBSR, -1);
                mtspr(SPRN_DBCR0, global_dbcr0[smp_processor_id()]);
        }
#endif
}

static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC32
        if (!arch_irq_disabled_regs(regs))
                trace_hardirqs_off();

        if (user_mode(regs))
                kuap_lock();
        else
                kuap_save_and_lock(regs);

        if (user_mode(regs))
                account_cpu_user_entry();
#endif

#ifdef CONFIG_PPC64
        bool trace_enable = false;

        if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS)) {
                if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
                        trace_enable = true;
        } else {
                irq_soft_mask_set(IRQS_ALL_DISABLED);
        }

        /*
         * If the interrupt was taken with HARD_DIS clear, then enable MSR[EE].
         * Asynchronous interrupts get here with HARD_DIS set (see below), so
         * this enables MSR[EE] for synchronous interrupts. IRQs remain
         * soft-masked. The interrupt handler may later call
         * interrupt_cond_local_irq_enable() to achieve a regular process
         * context.
         */
        if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) {
                if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
                        BUG_ON(!(regs->msr & MSR_EE));
                __hard_irq_enable();
        } else {
                __hard_RI_enable();
        }

        /* Do this when RI=1 because it can cause SLB faults */
        if (trace_enable)
                trace_hardirqs_off();

        if (user_mode(regs)) {
                kuap_lock();
                CT_WARN_ON(ct_state() != CONTEXT_USER);
                user_exit_irqoff();

                account_cpu_user_entry();
                account_stolen_time();
        } else {
                kuap_save_and_lock(regs);
                /*
                 * CT_WARN_ON comes here via program_check_exception,
                 * so avoid recursion.
                 */
                if (TRAP(regs) != INTERRUPT_PROGRAM) {
                        CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
                        if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
                                BUG_ON(is_implicit_soft_masked(regs));
                }

                /* Move this under a debugging check */
                if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) &&
                    arch_irq_disabled_regs(regs))
                        BUG_ON(search_kernel_restart_table(regs->nip));
        }
        if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
                BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
#endif

        booke_restore_dbcr0();
}

/*
 * Care should be taken to note that interrupt_exit_prepare and
 * interrupt_async_exit_prepare do not necessarily return immediately to
 * regs context (e.g., if regs is usermode, we don't necessarily return to
 * user mode). Other interrupts might be taken between here and return,
 * context switch / preemption may occur in the exit path after this, or a
 * signal may be delivered, etc.
 *
 * The real interrupt exit code is platform specific, e.g.,
 * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.
 *
 * However, interrupt_nmi_exit_prepare does return directly to regs, because
 * NMIs do not do "exit work" or replay soft-masked interrupts.
 */
static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
}

static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC64
        /* Ensure interrupt_enter_prepare does not enable MSR[EE] */
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
#endif
        interrupt_enter_prepare(regs, state);
#ifdef CONFIG_PPC_BOOK3S_64
        /*
         * RI=1 is set by interrupt_enter_prepare, so this thread flags access
         * has to come afterward (it can cause SLB faults).
         */
        if (cpu_has_feature(CPU_FTR_CTRL) &&
            !test_thread_local_flags(_TLF_RUNLATCH))
                __ppc64_runlatch_on();
#endif
        irq_enter();
}

static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
        /*
         * Adjust at exit so the main handler sees the true NIA. This must
         * come before irq_exit() because irq_exit can enable interrupts, and
         * if another interrupt is taken before nap_adjust_return has run
         * here, then that interrupt would return directly to idle nap return.
         */
        nap_adjust_return(regs);

        irq_exit();
        interrupt_exit_prepare(regs, state);
}

struct interrupt_nmi_state {
#ifdef CONFIG_PPC64
        u8 irq_soft_mask;
        u8 irq_happened;
        u8 ftrace_enabled;
        u64 softe;
#endif
};

static inline bool nmi_disables_ftrace(struct pt_regs *regs)
{
        /* Allow DEC and PMI to be traced when they are soft-NMI */
        if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
                if (TRAP(regs) == INTERRUPT_DECREMENTER)
                        return false;
                if (TRAP(regs) == INTERRUPT_PERFMON)
                        return false;
        }
        if (IS_ENABLED(CONFIG_PPC_BOOK3E)) {
                if (TRAP(regs) == INTERRUPT_PERFMON)
                        return false;
        }

        return true;
}

static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
#ifdef CONFIG_PPC64
        state->irq_soft_mask = local_paca->irq_soft_mask;
        state->irq_happened = local_paca->irq_happened;
        state->softe = regs->softe;

        /*
         * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
         * the right thing, and set PACA_IRQ_HARD_DIS. We do not want to
         * reconcile because that goes through irq tracing, which we don't
         * want in NMI.
         */
        local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

        if (!(regs->msr & MSR_EE) || is_implicit_soft_masked(regs)) {
                /*
                 * Adjust regs->softe to be soft-masked if it had not been
                 * reconciled (e.g., interrupt entry with MSR[EE]=0 but softe
                 * not yet set disabled), or if it was in an implicit soft
                 * masked state. This makes arch_irq_disabled_regs(regs)
                 * behave as expected.
                 */
                regs->softe = IRQS_ALL_DISABLED;
        }

        __hard_RI_enable();

        /* Don't do any per-CPU operations until interrupt state is fixed */

        if (nmi_disables_ftrace(regs)) {
                state->ftrace_enabled = this_cpu_get_ftrace_enabled();
                this_cpu_set_ftrace_enabled(0);
        }
#endif

        /*
         * Do not use nmi_enter() for a pseries hash guest taking a real-mode
         * NMI because not everything it touches is within the RMA limit.
         */
        if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
            !firmware_has_feature(FW_FEATURE_LPAR) ||
            radix_enabled() || (mfmsr() & MSR_DR))
                nmi_enter();
}

static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
        if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
            !firmware_has_feature(FW_FEATURE_LPAR) ||
            radix_enabled() || (mfmsr() & MSR_DR))
                nmi_exit();

        /*
         * nmi does not call nap_adjust_return because nmi should not create
         * new work to do (must use irq_work for that).
         */

#ifdef CONFIG_PPC64
#ifdef CONFIG_PPC_BOOK3S
        if (arch_irq_disabled_regs(regs)) {
                unsigned long rst = search_kernel_restart_table(regs->nip);
                if (rst)
                        regs_set_return_ip(regs, rst);
        }
#endif

        if (nmi_disables_ftrace(regs))
                this_cpu_set_ftrace_enabled(state->ftrace_enabled);

        /* Check we didn't change the pending interrupt mask. */
        WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
        regs->softe = state->softe;
        local_paca->irq_happened = state->irq_happened;
        local_paca->irq_soft_mask = state->irq_soft_mask;
#endif
}

/*
 * Don't use noinstr here like x86, but rather add NOKPROBE_SYMBOL to each
 * function definition. The reason for this is that the noinstr section is
 * placed after the main text section, i.e., very far away from the interrupt
 * entry asm. That creates problems with fitting linker stubs when building
 * large kernels.
 */
#define interrupt_handler __visible noinline notrace __no_kcsan __no_sanitize_address

/**
 * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RAW(func) \
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RAW - Define raw interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * This is a plain function which does no tracing, reconciling, etc.
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 *
 * Raw interrupt handlers must not enable or disable interrupts, or
 * schedule. Tracing and instrumentation (ftrace, lockdep, etc) are not
 * advisable either; they may be possible in a pinch, but the trace will
 * look odd at least.
 *
 * A raw handler may call one of the other interrupt handler functions
 * to be converted into that interrupt context without these restrictions.
 *
 * On PPC64, _RAW handlers may return with fast_interrupt_return.
 *
 * Specific handlers may have additional restrictions.
 */
#define DEFINE_INTERRUPT_HANDLER_RAW(func) \
static __always_inline long ____##func(struct pt_regs *regs); \
 \
interrupt_handler long func(struct pt_regs *regs) \
{ \
        long ret; \
 \
        __hard_RI_enable(); \
 \
        ret = ____##func (regs); \
 \
        return ret; \
} \
NOKPROBE_SYMBOL(func); \
 \
static __always_inline long ____##func(struct pt_regs *regs)
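
/*
 * Illustrative sketch of appending a body to the RAW macro; the handler and
 * helper names here are hypothetical, not real kernel functions:
 *
 *      DEFINE_INTERRUPT_HANDLER_RAW(example_raw_handler)
 *      {
 *              long handled;
 *
 *              handled = example_fast_path(regs);
 *
 *              return handled;
 *      }
 *
 * Real users (e.g. do_slb_fault, declared with DECLARE_INTERRUPT_HANDLER_RAW
 * at the end of this header) take the same shape.
 */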

/**
 * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
 * @func:	Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER(func) \
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER - Define synchronous interrupt handler function
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER(func) \
static __always_inline void ____##func(struct pt_regs *regs); \
 \
interrupt_handler void func(struct pt_regs *regs) \
{ \
        struct interrupt_state state; \
 \
        interrupt_enter_prepare(regs, &state); \
 \
        ____##func (regs); \
 \
        interrupt_exit_prepare(regs, &state); \
} \
NOKPROBE_SYMBOL(func); \
 \
static __always_inline void ____##func(struct pt_regs *regs)
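
/*
 * Illustrative sketch of a synchronous handler body; the names below are
 * hypothetical. A handler that wants a regular process context may call
 * interrupt_cond_local_irq_enable() (defined at the end of this header),
 * which enables IRQs only if regs was not soft-masked:
 *
 *      DEFINE_INTERRUPT_HANDLER(example_exception)
 *      {
 *              interrupt_cond_local_irq_enable(regs);
 *
 *              example_handle_exception(regs);
 *      }
 */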

/**
 * DECLARE_INTERRUPT_HANDLER_RET - Declare synchronous interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RET(func) \
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RET - Define synchronous interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_RET(func) \
static __always_inline long ____##func(struct pt_regs *regs); \
 \
interrupt_handler long func(struct pt_regs *regs) \
{ \
        struct interrupt_state state; \
        long ret; \
 \
        interrupt_enter_prepare(regs, &state); \
 \
        ret = ____##func (regs); \
 \
        interrupt_exit_prepare(regs, &state); \
 \
        return ret; \
} \
NOKPROBE_SYMBOL(func); \
 \
static __always_inline long ____##func(struct pt_regs *regs)

/**
 * DECLARE_INTERRUPT_HANDLER_ASYNC - Declare asynchronous interrupt handler function
 * @func:	Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER_ASYNC(func) \
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_ASYNC - Define asynchronous interrupt handler function
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_ASYNC(func) \
static __always_inline void ____##func(struct pt_regs *regs); \
 \
interrupt_handler void func(struct pt_regs *regs) \
{ \
        struct interrupt_state state; \
 \
        interrupt_async_enter_prepare(regs, &state); \
 \
        ____##func (regs); \
 \
        interrupt_async_exit_prepare(regs, &state); \
} \
NOKPROBE_SYMBOL(func); \
 \
static __always_inline void ____##func(struct pt_regs *regs)

/**
 * DECLARE_INTERRUPT_HANDLER_NMI - Declare NMI interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_NMI(func) \
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_NMI - Define NMI interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_NMI(func) \
static __always_inline long ____##func(struct pt_regs *regs); \
 \
interrupt_handler long func(struct pt_regs *regs) \
{ \
        struct interrupt_nmi_state state; \
        long ret; \
 \
        interrupt_nmi_enter_prepare(regs, &state); \
 \
        ret = ____##func (regs); \
 \
        interrupt_nmi_exit_prepare(regs, &state); \
 \
        return ret; \
} \
NOKPROBE_SYMBOL(func); \
 \
static __always_inline long ____##func(struct pt_regs *regs)
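
/*
 * Illustrative sketch of an NMI handler body (hypothetical names); the body
 * runs between interrupt_nmi_enter_prepare() and interrupt_nmi_exit_prepare(),
 * and the return value is passed back to the asm caller:
 *
 *      DEFINE_INTERRUPT_HANDLER_NMI(example_nmi_exception)
 *      {
 *              example_handle_nmi(regs);
 *
 *              return 0;
 *      }
 */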

/* Interrupt handlers */
/* kernel/traps.c */
DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
#ifdef CONFIG_PPC_BOOK3S_64
DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async);
#endif
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
DECLARE_INTERRUPT_HANDLER(SMIException);
DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
DECLARE_INTERRUPT_HANDLER(unknown_exception);
DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
DECLARE_INTERRUPT_HANDLER_NMI(unknown_nmi_exception);
DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
DECLARE_INTERRUPT_HANDLER(RunModeException);
DECLARE_INTERRUPT_HANDLER(single_step_exception);
DECLARE_INTERRUPT_HANDLER(program_check_exception);
DECLARE_INTERRUPT_HANDLER(emulation_assist_interrupt);
DECLARE_INTERRUPT_HANDLER(alignment_exception);
DECLARE_INTERRUPT_HANDLER(StackOverflow);
DECLARE_INTERRUPT_HANDLER(stack_overflow_exception);
DECLARE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(facility_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(fp_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_tm);
DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
DECLARE_INTERRUPT_HANDLER_RAW(performance_monitor_exception);
DECLARE_INTERRUPT_HANDLER(DebugException);
DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
DECLARE_INTERRUPT_HANDLER(CacheLockingException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
DECLARE_INTERRUPT_HANDLER_NMI(WatchdogException);
DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);

/* slb.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_segment_interrupt);

/* hash_utils.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault);

/* fault.c */
DECLARE_INTERRUPT_HANDLER(do_page_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);

/* process.c */
DECLARE_INTERRUPT_HANDLER(do_break);

/* time.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(timer_interrupt);

/* mce.c */
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_early);
DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);

DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);

/* irq.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(do_IRQ);

void __noreturn unrecoverable_exception(struct pt_regs *regs);

void replay_system_reset(void);
void replay_soft_interrupts(void);

static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
{
        if (!arch_irq_disabled_regs(regs))
                local_irq_enable();
}

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_INTERRUPT_H */