/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_INTERRUPT_H
#define _ASM_POWERPC_INTERRUPT_H

/* BookE/4xx */
#define INTERRUPT_CRITICAL_INPUT	0x100

/* BookE */
#define INTERRUPT_DEBUG			0xd00
#ifdef CONFIG_BOOKE
#define INTERRUPT_PERFMON		0x260
#define INTERRUPT_DOORBELL		0x280
#endif

/* BookS/4xx/8xx */
#define INTERRUPT_MACHINE_CHECK		0x200

/* BookS/8xx */
#define INTERRUPT_SYSTEM_RESET		0x100

/* BookS */
#define INTERRUPT_DATA_SEGMENT		0x380
#define INTERRUPT_INST_SEGMENT		0x480
#define INTERRUPT_TRACE			0xd00
#define INTERRUPT_H_DATA_STORAGE	0xe00
#define INTERRUPT_HMI			0xe60
#define INTERRUPT_H_FAC_UNAVAIL		0xf80
#ifdef CONFIG_PPC_BOOK3S
#define INTERRUPT_DOORBELL		0xa00
#define INTERRUPT_PERFMON		0xf00
#define INTERRUPT_ALTIVEC_UNAVAIL	0xf20
#endif

/* BookE/BookS/4xx/8xx */
#define INTERRUPT_DATA_STORAGE		0x300
#define INTERRUPT_INST_STORAGE		0x400
#define INTERRUPT_EXTERNAL		0x500
#define INTERRUPT_ALIGNMENT		0x600
#define INTERRUPT_PROGRAM		0x700
#define INTERRUPT_SYSCALL		0xc00
#define INTERRUPT_TRACE			0xd00

/* BookE/BookS/44x */
#define INTERRUPT_FP_UNAVAIL		0x800

/* BookE/BookS/44x/8xx */
#define INTERRUPT_DECREMENTER		0x900

#ifndef INTERRUPT_PERFMON
#define INTERRUPT_PERFMON		0x0
#endif

/* 8xx */
#define INTERRUPT_SOFT_EMU_8xx		0x1000
#define INTERRUPT_INST_TLB_MISS_8xx	0x1100
#define INTERRUPT_DATA_TLB_MISS_8xx	0x1200
#define INTERRUPT_INST_TLB_ERROR_8xx	0x1300
#define INTERRUPT_DATA_TLB_ERROR_8xx	0x1400
#define INTERRUPT_DATA_BREAKPOINT_8xx	0x1c00
#define INTERRUPT_INST_BREAKPOINT_8xx	0x1d00

/* 603 */
#define INTERRUPT_INST_TLB_MISS_603		0x1000
#define INTERRUPT_DATA_LOAD_TLB_MISS_603	0x1100
#define INTERRUPT_DATA_STORE_TLB_MISS_603	0x1200

#ifndef __ASSEMBLY__

#include <linux/context_tracking.h>
#include <linux/hardirq.h>
#include <asm/cputime.h>
#include <asm/firmware.h>
#include <asm/ftrace.h>
#include <asm/kprobes.h>
#include <asm/runlatch.h>

#ifdef CONFIG_PPC_BOOK3S_64
extern char __end_soft_masked[];
bool search_kernel_soft_mask_table(unsigned long addr);
unsigned long search_kernel_restart_table(unsigned long addr);

DECLARE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);

static inline bool is_implicit_soft_masked(struct pt_regs *regs)
{
	if (regs->msr & MSR_PR)
		return false;

	if (regs->nip >= (unsigned long)__end_soft_masked)
		return false;

	return search_kernel_soft_mask_table(regs->nip);
}

static inline void srr_regs_clobbered(void)
{
	local_paca->srr_valid = 0;
	local_paca->hsrr_valid = 0;
}
#else
static inline unsigned long search_kernel_restart_table(unsigned long addr)
{
	return 0;
}

static inline bool is_implicit_soft_masked(struct pt_regs *regs)
{
	return false;
}

static inline void srr_regs_clobbered(void)
{
}
#endif

static inline void nap_adjust_return(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_970_NAP
	if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
		/* Can avoid a test-and-clear because NMIs do not call this */
		clear_thread_local_flags(_TLF_NAPPING);
		regs_set_return_ip(regs, (unsigned long)power4_idle_nap_return);
	}
#endif
}

static inline void booke_restore_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long dbcr0 = current->thread.debug.dbcr0;

	if (IS_ENABLED(CONFIG_PPC32) && unlikely(dbcr0 & DBCR0_IDM)) {
		mtspr(SPRN_DBSR, -1);
		mtspr(SPRN_DBCR0, global_dbcr0[smp_processor_id()]);
	}
#endif
}

static inline void interrupt_enter_prepare(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	if (!arch_irq_disabled_regs(regs))
		trace_hardirqs_off();

	if (user_mode(regs))
		kuap_lock();
	else
		kuap_save_and_lock(regs);

	if (user_mode(regs))
		account_cpu_user_entry();
#endif

#ifdef CONFIG_PPC64
	bool trace_enable = false;

	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS)) {
		if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
			trace_enable = true;
	} else {
		irq_soft_mask_set(IRQS_ALL_DISABLED);
	}

	/*
	 * If the interrupt was taken with HARD_DIS clear, then enable MSR[EE].
	 * Asynchronous interrupts get here with HARD_DIS set (see below), so
	 * this enables MSR[EE] for synchronous interrupts. IRQs remain
	 * soft-masked. The interrupt handler may later call
	 * interrupt_cond_local_irq_enable() to achieve a regular process
	 * context (see the note following this function).
	 */
	if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) {
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
			BUG_ON(!(regs->msr & MSR_EE));
		__hard_irq_enable();
	} else {
		__hard_RI_enable();
	}

	/* Do this when RI=1 because it can cause SLB faults */
	if (trace_enable)
		trace_hardirqs_off();

	if (user_mode(regs)) {
		kuap_lock();
		CT_WARN_ON(ct_state() != CONTEXT_USER);
		user_exit_irqoff();

		account_cpu_user_entry();
		account_stolen_time();
	} else {
		kuap_save_and_lock(regs);
		/*
		 * CT_WARN_ON comes here via program_check_exception,
		 * so avoid recursion.
		 */
		if (TRAP(regs) != INTERRUPT_PROGRAM) {
			CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
			if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
				BUG_ON(is_implicit_soft_masked(regs));
		}

		/* Move this under a debugging check */
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) &&
		    arch_irq_disabled_regs(regs))
			BUG_ON(search_kernel_restart_table(regs->nip));
	}
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
#endif

	booke_restore_dbcr0();
}
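
/*
 * Illustrative note, not generated code: after interrupt_enter_prepare() the
 * handler body runs with IRQs soft-masked. A synchronous handler that wants
 * a regular process context typically starts its body with
 *
 *	interrupt_cond_local_irq_enable(regs);
 *
 * which calls local_irq_enable() only if the interrupted context did not
 * have IRQs disabled (the helper is defined near the end of this file).
 */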

/*
 * Care should be taken to note that interrupt_exit_prepare and
 * interrupt_async_exit_prepare do not necessarily return immediately to
 * regs context (e.g., if regs is usermode, we don't necessarily return to
 * user mode). Other interrupts might be taken between here and return,
 * context switch / preemption may occur in the exit path after this, or a
 * signal may be delivered, etc.
 *
 * The real interrupt exit code is platform specific, e.g.,
 * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.
 *
 * However interrupt_nmi_exit_prepare does return directly to regs, because
 * NMIs do not do "exit work" or replay soft-masked interrupts.
 */
static inline void interrupt_exit_prepare(struct pt_regs *regs)
{
}

static inline void interrupt_async_enter_prepare(struct pt_regs *regs)
{
#ifdef CONFIG_PPC64
	/* Ensure interrupt_enter_prepare does not enable MSR[EE] */
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
#endif
	interrupt_enter_prepare(regs);
#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * RI=1 is set by interrupt_enter_prepare, so this thread flags access
	 * has to come afterward (it can cause SLB faults).
	 */
	if (cpu_has_feature(CPU_FTR_CTRL) &&
	    !test_thread_local_flags(_TLF_RUNLATCH))
		__ppc64_runlatch_on();
#endif
	irq_enter();
}

static inline void interrupt_async_exit_prepare(struct pt_regs *regs)
{
	/*
	 * Adjust at exit so the main handler sees the true NIA. This must
	 * come before irq_exit() because irq_exit can enable interrupts, and
	 * if another interrupt is taken before nap_adjust_return has run
	 * here, then that interrupt would return directly to idle nap return.
	 */
	nap_adjust_return(regs);

	irq_exit();
	interrupt_exit_prepare(regs);
}

struct interrupt_nmi_state {
#ifdef CONFIG_PPC64
	u8 irq_soft_mask;
	u8 irq_happened;
	u8 ftrace_enabled;
	u64 softe;
#endif
};

static inline bool nmi_disables_ftrace(struct pt_regs *regs)
{
	/* Allow DEC and PMI to be traced when they are soft-NMI */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
		if (TRAP(regs) == INTERRUPT_DECREMENTER)
			return false;
		if (TRAP(regs) == INTERRUPT_PERFMON)
			return false;
	}
	if (IS_ENABLED(CONFIG_PPC_BOOK3E)) {
		if (TRAP(regs) == INTERRUPT_PERFMON)
			return false;
	}

	return true;
}

static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
#ifdef CONFIG_PPC64
	state->irq_soft_mask = local_paca->irq_soft_mask;
	state->irq_happened = local_paca->irq_happened;
	state->softe = regs->softe;

	/*
	 * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
	 * the right thing, and set IRQ_HARD_DIS. We do not want to reconcile
	 * because that goes through irq tracing which we don't want in NMI.
	 */
	local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	if (!(regs->msr & MSR_EE) || is_implicit_soft_masked(regs)) {
		/*
		 * Adjust regs->softe to be soft-masked if it had not been
		 * reconciled (e.g., interrupt entry with MSR[EE]=0 but softe
		 * not yet set disabled), or if it was in an implicit soft
		 * masked state. This makes arch_irq_disabled_regs(regs)
		 * behave as expected.
		 */
		regs->softe = IRQS_ALL_DISABLED;
	}

	__hard_RI_enable();

	/* Don't do any per-CPU operations until interrupt state is fixed */

	if (nmi_disables_ftrace(regs)) {
		state->ftrace_enabled = this_cpu_get_ftrace_enabled();
		this_cpu_set_ftrace_enabled(0);
	}
#endif

	/* If data relocations are enabled, it's safe to use nmi_enter() */
	if (mfmsr() & MSR_DR) {
		nmi_enter();
		return;
	}

	/*
	 * But do not use nmi_enter() for pseries hash guest taking a real-mode
	 * NMI because not everything it touches is within the RMA limit.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
	    firmware_has_feature(FW_FEATURE_LPAR) &&
	    !radix_enabled())
		return;

	/*
	 * Likewise, don't use it if we have some form of instrumentation (like
	 * KASAN shadow) that is not safe to access in real mode (even on radix)
	 */
	if (IS_ENABLED(CONFIG_KASAN))
		return;

	/* Otherwise, it should be safe to call it */
	nmi_enter();
}

static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
	if (mfmsr() & MSR_DR) {
		// nmi_exit if relocations are on
		nmi_exit();
	} else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
		   firmware_has_feature(FW_FEATURE_LPAR) &&
		   !radix_enabled()) {
		// no nmi_exit for a pseries hash guest taking a real mode exception
	} else if (IS_ENABLED(CONFIG_KASAN)) {
		// no nmi_exit for KASAN in real mode
	} else {
		nmi_exit();
	}

	/*
	 * nmi does not call nap_adjust_return because nmi should not create
	 * new work to do (must use irq_work for that).
	 */

#ifdef CONFIG_PPC64
#ifdef CONFIG_PPC_BOOK3S
	if (arch_irq_disabled_regs(regs)) {
		unsigned long rst = search_kernel_restart_table(regs->nip);
		if (rst)
			regs_set_return_ip(regs, rst);
	}
#endif

	if (nmi_disables_ftrace(regs))
		this_cpu_set_ftrace_enabled(state->ftrace_enabled);

	/* Check we didn't change the pending interrupt mask. */
	WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
	regs->softe = state->softe;
	local_paca->irq_happened = state->irq_happened;
	local_paca->irq_soft_mask = state->irq_soft_mask;
#endif
}

/*
 * Don't use noinstr here like x86, but rather add NOKPROBE_SYMBOL to each
 * function definition. The reason for this is the noinstr section is placed
 * after the main text section, i.e., very far away from the interrupt entry
 * asm. That creates problems with fitting linker stubs when building large
 * kernels.
 */
#define interrupt_handler __visible noinline notrace __no_kcsan __no_sanitize_address

/**
 * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function
 * @func: Function name of the entry point
 * @returns: Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RAW(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RAW - Define raw interrupt handler function
 * @func: Function name of the entry point
 * @returns: Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * This is a plain function which does no tracing, reconciling, etc.
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets (see the illustrative sketch after
 * the macro definition below).
 *
 * Raw interrupt handlers must not enable or disable interrupts, or
 * schedule. Tracing and instrumentation (ftrace, lockdep, etc) are not
 * advisable either; they may be possible in a pinch, but the trace will
 * look odd at least.
 *
 * A raw handler may call one of the other interrupt handler functions
 * to be converted into that interrupt context without these restrictions.
 *
 * On PPC64, _RAW handlers may return with fast_interrupt_return.
 *
 * Specific handlers may have additional restrictions.
 */
#define DEFINE_INTERRUPT_HANDLER_RAW(func)				\
static __always_inline __no_sanitize_address __no_kcsan long		\
____##func(struct pt_regs *regs);					\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	long ret;							\
									\
	__hard_RI_enable();						\
									\
	ret = ____##func (regs);					\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline __no_sanitize_address __no_kcsan long		\
____##func(struct pt_regs *regs)
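
/*
 * Illustrative sketch only; "do_example_raw" is a hypothetical name, not a
 * handler declared in this header. It shows how the macro above acts as the
 * function definition, with the body appended in curly brackets and a value
 * handed back to the asm caller:
 *
 *	DEFINE_INTERRUPT_HANDLER_RAW(do_example_raw)
 *	{
 *		long handled = 0;
 *
 *		return handled;
 *	}
 */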

/**
 * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
 * @func: Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER(func)					\
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER - Define synchronous interrupt handler function
 * @func: Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets (see the illustrative sketch after
 * the macro definition below).
 */
#define DEFINE_INTERRUPT_HANDLER(func)					\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
interrupt_handler void func(struct pt_regs *regs)			\
{									\
	interrupt_enter_prepare(regs);					\
									\
	____##func (regs);						\
									\
	interrupt_exit_prepare(regs);					\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline void ____##func(struct pt_regs *regs)
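
/*
 * Illustrative sketch only; "do_example_exception" is a hypothetical name,
 * not a handler declared below. The generated outer do_example_exception()
 * runs interrupt_enter_prepare() / interrupt_exit_prepare() around the
 * inlined ____do_example_exception() body:
 *
 *	DEFINE_INTERRUPT_HANDLER(do_example_exception)
 *	{
 *		... handle the exception using regs ...
 *	}
 */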

/**
 * DECLARE_INTERRUPT_HANDLER_RET - Declare synchronous interrupt handler function
 * @func: Function name of the entry point
 * @returns: Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RET(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RET - Define synchronous interrupt handler function
 * @func: Function name of the entry point
 * @returns: Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_RET(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	long ret;							\
									\
	interrupt_enter_prepare(regs);					\
									\
	ret = ____##func (regs);					\
									\
	interrupt_exit_prepare(regs);					\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)

/**
 * DECLARE_INTERRUPT_HANDLER_ASYNC - Declare asynchronous interrupt handler function
 * @func: Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER_ASYNC(func)				\
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_ASYNC - Define asynchronous interrupt handler function
 * @func: Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_ASYNC(func)				\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
interrupt_handler void func(struct pt_regs *regs)			\
{									\
	interrupt_async_enter_prepare(regs);				\
									\
	____##func (regs);						\
									\
	interrupt_async_exit_prepare(regs);				\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline void ____##func(struct pt_regs *regs)

/**
 * DECLARE_INTERRUPT_HANDLER_NMI - Declare NMI interrupt handler function
 * @func: Function name of the entry point
 * @returns: Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_NMI(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_NMI - Define NMI interrupt handler function
 * @func: Function name of the entry point
 * @returns: Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets (see the illustrative sketch after
 * the macro definition below).
 */
#define DEFINE_INTERRUPT_HANDLER_NMI(func)				\
static __always_inline __no_sanitize_address __no_kcsan long		\
____##func(struct pt_regs *regs);					\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	struct interrupt_nmi_state state;				\
	long ret;							\
									\
	interrupt_nmi_enter_prepare(regs, &state);			\
									\
	ret = ____##func (regs);					\
									\
	interrupt_nmi_exit_prepare(regs, &state);			\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline __no_sanitize_address __no_kcsan long		\
____##func(struct pt_regs *regs)
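
/*
 * Illustrative sketch only; "do_example_nmi" is a hypothetical name. The
 * generated wrapper saves and restores NMI state via
 * interrupt_nmi_enter_prepare() / interrupt_nmi_exit_prepare() and hands the
 * body's return value back to the asm caller; no exit work is run on return:
 *
 *	DEFINE_INTERRUPT_HANDLER_NMI(do_example_nmi)
 *	{
 *		... NMI-safe handling only ...
 *		return 0;
 *	}
 */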

/* Interrupt handlers */
/* kernel/traps.c */
DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
#ifdef CONFIG_PPC_BOOK3S_64
DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async);
#endif
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
DECLARE_INTERRUPT_HANDLER(SMIException);
DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
DECLARE_INTERRUPT_HANDLER(unknown_exception);
DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
DECLARE_INTERRUPT_HANDLER_NMI(unknown_nmi_exception);
DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
DECLARE_INTERRUPT_HANDLER(RunModeException);
DECLARE_INTERRUPT_HANDLER(single_step_exception);
DECLARE_INTERRUPT_HANDLER(program_check_exception);
DECLARE_INTERRUPT_HANDLER(emulation_assist_interrupt);
DECLARE_INTERRUPT_HANDLER(alignment_exception);
DECLARE_INTERRUPT_HANDLER(StackOverflow);
DECLARE_INTERRUPT_HANDLER(stack_overflow_exception);
DECLARE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(facility_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(fp_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_tm);
DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
DECLARE_INTERRUPT_HANDLER_RAW(performance_monitor_exception);
DECLARE_INTERRUPT_HANDLER(DebugException);
DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
DECLARE_INTERRUPT_HANDLER(CacheLockingException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
DECLARE_INTERRUPT_HANDLER_NMI(WatchdogException);
DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);

/* slb.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_segment_interrupt);

/* hash_utils.c */
DECLARE_INTERRUPT_HANDLER(do_hash_fault);

/* fault.c */
DECLARE_INTERRUPT_HANDLER(do_page_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);

/* process.c */
DECLARE_INTERRUPT_HANDLER(do_break);

/* time.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(timer_interrupt);

/* mce.c */
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_early);
DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);

DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);

/* irq.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(do_IRQ);

void __noreturn unrecoverable_exception(struct pt_regs *regs);

void replay_system_reset(void);
void replay_soft_interrupts(void);

static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
{
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();
}

long system_call_exception(long r3, long r4, long r5, long r6, long r7, long r8,
			   unsigned long r0, struct pt_regs *regs);
notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs, long scv);
notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs);
notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs);
#ifdef CONFIG_PPC64
unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *regs);
unsigned long interrupt_exit_user_restart(struct pt_regs *regs);
unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs);
#endif

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_INTERRUPT_H */