/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_INTERRUPT_H
#define _ASM_POWERPC_INTERRUPT_H

/* BookE/4xx */
#define INTERRUPT_CRITICAL_INPUT  0x100

/* BookE */
#define INTERRUPT_DEBUG           0xd00
#ifdef CONFIG_BOOKE
#define INTERRUPT_PERFMON         0x260
#define INTERRUPT_DOORBELL        0x280
#endif

/* BookS/4xx/8xx */
#define INTERRUPT_MACHINE_CHECK   0x200

/* BookS/8xx */
#define INTERRUPT_SYSTEM_RESET    0x100

/* BookS */
#define INTERRUPT_DATA_SEGMENT    0x380
#define INTERRUPT_INST_SEGMENT    0x480
#define INTERRUPT_TRACE           0xd00
#define INTERRUPT_H_DATA_STORAGE  0xe00
#define INTERRUPT_HMI             0xe60
#define INTERRUPT_H_FAC_UNAVAIL   0xf80
#ifdef CONFIG_PPC_BOOK3S
#define INTERRUPT_DOORBELL        0xa00
#define INTERRUPT_PERFMON         0xf00
#define INTERRUPT_ALTIVEC_UNAVAIL 0xf20
#endif

/* BookE/BookS/4xx/8xx */
#define INTERRUPT_DATA_STORAGE    0x300
#define INTERRUPT_INST_STORAGE    0x400
#define INTERRUPT_EXTERNAL        0x500
#define INTERRUPT_ALIGNMENT       0x600
#define INTERRUPT_PROGRAM         0x700
#define INTERRUPT_SYSCALL         0xc00
#define INTERRUPT_TRACE           0xd00

/* BookE/BookS/44x */
#define INTERRUPT_FP_UNAVAIL      0x800

/* BookE/BookS/44x/8xx */
#define INTERRUPT_DECREMENTER     0x900

#ifndef INTERRUPT_PERFMON
#define INTERRUPT_PERFMON         0x0
#endif

/* 8xx */
#define INTERRUPT_SOFT_EMU_8xx		0x1000
#define INTERRUPT_INST_TLB_MISS_8xx	0x1100
#define INTERRUPT_DATA_TLB_MISS_8xx	0x1200
#define INTERRUPT_INST_TLB_ERROR_8xx	0x1300
#define INTERRUPT_DATA_TLB_ERROR_8xx	0x1400
#define INTERRUPT_DATA_BREAKPOINT_8xx	0x1c00
#define INTERRUPT_INST_BREAKPOINT_8xx	0x1d00

/* 603 */
#define INTERRUPT_INST_TLB_MISS_603		0x1000
#define INTERRUPT_DATA_LOAD_TLB_MISS_603	0x1100
#define INTERRUPT_DATA_STORE_TLB_MISS_603	0x1200
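
/*
 * TRAP(regs) reports the vector of the interrupt being handled using the
 * values above. A brief illustrative sketch (this is essentially what
 * nmi_disables_ftrace() later in this file does):
 *
 *	if (TRAP(regs) == INTERRUPT_DECREMENTER)
 *		return false;
 */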

#ifndef __ASSEMBLY__

#include <linux/context_tracking.h>
#include <linux/hardirq.h>
#include <asm/cputime.h>
#include <asm/firmware.h>
#include <asm/ftrace.h>
#include <asm/kprobes.h>
#include <asm/runlatch.h>

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
/*
 * WARN/BUG is handled with a program interrupt so minimise checks here to
 * avoid recursion and maximise the chance of getting the first oops handled.
 */
#define INT_SOFT_MASK_BUG_ON(regs, cond)				\
do {									\
	if ((user_mode(regs) || (TRAP(regs) != INTERRUPT_PROGRAM)))	\
		BUG_ON(cond);						\
} while (0)
#else
#define INT_SOFT_MASK_BUG_ON(regs, cond)
#endif

#ifdef CONFIG_PPC_BOOK3S_64
extern char __end_soft_masked[];
bool search_kernel_soft_mask_table(unsigned long addr);
unsigned long search_kernel_restart_table(unsigned long addr);

DECLARE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);

static inline bool is_implicit_soft_masked(struct pt_regs *regs)
{
	if (regs->msr & MSR_PR)
		return false;

	if (regs->nip >= (unsigned long)__end_soft_masked)
		return false;

	return search_kernel_soft_mask_table(regs->nip);
}

static inline void srr_regs_clobbered(void)
{
	local_paca->srr_valid = 0;
	local_paca->hsrr_valid = 0;
}
#else
static inline unsigned long search_kernel_restart_table(unsigned long addr)
{
	return 0;
}

static inline bool is_implicit_soft_masked(struct pt_regs *regs)
{
	return false;
}

static inline void srr_regs_clobbered(void)
{
}
#endif

static inline void nap_adjust_return(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_970_NAP
	if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
		/* Can avoid a test-and-clear because NMIs do not call this */
		clear_thread_local_flags(_TLF_NAPPING);
		regs_set_return_ip(regs, (unsigned long)power4_idle_nap_return);
	}
#endif
}

static inline void booke_restore_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long dbcr0 = current->thread.debug.dbcr0;

	if (IS_ENABLED(CONFIG_PPC32) && unlikely(dbcr0 & DBCR0_IDM)) {
		mtspr(SPRN_DBSR, -1);
		mtspr(SPRN_DBCR0, global_dbcr0[smp_processor_id()]);
	}
#endif
}

static inline void interrupt_enter_prepare(struct pt_regs *regs)
{
#ifdef CONFIG_PPC64
	irq_soft_mask_set(IRQS_ALL_DISABLED);

	/*
	 * If the interrupt was taken with HARD_DIS clear, then enable MSR[EE].
	 * Asynchronous interrupts get here with HARD_DIS set (see below), so
	 * this enables MSR[EE] for synchronous interrupts. IRQs remain
	 * soft-masked. The interrupt handler may later call
	 * interrupt_cond_local_irq_enable() to achieve a regular process
	 * context.
	 */
	if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) {
		INT_SOFT_MASK_BUG_ON(regs, !(regs->msr & MSR_EE));
		__hard_irq_enable();
	} else {
		__hard_RI_enable();
	}
	/* Enable MSR[RI] early, to support kernel SLB and hash faults */
#endif

	if (!arch_irq_disabled_regs(regs))
		trace_hardirqs_off();

	if (user_mode(regs)) {
		kuap_lock();
		CT_WARN_ON(ct_state() != CONTEXT_USER);
		user_exit_irqoff();

		account_cpu_user_entry();
		account_stolen_time();
	} else {
		kuap_save_and_lock(regs);
		/*
		 * CT_WARN_ON comes here via program_check_exception,
		 * so avoid recursion.
		 */
		if (TRAP(regs) != INTERRUPT_PROGRAM)
			CT_WARN_ON(ct_state() != CONTEXT_KERNEL &&
				   ct_state() != CONTEXT_IDLE);
		INT_SOFT_MASK_BUG_ON(regs, is_implicit_soft_masked(regs));
		INT_SOFT_MASK_BUG_ON(regs, arch_irq_disabled_regs(regs) &&
					   search_kernel_restart_table(regs->nip));
	}
	INT_SOFT_MASK_BUG_ON(regs, !arch_irq_disabled_regs(regs) &&
				   !(regs->msr & MSR_EE));

	booke_restore_dbcr0();
}

/*
 * Care should be taken to note that interrupt_exit_prepare and
 * interrupt_async_exit_prepare do not necessarily return immediately to
 * regs context (e.g., if regs is usermode, we don't necessarily return to
 * user mode). Other interrupts might be taken between here and return,
 * context switch / preemption may occur in the exit path after this, or a
 * signal may be delivered, etc.
 *
 * The real interrupt exit code is platform specific, e.g.,
 * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.
 *
 * However interrupt_nmi_exit_prepare does return directly to regs, because
 * NMIs do not do "exit work" or replay soft-masked interrupts.
 */
static inline void interrupt_exit_prepare(struct pt_regs *regs)
{
}

static inline void interrupt_async_enter_prepare(struct pt_regs *regs)
{
#ifdef CONFIG_PPC64
	/* Ensure interrupt_enter_prepare does not enable MSR[EE] */
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
#endif
	interrupt_enter_prepare(regs);
#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * RI=1 is set by interrupt_enter_prepare, so this thread flags access
	 * has to come afterward (it can cause SLB faults).
	 */
	if (cpu_has_feature(CPU_FTR_CTRL) &&
	    !test_thread_local_flags(_TLF_RUNLATCH))
		__ppc64_runlatch_on();
#endif
	irq_enter();
}

static inline void interrupt_async_exit_prepare(struct pt_regs *regs)
{
	/*
	 * Adjust at exit so the main handler sees the true NIA. This must
	 * come before irq_exit() because irq_exit can enable interrupts, and
	 * if another interrupt is taken before nap_adjust_return has run
	 * here, then that interrupt would return directly to idle nap return.
	 */
	nap_adjust_return(regs);

	irq_exit();
	interrupt_exit_prepare(regs);
}

struct interrupt_nmi_state {
#ifdef CONFIG_PPC64
	u8 irq_soft_mask;
	u8 irq_happened;
	u8 ftrace_enabled;
	u64 softe;
#endif
};

static inline bool nmi_disables_ftrace(struct pt_regs *regs)
{
	/* Allow DEC and PMI to be traced when they are soft-NMI */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
		if (TRAP(regs) == INTERRUPT_DECREMENTER)
			return false;
		if (TRAP(regs) == INTERRUPT_PERFMON)
			return false;
	}
	if (IS_ENABLED(CONFIG_PPC_BOOK3E_64)) {
		if (TRAP(regs) == INTERRUPT_PERFMON)
			return false;
	}

	return true;
}

static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
#ifdef CONFIG_PPC64
	state->irq_soft_mask = local_paca->irq_soft_mask;
	state->irq_happened = local_paca->irq_happened;
	state->softe = regs->softe;

	/*
	 * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
	 * the right thing, and set IRQ_HARD_DIS. We do not want to reconcile
	 * because that goes through irq tracing which we don't want in NMI.
	 */
	local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	if (!(regs->msr & MSR_EE) || is_implicit_soft_masked(regs)) {
		/*
		 * Adjust regs->softe to be soft-masked if it had not been
		 * reconciled (e.g., interrupt entry with MSR[EE]=0 but softe
		 * not yet set disabled), or if it was in an implicit soft
		 * masked state. This makes arch_irq_disabled_regs(regs)
		 * behave as expected.
		 */
		regs->softe = IRQS_ALL_DISABLED;
	}

	__hard_RI_enable();

	/* Don't do any per-CPU operations until interrupt state is fixed */

	if (nmi_disables_ftrace(regs)) {
		state->ftrace_enabled = this_cpu_get_ftrace_enabled();
		this_cpu_set_ftrace_enabled(0);
	}
#endif

	/* If data relocations are enabled, it's safe to use nmi_enter() */
	if (mfmsr() & MSR_DR) {
		nmi_enter();
		return;
	}

	/*
	 * But do not use nmi_enter() for pseries hash guest taking a real-mode
	 * NMI because not everything it touches is within the RMA limit.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
	    firmware_has_feature(FW_FEATURE_LPAR) &&
	    !radix_enabled())
		return;

	/*
	 * Likewise, don't use it if we have some form of instrumentation (like
	 * KASAN shadow) that is not safe to access in real mode (even on radix).
	 */
	if (IS_ENABLED(CONFIG_KASAN))
		return;

	/* Otherwise, it should be safe to call it */
	nmi_enter();
}

static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
	if (mfmsr() & MSR_DR) {
		// nmi_exit if relocations are on
		nmi_exit();
	} else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
		   firmware_has_feature(FW_FEATURE_LPAR) &&
		   !radix_enabled()) {
		// no nmi_exit for a pseries hash guest taking a real mode exception
	} else if (IS_ENABLED(CONFIG_KASAN)) {
		// no nmi_exit for KASAN in real mode
	} else {
		nmi_exit();
	}

	/*
	 * nmi does not call nap_adjust_return because nmi should not create
	 * new work to do (must use irq_work for that).
	 */

#ifdef CONFIG_PPC64
#ifdef CONFIG_PPC_BOOK3S
	if (arch_irq_disabled_regs(regs)) {
		unsigned long rst = search_kernel_restart_table(regs->nip);
		if (rst)
			regs_set_return_ip(regs, rst);
	}
#endif

	if (nmi_disables_ftrace(regs))
		this_cpu_set_ftrace_enabled(state->ftrace_enabled);

	/* Check we didn't change the pending interrupt mask. */
	WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
	regs->softe = state->softe;
	local_paca->irq_happened = state->irq_happened;
	local_paca->irq_soft_mask = state->irq_soft_mask;
#endif
}
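
/*
 * A sketch of the expected pairing for an NMI handler (this is essentially
 * what DEFINE_INTERRUPT_HANDLER_NMI below expands to, names aside); the
 * state lives on the handler's stack and the two calls must bracket the
 * handler body:
 *
 *	struct interrupt_nmi_state state;
 *
 *	interrupt_nmi_enter_prepare(regs, &state);
 *	... NMI handler body ...
 *	interrupt_nmi_exit_prepare(regs, &state);
 */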

/*
 * Don't use noinstr here like x86, but rather add NOKPROBE_SYMBOL to each
 * function definition. The reason for this is the noinstr section is placed
 * after the main text section, i.e., very far away from the interrupt entry
 * asm. That creates problems with fitting linker stubs when building large
 * kernels.
 */
#define interrupt_handler __visible noinline notrace __no_kcsan __no_sanitize_address

/**
 * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function
 * @func: Function name of the entry point
 * @returns: Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RAW(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RAW - Define raw interrupt handler function
 * @func: Function name of the entry point
 * @returns: Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * This is a plain function which does no tracing, reconciling, etc.
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 *
 * Raw interrupt handlers must not enable or disable interrupts, or
 * schedule. Tracing and instrumentation (ftrace, lockdep, etc) would not
 * be advisable either, although it may be possible in a pinch; the trace
 * will look odd at least.
 *
 * A raw handler may call one of the other interrupt handler functions
 * to be converted into that interrupt context without these restrictions.
 *
 * On PPC64, _RAW handlers may return with fast_interrupt_return.
 *
 * Specific handlers may have additional restrictions.
 */
#define DEFINE_INTERRUPT_HANDLER_RAW(func)				\
static __always_inline __no_sanitize_address __no_kcsan long		\
____##func(struct pt_regs *regs);					\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	long ret;							\
									\
	__hard_RI_enable();						\
									\
	ret = ____##func (regs);					\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline __no_sanitize_address __no_kcsan long		\
____##func(struct pt_regs *regs)

/**
 * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
 * @func: Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER(func)					\
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER - Define synchronous interrupt handler function
 * @func: Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER(func)					\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
interrupt_handler void func(struct pt_regs *regs)			\
{									\
	interrupt_enter_prepare(regs);					\
									\
	____##func (regs);						\
									\
	interrupt_exit_prepare(regs);					\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline void ____##func(struct pt_regs *regs)

/**
 * DECLARE_INTERRUPT_HANDLER_RET - Declare synchronous interrupt handler function
 * @func: Function name of the entry point
 * @returns: Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RET(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RET - Define synchronous interrupt handler function
 * @func: Function name of the entry point
 * @returns: Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
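 *
 * A minimal usage sketch (the handler name here is hypothetical, for
 * illustration only); the value returned from the body is handed back to
 * the asm caller:
 *
 *	DEFINE_INTERRUPT_HANDLER_RET(do_example_fault)
 *	{
 *		return 0;
 *	}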
 */
#define DEFINE_INTERRUPT_HANDLER_RET(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	long ret;							\
									\
	interrupt_enter_prepare(regs);					\
									\
	ret = ____##func (regs);					\
									\
	interrupt_exit_prepare(regs);					\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)

/**
 * DECLARE_INTERRUPT_HANDLER_ASYNC - Declare asynchronous interrupt handler function
 * @func: Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER_ASYNC(func)				\
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_ASYNC - Define asynchronous interrupt handler function
 * @func: Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_ASYNC(func)				\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
interrupt_handler void func(struct pt_regs *regs)			\
{									\
	interrupt_async_enter_prepare(regs);				\
									\
	____##func (regs);						\
									\
	interrupt_async_exit_prepare(regs);				\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline void ____##func(struct pt_regs *regs)

/**
 * DECLARE_INTERRUPT_HANDLER_NMI - Declare NMI interrupt handler function
 * @func: Function name of the entry point
 * @returns: Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_NMI(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_NMI - Define NMI interrupt handler function
 * @func: Function name of the entry point
 * @returns: Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
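 *
 * A minimal usage sketch (hypothetical handler name); the wrapper saves and
 * restores NMI state around the body via interrupt_nmi_enter_prepare() and
 * interrupt_nmi_exit_prepare():
 *
 *	DEFINE_INTERRUPT_HANDLER_NMI(do_example_nmi)
 *	{
 *		return 0;
 *	}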
 */
#define DEFINE_INTERRUPT_HANDLER_NMI(func)				\
static __always_inline __no_sanitize_address __no_kcsan long		\
____##func(struct pt_regs *regs);					\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	struct interrupt_nmi_state state;				\
	long ret;							\
									\
	interrupt_nmi_enter_prepare(regs, &state);			\
									\
	ret = ____##func (regs);					\
									\
	interrupt_nmi_exit_prepare(regs, &state);			\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline __no_sanitize_address __no_kcsan long		\
____##func(struct pt_regs *regs)


/* Interrupt handlers */
/* kernel/traps.c */
DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
#ifdef CONFIG_PPC_BOOK3S_64
DECLARE_INTERRUPT_HANDLER_RAW(machine_check_early_boot);
DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async);
#endif
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
DECLARE_INTERRUPT_HANDLER(SMIException);
DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
DECLARE_INTERRUPT_HANDLER(unknown_exception);
DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
DECLARE_INTERRUPT_HANDLER_NMI(unknown_nmi_exception);
DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
DECLARE_INTERRUPT_HANDLER(RunModeException);
DECLARE_INTERRUPT_HANDLER(single_step_exception);
DECLARE_INTERRUPT_HANDLER(program_check_exception);
DECLARE_INTERRUPT_HANDLER(emulation_assist_interrupt);
DECLARE_INTERRUPT_HANDLER(alignment_exception);
DECLARE_INTERRUPT_HANDLER(StackOverflow);
DECLARE_INTERRUPT_HANDLER(stack_overflow_exception);
DECLARE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(facility_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(fp_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_tm);
DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
DECLARE_INTERRUPT_HANDLER_RAW(performance_monitor_exception);
DECLARE_INTERRUPT_HANDLER(DebugException);
DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
DECLARE_INTERRUPT_HANDLER(CacheLockingException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
DECLARE_INTERRUPT_HANDLER_NMI(WatchdogException);
DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);

/* slb.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_segment_interrupt);

/* hash_utils.c */
DECLARE_INTERRUPT_HANDLER(do_hash_fault);

/* fault.c */
DECLARE_INTERRUPT_HANDLER(do_page_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);

/* process.c */
DECLARE_INTERRUPT_HANDLER(do_break);

/* time.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(timer_interrupt);

/* mce.c */
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_early);
DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);

DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);

/* irq.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(do_IRQ);

void __noreturn unrecoverable_exception(struct pt_regs *regs);

void replay_system_reset(void);
void replay_soft_interrupts(void);
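
/*
 * As noted in interrupt_enter_prepare() above, a synchronous handler may call
 * this to get a regular process context (IRQs on) when the interrupted
 * context had them enabled. A minimal sketch (handler name is hypothetical,
 * for illustration only):
 *
 *	DEFINE_INTERRUPT_HANDLER(do_example_exception)
 *	{
 *		interrupt_cond_local_irq_enable(regs);
 *		...
 *	}
 */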
static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
{
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();
}

long system_call_exception(struct pt_regs *regs, unsigned long r0);
notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs, long scv);
notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs);
notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs);
#ifdef CONFIG_PPC64
unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *regs);
unsigned long interrupt_exit_user_restart(struct pt_regs *regs);
unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs);
#endif

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_INTERRUPT_H */