/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_INTERRUPT_H
#define _ASM_POWERPC_INTERRUPT_H

/* BookE/4xx */
#define INTERRUPT_CRITICAL_INPUT  0x100

/* BookE */
#define INTERRUPT_DEBUG           0xd00
#ifdef CONFIG_BOOKE
#define INTERRUPT_PERFMON         0x260
#define INTERRUPT_DOORBELL        0x280
#endif

/* BookS/4xx/8xx */
#define INTERRUPT_MACHINE_CHECK   0x200

/* BookS/8xx */
#define INTERRUPT_SYSTEM_RESET    0x100

/* BookS */
#define INTERRUPT_DATA_SEGMENT    0x380
#define INTERRUPT_INST_SEGMENT    0x480
#define INTERRUPT_TRACE           0xd00
#define INTERRUPT_H_DATA_STORAGE  0xe00
#define INTERRUPT_HMI             0xe60
#define INTERRUPT_H_FAC_UNAVAIL   0xf80
#ifdef CONFIG_PPC_BOOK3S
#define INTERRUPT_DOORBELL        0xa00
#define INTERRUPT_PERFMON         0xf00
#define INTERRUPT_ALTIVEC_UNAVAIL 0xf20
#endif

/* BookE/BookS/4xx/8xx */
#define INTERRUPT_DATA_STORAGE    0x300
#define INTERRUPT_INST_STORAGE    0x400
#define INTERRUPT_EXTERNAL        0x500
#define INTERRUPT_ALIGNMENT       0x600
#define INTERRUPT_PROGRAM         0x700
#define INTERRUPT_SYSCALL         0xc00
#define INTERRUPT_TRACE           0xd00

/* BookE/BookS/44x */
#define INTERRUPT_FP_UNAVAIL      0x800

/* BookE/BookS/44x/8xx */
#define INTERRUPT_DECREMENTER     0x900

#ifndef INTERRUPT_PERFMON
#define INTERRUPT_PERFMON         0x0
#endif

/* 8xx */
#define INTERRUPT_SOFT_EMU_8xx            0x1000
#define INTERRUPT_INST_TLB_MISS_8xx       0x1100
#define INTERRUPT_DATA_TLB_MISS_8xx       0x1200
#define INTERRUPT_INST_TLB_ERROR_8xx      0x1300
#define INTERRUPT_DATA_TLB_ERROR_8xx      0x1400
#define INTERRUPT_DATA_BREAKPOINT_8xx     0x1c00
#define INTERRUPT_INST_BREAKPOINT_8xx     0x1d00

/* 603 */
#define INTERRUPT_INST_TLB_MISS_603       0x1000
#define INTERRUPT_DATA_LOAD_TLB_MISS_603  0x1100
#define INTERRUPT_DATA_STORE_TLB_MISS_603 0x1200
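
/*
 * Illustrative note (not part of the original header): these vector values
 * are what TRAP(regs) reports for a taken interrupt, so handlers compare
 * against them directly, mirroring checks made later in this file, e.g.:
 *
 *	if (TRAP(regs) == INTERRUPT_PROGRAM)
 *		...	handle a program check interrupt ...
 */
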
#ifndef __ASSEMBLY__

#include <linux/context_tracking.h>
#include <linux/hardirq.h>
#include <asm/cputime.h>
#include <asm/firmware.h>
#include <asm/ftrace.h>
#include <asm/kprobes.h>
#include <asm/runlatch.h>

#ifdef CONFIG_PPC64
/*
 * WARN/BUG is handled with a program interrupt so minimise checks here to
 * avoid recursion and maximise the chance of getting the first oops handled.
 */
#define INT_SOFT_MASK_BUG_ON(regs, cond) \
do { \
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && \
	    (user_mode(regs) || (TRAP(regs) != INTERRUPT_PROGRAM))) \
		BUG_ON(cond); \
} while (0)
#endif

#ifdef CONFIG_PPC_BOOK3S_64
extern char __end_soft_masked[];
bool search_kernel_soft_mask_table(unsigned long addr);
unsigned long search_kernel_restart_table(unsigned long addr);

DECLARE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);

static inline bool is_implicit_soft_masked(struct pt_regs *regs)
{
	if (regs->msr & MSR_PR)
		return false;

	if (regs->nip >= (unsigned long)__end_soft_masked)
		return false;

	return search_kernel_soft_mask_table(regs->nip);
}

static inline void srr_regs_clobbered(void)
{
	local_paca->srr_valid = 0;
	local_paca->hsrr_valid = 0;
}
#else
static inline unsigned long search_kernel_restart_table(unsigned long addr)
{
	return 0;
}

static inline bool is_implicit_soft_masked(struct pt_regs *regs)
{
	return false;
}

static inline void srr_regs_clobbered(void)
{
}
#endif

static inline void nap_adjust_return(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_970_NAP
	if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
		/* Can avoid a test-and-clear because NMIs do not call this */
		clear_thread_local_flags(_TLF_NAPPING);
		regs_set_return_ip(regs, (unsigned long)power4_idle_nap_return);
	}
#endif
}

static inline void booke_restore_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long dbcr0 = current->thread.debug.dbcr0;

	if (IS_ENABLED(CONFIG_PPC32) && unlikely(dbcr0 & DBCR0_IDM)) {
		mtspr(SPRN_DBSR, -1);
		mtspr(SPRN_DBCR0, global_dbcr0[smp_processor_id()]);
	}
#endif
}

static inline void interrupt_enter_prepare(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	if (!arch_irq_disabled_regs(regs))
		trace_hardirqs_off();

	if (user_mode(regs))
		kuap_lock();
	else
		kuap_save_and_lock(regs);

	if (user_mode(regs))
		account_cpu_user_entry();
#endif

#ifdef CONFIG_PPC64
	bool trace_enable = false;

	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS)) {
		if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
			trace_enable = true;
	} else {
		irq_soft_mask_set(IRQS_ALL_DISABLED);
	}

	/*
	 * If the interrupt was taken with HARD_DIS clear, then enable MSR[EE].
	 * Asynchronous interrupts get here with HARD_DIS set (see below), so
	 * this enables MSR[EE] for synchronous interrupts. IRQs remain
	 * soft-masked. The interrupt handler may later call
	 * interrupt_cond_local_irq_enable() to achieve a regular process
	 * context.
	 */
	if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) {
		INT_SOFT_MASK_BUG_ON(regs, !(regs->msr & MSR_EE));
		__hard_irq_enable();
	} else {
		__hard_RI_enable();
	}

	/* Do this when RI=1 because it can cause SLB faults */
	if (trace_enable)
		trace_hardirqs_off();

	if (user_mode(regs)) {
		kuap_lock();
		CT_WARN_ON(ct_state() != CONTEXT_USER);
		user_exit_irqoff();

		account_cpu_user_entry();
		account_stolen_time();
	} else {
		kuap_save_and_lock(regs);
		/*
		 * CT_WARN_ON comes here via program_check_exception,
		 * so avoid recursion.
		 */
		if (TRAP(regs) != INTERRUPT_PROGRAM)
			CT_WARN_ON(ct_state() != CONTEXT_KERNEL &&
				   ct_state() != CONTEXT_IDLE);
		INT_SOFT_MASK_BUG_ON(regs, is_implicit_soft_masked(regs));
		INT_SOFT_MASK_BUG_ON(regs, arch_irq_disabled_regs(regs) &&
					   search_kernel_restart_table(regs->nip));
	}
	INT_SOFT_MASK_BUG_ON(regs, !arch_irq_disabled_regs(regs) &&
				   !(regs->msr & MSR_EE));
#endif

	booke_restore_dbcr0();
}

/*
 * Care should be taken to note that interrupt_exit_prepare and
 * interrupt_async_exit_prepare do not necessarily return immediately to
 * regs context (e.g., if regs is usermode, we don't necessarily return to
 * user mode). Other interrupts might be taken between here and return,
 * context switch / preemption may occur in the exit path after this, or a
 * signal may be delivered, etc.
 *
 * The real interrupt exit code is platform specific, e.g.,
 * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.
 *
 * However interrupt_nmi_exit_prepare does return directly to regs, because
 * NMIs do not do "exit work" or replay soft-masked interrupts.
 */
static inline void interrupt_exit_prepare(struct pt_regs *regs)
{
}

static inline void interrupt_async_enter_prepare(struct pt_regs *regs)
{
#ifdef CONFIG_PPC64
	/* Ensure interrupt_enter_prepare does not enable MSR[EE] */
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
#endif
	interrupt_enter_prepare(regs);
#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * RI=1 is set by interrupt_enter_prepare, so this thread flags access
	 * has to come afterward (it can cause SLB faults).
	 */
	if (cpu_has_feature(CPU_FTR_CTRL) &&
	    !test_thread_local_flags(_TLF_RUNLATCH))
		__ppc64_runlatch_on();
#endif
	irq_enter();
}

static inline void interrupt_async_exit_prepare(struct pt_regs *regs)
{
	/*
	 * Adjust at exit so the main handler sees the true NIA. This must
	 * come before irq_exit() because irq_exit can enable interrupts, and
	 * if another interrupt is taken before nap_adjust_return has run
	 * here, then that interrupt would return directly to idle nap return.
	 */
	nap_adjust_return(regs);

	irq_exit();
	interrupt_exit_prepare(regs);
}

struct interrupt_nmi_state {
#ifdef CONFIG_PPC64
	u8 irq_soft_mask;
	u8 irq_happened;
	u8 ftrace_enabled;
	u64 softe;
#endif
};

static inline bool nmi_disables_ftrace(struct pt_regs *regs)
{
	/* Allow DEC and PMI to be traced when they are soft-NMI */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
		if (TRAP(regs) == INTERRUPT_DECREMENTER)
			return false;
		if (TRAP(regs) == INTERRUPT_PERFMON)
			return false;
	}
	if (IS_ENABLED(CONFIG_PPC_BOOK3E_64)) {
		if (TRAP(regs) == INTERRUPT_PERFMON)
			return false;
	}

	return true;
}

static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
#ifdef CONFIG_PPC64
	state->irq_soft_mask = local_paca->irq_soft_mask;
	state->irq_happened = local_paca->irq_happened;
	state->softe = regs->softe;

	/*
	 * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
	 * the right thing, and set IRQ_HARD_DIS. We do not want to reconcile
	 * because that goes through irq tracing which we don't want in NMI.
	 */
	local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	if (!(regs->msr & MSR_EE) || is_implicit_soft_masked(regs)) {
		/*
		 * Adjust regs->softe to be soft-masked if it had not been
		 * reconciled (e.g., interrupt entry with MSR[EE]=0 but softe
		 * not yet set disabled), or if it was in an implicit soft
		 * masked state. This makes arch_irq_disabled_regs(regs)
		 * behave as expected.
		 */
		regs->softe = IRQS_ALL_DISABLED;
	}

	__hard_RI_enable();

	/* Don't do any per-CPU operations until interrupt state is fixed */

	if (nmi_disables_ftrace(regs)) {
		state->ftrace_enabled = this_cpu_get_ftrace_enabled();
		this_cpu_set_ftrace_enabled(0);
	}
#endif

	/* If data relocations are enabled, it's safe to use nmi_enter() */
	if (mfmsr() & MSR_DR) {
		nmi_enter();
		return;
	}

	/*
	 * But do not use nmi_enter() for a pseries hash guest taking a
	 * real-mode NMI because not everything it touches is within the
	 * RMA limit.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
	    firmware_has_feature(FW_FEATURE_LPAR) &&
	    !radix_enabled())
		return;

	/*
	 * Likewise, don't use it if we have some form of instrumentation
	 * (like KASAN shadow) that is not safe to access in real mode
	 * (even on radix).
	 */
	if (IS_ENABLED(CONFIG_KASAN))
		return;

	/* Otherwise, it should be safe to call it */
	nmi_enter();
}

static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
	if (mfmsr() & MSR_DR) {
		// nmi_exit if relocations are on
		nmi_exit();
	} else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
		   firmware_has_feature(FW_FEATURE_LPAR) &&
		   !radix_enabled()) {
		// no nmi_exit for a pseries hash guest taking a real mode exception
	} else if (IS_ENABLED(CONFIG_KASAN)) {
		// no nmi_exit for KASAN in real mode
	} else {
		nmi_exit();
	}

	/*
	 * NMI does not call nap_adjust_return because an NMI should not
	 * create new work to do (it must use irq_work for that).
	 */

#ifdef CONFIG_PPC64
#ifdef CONFIG_PPC_BOOK3S
	if (arch_irq_disabled_regs(regs)) {
		unsigned long rst = search_kernel_restart_table(regs->nip);
		if (rst)
			regs_set_return_ip(regs, rst);
	}
#endif

	if (nmi_disables_ftrace(regs))
		this_cpu_set_ftrace_enabled(state->ftrace_enabled);

	/* Check we didn't change the pending interrupt mask. */
	WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
	regs->softe = state->softe;
	local_paca->irq_happened = state->irq_happened;
	local_paca->irq_soft_mask = state->irq_soft_mask;
#endif
}

/*
 * Don't use noinstr here like x86, but rather add NOKPROBE_SYMBOL to each
 * function definition. The reason for this is the noinstr section is placed
 * after the main text section, i.e., very far away from the interrupt entry
 * asm. That creates problems with fitting linker stubs when building large
 * kernels.
 */
#define interrupt_handler __visible noinline notrace __no_kcsan __no_sanitize_address

/**
 * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function
 * @func: Function name of the entry point
 * @returns: Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RAW(func) \
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RAW - Define raw interrupt handler function
 * @func: Function name of the entry point
 * @returns: Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * This is a plain function which does no tracing, reconciling, etc.
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 *
 * Raw interrupt handlers must not enable or disable interrupts, or
 * schedule. Tracing and instrumentation (ftrace, lockdep, etc) would
 * not be advisable either, although they may be possible in a pinch;
 * the trace will look odd at least.
 *
 * A raw handler may call one of the other interrupt handler functions
 * to be converted into that interrupt context without these restrictions.
 *
 * On PPC64, _RAW handlers may return with fast_interrupt_return.
 *
 * Specific handlers may have additional restrictions.
 */
#define DEFINE_INTERRUPT_HANDLER_RAW(func) \
static __always_inline __no_sanitize_address __no_kcsan long \
____##func(struct pt_regs *regs); \
 \
interrupt_handler long func(struct pt_regs *regs) \
{ \
	long ret; \
 \
	__hard_RI_enable(); \
 \
	ret = ____##func (regs); \
 \
	return ret; \
} \
NOKPROBE_SYMBOL(func); \
 \
static __always_inline __no_sanitize_address __no_kcsan long \
____##func(struct pt_regs *regs)
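
/*
 * Example of DEFINE_INTERRUPT_HANDLER_RAW usage (illustrative sketch only,
 * hypothetical handler name): the body is appended directly to the macro,
 * and the generated wrapper returns the body's long value to the asm caller.
 *
 *	DEFINE_INTERRUPT_HANDLER_RAW(hypothetical_raw_fault)
 *	{
 *		long handled = 0;
 *
 *		// no tracing, scheduling, or irq enable/disable in here
 *		return handled;
 *	}
 */
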
/**
 * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
 * @func: Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER(func) \
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER - Define synchronous interrupt handler function
 * @func: Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER(func) \
static __always_inline void ____##func(struct pt_regs *regs); \
 \
interrupt_handler void func(struct pt_regs *regs) \
{ \
	interrupt_enter_prepare(regs); \
 \
	____##func (regs); \
 \
	interrupt_exit_prepare(regs); \
} \
NOKPROBE_SYMBOL(func); \
 \
static __always_inline void ____##func(struct pt_regs *regs)
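
/*
 * Example of DEFINE_INTERRUPT_HANDLER usage (illustrative sketch only,
 * hypothetical handler name): the wrapper brackets the body with
 * interrupt_enter_prepare()/interrupt_exit_prepare().
 *
 *	DEFINE_INTERRUPT_HANDLER(hypothetical_sync_fault)
 *	{
 *		// regs is available here; entry reconciling has already
 *		// been done by the generated wrapper
 *	}
 */
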
/**
 * DECLARE_INTERRUPT_HANDLER_RET - Declare synchronous interrupt handler function
 * @func: Function name of the entry point
 * @returns: Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RET(func) \
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RET - Define synchronous interrupt handler function
 * @func: Function name of the entry point
 * @returns: Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_RET(func) \
static __always_inline long ____##func(struct pt_regs *regs); \
 \
interrupt_handler long func(struct pt_regs *regs) \
{ \
	long ret; \
 \
	interrupt_enter_prepare(regs); \
 \
	ret = ____##func (regs); \
 \
	interrupt_exit_prepare(regs); \
 \
	return ret; \
} \
NOKPROBE_SYMBOL(func); \
 \
static __always_inline long ____##func(struct pt_regs *regs)
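
/*
 * Example of DEFINE_INTERRUPT_HANDLER_RET usage (illustrative sketch only,
 * hypothetical handler name): identical to DEFINE_INTERRUPT_HANDLER except
 * that the body returns a long which is handed back to the asm caller.
 *
 *	DEFINE_INTERRUPT_HANDLER_RET(hypothetical_sync_fault_ret)
 *	{
 *		return 0;	// value returned to the entry asm
 *	}
 */
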
/**
 * DECLARE_INTERRUPT_HANDLER_ASYNC - Declare asynchronous interrupt handler function
 * @func: Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER_ASYNC(func) \
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_ASYNC - Define asynchronous interrupt handler function
 * @func: Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_ASYNC(func) \
static __always_inline void ____##func(struct pt_regs *regs); \
 \
interrupt_handler void func(struct pt_regs *regs) \
{ \
	interrupt_async_enter_prepare(regs); \
 \
	____##func (regs); \
 \
	interrupt_async_exit_prepare(regs); \
} \
NOKPROBE_SYMBOL(func); \
 \
static __always_inline void ____##func(struct pt_regs *regs)
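
/*
 * Example of DEFINE_INTERRUPT_HANDLER_ASYNC usage (illustrative sketch only,
 * hypothetical handler name): the wrapper uses the async enter/exit helpers,
 * which also perform irq_enter()/irq_exit().
 *
 *	DEFINE_INTERRUPT_HANDLER_ASYNC(hypothetical_device_interrupt)
 *	{
 *		// runs in hardirq context; IRQs remain soft-masked
 *	}
 */
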
/**
 * DECLARE_INTERRUPT_HANDLER_NMI - Declare NMI interrupt handler function
 * @func: Function name of the entry point
 * @returns: Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_NMI(func) \
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_NMI - Define NMI interrupt handler function
 * @func: Function name of the entry point
 * @returns: Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_NMI(func) \
static __always_inline __no_sanitize_address __no_kcsan long \
____##func(struct pt_regs *regs); \
 \
interrupt_handler long func(struct pt_regs *regs) \
{ \
	struct interrupt_nmi_state state; \
	long ret; \
 \
	interrupt_nmi_enter_prepare(regs, &state); \
 \
	ret = ____##func (regs); \
 \
	interrupt_nmi_exit_prepare(regs, &state); \
 \
	return ret; \
} \
NOKPROBE_SYMBOL(func); \
 \
static __always_inline __no_sanitize_address __no_kcsan long \
____##func(struct pt_regs *regs)
/* Interrupt handlers */
/* kernel/traps.c */
DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
#ifdef CONFIG_PPC_BOOK3S_64
DECLARE_INTERRUPT_HANDLER_RAW(machine_check_early_boot);
DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async);
#endif
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
DECLARE_INTERRUPT_HANDLER(SMIException);
DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
DECLARE_INTERRUPT_HANDLER(unknown_exception);
DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
DECLARE_INTERRUPT_HANDLER_NMI(unknown_nmi_exception);
DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
DECLARE_INTERRUPT_HANDLER(RunModeException);
DECLARE_INTERRUPT_HANDLER(single_step_exception);
DECLARE_INTERRUPT_HANDLER(program_check_exception);
DECLARE_INTERRUPT_HANDLER(emulation_assist_interrupt);
DECLARE_INTERRUPT_HANDLER(alignment_exception);
DECLARE_INTERRUPT_HANDLER(StackOverflow);
DECLARE_INTERRUPT_HANDLER(stack_overflow_exception);
DECLARE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(facility_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(fp_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_tm);
DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
DECLARE_INTERRUPT_HANDLER_RAW(performance_monitor_exception);
DECLARE_INTERRUPT_HANDLER(DebugException);
DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
DECLARE_INTERRUPT_HANDLER(CacheLockingException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
DECLARE_INTERRUPT_HANDLER_NMI(WatchdogException);
DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);

/* slb.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_segment_interrupt);

/* hash_utils.c */
DECLARE_INTERRUPT_HANDLER(do_hash_fault);

/* fault.c */
DECLARE_INTERRUPT_HANDLER(do_page_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);

/* process.c */
DECLARE_INTERRUPT_HANDLER(do_break);

/* time.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(timer_interrupt);

/* mce.c */
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_early);
DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);

DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);

/* irq.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(do_IRQ);

void __noreturn unrecoverable_exception(struct pt_regs *regs);

void replay_system_reset(void);
void replay_soft_interrupts(void);

static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
{
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();
}
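
/*
 * Example of interrupt_cond_local_irq_enable usage (illustrative sketch only,
 * hypothetical handler name): a synchronous handler that wants a regular
 * process context, as noted above in interrupt_enter_prepare(), can enable
 * interrupts if the interrupted context had them enabled.
 *
 *	DEFINE_INTERRUPT_HANDLER(hypothetical_emulation_fault)
 *	{
 *		interrupt_cond_local_irq_enable(regs);
 *		// ... work that expects normal irq state ...
 *	}
 */
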
long system_call_exception(struct pt_regs *regs, unsigned long r0);
notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs, long scv);
notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs);
notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs);
#ifdef CONFIG_PPC64
unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *regs);
unsigned long interrupt_exit_user_restart(struct pt_regs *regs);
unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs);
#endif

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_INTERRUPT_H */