/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_INTERRUPT_H
#define _ASM_POWERPC_INTERRUPT_H

/* BookE/4xx */
#define INTERRUPT_CRITICAL_INPUT  0x100

/* BookE */
#define INTERRUPT_DEBUG           0xd00
#ifdef CONFIG_BOOKE
#define INTERRUPT_PERFMON         0x260
#define INTERRUPT_DOORBELL        0x280
#endif

/* BookS/4xx/8xx */
#define INTERRUPT_MACHINE_CHECK   0x200

/* BookS/8xx */
#define INTERRUPT_SYSTEM_RESET    0x100

/* BookS */
#define INTERRUPT_DATA_SEGMENT    0x380
#define INTERRUPT_INST_SEGMENT    0x480
#define INTERRUPT_TRACE           0xd00
#define INTERRUPT_H_DATA_STORAGE  0xe00
#define INTERRUPT_HMI             0xe60
#define INTERRUPT_H_FAC_UNAVAIL   0xf80
#ifdef CONFIG_PPC_BOOK3S
#define INTERRUPT_DOORBELL        0xa00
#define INTERRUPT_PERFMON         0xf00
#define INTERRUPT_ALTIVEC_UNAVAIL 0xf20
#endif

/* BookE/BookS/4xx/8xx */
#define INTERRUPT_DATA_STORAGE    0x300
#define INTERRUPT_INST_STORAGE    0x400
#define INTERRUPT_EXTERNAL        0x500
#define INTERRUPT_ALIGNMENT       0x600
#define INTERRUPT_PROGRAM         0x700
#define INTERRUPT_SYSCALL         0xc00
#define INTERRUPT_TRACE           0xd00

/* BookE/BookS/44x */
#define INTERRUPT_FP_UNAVAIL      0x800

/* BookE/BookS/44x/8xx */
#define INTERRUPT_DECREMENTER     0x900

#ifndef INTERRUPT_PERFMON
#define INTERRUPT_PERFMON         0x0
#endif

/* 8xx */
#define INTERRUPT_SOFT_EMU_8xx		0x1000
#define INTERRUPT_INST_TLB_MISS_8xx	0x1100
#define INTERRUPT_DATA_TLB_MISS_8xx	0x1200
#define INTERRUPT_INST_TLB_ERROR_8xx	0x1300
#define INTERRUPT_DATA_TLB_ERROR_8xx	0x1400
#define INTERRUPT_DATA_BREAKPOINT_8xx	0x1c00
#define INTERRUPT_INST_BREAKPOINT_8xx	0x1d00

/* 603 */
#define INTERRUPT_INST_TLB_MISS_603		0x1000
#define INTERRUPT_DATA_LOAD_TLB_MISS_603	0x1100
#define INTERRUPT_DATA_STORE_TLB_MISS_603	0x1200

#ifndef __ASSEMBLY__

#include <linux/context_tracking.h>
#include <linux/hardirq.h>
#include <asm/cputime.h>
#include <asm/ftrace.h>
#include <asm/kprobes.h>
#include <asm/runlatch.h>

#ifdef CONFIG_PPC_BOOK3S_64
extern char __end_soft_masked[];
bool search_kernel_soft_mask_table(unsigned long addr);
unsigned long search_kernel_restart_table(unsigned long addr);

DECLARE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);

static inline bool is_implicit_soft_masked(struct pt_regs *regs)
{
	if (regs->msr & MSR_PR)
		return false;

	if (regs->nip >= (unsigned long)__end_soft_masked)
		return false;

	return search_kernel_soft_mask_table(regs->nip);
}

static inline void srr_regs_clobbered(void)
{
	local_paca->srr_valid = 0;
	local_paca->hsrr_valid = 0;
}
#else
static inline bool is_implicit_soft_masked(struct pt_regs *regs)
{
	return false;
}

static inline void srr_regs_clobbered(void)
{
}
#endif
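
/*
 * Illustrative sketch, not a caller in this file: srr_regs_clobbered()
 * is intended for paths that execute something which may overwrite
 * SRR0/SRR1 or HSRR0/HSRR1 (for example a firmware call or another
 * interrupt-taking sequence), so the interrupt exit code reloads those
 * registers from pt_regs rather than trusting values it wrote earlier.
 * The helper named below is hypothetical:
 *
 *	static void example_firmware_call(void)
 *	{
 *		enter_firmware();	// hypothetical; may clobber SRRs
 *		srr_regs_clobbered();	// invalidate cached SRR state
 *	}
 */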

static inline void nap_adjust_return(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_970_NAP
	if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
		/* Can avoid a test-and-clear because NMIs do not call this */
		clear_thread_local_flags(_TLF_NAPPING);
		regs_set_return_ip(regs, (unsigned long)power4_idle_nap_return);
	}
#endif
}

struct interrupt_state {
};

static inline void booke_restore_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long dbcr0 = current->thread.debug.dbcr0;

	if (IS_ENABLED(CONFIG_PPC32) && unlikely(dbcr0 & DBCR0_IDM)) {
		mtspr(SPRN_DBSR, -1);
		mtspr(SPRN_DBCR0, global_dbcr0[smp_processor_id()]);
	}
#endif
}

static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC32
	if (!arch_irq_disabled_regs(regs))
		trace_hardirqs_off();

	if (user_mode(regs)) {
		kuep_lock();
		account_cpu_user_entry();
	} else {
		kuap_save_and_lock(regs);
	}
#endif

#ifdef CONFIG_PPC64
	if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
		trace_hardirqs_off();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	if (user_mode(regs)) {
		CT_WARN_ON(ct_state() != CONTEXT_USER);
		user_exit_irqoff();

		account_cpu_user_entry();
		account_stolen_time();
	} else {
		/*
		 * CT_WARN_ON comes here via program_check_exception,
		 * so avoid recursion.
		 */
		if (TRAP(regs) != INTERRUPT_PROGRAM) {
			CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
			BUG_ON(is_implicit_soft_masked(regs));
		}
#ifdef CONFIG_PPC_BOOK3S
		/* Move this under a debugging check */
		if (arch_irq_disabled_regs(regs))
			BUG_ON(search_kernel_restart_table(regs->nip));
#endif
	}
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
#endif

	booke_restore_dbcr0();
}

/*
 * Care should be taken to note that interrupt_exit_prepare and
 * interrupt_async_exit_prepare do not necessarily return immediately to
 * regs context (e.g., if regs is usermode, we don't necessarily return to
 * user mode). Other interrupts might be taken between here and return,
 * context switch / preemption may occur in the exit path after this, or a
 * signal may be delivered, etc.
 *
 * The real interrupt exit code is platform specific, e.g.,
 * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.
 *
 * However interrupt_nmi_exit_prepare does return directly to regs, because
 * NMIs do not do "exit work" or replay soft-masked interrupts.
 */
static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
}

static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_CTRL) &&
	    !test_thread_local_flags(_TLF_RUNLATCH))
		__ppc64_runlatch_on();
#endif

	interrupt_enter_prepare(regs, state);
	irq_enter();
}

static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
	/*
	 * Adjust at exit so the main handler sees the true NIA. This must
	 * come before irq_exit() because irq_exit can enable interrupts, and
	 * if another interrupt is taken before nap_adjust_return has run
	 * here, then that interrupt would return directly to idle nap return.
	 */
	nap_adjust_return(regs);

	irq_exit();
	interrupt_exit_prepare(regs, state);
}

struct interrupt_nmi_state {
#ifdef CONFIG_PPC64
	u8 irq_soft_mask;
	u8 irq_happened;
	u8 ftrace_enabled;
	u64 softe;
#endif
};

static inline bool nmi_disables_ftrace(struct pt_regs *regs)
{
	/* Allow DEC and PMI to be traced when they are soft-NMI */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
		if (TRAP(regs) == INTERRUPT_DECREMENTER)
			return false;
		if (TRAP(regs) == INTERRUPT_PERFMON)
			return false;
	}
	if (IS_ENABLED(CONFIG_PPC_BOOK3E)) {
		if (TRAP(regs) == INTERRUPT_PERFMON)
			return false;
	}

	return true;
}

static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
#ifdef CONFIG_PPC64
	state->irq_soft_mask = local_paca->irq_soft_mask;
	state->irq_happened = local_paca->irq_happened;
	state->softe = regs->softe;

	/*
	 * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
	 * the right thing, and set IRQ_HARD_DIS. We do not want to reconcile
	 * because that goes through irq tracing which we don't want in NMI.
	 */
	local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	if (!(regs->msr & MSR_EE) || is_implicit_soft_masked(regs)) {
		/*
		 * Adjust regs->softe to be soft-masked if it had not been
		 * reconciled (e.g., interrupt entry with MSR[EE]=0 but softe
		 * not yet set disabled), or if it was in an implicit soft
		 * masked state. This makes arch_irq_disabled_regs(regs)
		 * behave as expected.
		 */
		regs->softe = IRQS_ALL_DISABLED;
	}

	/* Don't do any per-CPU operations until interrupt state is fixed */

	if (nmi_disables_ftrace(regs)) {
		state->ftrace_enabled = this_cpu_get_ftrace_enabled();
		this_cpu_set_ftrace_enabled(0);
	}
#endif

	/*
	 * Do not use nmi_enter() for pseries hash guest taking a real-mode
	 * NMI because not everything it touches is within the RMA limit.
	 */
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
	    !firmware_has_feature(FW_FEATURE_LPAR) ||
	    radix_enabled() || (mfmsr() & MSR_DR))
		nmi_enter();
}

static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
	    !firmware_has_feature(FW_FEATURE_LPAR) ||
	    radix_enabled() || (mfmsr() & MSR_DR))
		nmi_exit();

	/*
	 * nmi does not call nap_adjust_return because nmi should not create
	 * new work to do (must use irq_work for that).
	 */

#ifdef CONFIG_PPC64
#ifdef CONFIG_PPC_BOOK3S
	if (arch_irq_disabled_regs(regs)) {
		unsigned long rst = search_kernel_restart_table(regs->nip);
		if (rst)
			regs_set_return_ip(regs, rst);
	}
#endif

	if (nmi_disables_ftrace(regs))
		this_cpu_set_ftrace_enabled(state->ftrace_enabled);

	/* Check we didn't change the pending interrupt mask. */
	WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
	regs->softe = state->softe;
	local_paca->irq_happened = state->irq_happened;
	local_paca->irq_soft_mask = state->irq_soft_mask;
#endif
}

/*
 * Don't use noinstr here like x86, but rather add NOKPROBE_SYMBOL to each
 * function definition. The reason for this is that the noinstr section is
 * placed after the main text section, i.e., very far away from the
 * interrupt entry asm. That creates problems with fitting linker stubs
 * when building large kernels.
 */
#define interrupt_handler __visible noinline notrace __no_kcsan __no_sanitize_address

/**
 * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RAW(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RAW - Define raw interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * This is a plain function which does no tracing, reconciling, etc.
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 *
 * Raw interrupt handlers must not enable or disable interrupts, or
 * schedule. Tracing and instrumentation (ftrace, lockdep, etc.) are not
 * advisable either; they may be possible in a pinch, but the trace will
 * look odd at least.
 *
 * A raw handler may call one of the other interrupt handler functions
 * to be converted into that interrupt context without these restrictions.
 *
 * On PPC64, _RAW handlers may return with fast_interrupt_return.
 *
 * Specific handlers may have additional restrictions.
 */
#define DEFINE_INTERRUPT_HANDLER_RAW(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	long ret;							\
									\
	ret = ____##func (regs);					\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)
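
/*
 * Usage sketch for the _RAW variants (my_raw_handler is a hypothetical
 * name; see do_slb_fault and do_hash_fault declared below for real
 * users). Only the body is written at the use site, and the return
 * value goes back to the asm caller:
 *
 *	DECLARE_INTERRUPT_HANDLER_RAW(my_raw_handler);
 *
 *	DEFINE_INTERRUPT_HANDLER_RAW(my_raw_handler)
 *	{
 *		// Runs with no entry/exit preparation at all; see the
 *		// restrictions documented above.
 *		return 0;
 *	}
 */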

/**
 * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
 * @func:	Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER(func)					\
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER - Define synchronous interrupt handler function
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER(func)					\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
interrupt_handler void func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
									\
	interrupt_enter_prepare(regs, &state);				\
									\
	____##func (regs);						\
									\
	interrupt_exit_prepare(regs, &state);				\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline void ____##func(struct pt_regs *regs)
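
/*
 * Usage sketch (my_handler and handle_it are hypothetical names): the
 * macro expands to the outer entry point, which brackets the body with
 * interrupt_enter_prepare() / interrupt_exit_prepare() and marks it
 * NOKPROBE_SYMBOL; the use site appends only the body:
 *
 *	DEFINE_INTERRUPT_HANDLER(my_handler)
 *	{
 *		handle_it(regs);	// regs is the implicit parameter
 *	}
 */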

/**
 * DECLARE_INTERRUPT_HANDLER_RET - Declare synchronous interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RET(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RET - Define synchronous interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_RET(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
	long ret;							\
									\
	interrupt_enter_prepare(regs, &state);				\
									\
	ret = ____##func (regs);					\
									\
	interrupt_exit_prepare(regs, &state);				\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)

/**
 * DECLARE_INTERRUPT_HANDLER_ASYNC - Declare asynchronous interrupt handler function
 * @func:	Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER_ASYNC(func)				\
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_ASYNC - Define asynchronous interrupt handler function
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_ASYNC(func)				\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
interrupt_handler void func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
									\
	interrupt_async_enter_prepare(regs, &state);			\
									\
	____##func (regs);						\
									\
	interrupt_async_exit_prepare(regs, &state);			\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline void ____##func(struct pt_regs *regs)
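
/*
 * Usage sketch for the _ASYNC variant (my_irq_handler and
 * handle_pending_irq are hypothetical; timer_interrupt and do_IRQ
 * declared below are real users). The generated entry point
 * additionally wraps the body in irq_enter() / irq_exit() via
 * interrupt_async_enter_prepare() / interrupt_async_exit_prepare():
 *
 *	DEFINE_INTERRUPT_HANDLER_ASYNC(my_irq_handler)
 *	{
 *		handle_pending_irq(regs);
 *	}
 */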

/**
 * DECLARE_INTERRUPT_HANDLER_NMI - Declare NMI interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_NMI(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_NMI - Define NMI interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_NMI(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	struct interrupt_nmi_state state;				\
	long ret;							\
									\
	interrupt_nmi_enter_prepare(regs, &state);			\
									\
	ret = ____##func (regs);					\
									\
	interrupt_nmi_exit_prepare(regs, &state);			\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)


/* Interrupt handlers */
/* kernel/traps.c */
DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
#ifdef CONFIG_PPC_BOOK3S_64
DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async);
#endif
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
DECLARE_INTERRUPT_HANDLER(SMIException);
DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
DECLARE_INTERRUPT_HANDLER(unknown_exception);
DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
DECLARE_INTERRUPT_HANDLER_NMI(unknown_nmi_exception);
DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
DECLARE_INTERRUPT_HANDLER(RunModeException);
DECLARE_INTERRUPT_HANDLER(single_step_exception);
DECLARE_INTERRUPT_HANDLER(program_check_exception);
DECLARE_INTERRUPT_HANDLER(emulation_assist_interrupt);
DECLARE_INTERRUPT_HANDLER(alignment_exception);
DECLARE_INTERRUPT_HANDLER(StackOverflow);
DECLARE_INTERRUPT_HANDLER(stack_overflow_exception);
DECLARE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(facility_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(fp_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_tm);
DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
DECLARE_INTERRUPT_HANDLER_RAW(performance_monitor_exception);
DECLARE_INTERRUPT_HANDLER(DebugException);
DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
DECLARE_INTERRUPT_HANDLER(CacheLockingException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
DECLARE_INTERRUPT_HANDLER_NMI(WatchdogException);
DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);

/* slb.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_slb_fault);

/* hash_utils.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault);

/* fault.c */
DECLARE_INTERRUPT_HANDLER(do_page_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);

/* process.c */
DECLARE_INTERRUPT_HANDLER(do_break);

/* time.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(timer_interrupt);

/* mce.c */
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_early);
DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);

DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);

/* irq.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(do_IRQ);

void __noreturn unrecoverable_exception(struct pt_regs *regs);

void replay_system_reset(void);
void replay_soft_interrupts(void);

static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
{
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();
}
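
/*
 * Usage sketch (my_fault_handler is hypothetical): a synchronous
 * handler that may need to sleep (e.g., to deliver a signal) re-enables
 * local interrupts only when the interrupted context did not have them
 * soft-disabled, preserving the soft-mask state observed at entry:
 *
 *	DEFINE_INTERRUPT_HANDLER(my_fault_handler)
 *	{
 *		interrupt_cond_local_irq_enable(regs);
 *		// ... handle the fault, possibly sleeping ...
 *	}
 */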

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_INTERRUPT_H */