/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_INTERRUPT_H
#define _ASM_POWERPC_INTERRUPT_H

/* BookE/4xx */
#define INTERRUPT_CRITICAL_INPUT  0x100

/* BookE */
#define INTERRUPT_DEBUG           0xd00
#ifdef CONFIG_BOOKE
#define INTERRUPT_PERFMON         0x260
#define INTERRUPT_DOORBELL        0x280
#endif

/* BookS/4xx/8xx */
#define INTERRUPT_MACHINE_CHECK   0x200

/* BookS/8xx */
#define INTERRUPT_SYSTEM_RESET    0x100

/* BookS */
#define INTERRUPT_DATA_SEGMENT    0x380
#define INTERRUPT_INST_SEGMENT    0x480
#define INTERRUPT_TRACE           0xd00
#define INTERRUPT_H_DATA_STORAGE  0xe00
#define INTERRUPT_HMI             0xe60
#define INTERRUPT_H_FAC_UNAVAIL   0xf80
#ifdef CONFIG_PPC_BOOK3S
#define INTERRUPT_DOORBELL        0xa00
#define INTERRUPT_PERFMON         0xf00
#define INTERRUPT_ALTIVEC_UNAVAIL 0xf20
#endif

/* BookE/BookS/4xx/8xx */
#define INTERRUPT_DATA_STORAGE    0x300
#define INTERRUPT_INST_STORAGE    0x400
#define INTERRUPT_EXTERNAL        0x500
#define INTERRUPT_ALIGNMENT       0x600
#define INTERRUPT_PROGRAM         0x700
#define INTERRUPT_SYSCALL         0xc00
#define INTERRUPT_TRACE           0xd00

/* BookE/BookS/44x */
#define INTERRUPT_FP_UNAVAIL      0x800

/* BookE/BookS/44x/8xx */
#define INTERRUPT_DECREMENTER     0x900

#ifndef INTERRUPT_PERFMON
#define INTERRUPT_PERFMON         0x0
#endif

/* 8xx */
#define INTERRUPT_SOFT_EMU_8xx          0x1000
#define INTERRUPT_INST_TLB_MISS_8xx     0x1100
#define INTERRUPT_DATA_TLB_MISS_8xx     0x1200
#define INTERRUPT_INST_TLB_ERROR_8xx    0x1300
#define INTERRUPT_DATA_TLB_ERROR_8xx    0x1400
#define INTERRUPT_DATA_BREAKPOINT_8xx   0x1c00
#define INTERRUPT_INST_BREAKPOINT_8xx   0x1d00

/* 603 */
#define INTERRUPT_INST_TLB_MISS_603             0x1000
#define INTERRUPT_DATA_LOAD_TLB_MISS_603        0x1100
#define INTERRUPT_DATA_STORE_TLB_MISS_603       0x1200

#ifndef __ASSEMBLY__

#include <linux/context_tracking.h>
#include <linux/hardirq.h>
#include <asm/cputime.h>
#include <asm/ftrace.h>
#include <asm/kprobes.h>
#include <asm/runlatch.h>

static inline void nap_adjust_return(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_970_NAP
	if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
		/* Can avoid a test-and-clear because NMIs do not call this */
		clear_thread_local_flags(_TLF_NAPPING);
		regs->nip = (unsigned long)power4_idle_nap_return;
	}
#endif
}

struct interrupt_state {
};

static inline void booke_restore_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long dbcr0 = current->thread.debug.dbcr0;

	if (IS_ENABLED(CONFIG_PPC32) && unlikely(dbcr0 & DBCR0_IDM)) {
		mtspr(SPRN_DBSR, -1);
		mtspr(SPRN_DBCR0, global_dbcr0[smp_processor_id()]);
	}
#endif
}

static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC32
	if (!arch_irq_disabled_regs(regs))
		trace_hardirqs_off();

	if (user_mode(regs)) {
		kuep_lock();
		account_cpu_user_entry();
	} else {
		kuap_save_and_lock(regs);
	}
#endif

#ifdef CONFIG_PPC64
	if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
		trace_hardirqs_off();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	if (user_mode(regs)) {
		CT_WARN_ON(ct_state() != CONTEXT_USER);
		user_exit_irqoff();

		account_cpu_user_entry();
		account_stolen_time();
	} else {
		/*
		 * CT_WARN_ON comes here via program_check_exception,
		 * so avoid recursion.
		 */
		if (TRAP(regs) != INTERRUPT_PROGRAM)
			CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
	}
#endif

	booke_restore_dbcr0();
}

/*
 * Care should be taken to note that interrupt_exit_prepare and
 * interrupt_async_exit_prepare do not necessarily return immediately to
 * regs context (e.g., if regs is usermode, we don't necessarily return to
 * user mode). Other interrupts might be taken between here and return,
 * context switch / preemption may occur in the exit path after this, or a
 * signal may be delivered, etc.
 *
 * The real interrupt exit code is platform specific, e.g.,
 * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.
 *
 * However interrupt_nmi_exit_prepare does return directly to regs, because
 * NMIs do not do "exit work" or replay soft-masked interrupts.
 */
static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
}

static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_CTRL) &&
	    !test_thread_local_flags(_TLF_RUNLATCH))
		__ppc64_runlatch_on();
#endif

	interrupt_enter_prepare(regs, state);
	irq_enter();
}

static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
	/*
	 * Adjust at exit so the main handler sees the true NIA. This must
	 * come before irq_exit() because irq_exit can enable interrupts, and
	 * if another interrupt is taken before nap_adjust_return has run
	 * here, then that interrupt would return directly to idle nap return.
	 */
	nap_adjust_return(regs);

	irq_exit();
	interrupt_exit_prepare(regs, state);
}

struct interrupt_nmi_state {
#ifdef CONFIG_PPC64
	u8 irq_soft_mask;
	u8 irq_happened;
	u8 ftrace_enabled;
#endif
};

static inline bool nmi_disables_ftrace(struct pt_regs *regs)
{
	/* Allow DEC and PMI to be traced when they are soft-NMI */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
		if (TRAP(regs) == INTERRUPT_DECREMENTER)
			return false;
		if (TRAP(regs) == INTERRUPT_PERFMON)
			return false;
	}
	if (IS_ENABLED(CONFIG_PPC_BOOK3E)) {
		if (TRAP(regs) == INTERRUPT_PERFMON)
			return false;
	}

	return true;
}

static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
#ifdef CONFIG_PPC64
	state->irq_soft_mask = local_paca->irq_soft_mask;
	state->irq_happened = local_paca->irq_happened;

	/*
	 * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
	 * the right thing, and set IRQ_HARD_DIS. We do not want to reconcile
	 * because that goes through irq tracing which we don't want in NMI.
	 */
	local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !(regs->msr & MSR_PR) &&
	    regs->nip < (unsigned long)__end_interrupts) {
		// Kernel code running below __end_interrupts is
		// implicitly soft-masked.
		regs->softe = IRQS_ALL_DISABLED;
	}

	/* Don't do any per-CPU operations until interrupt state is fixed */

	if (nmi_disables_ftrace(regs)) {
		state->ftrace_enabled = this_cpu_get_ftrace_enabled();
		this_cpu_set_ftrace_enabled(0);
	}
#endif

	/*
	 * Do not use nmi_enter() for pseries hash guest taking a real-mode
	 * NMI because not everything it touches is within the RMA limit.
	 */
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
	    !firmware_has_feature(FW_FEATURE_LPAR) ||
	    radix_enabled() || (mfmsr() & MSR_DR))
		nmi_enter();
}

static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
	    !firmware_has_feature(FW_FEATURE_LPAR) ||
	    radix_enabled() || (mfmsr() & MSR_DR))
		nmi_exit();

	/*
	 * nmi does not call nap_adjust_return because nmi should not create
	 * new work to do (must use irq_work for that).
	 */

#ifdef CONFIG_PPC64
	if (nmi_disables_ftrace(regs))
		this_cpu_set_ftrace_enabled(state->ftrace_enabled);

	/* Check we didn't change the pending interrupt mask. */
	WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
	local_paca->irq_happened = state->irq_happened;
	local_paca->irq_soft_mask = state->irq_soft_mask;
#endif
}

/*
 * Don't use noinstr here like x86, but rather add NOKPROBE_SYMBOL to each
 * function definition. The reason for this is the noinstr section is placed
 * after the main text section, i.e., very far away from the interrupt entry
 * asm. That creates problems with fitting linker stubs when building large
 * kernels.
 */
#define interrupt_handler __visible noinline notrace __no_kcsan __no_sanitize_address

/**
 * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RAW(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RAW - Define raw interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * This is a plain function which does no tracing, reconciling, etc.
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 *
 * Raw interrupt handlers must not enable or disable interrupts, or
 * schedule. Tracing and instrumentation (ftrace, lockdep, etc) would
 * not be advisable either, although it may be possible in a pinch: at
 * the very least the trace will look odd.
 *
 * A raw handler may call one of the other interrupt handler functions
 * to be converted into that interrupt context without these restrictions.
 *
 * On PPC64, _RAW handlers may return with fast_interrupt_return.
 *
 * Specific handlers may have additional restrictions.
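 *
 * As a usage sketch only (example_raw_handler is a hypothetical name, not
 * a handler declared by this header), a raw handler is written by appending
 * a body to the macro:
 *
 *	DEFINE_INTERRUPT_HANDLER_RAW(example_raw_handler)
 *	{
 *		long ret = 0;	// do the absolute minimum here: nothing has
 *				// been reconciled or traced on entry
 *
 *		return ret;	// handed back to the asm caller
 *	}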
 */
#define DEFINE_INTERRUPT_HANDLER_RAW(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	long ret;							\
									\
	ret = ____##func (regs);					\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)

/**
 * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
 * @func:	Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER(func)					\
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER - Define synchronous interrupt handler function
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER(func)					\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
interrupt_handler void func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
									\
	interrupt_enter_prepare(regs, &state);				\
									\
	____##func (regs);						\
									\
	interrupt_exit_prepare(regs, &state);				\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline void ____##func(struct pt_regs *regs)

/**
 * DECLARE_INTERRUPT_HANDLER_RET - Declare synchronous interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RET(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RET - Define synchronous interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_RET(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
	long ret;							\
									\
	interrupt_enter_prepare(regs, &state);				\
									\
	ret = ____##func (regs);					\
									\
	interrupt_exit_prepare(regs, &state);				\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)

/**
 * DECLARE_INTERRUPT_HANDLER_ASYNC - Declare asynchronous interrupt handler function
 * @func:	Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER_ASYNC(func)				\
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_ASYNC - Define asynchronous interrupt handler function
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
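 *
 * For illustration only (example_async_handler is a hypothetical name, not
 * a handler declared by this header), an asynchronous handler takes the
 * same shape:
 *
 *	DEFINE_INTERRUPT_HANDLER_ASYNC(example_async_handler)
 *	{
 *		// interrupt_async_enter_prepare() and irq_enter() have
 *		// already run by the time this body executes; it returns void
 *	}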
 */
#define DEFINE_INTERRUPT_HANDLER_ASYNC(func)				\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
interrupt_handler void func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
									\
	interrupt_async_enter_prepare(regs, &state);			\
									\
	____##func (regs);						\
									\
	interrupt_async_exit_prepare(regs, &state);			\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline void ____##func(struct pt_regs *regs)

/**
 * DECLARE_INTERRUPT_HANDLER_NMI - Declare NMI interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_NMI(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_NMI - Define NMI interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_NMI(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	struct interrupt_nmi_state state;				\
	long ret;							\
									\
	interrupt_nmi_enter_prepare(regs, &state);			\
									\
	ret = ____##func (regs);					\
									\
	interrupt_nmi_exit_prepare(regs, &state);			\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)

/* Interrupt handlers */
/* kernel/traps.c */
DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
#ifdef CONFIG_PPC_BOOK3S_64
DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception);
#else
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
#endif
DECLARE_INTERRUPT_HANDLER(SMIException);
DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
DECLARE_INTERRUPT_HANDLER(unknown_exception);
DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
DECLARE_INTERRUPT_HANDLER_NMI(unknown_nmi_exception);
DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
DECLARE_INTERRUPT_HANDLER(RunModeException);
DECLARE_INTERRUPT_HANDLER(single_step_exception);
DECLARE_INTERRUPT_HANDLER(program_check_exception);
DECLARE_INTERRUPT_HANDLER(emulation_assist_interrupt);
DECLARE_INTERRUPT_HANDLER(alignment_exception);
DECLARE_INTERRUPT_HANDLER(StackOverflow);
DECLARE_INTERRUPT_HANDLER(stack_overflow_exception);
DECLARE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(facility_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(fp_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_tm);
DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
DECLARE_INTERRUPT_HANDLER_RAW(performance_monitor_exception);
DECLARE_INTERRUPT_HANDLER(DebugException);
DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
DECLARE_INTERRUPT_HANDLER(CacheLockingException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
DECLARE_INTERRUPT_HANDLER_NMI(WatchdogException);
DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);

/* slb.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_slb_fault);

/* hash_utils.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault);

/* fault.c */
DECLARE_INTERRUPT_HANDLER(do_page_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);

/* process.c */
DECLARE_INTERRUPT_HANDLER(do_break);

/* time.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(timer_interrupt);

/* mce.c */
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_early);
DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);

DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);

void __noreturn unrecoverable_exception(struct pt_regs *regs);

void replay_system_reset(void);
void replay_soft_interrupts(void);

static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
{
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();
}

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_INTERRUPT_H */