/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_HW_IRQ_H
#define _ASM_POWERPC_HW_IRQ_H

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

#ifdef CONFIG_PPC64

/*
 * PACA flags in paca->irq_happened.
 *
 * These bits are set when interrupts occur while soft-disabled
 * and allow a proper replay.
 *
 * PACA_IRQ_HARD_DIS is set whenever we hard disable. It is almost
 * always in sync with the MSR[EE] state, except:
 * - A window in interrupt entry, where hardware disables MSR[EE] and that
 *   must be "reconciled" with the soft mask state.
 * - NMI interrupts that hit in awkward places, until they fix the state.
 * - When local irqs are being enabled and state is being fixed up.
 * - When returning from an interrupt there are some windows where this
 *   can become out of sync, but it gets fixed before the RFI or before
 *   executing the next user instruction (see arch/powerpc/kernel/interrupt.c).
 */
#define PACA_IRQ_HARD_DIS	0x01
#define PACA_IRQ_DBELL		0x02
#define PACA_IRQ_EE		0x04
#define PACA_IRQ_DEC		0x08 /* Or FIT */
#define PACA_IRQ_HMI		0x10
#define PACA_IRQ_PMI		0x20

/*
 * Some soft-masked interrupts must be hard masked until they are replayed
 * (e.g., because the soft-masked handler does not clear the exception).
 */
#ifdef CONFIG_PPC_BOOK3S
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE|PACA_IRQ_PMI)
#else
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE)
#endif

#endif /* CONFIG_PPC64 */
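
/*
 * Illustrative sketch of the replay mechanism described above (a
 * hypothetical C rendering only; the real logic lives in the masked
 * exception entry code and in arch/powerpc/kernel/irq_64.c). An external
 * interrupt that arrives while soft-disabled is only recorded, and
 * MSR[EE] is left off so it cannot fire again before it is replayed:
 *
 *	if (irq_soft_mask_return() & IRQS_DISABLED) {
 *		local_paca->irq_happened |= PACA_IRQ_EE;
 *		// PACA_IRQ_EE is in PACA_IRQ_MUST_HARD_MASK, so stay
 *		// hard disabled until arch_local_irq_restore() replays it
 *		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
 *		return;
 *	}
 */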

/*
 * Flags for paca->irq_soft_mask.
 */
#define IRQS_ENABLED		0
#define IRQS_DISABLED		1 /* local_irq_disable() interrupts */
#define IRQS_PMI_DISABLED	2
#define IRQS_ALL_DISABLED	(IRQS_DISABLED | IRQS_PMI_DISABLED)

#ifndef __ASSEMBLY__

static inline void __hard_irq_enable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		wrtee(MSR_EE);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EIE);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_EE | MSR_RI, 1);
	else
		mtmsr(mfmsr() | MSR_EE);
}

static inline void __hard_irq_disable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_RI, 1);
	else
		mtmsr(mfmsr() & ~MSR_EE);
}

static inline void __hard_EE_RI_disable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_NRI);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(0, 1);
	else
		mtmsr(mfmsr() & ~(MSR_EE | MSR_RI));
}

static inline void __hard_RI_enable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		return;

	if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_RI, 1);
	else
		mtmsr(mfmsr() | MSR_RI);
}

#ifdef CONFIG_PPC64
#include <asm/paca.h>

static inline notrace unsigned long irq_soft_mask_return(void)
{
	return READ_ONCE(local_paca->irq_soft_mask);
}

/*
 * The WRITE_ONCE() and barrier() pair acts as a compiler barrier for
 * the critical section: the store to paca->irq_soft_mask cannot be
 * torn, and subsequent memory accesses cannot be reordered before it.
 */
static inline notrace void irq_soft_mask_set(unsigned long mask)
{
	/*
	 * The irq mask must always include the STD bit if any are set,
	 * and interrupts don't get replayed until the standard
	 * interrupt (local_irq_disable()) is unmasked.
	 *
	 * Other masks must only provide additional masking beyond
	 * the standard, and they are also not replayed until the
	 * standard interrupt becomes unmasked.
	 *
	 * This could be changed, but it will require partial
	 * unmasks to be replayed, among other things. For now, take
	 * the simple approach.
	 */
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON(mask && !(mask & IRQS_DISABLED));

	WRITE_ONCE(local_paca->irq_soft_mask, mask);
	barrier();
}

static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
{
	unsigned long flags = irq_soft_mask_return();

	irq_soft_mask_set(mask);

	return flags;
}

static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
{
	unsigned long flags = irq_soft_mask_return();

	irq_soft_mask_set(flags | mask);

	return flags;
}

static inline unsigned long arch_local_save_flags(void)
{
	return irq_soft_mask_return();
}

static inline void arch_local_irq_disable(void)
{
	irq_soft_mask_set(IRQS_DISABLED);
}

extern void arch_local_irq_restore(unsigned long);

static inline void arch_local_irq_enable(void)
{
	arch_local_irq_restore(IRQS_ENABLED);
}

static inline unsigned long arch_local_irq_save(void)
{
	return irq_soft_mask_set_return(IRQS_DISABLED);
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return flags & IRQS_DISABLED;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

static inline void set_pmi_irq_pending(void)
{
	/*
	 * Invoked from PMU callback functions to set the PMI bit in the paca.
	 * This has to be called with irqs hard disabled (via hard_irq_disable()).
	 */
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(mfmsr() & MSR_EE);

	get_paca()->irq_happened |= PACA_IRQ_PMI;
}

static inline void clear_pmi_irq_pending(void)
{
	/*
	 * Invoked from PMU callback functions to clear the pending PMI bit
	 * in the paca.
	 */
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(mfmsr() & MSR_EE);

	get_paca()->irq_happened &= ~PACA_IRQ_PMI;
}

static inline bool pmi_irq_pending(void)
{
	/*
	 * Invoked from PMU callback functions to check if there is a pending
	 * PMI bit in the paca.
	 */
	if (get_paca()->irq_happened & PACA_IRQ_PMI)
		return true;

	return false;
}

#ifdef CONFIG_PPC_BOOK3S
/*
 * To support masking and unmasking of irqs together with PMIs, a pair of
 * macros, powerpc_local_irq_pmu_save() and powerpc_local_irq_pmu_restore(),
 * is provided. They are implemented on top of the generic local_irq_*
 * code from include/linux/irqflags.h.
 */
#define raw_local_irq_pmu_save(flags)					\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = irq_soft_mask_or_return(IRQS_DISABLED |	\
				IRQS_PMI_DISABLED);			\
	} while (0)

#define raw_local_irq_pmu_restore(flags)				\
	do {								\
		typecheck(unsigned long, flags);			\
		arch_local_irq_restore(flags);				\
	} while (0)

#ifdef CONFIG_TRACE_IRQFLAGS
#define powerpc_local_irq_pmu_save(flags)				\
	do {								\
		raw_local_irq_pmu_save(flags);				\
		if (!raw_irqs_disabled_flags(flags))			\
			trace_hardirqs_off();				\
	} while (0)
#define powerpc_local_irq_pmu_restore(flags)				\
	do {								\
		if (!raw_irqs_disabled_flags(flags))			\
			trace_hardirqs_on();				\
		raw_local_irq_pmu_restore(flags);			\
	} while (0)
#else
#define powerpc_local_irq_pmu_save(flags)				\
	do {								\
		raw_local_irq_pmu_save(flags);				\
	} while (0)
#define powerpc_local_irq_pmu_restore(flags)				\
	do {								\
		raw_local_irq_pmu_restore(flags);			\
	} while (0)
#endif /* CONFIG_TRACE_IRQFLAGS */

#endif /* CONFIG_PPC_BOOK3S */
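
/*
 * Typical usage sketch for the PMU masking helpers above (hypothetical
 * caller; the real users are the PMU callbacks in arch/powerpc/perf/):
 *
 *	unsigned long flags;
 *
 *	powerpc_local_irq_pmu_save(flags);	// mask irqs and PMIs
 *	// ... update PMU state that a PMI must not observe mid-update ...
 *	powerpc_local_irq_pmu_restore(flags);	// restore the previous mask
 */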

#define hard_irq_disable()	do {					\
	unsigned long flags;						\
	__hard_irq_disable();						\
	flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED);		\
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;			\
	if (!arch_irqs_disabled_flags(flags)) {				\
		WRITE_ONCE(local_paca->saved_r1, current_stack_pointer);\
		trace_hardirqs_off();					\
	}								\
} while (0)

static inline bool __lazy_irq_pending(u8 irq_happened)
{
	return !!(irq_happened & ~PACA_IRQ_HARD_DIS);
}

/*
 * Check if a lazy IRQ is pending. Should be called with IRQs hard disabled.
 */
static inline bool lazy_irq_pending(void)
{
	return __lazy_irq_pending(get_paca()->irq_happened);
}

/*
 * Check if a lazy IRQ is pending, with no debugging checks.
 * Should be called with IRQs hard disabled.
 * For use in RI-disabled code or other constrained situations.
 */
static inline bool lazy_irq_pending_nocheck(void)
{
	return __lazy_irq_pending(local_paca->irq_happened);
}

bool power_pmu_wants_prompt_pmi(void);

/*
 * This is called by asynchronous interrupts to check whether to
 * conditionally re-enable hard interrupts after having cleared
 * the source of the interrupt. They are kept disabled if there
 * is a different soft-masked interrupt pending that requires hard
 * masking.
 */
static inline bool should_hard_irq_enable(void)
{
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
		WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
		WARN_ON(mfmsr() & MSR_EE);
	}

	if (!IS_ENABLED(CONFIG_PERF_EVENTS))
		return false;
	/*
	 * If the PMU is not running, there is not much reason to enable
	 * MSR[EE] in irq handlers because any interrupts would just be
	 * soft-masked.
	 *
	 * TODO: Add test for 64e
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !power_pmu_wants_prompt_pmi())
		return false;

	if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
		return false;

	return true;
}

/*
 * Do the hard enabling; only call this if should_hard_irq_enable()
 * returned true.
 */
static inline void do_hard_irq_enable(void)
{
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
		WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
		WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
		WARN_ON(mfmsr() & MSR_EE);
	}
	/*
	 * This allows PMI interrupts (and watchdog soft-NMIs) through.
	 * There is no other reason to enable this way.
	 */
	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
	__hard_irq_enable();
}
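
/*
 * Usage sketch for the pair above (hypothetical handler; the real callers
 * are the asynchronous interrupt handlers, e.g. the decrementer interrupt):
 *
 *	// interrupt source has already been cleared at this point
 *	if (should_hard_irq_enable())
 *		do_hard_irq_enable();	// let PMIs/watchdog soft-NMIs in
 *	// ... bulk of the handler runs, possibly with MSR[EE] on ...
 */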

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return (regs->softe & IRQS_DISABLED);
}

extern bool prep_irq_for_idle(void);
extern bool prep_irq_for_idle_irqsoff(void);
extern void irq_set_pending_from_srr1(unsigned long srr1);

#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();

extern void force_external_irq_replay(void);

static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
	regs->softe = val;
}
#else /* CONFIG_PPC64 */

static inline notrace unsigned long irq_soft_mask_return(void)
{
	return 0;
}

static inline unsigned long arch_local_save_flags(void)
{
	return mfmsr();
}

static inline void arch_local_irq_restore(unsigned long flags)
{
	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(flags);
	else
		mtmsr(flags);
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();

	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else
		mtmsr(flags & ~MSR_EE);

	return flags;
}

static inline void arch_local_irq_disable(void)
{
	__hard_irq_disable();
}

static inline void arch_local_irq_enable(void)
{
	__hard_irq_enable();
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return (flags & MSR_EE) == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#define hard_irq_disable()	arch_local_irq_disable()

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !(regs->msr & MSR_EE);
}

static __always_inline bool should_hard_irq_enable(void)
{
	return false;
}

static inline void do_hard_irq_enable(void)
{
	BUILD_BUG();
}

static inline void clear_pmi_irq_pending(void) { }
static inline void set_pmi_irq_pending(void) { }
static inline bool pmi_irq_pending(void) { return false; }

static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
}
#endif /* CONFIG_PPC64 */

#define ARCH_IRQ_INIT_FLAGS	IRQ_NOREQUEST

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HW_IRQ_H */