/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_HW_IRQ_H
#define _ASM_POWERPC_HW_IRQ_H

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

#ifdef CONFIG_PPC64

/*
 * PACA flags in paca->irq_happened.
 *
 * These bits are set when interrupts occur while soft-disabled
 * and allow a proper replay.
 *
 * PACA_IRQ_HARD_DIS is set whenever we hard disable. It is almost
 * always in sync with the MSR[EE] state, except:
 * - A window in interrupt entry, where hardware disables MSR[EE] and that
 *   must be "reconciled" with the soft mask state.
 * - NMI interrupts that hit in awkward places, until they fix the state.
 * - When local irqs are being enabled and state is being fixed up.
 * - When returning from an interrupt there are some windows where this
 *   can become out of sync, but gets fixed before the RFI or before
 *   executing the next user instruction (see arch/powerpc/kernel/interrupt.c).
 */
#define PACA_IRQ_HARD_DIS	0x01
#define PACA_IRQ_DBELL		0x02
#define PACA_IRQ_EE		0x04
#define PACA_IRQ_DEC		0x08 /* Or FIT */
#define PACA_IRQ_HMI		0x10
#define PACA_IRQ_PMI		0x20

/*
 * Some soft-masked interrupts must be hard masked until they are replayed
 * (e.g., because the soft-masked handler does not clear the exception).
 */
#ifdef CONFIG_PPC_BOOK3S
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE|PACA_IRQ_PMI)
#else
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE)
#endif

#endif /* CONFIG_PPC64 */

/*
 * flags for paca->irq_soft_mask
 */
#define IRQS_ENABLED		0
#define IRQS_DISABLED		1 /* local_irq_disable() interrupts */
#define IRQS_PMI_DISABLED	2
#define IRQS_ALL_DISABLED	(IRQS_DISABLED | IRQS_PMI_DISABLED)

#ifndef __ASSEMBLY__

static inline void __hard_irq_enable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		wrtee(MSR_EE);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EIE);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_EE | MSR_RI, 1);
	else
		mtmsr(mfmsr() | MSR_EE);
}

static inline void __hard_irq_disable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_RI, 1);
	else
		mtmsr(mfmsr() & ~MSR_EE);
}

static inline void __hard_EE_RI_disable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_NRI);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(0, 1);
	else
		mtmsr(mfmsr() & ~(MSR_EE | MSR_RI));
}

static inline void __hard_RI_enable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		return;

	if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_RI, 1);
	else
		mtmsr(mfmsr() | MSR_RI);
}

#ifdef CONFIG_PPC64
#include <asm/paca.h>

static inline notrace unsigned long irq_soft_mask_return(void)
{
	unsigned long flags;

	asm volatile(
		"lbz %0,%1(13)"
		: "=r" (flags)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)));

	return flags;
}
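/*
 * Roughly the C equivalent of the accessor above (illustrative sketch only,
 * not an alternative implementation):
 *
 *	return READ_ONCE(local_paca->irq_soft_mask);
 *
 * The inline asm form pins the access to a single lbz from the paca
 * register (r13), so the compiler cannot cache the value or hoist the load
 * out of the soft-mask manipulation sequences below.
 */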
/*
 * The "memory" clobber acts as both a compiler barrier
 * for the critical section and as a clobber because
 * we changed paca->irq_soft_mask
 */
static inline notrace void irq_soft_mask_set(unsigned long mask)
{
	/*
	 * The irq mask must always include the STD bit if any are set,
	 * and interrupts don't get replayed until the standard
	 * interrupt (local_irq_disable()) is unmasked.
	 *
	 * Other masks must only provide additional masking beyond
	 * the standard, and they are also not replayed until the
	 * standard interrupt becomes unmasked.
	 *
	 * This could be changed, but it will require partial
	 * unmasks to be replayed, among other things. For now, take
	 * the simple approach.
	 */
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON(mask && !(mask & IRQS_DISABLED));

	asm volatile(
		"stb %0,%1(13)"
		:
		: "r" (mask),
		  "i" (offsetof(struct paca_struct, irq_soft_mask))
		: "memory");
}

static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
{
	unsigned long flags = irq_soft_mask_return();

	irq_soft_mask_set(mask);

	return flags;
}

static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
{
	unsigned long flags = irq_soft_mask_return();

	irq_soft_mask_set(flags | mask);

	return flags;
}

static inline unsigned long arch_local_save_flags(void)
{
	return irq_soft_mask_return();
}

static inline void arch_local_irq_disable(void)
{
	irq_soft_mask_set(IRQS_DISABLED);
}

extern void arch_local_irq_restore(unsigned long);

static inline void arch_local_irq_enable(void)
{
	arch_local_irq_restore(IRQS_ENABLED);
}

static inline unsigned long arch_local_irq_save(void)
{
	return irq_soft_mask_set_return(IRQS_DISABLED);
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return flags & IRQS_DISABLED;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

static inline void set_pmi_irq_pending(void)
{
	/*
	 * Invoked from PMU callback functions to set the PMI bit in the paca.
	 * This has to be called with irqs disabled (via hard_irq_disable()).
	 */
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(mfmsr() & MSR_EE);

	get_paca()->irq_happened |= PACA_IRQ_PMI;
}

static inline void clear_pmi_irq_pending(void)
{
	/*
	 * Invoked from PMU callback functions to clear the pending PMI bit
	 * in the paca.
	 */
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(mfmsr() & MSR_EE);

	get_paca()->irq_happened &= ~PACA_IRQ_PMI;
}

static inline bool pmi_irq_pending(void)
{
	/*
	 * Invoked from PMU callback functions to check if there is a pending
	 * PMI bit in the paca.
	 */
	if (get_paca()->irq_happened & PACA_IRQ_PMI)
		return true;

	return false;
}
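/*
 * Illustrative flow of the lazy-disable scheme built from the helpers above
 * (a sketch, not literal code from this file):
 *
 *	local_irq_disable();	// only sets the soft mask (IRQS_DISABLED)
 *	...			// an external interrupt arrives: the masked
 *				// entry code records PACA_IRQ_EE (and
 *				// PACA_IRQ_HARD_DIS) in paca->irq_happened
 *				// and returns with MSR[EE] still clear
 *	local_irq_enable();	// arch_local_irq_restore(IRQS_ENABLED) sees
 *				// irq_happened != 0 and replays the interrupt
 */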
#ifdef CONFIG_PPC_BOOK3S
/*
 * To support disabling and enabling of irqs together with PMIs, a new pair
 * of powerpc_local_irq_pmu_save() and powerpc_local_irq_pmu_restore()
 * macros is added. They are implemented using the generic local_irq_*
 * code from include/linux/irqflags.h.
 */
#define raw_local_irq_pmu_save(flags)					\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = irq_soft_mask_or_return(IRQS_DISABLED |		\
				IRQS_PMI_DISABLED);			\
	} while(0)

#define raw_local_irq_pmu_restore(flags)				\
	do {								\
		typecheck(unsigned long, flags);			\
		arch_local_irq_restore(flags);				\
	} while(0)

#ifdef CONFIG_TRACE_IRQFLAGS
#define powerpc_local_irq_pmu_save(flags)				\
	do {								\
		raw_local_irq_pmu_save(flags);				\
		if (!raw_irqs_disabled_flags(flags))			\
			trace_hardirqs_off();				\
	} while(0)
#define powerpc_local_irq_pmu_restore(flags)				\
	do {								\
		if (!raw_irqs_disabled_flags(flags))			\
			trace_hardirqs_on();				\
		raw_local_irq_pmu_restore(flags);			\
	} while(0)
#else
#define powerpc_local_irq_pmu_save(flags)				\
	do {								\
		raw_local_irq_pmu_save(flags);				\
	} while(0)
#define powerpc_local_irq_pmu_restore(flags)				\
	do {								\
		raw_local_irq_pmu_restore(flags);			\
	} while (0)
#endif /* CONFIG_TRACE_IRQFLAGS */

#endif /* CONFIG_PPC_BOOK3S */
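/*
 * Typical usage (illustrative sketch, not a quote of an in-tree caller):
 * a PMU path that must not be interrupted even by a performance monitor
 * interrupt can bracket its critical section with
 *
 *	unsigned long flags;
 *
 *	powerpc_local_irq_pmu_save(flags);
 *	... update PMU state that a PMI must not observe half-updated ...
 *	powerpc_local_irq_pmu_restore(flags);
 *
 * These macros only exist under CONFIG_PPC_BOOK3S.
 */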
#define hard_irq_disable()	do {					\
	unsigned long flags;						\
	__hard_irq_disable();						\
	flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED);		\
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;			\
	if (!arch_irqs_disabled_flags(flags)) {				\
		asm volatile("std%X0 %1,%0" : "=m" (local_paca->saved_r1) \
			     : "r" (current_stack_pointer));		\
		trace_hardirqs_off();					\
	}								\
} while(0)

static inline bool __lazy_irq_pending(u8 irq_happened)
{
	return !!(irq_happened & ~PACA_IRQ_HARD_DIS);
}

/*
 * Check if a lazy IRQ is pending. Should be called with IRQs hard disabled.
 */
static inline bool lazy_irq_pending(void)
{
	return __lazy_irq_pending(get_paca()->irq_happened);
}

/*
 * Check if a lazy IRQ is pending, with no debugging checks.
 * Should be called with IRQs hard disabled.
 * For use in RI disabled code or other constrained situations.
 */
static inline bool lazy_irq_pending_nocheck(void)
{
	return __lazy_irq_pending(local_paca->irq_happened);
}

bool power_pmu_wants_prompt_pmi(void);

/*
 * This is called by asynchronous interrupts to check whether to
 * conditionally re-enable hard interrupts after having cleared
 * the source of the interrupt. They are kept disabled if there
 * is a different soft-masked interrupt pending that requires hard
 * masking.
 */
static inline bool should_hard_irq_enable(void)
{
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
		WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
		WARN_ON(mfmsr() & MSR_EE);
	}

	if (!IS_ENABLED(CONFIG_PERF_EVENTS))
		return false;
	/*
	 * If the PMU is not running, there is not much reason to enable
	 * MSR[EE] in irq handlers because any interrupts would just be
	 * soft-masked.
	 *
	 * TODO: Add test for 64e
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !power_pmu_wants_prompt_pmi())
		return false;

	if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
		return false;

	return true;
}

/*
 * Do the hard enabling, only call this if should_hard_irq_enable is true.
 */
static inline void do_hard_irq_enable(void)
{
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
		WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
		WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
		WARN_ON(mfmsr() & MSR_EE);
	}
	/*
	 * This allows PMI interrupts (and watchdog soft-NMIs) through.
	 * There is no other reason to enable this way.
	 */
	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
	__hard_irq_enable();
}
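/*
 * Typical caller pattern (illustrative sketch): an asynchronous interrupt
 * handler such as the decrementer/timer interrupt may do
 *
 *	if (should_hard_irq_enable())
 *		do_hard_irq_enable();
 *	... handle the interrupt ...
 *
 * so that PMIs (and the soft-NMI watchdog) can still be taken promptly
 * while the handler runs, unless a pending soft-masked interrupt requires
 * MSR[EE] to stay clear.
 */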
static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return (regs->softe & IRQS_DISABLED);
}

extern bool prep_irq_for_idle(void);
extern bool prep_irq_for_idle_irqsoff(void);
extern void irq_set_pending_from_srr1(unsigned long srr1);

#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();

extern void force_external_irq_replay(void);

static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
	regs->softe = val;
}
#else /* CONFIG_PPC64 */

static inline notrace unsigned long irq_soft_mask_return(void)
{
	return 0;
}

static inline unsigned long arch_local_save_flags(void)
{
	return mfmsr();
}

static inline void arch_local_irq_restore(unsigned long flags)
{
	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(flags);
	else
		mtmsr(flags);
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();

	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else
		mtmsr(flags & ~MSR_EE);

	return flags;
}

static inline void arch_local_irq_disable(void)
{
	__hard_irq_disable();
}

static inline void arch_local_irq_enable(void)
{
	__hard_irq_enable();
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return (flags & MSR_EE) == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#define hard_irq_disable()		arch_local_irq_disable()

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !(regs->msr & MSR_EE);
}

static __always_inline bool should_hard_irq_enable(void)
{
	return false;
}

static inline void do_hard_irq_enable(void)
{
	BUILD_BUG();
}

static inline void clear_pmi_irq_pending(void) { }
static inline void set_pmi_irq_pending(void) { }
static inline bool pmi_irq_pending(void) { return false; }

static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
}
#endif /* CONFIG_PPC64 */

static inline unsigned long mtmsr_isync_irqsafe(unsigned long msr)
{
#ifdef CONFIG_PPC64
	if (arch_irqs_disabled()) {
		/*
		 * With soft-masking, MSR[EE] can change from 1 to 0
		 * asynchronously when irqs are disabled, and we don't want to
		 * set MSR[EE] back to 1 here if that has happened. A race-free
		 * way to do this is to ensure EE is already 0. Another way it
		 * could be done is with a RESTART_TABLE handler, but that's
		 * probably overkill here.
		 */
		msr &= ~MSR_EE;
		mtmsr_isync(msr);
		irq_soft_mask_set(IRQS_ALL_DISABLED);
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
	} else
#endif
		mtmsr_isync(msr);

	return msr;
}

#define ARCH_IRQ_INIT_FLAGS	IRQ_NOREQUEST

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HW_IRQ_H */