/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_HW_IRQ_H
#define _ASM_POWERPC_HW_IRQ_H

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

#ifdef CONFIG_PPC64

/*
 * PACA flags in paca->irq_happened.
 *
 * These bits are set when interrupts occur while soft-disabled
 * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
 * is set whenever we manually hard disable.
 */
#define PACA_IRQ_HARD_DIS	0x01
#define PACA_IRQ_DBELL		0x02
#define PACA_IRQ_EE		0x04
#define PACA_IRQ_DEC		0x08 /* Or FIT */
#define PACA_IRQ_EE_EDGE	0x10 /* BookE only */
#define PACA_IRQ_HMI		0x20
#define PACA_IRQ_PMI		0x40

/*
 * Some soft-masked interrupts must be hard masked until they are replayed
 * (e.g., because the soft-masked handler does not clear the exception).
 */
#ifdef CONFIG_PPC_BOOK3S
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE|PACA_IRQ_PMI)
#else
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE)
#endif

/*
 * flags for paca->irq_soft_mask
 */
#define IRQS_ENABLED		0
#define IRQS_DISABLED		1 /* local_irq_disable() interrupts */
#define IRQS_PMI_DISABLED	2
#define IRQS_ALL_DISABLED	(IRQS_DISABLED | IRQS_PMI_DISABLED)

#endif /* CONFIG_PPC64 */

#ifndef __ASSEMBLY__

extern void replay_system_reset(void);
extern void __replay_interrupt(unsigned int vector);

extern void timer_interrupt(struct pt_regs *);
extern void performance_monitor_exception(struct pt_regs *regs);
extern void WatchdogException(struct pt_regs *regs);
extern void unknown_exception(struct pt_regs *regs);

#ifdef CONFIG_PPC64
#include <asm/paca.h>

static inline notrace unsigned long irq_soft_mask_return(void)
{
	unsigned long flags;

	asm volatile(
		"lbz %0,%1(13)"
		: "=r" (flags)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)));

	return flags;
}

/*
 * The "memory" clobber acts as both a compiler barrier
 * for the critical section and as a clobber because
 * we changed paca->irq_soft_mask.
 */
static inline notrace void irq_soft_mask_set(unsigned long mask)
{
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	/*
	 * The irq mask must always include the STD bit if any are set,
	 * and interrupts don't get replayed until the standard
	 * interrupt (local_irq_disable()) is unmasked.
	 *
	 * Other masks must only provide additional masking beyond
	 * the standard, and they are also not replayed until the
	 * standard interrupt becomes unmasked.
	 *
	 * This could be changed, but it will require partial
	 * unmasks to be replayed, among other things. For now, take
	 * the simple approach.
	 */
	WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif

	asm volatile(
		"stb %0,%1(13)"
		:
		: "r" (mask),
		  "i" (offsetof(struct paca_struct, irq_soft_mask))
		: "memory");
}
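
/*
 * For illustration only (not an alternative implementation): ignoring
 * ordering, the two accessors above behave like plain byte accesses to
 * the per-CPU paca, roughly:
 *
 *	flags = local_paca->irq_soft_mask;	// irq_soft_mask_return()
 *	local_paca->irq_soft_mask = mask;	// irq_soft_mask_set()
 *
 * The inline asm form pins each access to a single lbz/stb through r13
 * (the paca pointer) and, together with the "memory" clobber on the
 * store, stops the compiler from caching, tearing, or reordering the
 * access around the critical section.
 */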

static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
{
	unsigned long flags;

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif

	asm volatile(
		"lbz %0,%1(13); stb %2,%1(13)"
		: "=&r" (flags)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)),
		  "r" (mask)
		: "memory");

	return flags;
}

static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
{
	unsigned long flags, tmp;

	asm volatile(
		"lbz %0,%2(13); or %1,%0,%3; stb %1,%2(13)"
		: "=&r" (flags), "=r" (tmp)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)),
		  "r" (mask)
		: "memory");

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	WARN_ON((mask | flags) && !((mask | flags) & IRQS_DISABLED));
#endif

	return flags;
}

static inline unsigned long arch_local_save_flags(void)
{
	return irq_soft_mask_return();
}

static inline void arch_local_irq_disable(void)
{
	irq_soft_mask_set(IRQS_DISABLED);
}

extern void arch_local_irq_restore(unsigned long);

static inline void arch_local_irq_enable(void)
{
	arch_local_irq_restore(IRQS_ENABLED);
}

static inline unsigned long arch_local_irq_save(void)
{
	return irq_soft_mask_set_return(IRQS_DISABLED);
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return flags & IRQS_DISABLED;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#ifdef CONFIG_PPC_BOOK3S
/*
 * To support disabling and enabling of irqs together with PMIs, a pair
 * of new macros, powerpc_local_irq_pmu_save() and
 * powerpc_local_irq_pmu_restore(), is added. These macros are
 * implemented on top of the generic local_irq_* code from
 * include/linux/irqflags.h.
 */
#define raw_local_irq_pmu_save(flags)					\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = irq_soft_mask_or_return(IRQS_DISABLED |		\
				IRQS_PMI_DISABLED);			\
	} while (0)

#define raw_local_irq_pmu_restore(flags)				\
	do {								\
		typecheck(unsigned long, flags);			\
		arch_local_irq_restore(flags);				\
	} while (0)

#ifdef CONFIG_TRACE_IRQFLAGS
#define powerpc_local_irq_pmu_save(flags)			\
	do {							\
		raw_local_irq_pmu_save(flags);			\
		trace_hardirqs_off();				\
	} while (0)
#define powerpc_local_irq_pmu_restore(flags)			\
	do {							\
		if (raw_irqs_disabled_flags(flags)) {		\
			raw_local_irq_pmu_restore(flags);	\
			trace_hardirqs_off();			\
		} else {					\
			trace_hardirqs_on();			\
			raw_local_irq_pmu_restore(flags);	\
		}						\
	} while (0)
#else
#define powerpc_local_irq_pmu_save(flags)			\
	do {							\
		raw_local_irq_pmu_save(flags);			\
	} while (0)
#define powerpc_local_irq_pmu_restore(flags)			\
	do {							\
		raw_local_irq_pmu_restore(flags);		\
	} while (0)
#endif /* CONFIG_TRACE_IRQFLAGS */
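
/*
 * Sketch of intended usage (illustrative only): a critical section
 * that must not be preempted by regular interrupts or by the
 * performance monitor interrupt, e.g. code updating PMU bookkeeping
 * that a PMI handler also touches:
 *
 *	unsigned long flags;
 *
 *	powerpc_local_irq_pmu_save(flags);
 *	... update PMU state ...
 *	powerpc_local_irq_pmu_restore(flags);
 *
 * Unlike local_irq_save(), this also sets IRQS_PMI_DISABLED in
 * paca->irq_soft_mask; a PMI arriving inside the section is recorded
 * in paca->irq_happened and replayed once the mask is restored.
 */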

#endif /* CONFIG_PPC_BOOK3S */

#ifdef CONFIG_PPC_BOOK3E
#define __hard_irq_enable()	asm volatile("wrteei 1" : : : "memory")
#define __hard_irq_disable()	asm volatile("wrteei 0" : : : "memory")
#else
#define __hard_irq_enable()	__mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
#define __hard_irq_disable()	__mtmsrd(local_paca->kernel_msr, 1)
#endif

#define hard_irq_disable()	do {					\
	unsigned long flags;						\
	__hard_irq_disable();						\
	flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED);		\
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;			\
	if (!arch_irqs_disabled_flags(flags))				\
		trace_hardirqs_off();					\
} while (0)

static inline bool lazy_irq_pending(void)
{
	return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
}
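
/*
 * Typical pattern (an illustrative sketch, loosely modelled on the
 * idle entry code): hard disable, then check whether an interrupt
 * already arrived under the soft mask before committing to a state
 * that would otherwise lose it:
 *
 *	hard_irq_disable();
 *	if (lazy_irq_pending())
 *		return false;	// a masked interrupt awaits replay; don't idle
 *	... enter low-power state ...
 */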

/*
 * This is called by asynchronous interrupts to conditionally
 * re-enable hard interrupts when soft-disabled, after having
 * cleared the source of the interrupt.
 */
static inline void may_hard_irq_enable(void)
{
	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
	if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK))
		__hard_irq_enable();
}

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return (regs->softe & IRQS_DISABLED);
}

extern bool prep_irq_for_idle(void);
extern bool prep_irq_for_idle_irqsoff(void);
extern void irq_set_pending_from_srr1(unsigned long srr1);

#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();

extern void force_external_irq_replay(void);

#else /* CONFIG_PPC64 */

#define SET_MSR_EE(x)	mtmsr(x)

static inline unsigned long arch_local_save_flags(void)
{
	return mfmsr();
}

static inline void arch_local_irq_restore(unsigned long flags)
{
#if defined(CONFIG_BOOKE)
	asm volatile("wrtee %0" : : "r" (flags) : "memory");
#else
	mtmsr(flags);
#endif
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();

#ifdef CONFIG_BOOKE
	asm volatile("wrteei 0" : : : "memory");
#elif defined(CONFIG_PPC_8xx)
	wrtspr(SPRN_EID);
#else
	SET_MSR_EE(flags & ~MSR_EE);
#endif
	return flags;
}

static inline void arch_local_irq_disable(void)
{
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 0" : : : "memory");
#elif defined(CONFIG_PPC_8xx)
	wrtspr(SPRN_EID);
#else
	arch_local_irq_save();
#endif
}

static inline void arch_local_irq_enable(void)
{
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 1" : : : "memory");
#elif defined(CONFIG_PPC_8xx)
	wrtspr(SPRN_EIE);
#else
	unsigned long msr = mfmsr();

	SET_MSR_EE(msr | MSR_EE);
#endif
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return (flags & MSR_EE) == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#define hard_irq_disable()	arch_local_irq_disable()

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !(regs->msr & MSR_EE);
}

static inline void may_hard_irq_enable(void) { }

#endif /* CONFIG_PPC64 */

#define ARCH_IRQ_INIT_FLAGS	IRQ_NOREQUEST

/*
 * interrupt-retrigger: should we handle this via lost interrupts and IPIs
 * or should we not care like we do now? --BenH.
 */
struct irq_chip;

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HW_IRQ_H */