/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_HW_IRQ_H
#define _ASM_POWERPC_HW_IRQ_H

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

#ifdef CONFIG_PPC64

/*
 * PACA flags in paca->irq_happened.
 *
 * These bits are set when interrupts occur while soft-disabled
 * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
 * is set whenever we manually hard disable.
 */
#define PACA_IRQ_HARD_DIS	0x01
#define PACA_IRQ_DBELL		0x02
#define PACA_IRQ_EE		0x04
#define PACA_IRQ_DEC		0x08 /* Or FIT */
#define PACA_IRQ_EE_EDGE	0x10 /* BookE only */
#define PACA_IRQ_HMI		0x20
#define PACA_IRQ_PMI		0x40
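
/*
 * Illustrative sketch, not from this header: the idea behind these bits
 * is that a soft-masked interrupt is not handled immediately; the entry
 * code records it in paca->irq_happened and returns, and it is replayed
 * when the soft mask is lifted. Roughly, for a decrementer:
 */
#if 0	/* illustrative only; the real logic lives in exception entry code */
	if (irq_soft_mask_return() & IRQS_DISABLED) {
		/* note the interrupt for later replay and keep EE off */
		local_paca->irq_happened |= PACA_IRQ_DEC | PACA_IRQ_HARD_DIS;
		return;	/* replayed by arch_local_irq_restore(IRQS_ENABLED) */
	}
#endif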

/*
 * Some soft-masked interrupts must be hard masked until they are replayed
 * (e.g., because the soft-masked handler does not clear the exception).
 */
#ifdef CONFIG_PPC_BOOK3S
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE|PACA_IRQ_PMI)
#else
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE)
#endif

/*
 * flags for paca->irq_soft_mask
 */
#define IRQS_ENABLED		0
#define IRQS_DISABLED		1 /* local_irq_disable() interrupts */
#define IRQS_PMI_DISABLED	2
#define IRQS_ALL_DISABLED	(IRQS_DISABLED | IRQS_PMI_DISABLED)
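
/*
 * Illustrative sketch, not from this header: the soft mask is a bitmask,
 * so the two disable states compose. A hypothetical caller:
 */
#if 0	/* illustrative only */
	unsigned long flags;

	local_irq_disable();			/* soft mask = IRQS_DISABLED */
	powerpc_local_irq_pmu_save(flags);	/* adds IRQS_PMI_DISABLED */
	/* ... section safe from both normal interrupts and PMIs ... */
	powerpc_local_irq_pmu_restore(flags);	/* back to IRQS_DISABLED */
	local_irq_enable();			/* back to IRQS_ENABLED */
#endif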

#endif /* CONFIG_PPC64 */

#ifndef __ASSEMBLY__

extern void replay_system_reset(void);
extern void replay_soft_interrupts(void);

extern void timer_interrupt(struct pt_regs *);
extern void timer_broadcast_interrupt(void);
extern void performance_monitor_exception(struct pt_regs *regs);
extern void WatchdogException(struct pt_regs *regs);
extern void unknown_exception(struct pt_regs *regs);

#ifdef CONFIG_PPC64
#include <asm/paca.h>

static inline notrace unsigned long irq_soft_mask_return(void)
{
	unsigned long flags;

	asm volatile(
		"lbz %0,%1(13)"
		: "=r" (flags)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)));

	return flags;
}
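
/*
 * Illustrative sketch, not from this header: on 64-bit, r13 holds the
 * address of this CPU's paca, so the lbz above amounts to the hypothetical
 * C below; the inline asm keeps the access to one byte load, independent
 * of how the compiler would address local_paca. Assumes local_paca from
 * <asm/paca.h>; irq_soft_mask_return_c() is a made-up name:
 */
#if 0	/* illustrative only */
static inline unsigned long irq_soft_mask_return_c(void)
{
	return local_paca->irq_soft_mask;
}
#endif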

/*
 * The "memory" clobber acts both as a compiler barrier for the
 * critical section and as a clobber because we changed
 * paca->irq_soft_mask.
 */
static inline notrace void irq_soft_mask_set(unsigned long mask)
{
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	/*
	 * The irq mask must always include the STD bit if any are set,
	 * and interrupts don't get replayed until the standard
	 * interrupt (local_irq_disable()) is unmasked.
	 *
	 * Other masks must only provide additional masking beyond
	 * the standard, and they are also not replayed until the
	 * standard interrupt becomes unmasked.
	 *
	 * This could be changed, but it will require partial
	 * unmasks to be replayed, among other things. For now, take
	 * the simple approach.
	 */
	WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif

	asm volatile(
		"stb %0,%1(13)"
		:
		: "r" (mask),
		  "i" (offsetof(struct paca_struct, irq_soft_mask))
		: "memory");
}
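
/*
 * Illustrative sketch, not from this header: the "memory" clobber is what
 * stops the compiler from hoisting accesses out of the section the mask
 * protects. With a hypothetical per-CPU counter:
 */
#if 0	/* illustrative only; my_percpu_counter is made up */
	irq_soft_mask_set(IRQS_DISABLED);	/* barrier: nothing floats up */
	my_percpu_counter++;			/* stays inside the section */
	irq_soft_mask_set(IRQS_ENABLED);	/* barrier: nothing sinks down */
#endif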

static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
{
	unsigned long flags;

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif

	asm volatile(
		"lbz %0,%1(13); stb %2,%1(13)"
		: "=&r" (flags)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)),
		  "r" (mask)
		: "memory");

	return flags;
}

static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
{
	unsigned long flags, tmp;

	asm volatile(
		"lbz %0,%2(13); or %1,%0,%3; stb %1,%2(13)"
		: "=&r" (flags), "=r" (tmp)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)),
		  "r" (mask)
		: "memory");

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	WARN_ON((mask | flags) && !((mask | flags) & IRQS_DISABLED));
#endif

	return flags;
}

static inline unsigned long arch_local_save_flags(void)
{
	return irq_soft_mask_return();
}

static inline void arch_local_irq_disable(void)
{
	irq_soft_mask_set(IRQS_DISABLED);
}

extern void arch_local_irq_restore(unsigned long);

static inline void arch_local_irq_enable(void)
{
	arch_local_irq_restore(IRQS_ENABLED);
}

static inline unsigned long arch_local_irq_save(void)
{
	return irq_soft_mask_set_return(IRQS_DISABLED);
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return flags & IRQS_DISABLED;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}
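
/*
 * Illustrative usage sketch, not from this header: the generic
 * local_irq_save()/local_irq_restore() pair resolves to the helpers above,
 * so a 64-bit critical section costs two byte accesses to the paca rather
 * than an mtmsrd:
 */
#if 0	/* illustrative only */
	unsigned long flags;

	flags = arch_local_irq_save();	/* soft mask := IRQS_DISABLED */
	/* ... critical section; MSR[EE] may well still be set ... */
	arch_local_irq_restore(flags);	/* replays anything that arrived */
#endif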

#ifdef CONFIG_PPC_BOOK3S
/*
 * To support disabling and enabling of interrupts together with PMIs,
 * a new pair of macros, powerpc_local_irq_pmu_save() and
 * powerpc_local_irq_pmu_restore(), is added. These macros are modeled
 * on the generic local_irq_* code in include/linux/irqflags.h.
 */
#define raw_local_irq_pmu_save(flags)					\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = irq_soft_mask_or_return(IRQS_DISABLED |	\
				IRQS_PMI_DISABLED);			\
	} while (0)

#define raw_local_irq_pmu_restore(flags)				\
	do {								\
		typecheck(unsigned long, flags);			\
		arch_local_irq_restore(flags);				\
	} while (0)

#ifdef CONFIG_TRACE_IRQFLAGS
#define powerpc_local_irq_pmu_save(flags)			\
	do {							\
		raw_local_irq_pmu_save(flags);			\
		if (!raw_irqs_disabled_flags(flags))		\
			trace_hardirqs_off();			\
	} while (0)
#define powerpc_local_irq_pmu_restore(flags)			\
	do {							\
		if (!raw_irqs_disabled_flags(flags))		\
			trace_hardirqs_on();			\
		raw_local_irq_pmu_restore(flags);		\
	} while (0)
#else
#define powerpc_local_irq_pmu_save(flags)			\
	do {							\
		raw_local_irq_pmu_save(flags);			\
	} while (0)
#define powerpc_local_irq_pmu_restore(flags)			\
	do {							\
		raw_local_irq_pmu_restore(flags);		\
	} while (0)
#endif  /* CONFIG_TRACE_IRQFLAGS */

#endif /* CONFIG_PPC_BOOK3S */
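
/*
 * Illustrative usage sketch, not from this header: code that manipulates
 * performance counter state can mask PMIs as well as normal interrupts
 * around the update:
 */
#if 0	/* illustrative only */
	unsigned long flags;

	powerpc_local_irq_pmu_save(flags);
	/* ... update PMU / perf event state without racing a PMI ... */
	powerpc_local_irq_pmu_restore(flags);
#endif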

#ifdef CONFIG_PPC_BOOK3E
#define __hard_irq_enable()	wrtee(MSR_EE)
#define __hard_irq_disable()	wrtee(0)
#define __hard_EE_RI_disable()	wrtee(0)
#define __hard_RI_enable()	do { } while (0)
#else
#define __hard_irq_enable()	__mtmsrd(MSR_EE|MSR_RI, 1)
#define __hard_irq_disable()	__mtmsrd(MSR_RI, 1)
#define __hard_EE_RI_disable()	__mtmsrd(0, 1)
#define __hard_RI_enable()	__mtmsrd(MSR_RI, 1)
#endif

#define hard_irq_disable()	do {					\
	unsigned long flags;						\
	__hard_irq_disable();						\
	flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED);		\
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;			\
	if (!arch_irqs_disabled_flags(flags)) {				\
		asm ("stdx %%r1, 0, %1 ;"				\
		     : "=m" (local_paca->saved_r1)			\
		     : "b" (&local_paca->saved_r1));			\
		trace_hardirqs_off();					\
	}								\
} while (0)
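
/*
 * Illustrative contrast, not from this header: unlike local_irq_disable(),
 * which only sets the soft mask, hard_irq_disable() also clears MSR[EE]
 * and records PACA_IRQ_HARD_DIS so that arch_local_irq_restore() knows to
 * hard-enable again:
 */
#if 0	/* illustrative only */
	local_irq_disable();	/* soft: interrupts latched and replayed later */
	hard_irq_disable();	/* hard: MSR[EE]=0, nothing is taken at all */
#endif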

static inline bool __lazy_irq_pending(u8 irq_happened)
{
	return !!(irq_happened & ~PACA_IRQ_HARD_DIS);
}

/*
 * Check if a lazy IRQ is pending. Should be called with IRQs hard disabled.
 */
static inline bool lazy_irq_pending(void)
{
	return __lazy_irq_pending(get_paca()->irq_happened);
}

/*
 * Check if a lazy IRQ is pending, with no debugging checks.
 * Should be called with IRQs hard disabled.
 * For use in RI disabled code or other constrained situations.
 */
static inline bool lazy_irq_pending_nocheck(void)
{
	return __lazy_irq_pending(local_paca->irq_happened);
}

/*
 * This is called by asynchronous interrupts to conditionally
 * re-enable hard interrupts after having cleared the source
 * of the interrupt. They are kept disabled if there is a different
 * soft-masked interrupt pending that requires hard masking.
 */
static inline void may_hard_irq_enable(void)
{
	if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)) {
		get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
		__hard_irq_enable();
	}
}
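
/*
 * Illustrative usage sketch, not from this header: an asynchronous handler
 * such as the decrementer clears its own source first and then lets
 * further hard interrupts in, unless a still-latched source (see
 * PACA_IRQ_MUST_HARD_MASK) forces MSR[EE] to stay off. Assumes
 * set_dec()/decrementer_max from the timer code:
 */
#if 0	/* illustrative only */
	set_dec(decrementer_max);	/* clear our interrupt source first */
	may_hard_irq_enable();		/* then allow further hard interrupts */
#endif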

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return (regs->softe & IRQS_DISABLED);
}

extern bool prep_irq_for_idle(void);
extern bool prep_irq_for_idle_irqsoff(void);
extern void irq_set_pending_from_srr1(unsigned long srr1);

#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();

extern void force_external_irq_replay(void);

#else /* CONFIG_PPC64 */

static inline unsigned long arch_local_save_flags(void)
{
	return mfmsr();
}

static inline void arch_local_irq_restore(unsigned long flags)
{
	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(flags);
	else
		mtmsr(flags);
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();

	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else
		mtmsr(flags & ~MSR_EE);

	return flags;
}

static inline void arch_local_irq_disable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else
		mtmsr(mfmsr() & ~MSR_EE);
}

static inline void arch_local_irq_enable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(MSR_EE);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EIE);
	else
		mtmsr(mfmsr() | MSR_EE);
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return (flags & MSR_EE) == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#define hard_irq_disable()		arch_local_irq_disable()

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !(regs->msr & MSR_EE);
}

static inline void may_hard_irq_enable(void) { }

#endif /* CONFIG_PPC64 */

#define ARCH_IRQ_INIT_FLAGS	IRQ_NOREQUEST

/*
 * interrupt-retrigger: should we handle this via lost interrupts and IPIs
 * or should we not care like we do now? --BenH.
 */
struct irq_chip;

#endif  /* __ASSEMBLY__ */
#endif	/* __KERNEL__ */
#endif	/* _ASM_POWERPC_HW_IRQ_H */