/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_HW_IRQ_H
#define _ASM_POWERPC_HW_IRQ_H

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

#ifdef CONFIG_PPC64

/*
 * PACA flags in paca->irq_happened.
 *
 * These bits are set when interrupts occur while soft-disabled
 * and allow a proper replay.
 *
 * PACA_IRQ_HARD_DIS is set whenever we hard disable. It is almost
 * always in sync with the MSR[EE] state, except:
 * - A window in interrupt entry, where hardware disables MSR[EE] and that
 *   must be "reconciled" with the soft mask state.
 * - NMI interrupts that hit in awkward places, until they fix the state.
 * - When local irqs are being enabled and state is being fixed up.
 * - When returning from an interrupt there are some windows where this
 *   can become out of sync, but gets fixed before the RFI or before
 *   executing the next user instruction (see arch/powerpc/kernel/interrupt.c).
 */
#define PACA_IRQ_HARD_DIS	0x01
#define PACA_IRQ_DBELL		0x02
#define PACA_IRQ_EE		0x04
#define PACA_IRQ_DEC		0x08 /* Or FIT */
#define PACA_IRQ_HMI		0x10
#define PACA_IRQ_PMI		0x20
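
/*
 * Illustrative sketch of how these latched bits are consumed (the real
 * replay logic at the time of writing is replay_soft_interrupts() in
 * arch/powerpc/kernel/irq.c; this is a simplification, not that code):
 *
 *	u8 happened = local_paca->irq_happened;
 *
 *	if (happened & PACA_IRQ_DEC)
 *		... replay the decrementer interrupt ...
 *	if (happened & PACA_IRQ_EE)
 *		... replay the external interrupt ...
 *	if (happened & PACA_IRQ_DBELL)
 *		... replay the doorbell ...
 */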

/*
 * Some soft-masked interrupts must be hard masked until they are replayed
 * (e.g., because the soft-masked handler does not clear the exception).
 */
#ifdef CONFIG_PPC_BOOK3S
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE|PACA_IRQ_PMI)
#else
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE)
#endif

#endif /* CONFIG_PPC64 */

/*
 * flags for paca->irq_soft_mask
 */
#define IRQS_ENABLED		0
#define IRQS_DISABLED		1 /* local_irq_disable() interrupts */
#define IRQS_PMI_DISABLED	2
#define IRQS_ALL_DISABLED	(IRQS_DISABLED | IRQS_PMI_DISABLED)
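
/*
 * The soft mask is a bitmask, not an enumeration: IRQS_PMI_DISABLED is
 * only ever set together with IRQS_DISABLED (enforced on 64-bit by the
 * WARN_ON in irq_soft_mask_set() below, under
 * CONFIG_PPC_IRQ_SOFT_MASK_DEBUG). An illustrative decode of a saved
 * mask value:
 *
 *	if (flags & IRQS_DISABLED)
 *		... ordinary interrupts were soft-disabled ...
 *	if (flags & IRQS_PMI_DISABLED)
 *		... performance monitor interrupts were masked too ...
 */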

#ifndef __ASSEMBLY__

/* Set MSR[EE] (hard enable), using whichever mechanism the platform has. */
static inline void __hard_irq_enable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x))
		wrtee(MSR_EE);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EIE);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_EE | MSR_RI, 1);
	else
		mtmsr(mfmsr() | MSR_EE);
}

/* Clear MSR[EE] (hard disable), keeping MSR[RI] set where it exists. */
static inline void __hard_irq_disable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_RI, 1);
	else
		mtmsr(mfmsr() & ~MSR_EE);
}

/* Clear both MSR[EE] and MSR[RI]: an interrupt taken now is unrecoverable. */
static inline void __hard_EE_RI_disable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_NRI);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(0, 1);
	else
		mtmsr(mfmsr() & ~(MSR_EE | MSR_RI));
}

/* Set MSR[RI] again; a no-op on BookE/40x, which don't manage RI this way. */
static inline void __hard_RI_enable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x))
		return;

	if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_RI, 1);
	else
		mtmsr(mfmsr() | MSR_RI);
}
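
/*
 * Sketch of the EE/RI pairing used on interrupt-return style paths
 * (illustrative; the real sequences live in arch/powerpc/kernel/interrupt.c):
 *
 *	__hard_EE_RI_disable();		MSR[EE]=0, MSR[RI]=0: an interrupt
 *					taken here could not be recovered from
 *	if (... unexpected work is pending ...) {
 *		__hard_RI_enable();	back to a recoverable state
 *		... handle the work, then retry ...
 *	}
 */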

#ifdef CONFIG_PPC64
#include <asm/paca.h>

/* On ppc64, r13 is dedicated to the local paca pointer. */
static inline notrace unsigned long irq_soft_mask_return(void)
{
	unsigned long flags;

	asm volatile(
		"lbz %0,%1(13)"
		: "=r" (flags)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)));

	return flags;
}
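
/*
 * A plain C access, i.e. (sketch)
 *
 *	return local_paca->irq_soft_mask;
 *
 * would read the same byte; the inline asm pins the access down to a
 * single explicit byte load from the paca, independent of how the
 * compiler would otherwise schedule it.
 */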

/*
 * The "memory" clobber acts as both a compiler barrier
 * for the critical section and as a clobber because
 * we changed paca->irq_soft_mask
 */
static inline notrace void irq_soft_mask_set(unsigned long mask)
{
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	/*
	 * The irq mask must always include the STD bit if any are set:
	 * interrupts don't get replayed until the standard interrupt
	 * (local_irq_disable()) is unmasked.
	 *
	 * Other masks must only provide additional masking beyond
	 * the standard, and they are also not replayed until the
	 * standard interrupt becomes unmasked.
	 *
	 * This could be changed, but it will require partial
	 * unmasks to be replayed, among other things. For now, take
	 * the simple approach.
	 */
	WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif

	asm volatile(
		"stb %0,%1(13)"
		:
		: "r" (mask),
		  "i" (offsetof(struct paca_struct, irq_soft_mask))
		: "memory");
}

static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
{
	unsigned long flags;

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif

	asm volatile(
		"lbz %0,%1(13); stb %2,%1(13)"
		: "=&r" (flags)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)),
		  "r" (mask)
		: "memory");

	return flags;
}

static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
{
	unsigned long flags, tmp;

	asm volatile(
		"lbz %0,%2(13); or %1,%0,%3; stb %1,%2(13)"
		: "=&r" (flags), "=r" (tmp)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)),
		  "r" (mask)
		: "memory");

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	WARN_ON((mask | flags) && !((mask | flags) & IRQS_DISABLED));
#endif

	return flags;
}

static inline unsigned long arch_local_save_flags(void)
{
	return irq_soft_mask_return();
}

static inline void arch_local_irq_disable(void)
{
	irq_soft_mask_set(IRQS_DISABLED);
}

extern void arch_local_irq_restore(unsigned long);

static inline void arch_local_irq_enable(void)
{
	arch_local_irq_restore(IRQS_ENABLED);
}

static inline unsigned long arch_local_irq_save(void)
{
	return irq_soft_mask_set_return(IRQS_DISABLED);
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return flags & IRQS_DISABLED;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}
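
/*
 * The functions above back the generic local_irq_*() macros from
 * include/linux/irqflags.h, so a ppc64 critical section normally only
 * toggles the soft mask (illustrative sketch):
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		soft mask set; MSR[EE] usually
 *					remains on
 *	... critical section ...	an interrupt arriving here is
 *					latched in paca->irq_happened
 *	local_irq_restore(flags);	arch_local_irq_restore() replays
 *					whatever was latched
 */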

#ifdef CONFIG_PPC_BOOK3S
/*
 * To support disabling and enabling of irqs together with PMIs, a pair
 * of powerpc_local_irq_pmu_save() and powerpc_local_irq_pmu_restore()
 * macros is provided, modelled on the generic local_irq_* code in
 * include/linux/irqflags.h.
 */
#define raw_local_irq_pmu_save(flags)					\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = irq_soft_mask_or_return(IRQS_DISABLED |	\
				IRQS_PMI_DISABLED);			\
	} while(0)

#define raw_local_irq_pmu_restore(flags)				\
	do {								\
		typecheck(unsigned long, flags);			\
		arch_local_irq_restore(flags);				\
	} while(0)

#ifdef CONFIG_TRACE_IRQFLAGS
#define powerpc_local_irq_pmu_save(flags)			\
	do {							\
		raw_local_irq_pmu_save(flags);			\
		if (!raw_irqs_disabled_flags(flags))		\
			trace_hardirqs_off();			\
	} while(0)
#define powerpc_local_irq_pmu_restore(flags)			\
	do {							\
		if (!raw_irqs_disabled_flags(flags))		\
			trace_hardirqs_on();			\
		raw_local_irq_pmu_restore(flags);		\
	} while(0)
#else
#define powerpc_local_irq_pmu_save(flags)			\
	do {							\
		raw_local_irq_pmu_save(flags);			\
	} while(0)
#define powerpc_local_irq_pmu_restore(flags)			\
	do {							\
		raw_local_irq_pmu_restore(flags);		\
	} while(0)
#endif  /* CONFIG_TRACE_IRQFLAGS */

#endif /* CONFIG_PPC_BOOK3S */
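
/*
 * Illustrative use (sketch; the main consumers are in the powerpc perf
 * code): protect data shared with the performance monitor interrupt by
 * masking PMIs in addition to ordinary interrupts:
 *
 *	unsigned long flags;
 *
 *	powerpc_local_irq_pmu_save(flags);
 *	... update state also touched by the PMI handler ...
 *	powerpc_local_irq_pmu_restore(flags);
 */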

#define hard_irq_disable()	do {					\
	unsigned long flags;						\
	__hard_irq_disable();						\
	flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED);		\
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;			\
	if (!arch_irqs_disabled_flags(flags)) {				\
		/* Record the stack pointer in paca->saved_r1 */	\
		asm ("stdx %%r1, 0, %1 ;"				\
		     : "=m" (local_paca->saved_r1)			\
		     : "b" (&local_paca->saved_r1));			\
		trace_hardirqs_off();					\
	}								\
} while(0)
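
/*
 * Unlike plain local_irq_disable(), hard_irq_disable() really clears
 * MSR[EE] and masks PMIs as well; PACA_IRQ_HARD_DIS records that the
 * lazy-enable path must also turn MSR[EE] back on.
 */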

static inline bool __lazy_irq_pending(u8 irq_happened)
{
	return !!(irq_happened & ~PACA_IRQ_HARD_DIS);
}

/*
 * Check if a lazy IRQ is pending. Should be called with IRQs hard disabled.
 */
static inline bool lazy_irq_pending(void)
{
	return __lazy_irq_pending(get_paca()->irq_happened);
}

/*
 * Check if a lazy IRQ is pending, with no debugging checks.
 * Should be called with IRQs hard disabled.
 * For use in RI disabled code or other constrained situations.
 */
static inline bool lazy_irq_pending_nocheck(void)
{
	return __lazy_irq_pending(local_paca->irq_happened);
}
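
/*
 * Typical pattern (sketch, modelled on the idle entry helpers declared
 * below): hard disable, then refuse to enter a sleep state if an
 * interrupt already got latched:
 *
 *	hard_irq_disable();
 *	if (lazy_irq_pending())
 *		return false;	... abort entering the sleep state ...
 *	... architected idle sequence ...
 */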

/*
 * This is called by asynchronous interrupts to conditionally
 * re-enable hard interrupts after having cleared the source
 * of the interrupt. They are kept disabled if there is a different
 * soft-masked interrupt pending that requires hard masking.
 */
static inline void may_hard_irq_enable(void)
{
	if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)) {
		get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
		__hard_irq_enable();
	}
}
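
/*
 * Illustrative caller (sketch; the handler name is hypothetical):
 *
 *	static void example_async_interrupt(struct pt_regs *regs)
 *	{
 *		... read and clear the interrupt source ...
 *		may_hard_irq_enable();	safe once the source is cleared,
 *					as the exception cannot re-fire
 *		... longer processing, hard interrupts possibly on ...
 *	}
 */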

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return (regs->softe & IRQS_DISABLED);
}

extern bool prep_irq_for_idle(void);
extern bool prep_irq_for_idle_irqsoff(void);
extern void irq_set_pending_from_srr1(unsigned long srr1);

#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();

extern void force_external_irq_replay(void);

static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
	regs->softe = val;
}
#else /* CONFIG_PPC64 */

static inline notrace unsigned long irq_soft_mask_return(void)
{
	return 0;
}

static inline unsigned long arch_local_save_flags(void)
{
	return mfmsr();
}

static inline void arch_local_irq_restore(unsigned long flags)
{
	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(flags);
	else
		mtmsr(flags);
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();

	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else
		mtmsr(flags & ~MSR_EE);

	return flags;
}
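
/*
 * On 32-bit there is no lazy/soft masking: the helpers above and below
 * operate directly on MSR[EE], so (sketch)
 *
 *	flags = arch_local_irq_save();
 *	... critical section, hard disabled ...
 *	arch_local_irq_restore(flags);
 *
 * is a straight MSR round-trip.
 */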

static inline void arch_local_irq_disable(void)
{
	__hard_irq_disable();
}

static inline void arch_local_irq_enable(void)
{
	__hard_irq_enable();
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return (flags & MSR_EE) == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#define hard_irq_disable()		arch_local_irq_disable()

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !(regs->msr & MSR_EE);
}

static inline bool may_hard_irq_enable(void)
{
	return false;
}

static inline void do_hard_irq_enable(void)
{
	BUILD_BUG();
}

static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
}
#endif /* CONFIG_PPC64 */

#define ARCH_IRQ_INIT_FLAGS	IRQ_NOREQUEST

#endif  /* __ASSEMBLY__ */
#endif	/* __KERNEL__ */
#endif	/* _ASM_POWERPC_HW_IRQ_H */