xref: /openbmc/linux/arch/powerpc/include/asm/hw_irq.h (revision bbecb07f)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_HW_IRQ_H
#define _ASM_POWERPC_HW_IRQ_H

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

#ifdef CONFIG_PPC64

/*
 * PACA flags in paca->irq_happened.
 *
 * These bits are set when interrupts occur while soft-disabled
 * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
 * is set whenever we manually hard disable.
 */
#define PACA_IRQ_HARD_DIS	0x01
#define PACA_IRQ_DBELL		0x02
#define PACA_IRQ_EE		0x04
#define PACA_IRQ_DEC		0x08 /* Or FIT */
#define PACA_IRQ_EE_EDGE	0x10 /* BookE only */
#define PACA_IRQ_HMI		0x20

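/*
 * Illustrative sketch, not part of the original header: roughly how the
 * replay path consumes these flags once interrupts are re-enabled. The
 * real logic lives in arch/powerpc/kernel/irq.c; the 0x500 vector number
 * is assumed here purely for illustration.
 */
#if 0	/* example only, never compiled */
	if (local_paca->irq_happened & PACA_IRQ_EE)
		__replay_interrupt(0x500);	/* replay a lost external interrupt */
#endif
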
#endif /* CONFIG_PPC64 */

#ifndef __ASSEMBLY__

extern void replay_system_reset(void);
extern void __replay_interrupt(unsigned int vector);

extern void timer_interrupt(struct pt_regs *);
extern void performance_monitor_exception(struct pt_regs *regs);
extern void WatchdogException(struct pt_regs *regs);
extern void unknown_exception(struct pt_regs *regs);

#ifdef CONFIG_PPC64
#include <asm/paca.h>

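/*
 * On 64-bit the "flags" value is the PACA soft-enable byte: 1 means
 * interrupts are soft-enabled, 0 means soft-disabled. r13 holds the
 * PACA pointer, which is why the asm below addresses off register 13.
 */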
static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;

	asm volatile(
		"lbz %0,%1(13)"
		: "=r" (flags)
		: "i" (offsetof(struct paca_struct, soft_enabled)));

	return flags;
}

static inline unsigned long arch_local_irq_disable(void)
{
	unsigned long flags, zero;

	asm volatile(
		"li %1,0; lbz %0,%2(13); stb %1,%2(13)"
		: "=r" (flags), "=&r" (zero)
		: "i" (offsetof(struct paca_struct, soft_enabled))
		: "memory");

	return flags;
}

extern void arch_local_irq_restore(unsigned long);

static inline void arch_local_irq_enable(void)
{
	arch_local_irq_restore(1);
}

static inline unsigned long arch_local_irq_save(void)
{
	return arch_local_irq_disable();
}
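
/*
 * Illustrative sketch, not part of the original header: the usual
 * save/disable/restore pairing built on the accessors above. Generic
 * code normally reaches these through the local_irq_save()/
 * local_irq_restore() wrappers in <linux/irqflags.h>;
 * example_critical_section() is a hypothetical caller shown only to
 * demonstrate the pattern.
 */
#if 0	/* example only, never compiled */
static inline void example_critical_section(void)
{
	unsigned long flags;

	flags = arch_local_irq_save();	/* soft-disable, keep previous state */
	/* ... work that must not be interrupted by maskable IRQs ... */
	arch_local_irq_restore(flags);	/* re-enable and replay anything pending */
}
#endif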

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return flags == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#ifdef CONFIG_PPC_BOOK3E
#define __hard_irq_enable()	asm volatile("wrteei 1" : : : "memory")
#define __hard_irq_disable()	asm volatile("wrteei 0" : : : "memory")
#else
#define __hard_irq_enable()	__mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
#define __hard_irq_disable()	__mtmsrd(local_paca->kernel_msr, 1)
#endif

#define hard_irq_disable()	do {			\
	u8 _was_enabled;				\
	__hard_irq_disable();				\
	_was_enabled = local_paca->soft_enabled;	\
	local_paca->soft_enabled = 0;			\
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;	\
	if (_was_enabled)				\
		trace_hardirqs_off();			\
} while(0)
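
/*
 * Illustrative sketch, not part of the original header: unlike
 * arch_local_irq_disable(), which only soft-disables, hard_irq_disable()
 * also clears MSR[EE] and records PACA_IRQ_HARD_DIS so the lazy-masking
 * state stays consistent. A hedged usage sketch:
 */
#if 0	/* example only, never compiled */
	hard_irq_disable();		/* no further interrupts, hard or soft */
	/* ... e.g. final preparation before entering a low-power state ... */
	arch_local_irq_restore(1);	/* hard/soft re-enable, replaying anything pending */
#endif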

static inline bool lazy_irq_pending(void)
{
	return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
}

/*
 * This is called by asynchronous interrupt handlers to conditionally
 * re-enable hard interrupts, while still soft-disabled, once the
 * source of the interrupt has been cleared.
 */
static inline void may_hard_irq_enable(void)
{
	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
	if (!(get_paca()->irq_happened & PACA_IRQ_EE))
		__hard_irq_enable();
}
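
/*
 * Illustrative sketch, not part of the original header: an asynchronous
 * handler such as the timer interrupt can do roughly this once its own
 * source is quiesced, so other hard interrupts are not held off longer
 * than necessary. set_dec()/decrementer_max are assumed names here.
 */
#if 0	/* example only, never compiled */
	set_dec(decrementer_max);	/* silence our own interrupt source first */
	may_hard_irq_enable();		/* then allow hard interrupts back in */
#endif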

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !regs->softe;
}

extern bool prep_irq_for_idle(void);
extern bool prep_irq_for_idle_irqsoff(void);
extern void irq_set_pending_from_srr1(unsigned long srr1);

#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();

extern void force_external_irq_replay(void);

#else /* CONFIG_PPC64 */

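/*
 * 32-bit has no lazy interrupt masking: the helpers below operate on
 * MSR[EE] directly (wrteei on BookE, SPRN_EIE/SPRN_EID on 8xx, mtmsr
 * elsewhere), so "flags" is simply the saved MSR value.
 */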
#define SET_MSR_EE(x)	mtmsr(x)

static inline unsigned long arch_local_save_flags(void)
{
	return mfmsr();
}

static inline void arch_local_irq_restore(unsigned long flags)
{
#if defined(CONFIG_BOOKE)
	asm volatile("wrtee %0" : : "r" (flags) : "memory");
#else
	mtmsr(flags);
#endif
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 0" : : : "memory");
#elif defined(CONFIG_PPC_8xx)
	wrtspr(SPRN_EID);
#else
	SET_MSR_EE(flags & ~MSR_EE);
#endif
	return flags;
}

static inline void arch_local_irq_disable(void)
{
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 0" : : : "memory");
#elif defined(CONFIG_PPC_8xx)
	wrtspr(SPRN_EID);
#else
	arch_local_irq_save();
#endif
}

static inline void arch_local_irq_enable(void)
{
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 1" : : : "memory");
#elif defined(CONFIG_PPC_8xx)
	wrtspr(SPRN_EIE);
#else
	unsigned long msr = mfmsr();
	SET_MSR_EE(msr | MSR_EE);
#endif
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return (flags & MSR_EE) == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#define hard_irq_disable()		arch_local_irq_disable()

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !(regs->msr & MSR_EE);
}

static inline void may_hard_irq_enable(void) { }

#endif /* CONFIG_PPC64 */

#define ARCH_IRQ_INIT_FLAGS	IRQ_NOREQUEST

/*
 * interrupt-retrigger: should we handle this via lost interrupts and IPIs
 * or should we not care like we do now ? --BenH.
 */
struct irq_chip;

#endif  /* __ASSEMBLY__ */
#endif	/* __KERNEL__ */
#endif	/* _ASM_POWERPC_HW_IRQ_H */