/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_HW_IRQ_H
#define _ASM_POWERPC_HW_IRQ_H

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

#ifdef CONFIG_PPC64

/*
 * PACA flags in paca->irq_happened.
 *
 * These bits are set when interrupts occur while soft-disabled
 * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
 * is set whenever we manually hard disable.
 */
#define PACA_IRQ_HARD_DIS	0x01
#define PACA_IRQ_DBELL		0x02
#define PACA_IRQ_EE		0x04
#define PACA_IRQ_DEC		0x08 /* Or FIT */
#define PACA_IRQ_EE_EDGE	0x10 /* BookE only */
#define PACA_IRQ_HMI		0x20
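
/*
 * Example (illustrative sketch, not a definition in this header):
 * while interrupts are soft-disabled, the masked-interrupt entry code
 * records the event in the PACA, conceptually
 *
 *	local_paca->irq_happened |= PACA_IRQ_DEC;
 *
 * and arch_local_irq_restore() later replays it through
 * __replay_interrupt() once interrupts are soft-enabled again.
 */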

#endif /* CONFIG_PPC64 */

#ifndef __ASSEMBLY__

extern void __replay_interrupt(unsigned int vector);

extern void timer_interrupt(struct pt_regs *);
extern void performance_monitor_exception(struct pt_regs *regs);
extern void WatchdogException(struct pt_regs *regs);
extern void unknown_exception(struct pt_regs *regs);

#ifdef CONFIG_PPC64
#include <asm/paca.h>

static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;

	/* GPR13 holds the PACA pointer; read the soft-enable flag from it */
	asm volatile(
		"lbz %0,%1(13)"
		: "=r" (flags)
		: "i" (offsetof(struct paca_struct, soft_enabled)));

	return flags;
}

static inline unsigned long arch_local_irq_disable(void)
{
	unsigned long flags, zero;

	/* Clear paca->soft_enabled and return its previous value */
	asm volatile(
		"li %1,0; lbz %0,%2(13); stb %1,%2(13)"
		: "=r" (flags), "=&r" (zero)
		: "i" (offsetof(struct paca_struct, soft_enabled))
		: "memory");

	return flags;
}

extern void arch_local_irq_restore(unsigned long);

static inline void arch_local_irq_enable(void)
{
	/* Soft-enable; interrupts recorded in the PACA meanwhile are replayed */
	arch_local_irq_restore(1);
}

static inline unsigned long arch_local_irq_save(void)
{
	return arch_local_irq_disable();
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return flags == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}
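
/*
 * Example usage (illustrative only): callers normally go through the
 * generic local_irq_save()/local_irq_restore() wrappers, which boil
 * down to
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();
 *	... code that must not be interrupted ...
 *	arch_local_irq_restore(flags);
 *
 * On 64-bit this only manipulates paca->soft_enabled; MSR[EE] stays on
 * until an interrupt actually arrives while soft-disabled.
 */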

#ifdef CONFIG_PPC_BOOK3E
#define __hard_irq_enable()	asm volatile("wrteei 1" : : : "memory")
#define __hard_irq_disable()	asm volatile("wrteei 0" : : : "memory")
#else
#define __hard_irq_enable()	__mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
#define __hard_irq_disable()	__mtmsrd(local_paca->kernel_msr, 1)
#endif

#define hard_irq_disable()	do {			\
	u8 _was_enabled;				\
	__hard_irq_disable();				\
	_was_enabled = local_paca->soft_enabled;	\
	local_paca->soft_enabled = 0;			\
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;	\
	if (_was_enabled)				\
		trace_hardirqs_off();			\
} while(0)

static inline bool lazy_irq_pending(void)
{
	return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
}
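
/*
 * Illustrative sketch (assumed caller, not defined in this header):
 * idle code hard-disables and then checks whether a masked interrupt
 * is already pending before committing to a low-power state:
 *
 *	hard_irq_disable();
 *	if (lazy_irq_pending())
 *		return false;
 *
 * i.e. bail out and let the interrupt be replayed rather than sleep;
 * prep_irq_for_idle() below performs this kind of check.
 */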

/*
 * This is called by asynchronous interrupt handlers to conditionally
 * re-enable hard interrupts, when running soft-disabled, once the
 * source of the interrupt has been cleared.
 */
static inline void may_hard_irq_enable(void)
{
	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
	if (!(get_paca()->irq_happened & PACA_IRQ_EE))
		__hard_irq_enable();
}
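
/*
 * Example (illustrative, loosely based on the timer interrupt handler;
 * set_dec() and decrementer_max are defined elsewhere): once a handler
 * has quiesced its own source it may let hard interrupts back in:
 *
 *	set_dec(decrementer_max);
 *	may_hard_irq_enable();
 *
 * If an external interrupt is still pending (PACA_IRQ_EE), MSR[EE] is
 * left off so it is not taken again before it can be replayed.
 */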

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !regs->softe;
}

extern bool prep_irq_for_idle(void);
extern bool prep_irq_for_idle_irqsoff(void);
extern void irq_set_pending_from_srr1(unsigned long srr1);

#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();

extern void force_external_irq_replay(void);

#else /* CONFIG_PPC64 */

/* 32-bit: no lazy disable; the IRQ state is simply MSR with its EE bit */
#define SET_MSR_EE(x)	mtmsr(x)

static inline unsigned long arch_local_save_flags(void)
{
	return mfmsr();
}

static inline void arch_local_irq_restore(unsigned long flags)
{
#if defined(CONFIG_BOOKE)
	asm volatile("wrtee %0" : : "r" (flags) : "memory");
#else
	mtmsr(flags);
#endif
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 0" : : : "memory");
#elif defined(CONFIG_PPC_8xx)
	wrtspr(SPRN_EID);
#else
	SET_MSR_EE(flags & ~MSR_EE);
#endif
	return flags;
}

static inline void arch_local_irq_disable(void)
{
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 0" : : : "memory");
#elif defined(CONFIG_PPC_8xx)
	wrtspr(SPRN_EID);
#else
	arch_local_irq_save();
#endif
}

static inline void arch_local_irq_enable(void)
{
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 1" : : : "memory");
#elif defined(CONFIG_PPC_8xx)
	wrtspr(SPRN_EIE);
#else
	unsigned long msr = mfmsr();
	SET_MSR_EE(msr | MSR_EE);
#endif
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return (flags & MSR_EE) == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#define hard_irq_disable()		arch_local_irq_disable()

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !(regs->msr & MSR_EE);
}

static inline void may_hard_irq_enable(void) { }

#endif /* CONFIG_PPC64 */

#define ARCH_IRQ_INIT_FLAGS	IRQ_NOREQUEST

/*
 * interrupt-retrigger: should we handle this via lost interrupts and IPIs
 * or should we not care like we do now? --BenH.
 */
struct irq_chip;

#endif  /* __ASSEMBLY__ */
#endif	/* __KERNEL__ */
#endif	/* _ASM_POWERPC_HW_IRQ_H */