xref: /openbmc/linux/arch/powerpc/include/asm/hw_irq.h (revision 63dc02bd)
1 /*
2  * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
3  */
4 #ifndef _ASM_POWERPC_HW_IRQ_H
5 #define _ASM_POWERPC_HW_IRQ_H
6 
7 #ifdef __KERNEL__
8 
9 #include <linux/errno.h>
10 #include <linux/compiler.h>
11 #include <asm/ptrace.h>
12 #include <asm/processor.h>
13 
14 #ifdef CONFIG_PPC64
15 
16 /*
17  * PACA flags in paca->irq_happened.
18  *
19  * These bits are set when interrupts occur while soft-disabled
20  * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
21  * is set whenever we manually hard disable.
22  */
23 #define PACA_IRQ_HARD_DIS	0x01
24 #define PACA_IRQ_DBELL		0x02
25 #define PACA_IRQ_EE		0x04
26 #define PACA_IRQ_DEC		0x08 /* Or FIT */
27 #define PACA_IRQ_EE_EDGE	0x10 /* BookE only */
28 
29 #endif /* CONFIG_PPC64 */
30 
31 #ifndef __ASSEMBLY__
32 
33 extern void __replay_interrupt(unsigned int vector);
34 
35 extern void timer_interrupt(struct pt_regs *);
36 
37 #ifdef CONFIG_PPC64
38 #include <asm/paca.h>
39 
/*
 * Return the current soft-enable state by reading paca->soft_enabled
 * directly off r13 (r13 holds the PACA pointer on ppc64 kernels).
 * Non-zero means interrupts are soft-enabled, 0 means soft-disabled
 * (see arch_irqs_disabled_flags()).  No "memory" clobber is needed:
 * this path only reads state.
 */
static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;

	asm volatile(
		"lbz %0,%1(13)"	/* load byte: paca->soft_enabled */
		: "=r" (flags)
		: "i" (offsetof(struct paca_struct, soft_enabled)));

	return flags;
}
51 
/*
 * Soft-disable interrupts: store 0 to paca->soft_enabled and return
 * the previous value so the caller can later restore it.  The "zero"
 * operand is early-clobber ("=&r") because it is written before the
 * asm has finished consuming its other operands.  The "memory"
 * clobber stops the compiler from moving memory accesses across the
 * disable point.
 */
static inline unsigned long arch_local_irq_disable(void)
{
	unsigned long flags, zero;

	asm volatile(
		"li %1,0; lbz %0,%2(13); stb %1,%2(13)"
		: "=r" (flags), "=&r" (zero)
		: "i" (offsetof(struct paca_struct, soft_enabled))
		: "memory");

	return flags;
}
64 
65 extern void arch_local_irq_restore(unsigned long);
66 
/*
 * Soft-enable interrupts.  Restoring "1" (enabled) goes through the
 * out-of-line arch_local_irq_restore(), which — presumably — replays
 * any interrupts that arrived while soft-disabled (see its definition
 * elsewhere; only the extern declaration is visible here).
 */
static inline void arch_local_irq_enable(void)
{
	arch_local_irq_restore(1);
}
71 
/*
 * Save-and-disable: on ppc64 the soft-disable sequence already
 * returns the previous soft-enable state, so this is just
 * arch_local_irq_disable().
 */
static inline unsigned long arch_local_irq_save(void)
{
	return arch_local_irq_disable();
}
76 
/*
 * Decode a flags value from arch_local_save_flags()/irq_save():
 * 0 means soft-disabled, anything non-zero means enabled.
 */
static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return !flags;
}
81 
/* True iff interrupts are currently soft-disabled on this CPU. */
static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}
86 
/*
 * Raw hard enable/disable of external interrupts.  Book3E has the
 * wrteei instruction to write MSR[EE] directly; elsewhere we rewrite
 * the MSR from the cached kernel MSR value (__mtmsrd with L=1, i.e.
 * the partial-update form -- confirm against the ISA if touching).
 *
 * Note: no trailing semicolon inside the macro bodies.  The caller
 * supplies it, so "if (x) __hard_irq_enable(); else ..." stays a
 * single statement instead of expanding to two.
 */
#ifdef CONFIG_PPC_BOOK3E
#define __hard_irq_enable()	asm volatile("wrteei 1" : : : "memory")
#define __hard_irq_disable()	asm volatile("wrteei 0" : : : "memory")
#else
#define __hard_irq_enable()	__mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
#define __hard_irq_disable()	__mtmsrd(local_paca->kernel_msr, 1)
#endif
94 
/*
 * Really disable interrupts at the CPU, and record that fact in the
 * PACA: soft_enabled goes to 0 and PACA_IRQ_HARD_DIS is set in
 * irq_happened (per the flag's description above, set whenever we
 * manually hard-disable).
 */
static inline void hard_irq_disable(void)
{
	__hard_irq_disable();
	get_paca()->soft_enabled = 0;
	get_paca()->irq_happened |= PACA_IRQ_HARD_DIS;
}
101 
102 /*
103  * This is called by asynchronous interrupts to conditionally
104  * re-enable hard interrupts when soft-disabled after having
105  * cleared the source of the interrupt
106  */
static inline void may_hard_irq_enable(void)
{
	/* We are no longer manually hard-disabled. */
	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
	/*
	 * Only hard-enable if no external interrupt (PACA_IRQ_EE) is
	 * still recorded as pending replay.
	 */
	if (!(get_paca()->irq_happened & PACA_IRQ_EE))
		__hard_irq_enable();
}
113 
114 static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
115 {
116 	return !regs->softe;
117 }
118 
119 #else /* CONFIG_PPC64 */
120 
121 #define SET_MSR_EE(x)	mtmsr(x)
122 
/*
 * 32-bit has no lazy/soft disabling: the IRQ "flags" are simply the
 * MSR image, with MSR[EE] as the interrupt-enable bit.
 */
static inline unsigned long arch_local_save_flags(void)
{
	return mfmsr();
}
127 
/* Restore a previously saved MSR image (from arch_local_irq_save()). */
static inline void arch_local_irq_restore(unsigned long flags)
{
#if defined(CONFIG_BOOKE)
	/* BookE can write the MSR EE state directly with wrtee */
	asm volatile("wrtee %0" : : "r" (flags) : "memory");
#else
	mtmsr(flags);
#endif
}
136 
/*
 * Save the current MSR, then disable interrupts.  BookE uses the
 * dedicated wrteei instruction; other cores rewrite the MSR with
 * MSR[EE] cleared.
 */
static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 0" : : : "memory");
#else
	SET_MSR_EE(flags & ~MSR_EE);
#endif
	return flags;
}
147 
/*
 * Disable interrupts, discarding the previous state.  On non-BookE
 * this just reuses the save path and drops the returned flags.
 */
static inline void arch_local_irq_disable(void)
{
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 0" : : : "memory");
#else
	arch_local_irq_save();
#endif
}
156 
/*
 * Enable interrupts: set MSR[EE], either directly via wrteei (BookE)
 * or by read-modify-write of the MSR.
 */
static inline void arch_local_irq_enable(void)
{
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 1" : : : "memory");
#else
	unsigned long msr = mfmsr();
	SET_MSR_EE(msr | MSR_EE);
#endif
}
166 
167 static inline bool arch_irqs_disabled_flags(unsigned long flags)
168 {
169 	return (flags & MSR_EE) == 0;
170 }
171 
/* True iff MSR[EE] is currently clear on this CPU. */
static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}
176 
177 #define hard_irq_disable()		arch_local_irq_disable()
178 
179 static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
180 {
181 	return !(regs->msr & MSR_EE);
182 }
183 
/* No lazy/soft interrupt state on 32-bit, so nothing to re-enable. */
static inline void may_hard_irq_enable(void) { }
185 
186 #endif /* CONFIG_PPC64 */
187 
188 #define ARCH_IRQ_INIT_FLAGS	IRQ_NOREQUEST
189 
190 /*
191  * interrupt-retrigger: should we handle this via lost interrupts and IPIs
192  * or should we not care like we do now ? --BenH.
193  */
194 struct irq_chip;
195 
196 #endif  /* __ASSEMBLY__ */
197 #endif	/* __KERNEL__ */
198 #endif	/* _ASM_POWERPC_HW_IRQ_H */
199