xref: /openbmc/linux/arch/powerpc/include/asm/hw_irq.h (revision fd589a8f)
1 /*
2  * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
3  */
4 #ifndef _ASM_POWERPC_HW_IRQ_H
5 #define _ASM_POWERPC_HW_IRQ_H
6 
7 #ifdef __KERNEL__
8 
9 #include <linux/errno.h>
10 #include <linux/compiler.h>
11 #include <asm/ptrace.h>
12 #include <asm/processor.h>
13 
14 extern void timer_interrupt(struct pt_regs *);
15 
16 #ifdef CONFIG_PPC64
17 #include <asm/paca.h>
18 
/*
 * Return the current soft-interrupt-enable state from the paca.
 * On ppc64, r13 permanently holds the per-CPU paca pointer, so one
 * lbz with an immediate offset reads paca->soft_enabled
 * (1 = enabled, 0 = soft-disabled; see raw_irqs_disabled() below).
 */
static inline unsigned long local_get_flags(void)
{
	unsigned long flags;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (flags)
	: "i" (offsetof(struct paca_struct, soft_enabled)));

	return flags;
}
29 
/*
 * Soft-disable interrupts: read the old paca->soft_enabled byte and
 * store 0 in its place, returning the previous value so the caller can
 * restore it later.  The hardware MSR[EE] bit is not touched here.
 * The "memory" clobber keeps the compiler from moving memory accesses
 * across the disable point.
 */
static inline unsigned long raw_local_irq_disable(void)
{
	unsigned long flags, zero;

	__asm__ __volatile__("li %1,0; lbz %0,%2(13); stb %1,%2(13)"
	: "=r" (flags), "=&r" (zero)
	: "i" (offsetof(struct paca_struct, soft_enabled))
	: "memory");

	return flags;
}
41 
/* Out of line: restoring may have to handle work that arrived while
 * soft-disabled (implemented in arch code, not in this header). */
extern void raw_local_irq_restore(unsigned long);
extern void iseries_handle_interrupts(void);

/* Convention: soft_enabled == 1 -> enabled, == 0 -> soft-disabled. */
#define raw_local_irq_enable()		raw_local_irq_restore(1)
#define raw_local_save_flags(flags)	((flags) = local_get_flags())
#define raw_local_irq_save(flags)	((flags) = raw_local_irq_disable())

#define raw_irqs_disabled()		(local_get_flags() == 0)
#define raw_irqs_disabled_flags(flags)	((flags) == 0)
51 
#ifdef CONFIG_PPC_BOOK3E
/*
 * Book3E has a dedicated instruction (wrteei) to set/clear MSR[EE].
 * No trailing semicolon in the expansion: the caller supplies it, so
 * "if (x) __hard_irq_enable(); else ..." stays well-formed.  (The old
 * definitions ended in ';', expanding to a statement pair.)
 */
#define __hard_irq_enable()	__asm__ __volatile__("wrteei 1": : :"memory")
#define __hard_irq_disable()	__asm__ __volatile__("wrteei 0": : :"memory")
#else
/* Classic 64-bit: read-modify-write the MSR (L=1 form of mtmsrd). */
#define __hard_irq_enable()	__mtmsrd(mfmsr() | MSR_EE, 1)
#define __hard_irq_disable()	__mtmsrd(mfmsr() & ~MSR_EE, 1)
#endif

/*
 * Fully disable interrupts: clear MSR[EE] in hardware, then record both
 * the soft and hard enable state in the paca as disabled.
 */
#define  hard_irq_disable()			\
	do {					\
		__hard_irq_disable();		\
		get_paca()->soft_enabled = 0;	\
		get_paca()->hard_enabled = 0;	\
	} while(0)
66 
/*
 * True iff @flags (a saved soft_enabled value) represents the
 * soft-disabled state (0 = disabled, nonzero = enabled).
 */
static inline int irqs_disabled_flags(unsigned long flags)
{
	return !flags;
}
71 
72 #else
73 
#if defined(CONFIG_BOOKE)
#define SET_MSR_EE(x)	mtmsr(x)
/* wrtee updates only MSR[EE] from bit EE of the given register */
#define raw_local_irq_restore(flags)	__asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
#else
#define SET_MSR_EE(x)	mtmsr(x)
/* classic cores: write the whole saved MSR back */
#define raw_local_irq_restore(flags)	mtmsr(flags)
#endif
81 
/*
 * Hard-disable external interrupts by clearing MSR[EE].
 * BookE has a dedicated instruction (wrteei 0); classic cores must
 * read-modify-write the MSR.
 */
static inline void raw_local_irq_disable(void)
{
#ifdef CONFIG_BOOKE
	__asm__ __volatile__("wrteei 0": : :"memory");
#else
	unsigned long msr;

	msr = mfmsr();
	SET_MSR_EE(msr & ~MSR_EE);
#endif
}
93 
/*
 * Hard-enable external interrupts by setting MSR[EE].
 * Mirror of raw_local_irq_disable() above.
 */
static inline void raw_local_irq_enable(void)
{
#ifdef CONFIG_BOOKE
	__asm__ __volatile__("wrteei 1": : :"memory");
#else
	unsigned long msr;

	msr = mfmsr();
	SET_MSR_EE(msr | MSR_EE);
#endif
}
105 
/*
 * Save the current MSR into *flags, then disable interrupts.
 * Takes a pointer so the raw_local_irq_save() macro below can pass
 * its lvalue argument by address.
 */
static inline void raw_local_irq_save_ptr(unsigned long *flags)
{
	unsigned long msr;
	msr = mfmsr();
	*flags = msr;
#ifdef CONFIG_BOOKE
	__asm__ __volatile__("wrteei 0": : :"memory");
#else
	SET_MSR_EE(msr & ~MSR_EE);
#endif
}
117 
/* On 32-bit the MSR itself is the flags word; EE is the enable bit. */
#define raw_local_save_flags(flags)	((flags) = mfmsr())
#define raw_local_irq_save(flags)	raw_local_irq_save_ptr(&flags)
#define raw_irqs_disabled()		((mfmsr() & MSR_EE) == 0)
#define raw_irqs_disabled_flags(flags)	(((flags) & MSR_EE) == 0)

/* No soft-disable layer on 32-bit: a hard disable is the only disable. */
#define hard_irq_disable()		raw_local_irq_disable()
124 
125 static inline int irqs_disabled_flags(unsigned long flags)
126 {
127 	return (flags & MSR_EE) == 0;
128 }
129 
130 #endif /* CONFIG_PPC64 */
131 
132 /*
133  * interrupt-retrigger: should we handle this via lost interrupts and IPIs
134  * or should we not care like we do now ? --BenH.
135  */
136 struct irq_chip;
137 
138 #ifdef CONFIG_PERF_COUNTERS
139 
140 #ifdef CONFIG_PPC64
/* Read paca->perf_counter_pending (r13 = per-CPU paca pointer). */
static inline unsigned long test_perf_counter_pending(void)
{
	unsigned long x;

	asm volatile("lbz %0,%1(13)"
		: "=r" (x)
		: "i" (offsetof(struct paca_struct, perf_counter_pending)));
	return x;
}
150 
/* Set paca->perf_counter_pending to 1. */
static inline void set_perf_counter_pending(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (1),
		"i" (offsetof(struct paca_struct, perf_counter_pending)));
}
157 
/*
 * Clear paca->perf_counter_pending.
 * NOTE(review): neither this nor set_perf_counter_pending() has a
 * "memory" clobber, so the compiler may reorder the store relative to
 * surrounding memory accesses -- confirm callers don't rely on ordering.
 */
static inline void clear_perf_counter_pending(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (0),
		"i" (offsetof(struct paca_struct, perf_counter_pending)));
}
164 #endif /* CONFIG_PPC64 */
165 
166 #else  /* CONFIG_PERF_COUNTERS */
167 
/* Stubs for !CONFIG_PERF_COUNTERS: nothing is ever pending. */
static inline unsigned long test_perf_counter_pending(void)
{
	return 0;
}

/*
 * No-op.
 * NOTE(review): unlike the CONFIG_PERF_COUNTERS branch there is no
 * set_perf_counter_pending() stub here -- confirm no caller needs one
 * when perf counters are disabled.
 */
static inline void clear_perf_counter_pending(void)
{
}
174 #endif /* CONFIG_PERF_COUNTERS */
175 
176 #endif	/* __KERNEL__ */
177 #endif	/* _ASM_POWERPC_HW_IRQ_H */
178