/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <asm/hazards.h>

__asm__(
	"	.macro	raw_local_irq_enable				\n"
	"	.set	push						\n"
	"	.set	reorder						\n"
	"	.set	noat						\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	$1, $2, 1	# SMTC - clear TCStatus.IXMT	\n"
	"	ori	$1, 0x400					\n"
	"	xori	$1, 0x400					\n"
	"	mtc0	$1, $2, 1					\n"
#elif defined(CONFIG_CPU_MIPSR2)
	"	ei							\n"
#else
	"	mfc0	$1,$12						\n"
	"	ori	$1,0x1f						\n"
	"	xori	$1,0x1e						\n"
	"	mtc0	$1,$12						\n"
#endif
	"	irq_enable_hazard					\n"
	"	.set	pop						\n"
	"	.endm");
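
/*
 * A hedged reading of the pre-R2 fallback above, added for reference:
 * the ori/xori pair is a branch-free way to force Status.IE (bit 0)
 * to 1 while clearing bits 1..4 (EXL, ERL and KSU) in one round trip,
 * roughly:
 *
 *	status |= 0x1f;		// ori: set bits 0..4
 *	status ^= 0x1e;		// xori: clear bits 1..4, leave IE set
 *
 * so the single mtc0 both enables interrupts and drops any exception
 * or error level back to plain kernel mode.
 */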

extern void smtc_ipi_replay(void);

static inline void raw_local_irq_enable(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC kernel needs to do a software replay of queued
	 * IPIs, at the cost of call overhead on each local_irq_enable()
	 */
	smtc_ipi_replay();
#endif
	__asm__ __volatile__(
		"raw_local_irq_enable"
		: /* no outputs */
		: /* no inputs */
		: "memory");
}

/*
 * For cli() we have to insert nops to make sure that the new value
 * has actually arrived in the status register before the end of this
 * macro.
 * The R4000/R4400 need three nops, the R4600 two, and the R10000 needs
 * none at all.
 */
/*
 * For TX49, operating on only the IE bit is not enough.
 *
 * If mfc0 $12 follows a store and the mfc0 is the last instruction of
 * a page, and fetching the next instruction causes a TLB miss, the
 * result of the mfc0 might wrongly contain the EXL bit.
 *
 * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
 *
 * Workaround: mask the EXL bit of the result or place a nop before the mfc0.
 */
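/*
 * Illustrative sketch of the two workarounds named above (not part of
 * the original file).  Either variant avoids the bogus EXL read:
 *
 *	mfc0	$1, $12		# result may wrongly contain EXL
 *	ori	$1, 0x1f	# masking bits 0..4 makes that harmless
 *	xori	$1, 0x1f
 *
 * or:
 *
 *	nop			# separate the mfc0 from a prior store
 *	mfc0	$1, $12
 *
 * The raw_local_irq_disable macro below happens to use the first form.
 */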
__asm__(
	"	.macro	raw_local_irq_disable\n"
	"	.set	push						\n"
	"	.set	noat						\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	$1, $2, 1					\n"
	"	ori	$1, 0x400					\n"
	"	.set	noreorder					\n"
	"	mtc0	$1, $2, 1					\n"
#elif defined(CONFIG_CPU_MIPSR2)
	"	di							\n"
#else
	"	mfc0	$1,$12						\n"
	"	ori	$1,0x1f						\n"
	"	xori	$1,0x1f						\n"
	"	.set	noreorder					\n"
	"	mtc0	$1,$12						\n"
#endif
	"	irq_disable_hazard					\n"
	"	.set	pop						\n"
	"	.endm							\n");

static inline void raw_local_irq_disable(void)
{
	__asm__ __volatile__(
		"raw_local_irq_disable"
		: /* no outputs */
		: /* no inputs */
		: "memory");
}

__asm__(
	"	.macro	raw_local_save_flags flags			\n"
	"	.set	push						\n"
	"	.set	reorder						\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	\\flags, $2, 1					\n"
#else
	"	mfc0	\\flags, $12					\n"
#endif
	"	.set	pop						\n"
	"	.endm							\n");

#define raw_local_save_flags(x)						\
__asm__ __volatile__(							\
	"raw_local_save_flags %0"					\
	: "=r" (x))
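
/*
 * A note on what lands in 'flags' (hedged summary of the macro above):
 * under SMTC it is TCStatus, where a set IXMT bit (0x400) means this
 * thread's interrupts are masked; everywhere else it is CP0 Status,
 * where a clear IE bit (bit 0) means the same.  raw_irqs_disabled_flags()
 * near the bottom of this file decodes exactly this difference.
 */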

__asm__(
	"	.macro	raw_local_irq_save result			\n"
	"	.set	push						\n"
	"	.set	reorder						\n"
	"	.set	noat						\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	\\result, $2, 1					\n"
	"	ori	$1, \\result, 0x400				\n"
	"	.set	noreorder					\n"
	"	mtc0	$1, $2, 1					\n"
	"	andi	\\result, \\result, 0x400			\n"
#elif defined(CONFIG_CPU_MIPSR2)
	"	di	\\result					\n"
	"	andi	\\result, 1					\n"
#else
	"	mfc0	\\result, $12					\n"
	"	ori	$1, \\result, 0x1f				\n"
	"	xori	$1, 0x1f					\n"
	"	.set	noreorder					\n"
	"	mtc0	$1, $12						\n"
#endif
	"	irq_disable_hazard					\n"
	"	.set	pop						\n"
	"	.endm							\n");

#define raw_local_irq_save(x)						\
__asm__ __volatile__(							\
	"raw_local_irq_save\t%0"					\
	: "=r" (x)							\
	: /* no inputs */						\
	: "memory")
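
/*
 * Typical pairing, sketched here for reference only (names as defined
 * in this file):
 *
 *	unsigned long flags;
 *
 *	raw_local_irq_save(flags);	// disable, capture previous state
 *	// ... critical section ...
 *	raw_local_irq_restore(flags);	// re-enable only if it was on
 */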

__asm__(
	"	.macro	raw_local_irq_restore flags			\n"
	"	.set	push						\n"
	"	.set	noreorder					\n"
	"	.set	noat						\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"mfc0	$1, $2, 1						\n"
	"andi	\\flags, 0x400						\n"
	"ori	$1, 0x400						\n"
	"xori	$1, 0x400						\n"
	"or	\\flags, $1						\n"
	"mtc0	\\flags, $2, 1						\n"
#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
	/*
	 * Slow, but doesn't suffer from a relatively unlikely race
	 * condition we have had since day 1.
	 */
	"	beqz	\\flags, 1f					\n"
	"	 di							\n"
	"	ei							\n"
	"1:								\n"
#elif defined(CONFIG_CPU_MIPSR2)
	/*
	 * Fast, dangerous.  Life is fun, life is good.
	 */
	"	mfc0	$1, $12						\n"
	"	ins	$1, \\flags, 0, 1				\n"
	"	mtc0	$1, $12						\n"
#else
	"	mfc0	$1, $12						\n"
	"	andi	\\flags, 1					\n"
	"	ori	$1, 0x1f					\n"
	"	xori	$1, 0x1f					\n"
	"	or	\\flags, $1					\n"
	"	mtc0	\\flags, $12					\n"
#endif
	"	irq_disable_hazard					\n"
	"	.set	pop						\n"
	"	.endm							\n");
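
/*
 * A hedged note on the fast MIPSR2 path above: "ins $1, \flags, 0, 1"
 * copies bit 0 of flags into bit 0 (IE) of the Status image in $1,
 * roughly:
 *
 *	status = (status & ~1UL) | (flags & 1UL);
 *
 * restoring IE without touching any other Status bit, while the di/ei
 * variant above it trades speed for immunity to the race the comment
 * mentions.
 */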


static inline void raw_local_irq_restore(unsigned long flags)
{
	unsigned long __tmp1;

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC kernel needs to do a software replay of queued
	 * IPIs, at the cost of branch and call overhead on each
	 * local_irq_restore()
	 */
	if (unlikely(!(flags & 0x0400)))
		smtc_ipi_replay();
#endif

	__asm__ __volatile__(
		"raw_local_irq_restore\t%0"
		: "=r" (__tmp1)
		: "0" (flags)
		: "memory");
}

static inline void __raw_local_irq_restore(unsigned long flags)
{
	unsigned long __tmp1;

	__asm__ __volatile__(
		"raw_local_irq_restore\t%0"
		: "=r" (__tmp1)
		: "0" (flags)
		: "memory");
}

static inline int raw_irqs_disabled_flags(unsigned long flags)
{
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC model uses TCStatus.IXMT to disable interrupts for a thread/CPU
	 */
	return flags & 0x400;
#else
	return !(flags & 1);
#endif
}
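
/*
 * Illustrative use, not part of the original file: together with
 * raw_local_save_flags() this is the usual "are interrupts off?" probe:
 *
 *	unsigned long flags;
 *
 *	raw_local_save_flags(flags);
 *	if (raw_irqs_disabled_flags(flags))
 *		return;			// already running with IRQs off
 */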

#endif /* !__ASSEMBLY__ */

/*
 * Do the CPU's IRQ-state tracing from assembly code.
 */
#ifdef CONFIG_TRACE_IRQFLAGS
/* Reload some registers clobbered by trace_hardirqs_on */
#ifdef CONFIG_64BIT
# define TRACE_IRQS_RELOAD_REGS						\
	LONG_L	$11, PT_R11(sp);					\
	LONG_L	$10, PT_R10(sp);					\
	LONG_L	$9, PT_R9(sp);						\
	LONG_L	$8, PT_R8(sp);						\
	LONG_L	$7, PT_R7(sp);						\
	LONG_L	$6, PT_R6(sp);						\
	LONG_L	$5, PT_R5(sp);						\
	LONG_L	$4, PT_R4(sp);						\
	LONG_L	$2, PT_R2(sp)
#else
# define TRACE_IRQS_RELOAD_REGS						\
	LONG_L	$7, PT_R7(sp);						\
	LONG_L	$6, PT_R6(sp);						\
	LONG_L	$5, PT_R5(sp);						\
	LONG_L	$4, PT_R4(sp);						\
	LONG_L	$2, PT_R2(sp)
#endif
# define TRACE_IRQS_ON							\
	CLI;	/* make sure trace_hardirqs_on() is called at kernel level */ \
	jal	trace_hardirqs_on
# define TRACE_IRQS_ON_RELOAD						\
	TRACE_IRQS_ON;							\
	TRACE_IRQS_RELOAD_REGS
# define TRACE_IRQS_OFF							\
	jal	trace_hardirqs_off
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_ON_RELOAD
# define TRACE_IRQS_OFF
#endif
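
/*
 * Hedged usage sketch: these macros are intended for assembly entry
 * paths (the real call sites live in the arch entry code, not here),
 * along the lines of:
 *
 *	SAVE_ALL
 *	TRACE_IRQS_OFF			# record that IRQs are now off
 *	...
 *	TRACE_IRQS_ON_RELOAD		# IRQs about to come back on;
 *					# also re-reads clobbered regs
 *
 * SAVE_ALL is assumed here as the usual register-save macro from
 * <asm/stackframe.h>.
 */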

#endif /* _ASM_IRQFLAGS_H */