/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 04, 07 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) MIPS Technologies, Inc.
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#ifndef _ASM_HAZARDS_H
#define _ASM_HAZARDS_H

#include <linux/stringify.h>

#define ___ssnop							\
	sll	$0, $0, 1

#define ___ehb								\
	sll	$0, $0, 3

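/*
 * Both barriers live in the NOP encoding space: "sll $0, $0, 1" is the
 * SSNOP encoding and "sll $0, $0, 3" is the EHB encoding, so on CPUs
 * that predate them both execute as ordinary no-ops.
 */
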
/*
 * TLB hazards
 */
#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_CAVIUM_OCTEON)

/*
 * MIPSR2 defines ehb for hazard avoidance
 */

#define __mtc0_tlbw_hazard						\
	___ehb

#define __tlbw_use_hazard						\
	___ehb

#define __tlb_probe_hazard						\
	___ehb

#define __irq_enable_hazard						\
	___ehb

#define __irq_disable_hazard						\
	___ehb

#define __back_to_back_c0_hazard					\
	___ehb

/*
 * gcc has a tradition of miscompiling the previous construct using the
 * address of a label as argument to inline assembler.  Gas, on the other
 * hand, has the annoying difference between la and dla, which are only
 * usable for 32-bit resp. 64-bit code, so they can't be used without
 * conditional compilation.  The alternative is switching the assembler to
 * 64-bit code, which happens to work right even for 32-bit code ...
 */
#define instruction_hazard()						\
do {									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__(						\
	"	.set	mips64r2				\n"	\
	"	dla	%0, 1f					\n"	\
	"	jr.hb	%0					\n"	\
	"	.set	mips0					\n"	\
	"1:							\n"	\
	: "=r" (tmp));							\
} while (0)
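
/*
 * Illustrative use (the helper names are assumptions, not part of this
 * file): after storing new instructions, a caller orders the icache
 * flush before the barrier so that the jr.hb above resumes execution in
 * the new code:
 *
 *	patch_insn(addr, insn);			// hypothetical helper
 *	local_flush_icache_range(start, end);
 *	instruction_hazard();
 */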

#elif (defined(CONFIG_CPU_MIPSR1) && !defined(CONFIG_MIPS_ALCHEMY)) || \
	defined(CONFIG_CPU_BMIPS)

/*
 * These are slightly complicated by the fact that we guarantee R1 kernels to
 * run fine on R2 processors.
 */
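/*
 * The trailing ___ehb is what makes that guarantee cheap: its encoding
 * (sll $0, $0, 3) executes as a plain nop on an R1 core, while on an R2
 * core it clears whatever hazard the preceding ssnops did not cover.
 */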

#define __mtc0_tlbw_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __tlbw_use_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __tlb_probe_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __irq_enable_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __irq_disable_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __back_to_back_c0_hazard					\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

/*
 * gcc has a tradition of miscompiling the previous construct using the
 * address of a label as argument to inline assembler.  Gas, on the other
 * hand, has the annoying difference between la and dla, which are only
 * usable for 32-bit resp. 64-bit code, so they can't be used without
 * conditional compilation.  The alternative is switching the assembler to
 * 64-bit code, which happens to work right even for 32-bit code ...
 */
#define __instruction_hazard()						\
do {									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__(						\
	"	.set	mips64r2				\n"	\
	"	dla	%0, 1f					\n"	\
	"	jr.hb	%0					\n"	\
	"	.set	mips0					\n"	\
	"1:							\n"	\
	: "=r" (tmp));							\
} while (0)

#define instruction_hazard()						\
do {									\
	if (cpu_has_mips_r2)						\
		__instruction_hazard();					\
} while (0)
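
/*
 * cpu_has_mips_r2 is evaluated at run time here, so an R1 kernel only
 * pays for the jr.hb when it actually finds itself on an R2 core; on a
 * true R1 core instruction_hazard() degenerates to nothing.
 */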

#elif defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_CPU_CAVIUM_OCTEON) || \
	defined(CONFIG_CPU_LOONGSON2) || defined(CONFIG_CPU_R10000) || \
	defined(CONFIG_CPU_R5500) || defined(CONFIG_CPU_XLR)

/*
 * R10000 rocks - all hazards are handled in hardware, so this becomes a
 * no-brainer.
 */

#define __mtc0_tlbw_hazard

#define __tlbw_use_hazard

#define __tlb_probe_hazard

#define __irq_enable_hazard

#define __irq_disable_hazard

#define __back_to_back_c0_hazard

#define instruction_hazard() do { } while (0)

#elif defined(CONFIG_CPU_SB1)

/*
 * Mostly like R4000 for historical reasons
 */
#define __mtc0_tlbw_hazard

#define __tlbw_use_hazard

#define __tlb_probe_hazard

#define __irq_enable_hazard

#define __irq_disable_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop

#define __back_to_back_c0_hazard

#define instruction_hazard() do { } while (0)

#else

/*
 * Finally the catchall case for all other processors including R4000, R4400,
 * R4600, R4700, R5000, RM7000, NEC VR41xx etc.
 *
 * The taken branch will result in a two-cycle penalty for the two killed
 * instructions on R4000 / R4400.  Other processors only have a single-cycle
 * hazard, so this is a nice trick to get optimal code for a range of
 * processors.
 */
#define __mtc0_tlbw_hazard						\
	nop;								\
	nop

#define __tlbw_use_hazard						\
	nop;								\
	nop;								\
	nop

#define __tlb_probe_hazard						\
	nop;								\
	nop;								\
	nop

#define __irq_enable_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop

#define __irq_disable_hazard						\
	nop;								\
	nop;								\
	nop

#define __back_to_back_c0_hazard					\
	___ssnop;							\
	___ssnop;							\
	___ssnop

#define instruction_hazard() do { } while (0)

#endif


/* FPU hazards */

#if defined(CONFIG_CPU_SB1)

#define __enable_fpu_hazard						\
	.set	push;							\
	.set	mips64;							\
	.set	noreorder;						\
	___ssnop;							\
	bnezl	$0, .+4;						\
	___ssnop;							\
	.set	pop
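
/*
 * Note: bnezl compares $0 against zero, so the branch above is never
 * taken and its branch-likely semantics nullify the ssnop in the delay
 * slot; the never-taken branch-likely is (presumably) what forces the
 * SB1 pipeline to drain here.
 */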

#define __disable_fpu_hazard

#elif defined(CONFIG_CPU_MIPSR2)

#define __enable_fpu_hazard						\
	___ehb

#define __disable_fpu_hazard						\
	___ehb

#else

#define __enable_fpu_hazard						\
	nop;								\
	nop;								\
	nop;								\
	nop
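
/*
 * Four nops are conservative spacing for pre-R2 cores: a write that
 * flips Status.CU1 only takes effect a few pipeline stages later, so
 * the padding keeps the first FP instruction from being executed while
 * CP1 still appears disabled.
 */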

#define __disable_fpu_hazard						\
	___ehb

#endif

#ifdef __ASSEMBLY__

#define _ssnop ___ssnop
#define _ehb ___ehb
#define mtc0_tlbw_hazard __mtc0_tlbw_hazard
#define tlbw_use_hazard __tlbw_use_hazard
#define tlb_probe_hazard __tlb_probe_hazard
#define irq_enable_hazard __irq_enable_hazard
#define irq_disable_hazard __irq_disable_hazard
#define back_to_back_c0_hazard __back_to_back_c0_hazard
#define enable_fpu_hazard __enable_fpu_hazard
#define disable_fpu_hazard __disable_fpu_hazard

#else

#define _ssnop()							\
do {									\
	__asm__ __volatile__(						\
	__stringify(___ssnop)						\
	);								\
} while (0)

#define _ehb()								\
do {									\
	__asm__ __volatile__(						\
	__stringify(___ehb)						\
	);								\
} while (0)
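
/*
 * __stringify() turns the assembler macros above into inline asm
 * strings, i.e. _ssnop() expands to
 *
 *	__asm__ __volatile__("sll	$0, $0, 1");
 *
 * so C and assembler callers share one definition of each sequence.
 */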


#define mtc0_tlbw_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__mtc0_tlbw_hazard)					\
	);								\
} while (0)


#define tlbw_use_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__tlbw_use_hazard)					\
	);								\
} while (0)


#define tlb_probe_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__tlb_probe_hazard)					\
	);								\
} while (0)


#define irq_enable_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__irq_enable_hazard)				\
	);								\
} while (0)


#define irq_disable_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__irq_disable_hazard)				\
	);								\
} while (0)


#define back_to_back_c0_hazard()					\
do {									\
	__asm__ __volatile__(						\
	__stringify(__back_to_back_c0_hazard)				\
	);								\
} while (0)


#define enable_fpu_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__enable_fpu_hazard)				\
	);								\
} while (0)


#define disable_fpu_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__disable_fpu_hazard)				\
	);								\
} while (0)

/*
 * MIPS R2 instruction hazard barrier.  Needs to be called as a subroutine.
 */
extern void mips_ihb(void);
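
/*
 * Usage sketch (illustrative, not a prescribed calling convention):
 * C code that needs an explicit instruction hazard barrier can simply
 * call it,
 *
 *	change_c0_status(ST0_CU1, ST0_CU1);
 *	mips_ihb();
 *
 * the hazard being cleared by the (presumed) jr.hb that returns from
 * the subroutine.
 */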

#endif /* __ASSEMBLY__ */

#endif /* _ASM_HAZARDS_H */