/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <linux/stringify.h>

#include <asm/asm.h>
#include <asm/cacheops.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
#include <linux/uaccess.h> /* for uaccess_kernel() */

extern void (*r4k_blast_dcache)(void);
extern void (*r4k_blast_icache)(void);

/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations.  Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address.  This breaks with the tradition
 *    set by the R4000.  To keep unpleasant surprises from happening we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign extended address for 64-bit code.  To get away
 *    without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE	CKSEG0

#define cache_op(op, addr)						\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	noreorder				\n"	\
	"	.set "MIPS_ISA_ARCH_LEVEL"			\n"	\
	"	cache	%0, %1					\n"	\
	"	.set	pop					\n"	\
	:								\
	: "i" (op), "R" (*(unsigned char *)(addr)))
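
/*
 * Illustrative usage (a sketch, not an extra definition in this header):
 * the "i" constraint requires op to be a compile-time constant from
 * <asm/cacheops.h>, and the "R" constraint hands the assembler a memory
 * reference it can encode directly into the cache instruction:
 *
 *	static void wb_inv_one_dcache_line(unsigned long addr)
 *	{
 *		cache_op(Hit_Writeback_Inv_D, addr);
 *	}
 */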

#ifdef CONFIG_MIPS_MT

#define __iflush_prologue						\
	unsigned long redundance;					\
	extern int mt_n_iflushes;					\
	for (redundance = 0; redundance < mt_n_iflushes; redundance++) {

#define __iflush_epilogue						\
	}

#define __dflush_prologue						\
	unsigned long redundance;					\
	extern int mt_n_dflushes;					\
	for (redundance = 0; redundance < mt_n_dflushes; redundance++) {

#define __dflush_epilogue \
	}

#define __inv_dflush_prologue __dflush_prologue
#define __inv_dflush_epilogue __dflush_epilogue
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue __sflush_prologue
#define __inv_sflush_epilogue __sflush_epilogue

#else /* CONFIG_MIPS_MT */

#define __iflush_prologue {
#define __iflush_epilogue }
#define __dflush_prologue {
#define __dflush_epilogue }
#define __inv_dflush_prologue {
#define __inv_dflush_epilogue }
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue {
#define __inv_sflush_epilogue }

#endif /* CONFIG_MIPS_MT */
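
/*
 * Note: under CONFIG_MIPS_MT the prologue/epilogue pairs above wrap each
 * flush in a loop that repeats it mt_n_iflushes / mt_n_dflushes times;
 * in the non-MT configuration they degenerate to plain braces.  As a
 * sketch of the expansion, flush_dcache_line() below with CONFIG_MIPS_MT
 * set effectively becomes:
 *
 *	unsigned long redundance;
 *	extern int mt_n_dflushes;
 *	for (redundance = 0; redundance < mt_n_dflushes; redundance++) {
 *		cache_op(Hit_Writeback_Inv_D, addr);
 *	}
 */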

static inline void flush_icache_line_indexed(unsigned long addr)
{
	__iflush_prologue
	cache_op(Index_Invalidate_I, addr);
	__iflush_epilogue
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	__dflush_prologue
	cache_op(Index_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
	__iflush_prologue
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		cache_op(Hit_Invalidate_I_Loongson2, addr);
		break;

	default:
		cache_op(Hit_Invalidate_I, addr);
		break;
	}
	__iflush_epilogue
}

static inline void flush_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Invalidate_D, addr);
	__dflush_epilogue
}

static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}
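
/*
 * Usage sketch (illustrative, assuming addr is a valid kernel virtual
 * address): write one dirty data line back to memory and drop it, then
 * discard the matching instruction line, e.g. after patching code:
 *
 *	flush_dcache_line(addr);	/- Hit_Writeback_Inv_D -/
 *	flush_icache_line(addr);	/- Hit_Invalidate_I -/
 */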

#define protected_cache_op(op, addr)				\
({								\
	int __err = 0;						\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set "MIPS_ISA_ARCH_LEVEL"	\n"		\
	"1:	cache	%1, (%2)		\n"		\
	"2:	.insn				\n"		\
	"	.set	pop			\n"		\
	"	.section .fixup,\"ax\"		\n"		\
	"3:	li	%0, %3			\n"		\
	"	j	2b			\n"		\
	"	.previous			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 3b		\n"		\
	"	.previous"					\
	: "+r" (__err)						\
	: "i" (op), "r" (addr), "i" (-EFAULT));			\
	__err;							\
})
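
/*
 * How the fixup above works: the __ex_table entry maps the cache
 * instruction at label 1 to the fixup code at label 3.  If the access
 * faults, the exception handler looks the faulting PC up in __ex_table
 * and branches to the fixup, which loads -EFAULT into __err and resumes
 * at label 2.  On the non-faulting path __err stays 0.
 */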

#define protected_cachee_op(op, addr)				\
({								\
	int __err = 0;						\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set	mips0			\n"		\
	"	.set	eva			\n"		\
	"1:	cachee	%1, (%2)		\n"		\
	"2:	.insn				\n"		\
	"	.set	pop			\n"		\
	"	.section .fixup,\"ax\"		\n"		\
	"3:	li	%0, %3			\n"		\
	"	j	2b			\n"		\
	"	.previous			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 3b		\n"		\
	"	.previous"					\
	: "+r" (__err)						\
	: "i" (op), "r" (addr), "i" (-EFAULT));			\
	__err;							\
})

/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline int protected_flush_icache_line(unsigned long addr)
{
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		return protected_cache_op(Hit_Invalidate_I_Loongson2, addr);

	default:
#ifdef CONFIG_EVA
		return protected_cachee_op(Hit_Invalidate_I, addr);
#else
		return protected_cache_op(Hit_Invalidate_I, addr);
#endif
	}
}
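
/*
 * Illustrative caller (a sketch, not part of this header): flushing a
 * user mode signal trampoline, where the page may legitimately be
 * unmapped and the cache op may fault:
 *
 *	if (protected_flush_icache_line(user_addr))
 *		return -EFAULT;
 */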

/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
 * caches.  Only a single cache line gets needlessly invalidated here, so the
 * penalty is modest.
 */
static inline int protected_writeback_dcache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
	return protected_cachee_op(Hit_Writeback_Inv_D, addr);
#else
	return protected_cache_op(Hit_Writeback_Inv_D, addr);
#endif
}

static inline int protected_writeback_scache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
	return protected_cachee_op(Hit_Writeback_Inv_SD, addr);
#else
	return protected_cache_op(Hit_Writeback_Inv_SD, addr);
#endif
}

/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
	cache_op(Page_Invalidate_T, addr);
}

#ifndef CONFIG_CPU_MIPSR6
#define cache16_unroll32(base, op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x010(%0)	\n"	\
	"	cache %1, 0x020(%0); cache %1, 0x030(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x050(%0)	\n"	\
	"	cache %1, 0x060(%0); cache %1, 0x070(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x090(%0)	\n"	\
	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)	\n"	\
	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x110(%0)	\n"	\
	"	cache %1, 0x120(%0); cache %1, 0x130(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x150(%0)	\n"	\
	"	cache %1, 0x160(%0); cache %1, 0x170(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x190(%0)	\n"	\
	"	cache %1, 0x1a0(%0); cache %1, 0x1b0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1d0(%0)	\n"	\
	"	cache %1, 0x1e0(%0); cache %1, 0x1f0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));
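
/*
 * Note on the unroll factor: each of these unroll macros issues 32
 * cache ops at consecutive line offsets, so one invocation covers
 * 32 * lsize bytes (0x200 for 16-byte lines, up to 0x1000 for 128-byte
 * lines).  The blast loops further down therefore advance their address
 * by lsize * 32 per iteration.
 */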

#define cache32_unroll32(base, op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x020(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x060(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x120(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x160(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1a0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1e0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x220(%0)	\n"	\
	"	cache %1, 0x240(%0); cache %1, 0x260(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2a0(%0)	\n"	\
	"	cache %1, 0x2c0(%0); cache %1, 0x2e0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x320(%0)	\n"	\
	"	cache %1, 0x340(%0); cache %1, 0x360(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3a0(%0)	\n"	\
	"	cache %1, 0x3c0(%0); cache %1, 0x3e0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache64_unroll32(base, op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x040(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x140(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1c0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x240(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2c0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x340(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3c0(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x440(%0)	\n"	\
	"	cache %1, 0x480(%0); cache %1, 0x4c0(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x540(%0)	\n"	\
	"	cache %1, 0x580(%0); cache %1, 0x5c0(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x640(%0)	\n"	\
	"	cache %1, 0x680(%0); cache %1, 0x6c0(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x740(%0)	\n"	\
	"	cache %1, 0x780(%0); cache %1, 0x7c0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache128_unroll32(base, op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x080(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x180(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x280(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x380(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x480(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x580(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x680(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x780(%0)	\n"	\
	"	cache %1, 0x800(%0); cache %1, 0x880(%0)	\n"	\
	"	cache %1, 0x900(%0); cache %1, 0x980(%0)	\n"	\
	"	cache %1, 0xa00(%0); cache %1, 0xa80(%0)	\n"	\
	"	cache %1, 0xb00(%0); cache %1, 0xb80(%0)	\n"	\
	"	cache %1, 0xc00(%0); cache %1, 0xc80(%0)	\n"	\
	"	cache %1, 0xd00(%0); cache %1, 0xd80(%0)	\n"	\
	"	cache %1, 0xe00(%0); cache %1, 0xe80(%0)	\n"	\
	"	cache %1, 0xf00(%0); cache %1, 0xf80(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#else
/*
 * MIPS R6 changed the cache opcode and moved to an 8-bit offset field.
 * This means we now need to increment the base register before we flush
 * more cache lines.
 */
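/*
 * Concretely: with an 8-bit offset the largest displacement reachable
 * from a given base register is 0x0ff, so after every 0x100 bytes worth
 * of offsets the macros below advance the base with
 * "LONG_ADDIU $1, base, 0x100" and continue relative to $1 (the
 * assembler temporary, made safe to use here by .set noat).
 */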
#define cache16_unroll32(base, op)				\
	__asm__ __volatile__(					\
	"	.set push\n"					\
	"	.set noreorder\n"				\
	"	.set mips64r6\n"				\
	"	.set noat\n"					\
	"	cache %1, 0x000(%0); cache %1, 0x010(%0)\n"	\
	"	cache %1, 0x020(%0); cache %1, 0x030(%0)\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x050(%0)\n"	\
	"	cache %1, 0x060(%0); cache %1, 0x070(%0)\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x090(%0)\n"	\
	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n"	\
	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, %0, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x010($1)\n"	\
	"	cache %1, 0x020($1); cache %1, 0x030($1)\n"	\
	"	cache %1, 0x040($1); cache %1, 0x050($1)\n"	\
	"	cache %1, 0x060($1); cache %1, 0x070($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x090($1)\n"	\
	"	cache %1, 0x0a0($1); cache %1, 0x0b0($1)\n"	\
	"	cache %1, 0x0c0($1); cache %1, 0x0d0($1)\n"	\
	"	cache %1, 0x0e0($1); cache %1, 0x0f0($1)\n"	\
	"	.set pop\n"					\
		:						\
		: "r" (base),					\
		  "i" (op));

#define cache32_unroll32(base, op)				\
	__asm__ __volatile__(					\
	"	.set push\n"					\
	"	.set noreorder\n"				\
	"	.set mips64r6\n"				\
	"	.set noat\n"					\
	"	cache %1, 0x000(%0); cache %1, 0x020(%0)\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x060(%0)\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, %0, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x020($1)\n"	\
	"	cache %1, 0x040($1); cache %1, 0x060($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n"	\
	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x020($1)\n"	\
	"	cache %1, 0x040($1); cache %1, 0x060($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n"	\
	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x020($1)\n"	\
	"	cache %1, 0x040($1); cache %1, 0x060($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n"	\
	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"	\
	"	.set pop\n"					\
		:						\
		: "r" (base),					\
		  "i" (op));

#define cache64_unroll32(base, op)				\
	__asm__ __volatile__(					\
	"	.set push\n"					\
	"	.set noreorder\n"				\
	"	.set mips64r6\n"				\
	"	.set noat\n"					\
	"	cache %1, 0x000(%0); cache %1, 0x040(%0)\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, %0, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	.set pop\n"					\
		:						\
		: "r" (base),					\
		  "i" (op));

#define cache128_unroll32(base, op)				\
	__asm__ __volatile__(					\
	"	.set push\n"					\
	"	.set noreorder\n"				\
	"	.set mips64r6\n"				\
	"	.set noat\n"					\
	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, %0, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	.set pop\n"					\
		:						\
		: "r" (base),					\
		  "i" (op));
#endif /* CONFIG_CPU_MIPSR6 */

/*
 * Perform the cache operation specified by op using a user mode virtual
 * address while in kernel mode.
 */
#define cache16_unroll32_user(base, op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x010(%0)	\n"	\
	"	cachee %1, 0x020(%0); cachee %1, 0x030(%0)	\n"	\
	"	cachee %1, 0x040(%0); cachee %1, 0x050(%0)	\n"	\
	"	cachee %1, 0x060(%0); cachee %1, 0x070(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x090(%0)	\n"	\
	"	cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0)	\n"	\
	"	cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0)	\n"	\
	"	cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x110(%0)	\n"	\
	"	cachee %1, 0x120(%0); cachee %1, 0x130(%0)	\n"	\
	"	cachee %1, 0x140(%0); cachee %1, 0x150(%0)	\n"	\
	"	cachee %1, 0x160(%0); cachee %1, 0x170(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x190(%0)	\n"	\
	"	cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0)	\n"	\
	"	cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0)	\n"	\
	"	cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache32_unroll32_user(base, op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x020(%0)	\n"	\
	"	cachee %1, 0x040(%0); cachee %1, 0x060(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x0a0(%0)	\n"	\
	"	cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x120(%0)	\n"	\
	"	cachee %1, 0x140(%0); cachee %1, 0x160(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x1a0(%0)	\n"	\
	"	cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0)	\n"	\
	"	cachee %1, 0x200(%0); cachee %1, 0x220(%0)	\n"	\
	"	cachee %1, 0x240(%0); cachee %1, 0x260(%0)	\n"	\
	"	cachee %1, 0x280(%0); cachee %1, 0x2a0(%0)	\n"	\
	"	cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0)	\n"	\
	"	cachee %1, 0x300(%0); cachee %1, 0x320(%0)	\n"	\
	"	cachee %1, 0x340(%0); cachee %1, 0x360(%0)	\n"	\
	"	cachee %1, 0x380(%0); cachee %1, 0x3a0(%0)	\n"	\
	"	cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache64_unroll32_user(base, op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x040(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x0c0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x140(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x1c0(%0)	\n"	\
	"	cachee %1, 0x200(%0); cachee %1, 0x240(%0)	\n"	\
	"	cachee %1, 0x280(%0); cachee %1, 0x2c0(%0)	\n"	\
	"	cachee %1, 0x300(%0); cachee %1, 0x340(%0)	\n"	\
	"	cachee %1, 0x380(%0); cachee %1, 0x3c0(%0)	\n"	\
	"	cachee %1, 0x400(%0); cachee %1, 0x440(%0)	\n"	\
	"	cachee %1, 0x480(%0); cachee %1, 0x4c0(%0)	\n"	\
	"	cachee %1, 0x500(%0); cachee %1, 0x540(%0)	\n"	\
	"	cachee %1, 0x580(%0); cachee %1, 0x5c0(%0)	\n"	\
	"	cachee %1, 0x600(%0); cachee %1, 0x640(%0)	\n"	\
	"	cachee %1, 0x680(%0); cachee %1, 0x6c0(%0)	\n"	\
	"	cachee %1, 0x700(%0); cachee %1, 0x740(%0)	\n"	\
	"	cachee %1, 0x780(%0); cachee %1, 0x7c0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)	\
static inline void extra##blast_##pfx##cache##lsize(void)		\
{									\
	unsigned long start = INDEX_BASE;				\
	unsigned long end = start + current_cpu_data.desc.waysize;	\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	__##pfx##flush_prologue						\
									\
	do {								\
		cache##lsize##_unroll32(start, hitop);			\
		start += lsize * 32;					\
	} while (start < end);						\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{									\
	unsigned long indexmask = current_cpu_data.desc.waysize - 1;	\
	unsigned long start = INDEX_BASE + (page & indexmask);		\
	unsigned long end = start + PAGE_SIZE;				\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
									\
	__##pfx##flush_epilogue						\
}

__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
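
/*
 * The builder above generates entry points named
 * [extra]blast_<pfx>cache<lsize>[_page|_page_indexed].  For example,
 * the instantiations above provide (sketch of the generated names):
 *
 *	blast_dcache32();			/- whole D-cache, by index -/
 *	blast_icache64_page(page);		/- one page, by hit ops -/
 *	blast_scache128_page_indexed(page);	/- one page, by index ops -/
 */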

#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize)	\
static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	__##pfx##flush_prologue						\
									\
	do {								\
		cache##lsize##_unroll32_user(start, hitop);		\
		start += lsize * 32;					\
	} while (start < end);						\
									\
	__##pfx##flush_epilogue						\
}

__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 16)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 32)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 64)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)

/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)	\
static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
						    unsigned long end)	\
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	__##pfx##flush_prologue						\
									\
	while (1) {							\
		prot##cache_op(hitop, addr);				\
		if (addr == aend)					\
			break;						\
		addr += lsize;						\
	}								\
									\
	__##pfx##flush_epilogue						\
}

#ifndef CONFIG_EVA

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )

#else

#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop)		\
static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
							unsigned long end) \
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	__##pfx##flush_prologue						\
									\
	if (!uaccess_kernel()) {					\
		while (1) {						\
			protected_cachee_op(hitop, addr);		\
			if (addr == aend)				\
				break;					\
			addr += lsize;					\
		}							\
	} else {							\
		while (1) {						\
			protected_cache_op(hitop, addr);		\
			if (addr == aend)				\
				break;					\
			addr += lsize;					\
		}							\
	}								\
									\
	__##pfx##flush_epilogue						\
}

__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)

#endif
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
	protected_, loongson2_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
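
/*
 * Usage sketch (illustrative): writing a buffer back before handing it
 * to a device, or discarding stale lines after the device wrote to it,
 * using the range helpers generated above:
 *
 *	blast_dcache_range(vaddr, vaddr + size);	/- writeback + inv -/
 *	blast_inv_dcache_range(vaddr, vaddr + size);	/- invalidate only -/
 */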

#endif /* _ASM_R4KCACHE_H */