/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <asm/asm.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
#include <asm/uaccess.h> /* for segment_eq() */

extern void (*r4k_blast_dcache)(void);
extern void (*r4k_blast_icache)(void);

/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations.  Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address.  This breaks with the tradition
 *    set by the R4000.  To keep unpleasant surprises from happening we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign extended address for 64-bit code.  To get away
 *    without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE	CKSEG0

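/*
 * Usage sketch: the blast functions below iterate
 *
 *	addr = INDEX_BASE + offset;	(offset < waysize)
 *	cache_op(indexop, addr | ws);	(ws selects the way)
 *
 * On 64-bit kernels CKSEG0 is the sign-extended constant
 * 0xffffffff80000000, so addr is always canonical and the index bits
 * come from a fixed, predictable region rather than an arbitrary VA.
 */
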
#define cache_op(op,addr)						\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	noreorder				\n"	\
	"	.set	arch=r4000				\n"	\
	"	cache	%0, %1					\n"	\
	"	.set	pop					\n"	\
	:								\
	: "i" (op), "R" (*(unsigned char *)(addr)))

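/*
 * Example use (a sketch; the real callers are the helpers below):
 *
 *	cache_op(Hit_Writeback_Inv_D, addr);
 *
 * expands to a single CACHE instruction.  The "i" constraint turns op
 * into the 5-bit op immediate of CACHE and the "R" constraint hands the
 * compiler the byte at addr as a memory operand, so it picks a legal
 * base register + offset encoding for us.
 */
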
#ifdef CONFIG_MIPS_MT

#define __iflush_prologue						\
	unsigned long redundance;					\
	extern int mt_n_iflushes;					\
	for (redundance = 0; redundance < mt_n_iflushes; redundance++) {

#define __iflush_epilogue						\
	}

#define __dflush_prologue						\
	unsigned long redundance;					\
	extern int mt_n_dflushes;					\
	for (redundance = 0; redundance < mt_n_dflushes; redundance++) {

#define __dflush_epilogue \
	}

#define __inv_dflush_prologue __dflush_prologue
#define __inv_dflush_epilogue __dflush_epilogue
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue __sflush_prologue
#define __inv_sflush_epilogue __sflush_epilogue

#else /* CONFIG_MIPS_MT */

#define __iflush_prologue {
#define __iflush_epilogue }
#define __dflush_prologue {
#define __dflush_epilogue }
#define __inv_dflush_prologue {
#define __inv_dflush_epilogue }
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue {
#define __inv_sflush_epilogue }

#endif /* CONFIG_MIPS_MT */

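/*
 * What the prologue/epilogue pairs buy us: with CONFIG_MIPS_MT each
 * flush helper below is wrapped in a repeat loop, so for example
 * flush_dcache_line() expands roughly to
 *
 *	for (redundance = 0; redundance < mt_n_dflushes; redundance++)
 *		cache_op(Hit_Writeback_Inv_D, addr);
 *
 * Without CONFIG_MIPS_MT the pairs degenerate to plain braces.
 */
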
static inline void flush_icache_line_indexed(unsigned long addr)
{
	__iflush_prologue
	cache_op(Index_Invalidate_I, addr);
	__iflush_epilogue
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	__dflush_prologue
	cache_op(Index_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
	__iflush_prologue
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		cache_op(Hit_Invalidate_I_Loongson2, addr);
		break;

	default:
		cache_op(Hit_Invalidate_I, addr);
		break;
	}
	__iflush_epilogue
}

static inline void flush_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Invalidate_D, addr);
	__dflush_epilogue
}

static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}

#define protected_cache_op(op,addr)				\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set	arch=r4000		\n"		\
	"1:	cache	%0, (%1)		\n"		\
	"2:	.set	pop			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 2b		\n"		\
	"	.previous"					\
	:							\
	: "i" (op), "r" (addr))

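/*
 * The __ex_table entry pairs the possibly faulting CACHE at label 1
 * with the fixup address at label 2: if the address is bad (say an
 * unmapped signal trampoline page), the fault handler resumes at 2 and
 * the operation is silently skipped instead of oopsing the kernel.
 * protected_cachee_op below is the same idea using the EVA CACHEE
 * instruction.
 */
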
#define protected_cachee_op(op,addr)				\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set	mips0			\n"		\
	"	.set	eva			\n"		\
	"1:	cachee	%0, (%1)		\n"		\
	"2:	.set	pop			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 2b		\n"		\
	"	.previous"					\
	:							\
	: "i" (op), "r" (addr))

/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
		break;

	default:
#ifdef CONFIG_EVA
		protected_cachee_op(Hit_Invalidate_I, addr);
#else
		protected_cache_op(Hit_Invalidate_I, addr);
#endif
		break;
	}
}

/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
 * caches.  At worst one cache line gets unnecessarily invalidated here,
 * so the penalty isn't severe.
 */
static inline void protected_writeback_dcache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
	protected_cachee_op(Hit_Writeback_Inv_D, addr);
#else
	protected_cache_op(Hit_Writeback_Inv_D, addr);
#endif
}

static inline void protected_writeback_scache_line(unsigned long addr)
{
	protected_cache_op(Hit_Writeback_Inv_SD, addr);
}

/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
	cache_op(Page_Invalidate_T, addr);
}

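/*
 * Each cacheNN_unroll32 macro below issues 32 CACHE ops on consecutive
 * NN-byte lines, i.e. one invocation covers lsize * 32 bytes: 0x200
 * bytes for 16-byte lines up to 0x1000 bytes for 128-byte lines.  The
 * blast loops further down therefore advance by lsize * 32 per call.
 */
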
#define cache16_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x010(%0)	\n"	\
	"	cache %1, 0x020(%0); cache %1, 0x030(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x050(%0)	\n"	\
	"	cache %1, 0x060(%0); cache %1, 0x070(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x090(%0)	\n"	\
	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)	\n"	\
	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x110(%0)	\n"	\
	"	cache %1, 0x120(%0); cache %1, 0x130(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x150(%0)	\n"	\
	"	cache %1, 0x160(%0); cache %1, 0x170(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x190(%0)	\n"	\
	"	cache %1, 0x1a0(%0); cache %1, 0x1b0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1d0(%0)	\n"	\
	"	cache %1, 0x1e0(%0); cache %1, 0x1f0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache32_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x020(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x060(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x120(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x160(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1a0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1e0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x220(%0)	\n"	\
	"	cache %1, 0x240(%0); cache %1, 0x260(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2a0(%0)	\n"	\
	"	cache %1, 0x2c0(%0); cache %1, 0x2e0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x320(%0)	\n"	\
	"	cache %1, 0x340(%0); cache %1, 0x360(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3a0(%0)	\n"	\
	"	cache %1, 0x3c0(%0); cache %1, 0x3e0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache64_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x040(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x140(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1c0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x240(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2c0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x340(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3c0(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x440(%0)	\n"	\
	"	cache %1, 0x480(%0); cache %1, 0x4c0(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x540(%0)	\n"	\
	"	cache %1, 0x580(%0); cache %1, 0x5c0(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x640(%0)	\n"	\
	"	cache %1, 0x680(%0); cache %1, 0x6c0(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x740(%0)	\n"	\
	"	cache %1, 0x780(%0); cache %1, 0x7c0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache128_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x080(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x180(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x280(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x380(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x480(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x580(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x680(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x780(%0)	\n"	\
	"	cache %1, 0x800(%0); cache %1, 0x880(%0)	\n"	\
	"	cache %1, 0x900(%0); cache %1, 0x980(%0)	\n"	\
	"	cache %1, 0xa00(%0); cache %1, 0xa80(%0)	\n"	\
	"	cache %1, 0xb00(%0); cache %1, 0xb80(%0)	\n"	\
	"	cache %1, 0xc00(%0); cache %1, 0xc80(%0)	\n"	\
	"	cache %1, 0xd00(%0); cache %1, 0xd80(%0)	\n"	\
	"	cache %1, 0xe00(%0); cache %1, 0xe80(%0)	\n"	\
	"	cache %1, 0xf00(%0); cache %1, 0xf80(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

/*
 * Perform the cache operation specified by op using a user mode virtual
 * address while in kernel mode.  These variants use the EVA CACHEE
 * instruction, which performs the access through the user address
 * segment mappings.
 */
#define cache16_unroll32_user(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x010(%0)	\n"	\
	"	cachee %1, 0x020(%0); cachee %1, 0x030(%0)	\n"	\
	"	cachee %1, 0x040(%0); cachee %1, 0x050(%0)	\n"	\
	"	cachee %1, 0x060(%0); cachee %1, 0x070(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x090(%0)	\n"	\
	"	cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0)	\n"	\
	"	cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0)	\n"	\
	"	cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x110(%0)	\n"	\
	"	cachee %1, 0x120(%0); cachee %1, 0x130(%0)	\n"	\
	"	cachee %1, 0x140(%0); cachee %1, 0x150(%0)	\n"	\
	"	cachee %1, 0x160(%0); cachee %1, 0x170(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x190(%0)	\n"	\
	"	cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0)	\n"	\
	"	cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0)	\n"	\
	"	cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache32_unroll32_user(base, op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x020(%0)	\n"	\
	"	cachee %1, 0x040(%0); cachee %1, 0x060(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x0a0(%0)	\n"	\
	"	cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x120(%0)	\n"	\
	"	cachee %1, 0x140(%0); cachee %1, 0x160(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x1a0(%0)	\n"	\
	"	cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0)	\n"	\
	"	cachee %1, 0x200(%0); cachee %1, 0x220(%0)	\n"	\
	"	cachee %1, 0x240(%0); cachee %1, 0x260(%0)	\n"	\
	"	cachee %1, 0x280(%0); cachee %1, 0x2a0(%0)	\n"	\
	"	cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0)	\n"	\
	"	cachee %1, 0x300(%0); cachee %1, 0x320(%0)	\n"	\
	"	cachee %1, 0x340(%0); cachee %1, 0x360(%0)	\n"	\
	"	cachee %1, 0x380(%0); cachee %1, 0x3a0(%0)	\n"	\
	"	cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache64_unroll32_user(base, op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x040(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x0c0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x140(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x1c0(%0)	\n"	\
	"	cachee %1, 0x200(%0); cachee %1, 0x240(%0)	\n"	\
	"	cachee %1, 0x280(%0); cachee %1, 0x2c0(%0)	\n"	\
	"	cachee %1, 0x300(%0); cachee %1, 0x340(%0)	\n"	\
	"	cachee %1, 0x380(%0); cachee %1, 0x3c0(%0)	\n"	\
	"	cachee %1, 0x400(%0); cachee %1, 0x440(%0)	\n"	\
	"	cachee %1, 0x480(%0); cachee %1, 0x4c0(%0)	\n"	\
	"	cachee %1, 0x500(%0); cachee %1, 0x540(%0)	\n"	\
	"	cachee %1, 0x580(%0); cachee %1, 0x5c0(%0)	\n"	\
	"	cachee %1, 0x600(%0); cachee %1, 0x640(%0)	\n"	\
	"	cachee %1, 0x680(%0); cachee %1, 0x6c0(%0)	\n"	\
	"	cachee %1, 0x700(%0); cachee %1, 0x740(%0)	\n"	\
	"	cachee %1, 0x780(%0); cachee %1, 0x7c0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)	\
static inline void extra##blast_##pfx##cache##lsize(void)		\
{									\
	unsigned long start = INDEX_BASE;				\
	unsigned long end = start + current_cpu_data.desc.waysize;	\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	__##pfx##flush_prologue						\
									\
	do {								\
		cache##lsize##_unroll32(start, hitop);			\
		start += lsize * 32;					\
	} while (start < end);						\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{									\
	unsigned long indexmask = current_cpu_data.desc.waysize - 1;	\
	unsigned long start = INDEX_BASE + (page & indexmask);		\
	unsigned long end = start + PAGE_SIZE;				\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
									\
	__##pfx##flush_epilogue						\
}

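/*
 * The instantiations below generate, per line size, functions named
 * e.g. (taking the 32-byte D-cache case):
 *
 *	blast_dcache32();			whole cache, by index
 *	blast_dcache32_page(page);		one page, by hit address
 *	blast_dcache32_page_indexed(page);	one page, by index
 *
 * The loongson2_ "extra" prefix yields loongson2_blast_icache32() etc.
 */
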
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )

#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	__##pfx##flush_prologue						\
									\
	do {								\
		cache##lsize##_unroll32_user(start, hitop);		\
		start += lsize * 32;					\
	} while (start < end);						\
									\
	__##pfx##flush_epilogue						\
}

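/*
 * These generate blast_dcache16_user_page(), blast_icache32_user_page()
 * and friends: the same per-page hit loop as above, but built on the
 * cacheNN_unroll32_user macros so the ops act on user addresses.
 */
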
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 16)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 32)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 64)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)

/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)	\
static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
						    unsigned long end)	\
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	__##pfx##flush_prologue						\
									\
	while (1) {							\
		prot##cache_op(hitop, addr);				\
		if (addr == aend)					\
			break;						\
		addr += lsize;						\
	}								\
									\
	__##pfx##flush_epilogue						\
}

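/*
 * Line rounding sketch: addr is start rounded down to a line boundary
 * and aend is the line holding the last byte (end - 1), so the loop is
 * inclusive of both.  With a 32-byte line size, start = 0x1005 and
 * end = 0x1040 give addr = 0x1000 and aend = 0x1020, i.e. two cache ops.
 */
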
#ifndef CONFIG_EVA

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )

#else

#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop)		\
static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
							unsigned long end) \
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	__##pfx##flush_prologue						\
									\
	if (segment_eq(get_fs(), USER_DS)) {				\
		while (1) {						\
			protected_cachee_op(hitop, addr);		\
			if (addr == aend)				\
				break;					\
			addr += lsize;					\
		}							\
	} else {							\
		while (1) {						\
			protected_cache_op(hitop, addr);		\
			if (addr == aend)				\
				break;					\
			addr += lsize;					\
		}							\
	}								\
									\
	__##pfx##flush_epilogue						\
}

__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)

#endif
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
	protected_, loongson2_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )

#endif /* _ASM_R4KCACHE_H */