/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <asm/asm.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
#include <asm/uaccess.h> /* for segment_eq() */

extern void (*r4k_blast_dcache)(void);
extern void (*r4k_blast_icache)(void);

/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations.  Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address.  This breaks with the tradition
 *    set by the R4000.  To keep unpleasant surprises from happening we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign-extended address for 64-bit code.  To get away
 *    without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE	CKSEG0
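
/*
 * For illustration only (not part of the API): an indexed cache op selects a
 * line purely from the low address bits, so the blast helpers further down
 * form their operands from INDEX_BASE plus a way/set offset, roughly
 *
 *	addr = INDEX_BASE + (way << current_cpu_data.dcache.waybit)
 *			  + (set * current_cpu_data.dcache.linesz);
 *	cache_op(Index_Writeback_Inv_D, addr);
 *
 * where "way" and "set" are hypothetical loop counters.  Because CKSEG0 is a
 * signed 32-bit constant, the arithmetic yields a correctly sign-extended
 * address on both 32-bit and 64-bit kernels.
 */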

#define cache_op(op,addr)						\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	noreorder				\n"	\
	"	.set	arch=r4000				\n"	\
	"	cache	%0, %1					\n"	\
	"	.set	pop					\n"	\
	:								\
	: "i" (op), "R" (*(unsigned char *)(addr)))
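
/*
 * Usage sketch (illustrative only): the line-sized helpers below simply wrap
 * cache_op() with an opcode from <asm/cacheops.h>, e.g. writing back and
 * invalidating the single D-cache line that covers an address:
 *
 *	cache_op(Hit_Writeback_Inv_D, addr & ~(cpu_dcache_line_size() - 1));
 */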

#ifdef CONFIG_MIPS_MT

/*
 * Optionally force single-threaded execution during I-cache and D-cache
 * flushes.
 */
#define PROTECT_CACHE_FLUSHES 1

#ifdef PROTECT_CACHE_FLUSHES

extern int mt_protiflush;
extern int mt_protdflush;
extern void mt_cflush_lockdown(void);
extern void mt_cflush_release(void);

#define BEGIN_MT_IPROT \
	unsigned long flags = 0;			\
	unsigned long mtflags = 0;			\
	if (mt_protiflush) {				\
		local_irq_save(flags);			\
		ehb();					\
		mtflags = dvpe();			\
		mt_cflush_lockdown();			\
	}

#define END_MT_IPROT \
	if (mt_protiflush) {				\
		mt_cflush_release();			\
		evpe(mtflags);				\
		local_irq_restore(flags);		\
	}

#define BEGIN_MT_DPROT \
	unsigned long flags = 0;			\
	unsigned long mtflags = 0;			\
	if (mt_protdflush) {				\
		local_irq_save(flags);			\
		ehb();					\
		mtflags = dvpe();			\
		mt_cflush_lockdown();			\
	}

#define END_MT_DPROT \
	if (mt_protdflush) {				\
		mt_cflush_release();			\
		evpe(mtflags);				\
		local_irq_restore(flags);		\
	}

#else

#define BEGIN_MT_IPROT
#define BEGIN_MT_DPROT
#define END_MT_IPROT
#define END_MT_DPROT

#endif /* PROTECT_CACHE_FLUSHES */

#define __iflush_prologue						\
	unsigned long redundance;					\
	extern int mt_n_iflushes;					\
	BEGIN_MT_IPROT							\
	for (redundance = 0; redundance < mt_n_iflushes; redundance++) {

#define __iflush_epilogue						\
	END_MT_IPROT							\
	}

#define __dflush_prologue						\
	unsigned long redundance;					\
	extern int mt_n_dflushes;					\
	BEGIN_MT_DPROT							\
	for (redundance = 0; redundance < mt_n_dflushes; redundance++) {

#define __dflush_epilogue \
	END_MT_DPROT	 \
	}

#define __inv_dflush_prologue __dflush_prologue
#define __inv_dflush_epilogue __dflush_epilogue
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue __sflush_prologue
#define __inv_sflush_epilogue __sflush_epilogue

#else /* CONFIG_MIPS_MT */

#define __iflush_prologue {
#define __iflush_epilogue }
#define __dflush_prologue {
#define __dflush_epilogue }
#define __inv_dflush_prologue {
#define __inv_dflush_epilogue }
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue {
#define __inv_sflush_epilogue }

#endif /* CONFIG_MIPS_MT */
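
/*
 * The __*flush_prologue / __*flush_epilogue pairs bracket every flush
 * primitive below.  With CONFIG_MIPS_MT they open a loop that repeats the
 * flush mt_n_iflushes / mt_n_dflushes times (optionally with other threads
 * locked out, see above); otherwise they degenerate to a plain block.
 */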

static inline void flush_icache_line_indexed(unsigned long addr)
{
	__iflush_prologue
	cache_op(Index_Invalidate_I, addr);
	__iflush_epilogue
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	__dflush_prologue
	cache_op(Index_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
	__iflush_prologue
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		cache_op(Hit_Invalidate_I_Loongson2, addr);
		break;

	default:
		cache_op(Hit_Invalidate_I, addr);
		break;
	}
	__iflush_epilogue
}

static inline void flush_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Invalidate_D, addr);
	__dflush_epilogue
}

static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}

#define protected_cache_op(op,addr)				\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set	arch=r4000		\n"		\
	"1:	cache	%0, (%1)		\n"		\
	"2:	.set	pop			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 2b		\n"		\
	"	.previous"					\
	:							\
	: "i" (op), "r" (addr))

#define protected_cachee_op(op,addr)				\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set	mips0			\n"		\
	"	.set	eva			\n"		\
	"1:	cachee	%0, (%1)		\n"		\
	"2:	.set	pop			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 2b		\n"		\
	"	.previous"					\
	:							\
	: "i" (op), "r" (addr))
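
/*
 * Note on the protected_* variants above: label 1 marks the cache/cachee
 * instruction that may fault on a bad user address, label 2 the instruction
 * following it.  The __ex_table entry makes the exception handler resume at
 * label 2, so a faulting cache op is silently skipped rather than taking the
 * kernel down.
 */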

/*
 * The following functions are for badland addresses like signal trampolines.
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
		break;

	default:
#ifdef CONFIG_EVA
		protected_cachee_op(Hit_Invalidate_I, addr);
#else
		protected_cache_op(Hit_Invalidate_I, addr);
#endif
		break;
	}
}

/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
 * caches.  Only a single cache line gets needlessly invalidated per call, so
 * the penalty is modest.
 */
static inline void protected_writeback_dcache_line(unsigned long addr)
{
	protected_cache_op(Hit_Writeback_Inv_D, addr);
}

static inline void protected_writeback_scache_line(unsigned long addr)
{
	protected_cache_op(Hit_Writeback_Inv_SD, addr);
}
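
/*
 * Usage sketch (illustrative only, "uaddr" is a hypothetical user address):
 * after writing a signal trampoline into user memory, push it out of the
 * D-cache and discard any stale I-cache copy before the task may execute it:
 *
 *	protected_writeback_dcache_line(uaddr & ~(cpu_dcache_line_size() - 1));
 *	protected_flush_icache_line(uaddr & ~(cpu_icache_line_size() - 1));
 */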

/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
	cache_op(Page_Invalidate_T, addr);
}

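/*
 * The cacheNN_unroll32 macros below issue 32 consecutive cache ops on a
 * cache with NN-byte lines, i.e. each invocation covers NN * 32 bytes.
 * Callers therefore advance their address by "lsize * 32" per iteration.
 */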
#define cache16_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x010(%0)	\n"	\
	"	cache %1, 0x020(%0); cache %1, 0x030(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x050(%0)	\n"	\
	"	cache %1, 0x060(%0); cache %1, 0x070(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x090(%0)	\n"	\
	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)	\n"	\
	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x110(%0)	\n"	\
	"	cache %1, 0x120(%0); cache %1, 0x130(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x150(%0)	\n"	\
	"	cache %1, 0x160(%0); cache %1, 0x170(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x190(%0)	\n"	\
	"	cache %1, 0x1a0(%0); cache %1, 0x1b0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1d0(%0)	\n"	\
	"	cache %1, 0x1e0(%0); cache %1, 0x1f0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache32_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x020(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x060(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x120(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x160(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1a0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1e0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x220(%0)	\n"	\
	"	cache %1, 0x240(%0); cache %1, 0x260(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2a0(%0)	\n"	\
	"	cache %1, 0x2c0(%0); cache %1, 0x2e0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x320(%0)	\n"	\
	"	cache %1, 0x340(%0); cache %1, 0x360(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3a0(%0)	\n"	\
	"	cache %1, 0x3c0(%0); cache %1, 0x3e0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache64_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x040(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x140(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1c0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x240(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2c0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x340(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3c0(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x440(%0)	\n"	\
	"	cache %1, 0x480(%0); cache %1, 0x4c0(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x540(%0)	\n"	\
	"	cache %1, 0x580(%0); cache %1, 0x5c0(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x640(%0)	\n"	\
	"	cache %1, 0x680(%0); cache %1, 0x6c0(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x740(%0)	\n"	\
	"	cache %1, 0x780(%0); cache %1, 0x7c0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache128_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x080(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x180(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x280(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x380(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x480(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x580(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x680(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x780(%0)	\n"	\
	"	cache %1, 0x800(%0); cache %1, 0x880(%0)	\n"	\
	"	cache %1, 0x900(%0); cache %1, 0x980(%0)	\n"	\
	"	cache %1, 0xa00(%0); cache %1, 0xa80(%0)	\n"	\
	"	cache %1, 0xb00(%0); cache %1, 0xb80(%0)	\n"	\
	"	cache %1, 0xc00(%0); cache %1, 0xc80(%0)	\n"	\
	"	cache %1, 0xd00(%0); cache %1, 0xd80(%0)	\n"	\
	"	cache %1, 0xe00(%0); cache %1, 0xe80(%0)	\n"	\
	"	cache %1, 0xf00(%0); cache %1, 0xf80(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

/*
 * Perform the cache operation specified by op using a user mode virtual
 * address while in kernel mode.
 */
#define cache16_unroll32_user(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x010(%0)	\n"	\
	"	cachee %1, 0x020(%0); cachee %1, 0x030(%0)	\n"	\
	"	cachee %1, 0x040(%0); cachee %1, 0x050(%0)	\n"	\
	"	cachee %1, 0x060(%0); cachee %1, 0x070(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x090(%0)	\n"	\
	"	cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0)	\n"	\
	"	cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0)	\n"	\
	"	cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x110(%0)	\n"	\
	"	cachee %1, 0x120(%0); cachee %1, 0x130(%0)	\n"	\
	"	cachee %1, 0x140(%0); cachee %1, 0x150(%0)	\n"	\
	"	cachee %1, 0x160(%0); cachee %1, 0x170(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x190(%0)	\n"	\
	"	cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0)	\n"	\
	"	cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0)	\n"	\
	"	cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache32_unroll32_user(base, op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x020(%0)	\n"	\
	"	cachee %1, 0x040(%0); cachee %1, 0x060(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x0a0(%0)	\n"	\
	"	cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x120(%0)	\n"	\
	"	cachee %1, 0x140(%0); cachee %1, 0x160(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x1a0(%0)	\n"	\
	"	cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0)	\n"	\
	"	cachee %1, 0x200(%0); cachee %1, 0x220(%0)	\n"	\
	"	cachee %1, 0x240(%0); cachee %1, 0x260(%0)	\n"	\
	"	cachee %1, 0x280(%0); cachee %1, 0x2a0(%0)	\n"	\
	"	cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0)	\n"	\
	"	cachee %1, 0x300(%0); cachee %1, 0x320(%0)	\n"	\
	"	cachee %1, 0x340(%0); cachee %1, 0x360(%0)	\n"	\
	"	cachee %1, 0x380(%0); cachee %1, 0x3a0(%0)	\n"	\
	"	cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache64_unroll32_user(base, op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x040(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x0c0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x140(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x1c0(%0)	\n"	\
	"	cachee %1, 0x200(%0); cachee %1, 0x240(%0)	\n"	\
	"	cachee %1, 0x280(%0); cachee %1, 0x2c0(%0)	\n"	\
	"	cachee %1, 0x300(%0); cachee %1, 0x340(%0)	\n"	\
	"	cachee %1, 0x380(%0); cachee %1, 0x3c0(%0)	\n"	\
	"	cachee %1, 0x400(%0); cachee %1, 0x440(%0)	\n"	\
	"	cachee %1, 0x480(%0); cachee %1, 0x4c0(%0)	\n"	\
	"	cachee %1, 0x500(%0); cachee %1, 0x540(%0)	\n"	\
	"	cachee %1, 0x580(%0); cachee %1, 0x5c0(%0)	\n"	\
	"	cachee %1, 0x600(%0); cachee %1, 0x640(%0)	\n"	\
	"	cachee %1, 0x680(%0); cachee %1, 0x6c0(%0)	\n"	\
	"	cachee %1, 0x700(%0); cachee %1, 0x740(%0)	\n"	\
	"	cachee %1, 0x780(%0); cachee %1, 0x7c0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)	\
static inline void extra##blast_##pfx##cache##lsize(void)		\
{									\
	unsigned long start = INDEX_BASE;				\
	unsigned long end = start + current_cpu_data.desc.waysize;	\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	__##pfx##flush_prologue						\
									\
	do {								\
		cache##lsize##_unroll32(start, hitop);			\
		start += lsize * 32;					\
	} while (start < end);						\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{									\
	unsigned long indexmask = current_cpu_data.desc.waysize - 1;	\
	unsigned long start = INDEX_BASE + (page & indexmask);		\
	unsigned long end = start + PAGE_SIZE;				\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
									\
	__##pfx##flush_epilogue						\
}
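
/*
 * For illustration: each __BUILD_BLAST_CACHE() line below emits three
 * inlines.  The (d, dcache, ..., 32, ) instantiation, for example, yields
 *
 *	blast_dcache32()		  - writeback & invalidate the whole
 *					    D-cache by index
 *	blast_dcache32_page(page)	  - hit-flush one page of virtual
 *					    addresses
 *	blast_dcache32_page_indexed(page) - flush a page's worth of indexes
 *					    in every way
 *
 * for a D-cache with 32-byte lines.
 */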

__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )

#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	__##pfx##flush_prologue						\
									\
	do {								\
		cache##lsize##_unroll32_user(start, hitop);		\
		start += lsize * 32;					\
	} while (start < end);						\
									\
	__##pfx##flush_epilogue						\
}
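
/*
 * For illustration: the *_user_page helpers generated below use the EVA
 * cachee instruction (see cacheNN_unroll32_user above), so they can operate
 * on a user virtual address directly from kernel mode, e.g.
 *
 *	blast_dcache32_user_page(page);	writeback & invalidate one user page
 *	blast_icache32_user_page(page);	invalidate its I-cache lines
 *
 * where "page" is a page-aligned user virtual address.
 */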

__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 16)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 32)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 64)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)

/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)	\
static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
						    unsigned long end)	\
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	__##pfx##flush_prologue						\
									\
	while (1) {							\
		prot##cache_op(hitop, addr);				\
		if (addr == aend)					\
			break;						\
		addr += lsize;						\
	}								\
									\
	__##pfx##flush_epilogue						\
}
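
/*
 * Usage sketch (illustrative only): the range helpers operate at cache-line
 * granularity and treat "end" as exclusive.  Writing back a kernel buffer
 * before DMA, or invalidating user instructions that may fault, could look
 * like
 *
 *	blast_dcache_range(buf, buf + len);
 *	protected_blast_icache_range(ustart, uend);
 *
 * where buf/len and ustart/uend are hypothetical.
 */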

#ifndef CONFIG_EVA

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )

#else

#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop)		\
static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
							unsigned long end) \
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	__##pfx##flush_prologue						\
									\
	if (segment_eq(get_fs(), USER_DS)) {				\
		while (1) {						\
			protected_cachee_op(hitop, addr);		\
			if (addr == aend)				\
				break;					\
			addr += lsize;					\
		}							\
	} else {							\
		while (1) {						\
			protected_cache_op(hitop, addr);		\
			if (addr == aend)				\
				break;					\
			addr += lsize;					\
		}							\
									\
	}								\
	__##pfx##flush_epilogue						\
}

__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)

#endif
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
	protected_, loongson2_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )

#endif /* _ASM_R4KCACHE_H */