/* arch/mips/mm/c-r4k.c (xref of revision f42b3800) */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */


/*
 * Special variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
                                   int retry, int wait)
{
	preempt_disable();

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	smp_call_function(func, info, retry, wait);
#endif
	func(info);
	preempt_enable();
}
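
/*
 * Usage sketch (illustrative; local_r4k_foo is a made-up name): the cache
 * maintenance entry points below are split into a local_r4k_*() worker
 * that runs on one CPU and a wrapper that fans it out:
 *
 *	static void local_r4k_foo(void *args) { ... }
 *	r4k_on_each_cpu(local_r4k_foo, &args, 1, 1);
 */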

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

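/*
 * R4600 hit-type cacheop workarounds (see asm/war.h for the erratum
 * details): on V2.x cores the uncached CKSEG1 load below is meant to
 * drain the refill buffer before a Hit cacheop is issued; on V1.x cores
 * the nops pad the pipeline instead.
 */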
#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)

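/*
 * The r4k_blast_* entry points are function pointers bound once, in the
 * *_setup() routines below, to the variant matching the probed cache
 * line size, so the line size is not re-tested on every flush.
 */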
static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static void __cpuinit r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
}

static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page_indexed = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
}

static void (* r4k_blast_dcache)(void);

static void __cpuinit r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order) \
	__asm__ __volatile__( \
		"b\t1f\n\t" \
		".align\t" #order "\n\t" \
		"1:\n\t" \
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)
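
/*
 * cache32_unroll32() invalidates 32 lines of 32 bytes, i.e. 1 kB of
 * cache per call.  Aligning the blasting loops to a 1 kB (2^10) or
 * 2 kB (2^11) boundary keeps the loop's own code inside a single such
 * chunk, so the loop never invalidates the instructions it is currently
 * executing (the TX49XX_ICACHE_INDEX_INV_WAR case below).
 */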

static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in an even chunk.  Blast the odd chunks. */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in an odd chunk.  Blast the even chunks. */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in an even chunk.  Blast the odd chunks. */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in an odd chunk.  Blast the even chunks. */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (* r4k_blast_icache_page)(unsigned long addr);

static void __cpuinit r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
}

static void (* r4k_blast_icache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page_indexed = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

static void (* r4k_blast_icache)(void);

static void __cpuinit r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
}

static void (* r4k_blast_scache_page)(unsigned long addr);

static void __cpuinit r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}

static void (* r4k_blast_scache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (* r4k_blast_scache)(void);

static void __cpuinit r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}

static inline void local_r4k___flush_cache_all(void * args)
{
#if defined(CONFIG_CPU_LOONGSON2)
	r4k_blast_scache();
	return;
#endif
	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_type()) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		r4k_blast_scache();
	}
}

static void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
}

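/*
 * On MT SMP/SMTC kernels a task may have run, and left cache lines
 * behind, on any CPU, so an mm counts as live if it holds an ASID on
 * any online CPU rather than just the local one.
 */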
static inline int has_valid_asid(const struct mm_struct *mm)
{
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
	int i;

	for_each_online_cpu(i)
		if (cpu_context(i, mm))
			return 1;

	return 0;
#else
	return cpu_context(smp_processor_id(), mm);
#endif
}

static void r4k__flush_cache_vmap(void)
{
	r4k_blast_dcache();
}

static void r4k__flush_cache_vunmap(void)
{
	r4k_blast_dcache();
}

static inline void local_r4k_flush_cache_range(void * args)
{
	struct vm_area_struct *vma = args;
	int exec = vma->vm_flags & VM_EXEC;

	if (!(has_valid_asid(vma->vm_mm)))
		return;

	r4k_blast_dcache();
	if (exec)
		r4k_blast_icache();
}

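/*
 * A cross-CPU flush is only needed when the D-cache can alias (a cache
 * way larger than a page) or when executable mappings changed and the
 * I-cache does not snoop D-cache fills; otherwise the range flush is a
 * no-op.
 */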
static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	int exec = vma->vm_flags & VM_EXEC;

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
		r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
}

static inline void local_r4k_flush_cache_mm(void * args)
{
	struct mm_struct *mm = args;

	if (!has_valid_asid(mm))
		return;

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if we
	 * only flush the primary caches, while R10000 and R12000 behave sanely.
	 * R4000SC and R4400SC indexed S-cache ops also invalidate primary
	 * caches, so we can bail out early.
	 */
	if (current_cpu_type() == CPU_R4000SC ||
	    current_cpu_type() == CPU_R4000MC ||
	    current_cpu_type() == CPU_R4400SC ||
	    current_cpu_type() == CPU_R4400MC) {
		r4k_blast_scache();
		return;
	}

	r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
}

struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	struct page *page = pfn_to_page(fcp_args->pfn);
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	void *vaddr;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (!has_valid_asid(mm))
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_present(*ptep)))
		return;

	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
		vaddr = NULL;
	else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * an ASID other than the current one.
		 */
		if (cpu_has_dc_aliases)
			vaddr = kmap_coherent(page, addr);
		else
			vaddr = kmap_atomic(page, KM_USER0);
		addr = (unsigned long)vaddr;
	}

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		r4k_blast_dcache_page(addr);
	}
	if (exec) {
		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			r4k_blast_icache_page(addr);
	}

	if (vaddr) {
		if (cpu_has_dc_aliases)
			kunmap_coherent();
		else
			kunmap_atomic(vaddr, KM_USER0);
	}
}

static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;

	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}

static inline void local_r4k_flush_data_cache_page(void * addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

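/*
 * smp_call_function() must not be called from atomic context, so when
 * flushing in atomic context only the local CPU is handled; callers
 * there are expected to only care about the local view of the page.
 */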
static void r4k_flush_data_cache_page(unsigned long addr)
{
	if (in_atomic())
		local_r4k_flush_data_cache_page((void *)addr);
	else
		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
			        1, 1);
}

struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

static inline void local_r4k_flush_icache_range(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;

	if (!cpu_has_ic_fills_f_dc) {
		if (end - start >= dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			protected_blast_dcache_range(start, end);
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else
		protected_blast_icache_range(start, end);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
	instruction_hazard();
}

#ifdef CONFIG_DMA_NONCOHERENT

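/*
 * These two helpers back the _dma_cache_* hooks used by the DMA mapping
 * code on noncoherent platforms: writeback+invalidate before a device
 * reads a buffer, invalidate only when the CPU will read data a device
 * has written.  bc_wback_inv()/bc_inv() apply the matching operation to
 * any board-level cache.
 */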
static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	/*
	 * Either there is no secondary cache or the available caches don't
	 * have the subset property, so we have to flush the primary caches
	 * explicitly.
	 */
	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_wback_inv(addr, size);
}

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_inv_scache_range(addr, addr + size);
		return;
	}

	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_inv_dcache_range(addr, addr + size);
	}

	bc_inv(addr, size);
}
#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * While we're protected against bad userland addresses, we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void * arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	if (dc_lsize)
		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	if (ic_lsize)
		protected_flush_icache_line(addr & ~(ic_lsize - 1));
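	/*
	 * The workaround below invalidates its own code at label 1 and
	 * pads with nops; see MIPS4K_ICACHE_REFILL_WAR in asm/war.h for
	 * the erratum this guards against.
	 */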
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la	$at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla	$at,1f\n\t"
#endif
			"cache	%0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
}

static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}

static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}

static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};

static void __cpuinit probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;

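	/*
	 * Most of the classic CPUs below decode their geometry from the
	 * R4000-style config register: IC/DC give the cache size as a
	 * power of two (in 4 kB granules, hence "12 +"; the VR41xx
	 * family uses 1 kB granules, hence "10 +") and IB/DB select a
	 * 16- or 32-byte line.  E.g. IC = 2 yields 1 << (12 + 2) = 16 kB
	 * of icache.
	 */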
	switch (c->cputype) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_VR4133:
		write_c0_config(config & ~VR41_CONF_P4K);
		/* fall through */
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config |= 0x00400000U;
			if (c->processor_id == 0x0c80U)
				config |= VR41_CONF_BP;
			write_c0_config(config);
		} else
			c->options |= MIPS_CPU_CACHE_CDEX_P;

		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);
		break;

	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_RM7000:
		rm7k_erratum31();
		/* fall through */
	case CPU_RM9000:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
		c->options |= MIPS_CPU_CACHE_CDEX_P;
#endif
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_LOONGSON2:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		if (prid & 0x3)
			c->icache.ways = 4;
		else
			c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		if (prid & 0x3)
			c->dcache.ways = 4;
		else
			c->dcache.ways = 2;
		c->dcache.waybit = 0;
		break;

	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU.
		 * Let's probe the I-cache ...
		 */
		config1 = read_c0_config1();

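		/*
		 * Architecturally defined config1 layout: IS/IL/IA live in
		 * bits [24:22]/[21:19]/[18:16] and DS/DL/DA in bits
		 * [15:13]/[12:10]/[9:7].  Line size is 2 << IL (IL = 0
		 * meaning no cache), sets per way 64 << IS, associativity
		 * 1 + IA.
		 */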
		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
		              c->icache.ways *
		              c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & 0x8)		/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = lsize;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
		              c->dcache.ways *
		              c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}

	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore, so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping hardware in the "bad"
	 * configuration.
	 */
	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = c->icache.linesz ?
		icache_size / (c->icache.linesz * c->icache.ways) : 0;
	c->dcache.sets = c->dcache.linesz ?
		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;

	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're
	 * 32kB 2-way virtually indexed, so normally they'd suffer from
	 * aliases, but magic in the hardware deals with that for us so we
	 * don't need to take care ourselves.
	 */
	switch (c->cputype) {
	case CPU_20KC:
	case CPU_25KF:
	case CPU_SB1:
	case CPU_SB1A:
		c->dcache.flags |= MIPS_CACHE_PINDEX;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		break;

	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
		if ((read_c0_config7() & (1 << 16))) {
			/* effectively physically indexed dcache,
			   thus no virtual aliases. */
			c->dcache.flags |= MIPS_CACHE_PINDEX;
			break;
		}
		/* fall through */
	default:
		if (c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}

	switch (c->cputype) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_AU1000:
	case CPU_AU1500:
	case CPU_AU1100:
	case CPU_AU1550:
	case CPU_AU1200:
	case CPU_AU1210:
	case CPU_AU1250:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}

#ifdef  CONFIG_CPU_LOONGSON2
	/*
	 * LOONGSON2 has a 4-way icache, but when using an indexed cache op,
	 * one op will act on all 4 ways.
	 */
	c->icache.ways = 1;
#endif

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       cpu_has_vtag_icache ? "VIVT" : "VIPT",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
	       dcache_size >> 10, way_string[c->dcache.ways],
	       (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
	       (c->dcache.flags & MIPS_CACHE_ALIASES) ?
			"cache aliases" : "no aliases",
	       c->dcache.linesz);
}

/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
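/*
 * Probing scheme, in brief: touch memory at power-of-two offsets so the
 * corresponding lines carry valid tags, store a zero (invalid) tag at
 * the base index, then read tags back at doubling offsets.  The first
 * offset whose tag reads back as zero aliases the base index, i.e. the
 * index wrapped around, and that offset is the S-cache size.
 */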
static int __cpuinit probe_scache(void)
{
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;
	int tmp;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &_stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	tmp = 0;
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->scache.waybit = 0;		/* does not matter */

	return 1;
}

#if defined(CONFIG_CPU_LOONGSON2)
static void __init loongson2_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	scache_size = 512*1024;
	c->scache.linesz = 32;
	c->scache.ways = 4;
	c->scache.waybit = 0;
	c->scache.waysize = scache_size / (c->scache.ways);
	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
#endif

extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);

static void __cpuinit setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have an S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (c->cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
	case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

#if defined(CONFIG_CPU_LOONGSON2)
	case CPU_LOONGSON2:
		loongson2_sc_init();
		return;
#endif

	default:
		if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
		    c->isa_level == MIPS_CPU_ISA_M32R2 ||
		    c->isa_level == MIPS_CPU_ISA_M64R1 ||
		    c->isa_level == MIPS_CPU_ISA_M64R2) {
#ifdef CONFIG_MIPS_CPU_SCACHE
			if (mips_sc_init()) {
				scache_size = c->scache.ways * c->scache.sets *
					      c->scache.linesz;
				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
				       scache_size >> 10,
				       way_string[c->scache.ways], c->scache.linesz);
			}
#else
			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
			return;
		}
		sc_present = 0;
	}

	if (!sc_present)
		return;

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write-only (and read as 0) on the
	 * early revisions of Alchemy SOCs.  It disables bus transaction
	 * overlapping and needs to be set to fix various errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * The Au1100 errata are actually silent about this bit, so we set
	 * it just in case for those revisions that require it to be set
	 * according to arch/mips/au1000/common/cputable.c
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}

static void __cpuinit coherency_setup(void)
{
	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cacheable
	 * coherent update on write will be used.  Not all processors have
	 * this bit; some wire it to zero, others like Toshiba had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	/*
	 * We need to catch the early Alchemy SOCs with
	 * the write-only c0_config.od bit and set it back to one...
	 */
	case CPU_AU1000: /* rev. DA, HA, HB */
	case CPU_AU1100: /* rev. AB, BA, BC ?? */
	case CPU_AU1500: /* rev. AB */
		au1x00_fixup_config_od();
		break;
	}
}

void __cpuinit r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	extern char __weak except_vec2_generic;
	extern char __weak except_vec2_sb1;
	struct cpuinfo_mips *c = &current_cpu_data;

	switch (c->cputype) {
	case CPU_SB1:
	case CPU_SB1A:
		set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
		break;

	default:
		set_uncached_handler(0x100, &except_vec2_generic, 0x80);
		break;
	}

	probe_pcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
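	/*
	 * sets * linesz is the size of one cache way; aligning shared
	 * mappings to that granularity makes any two virtual addresses
	 * for the same page hit the same cache lines, which is what
	 * prevents aliases on virtually indexed caches.
	 */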
	if (c->dcache.linesz)
		shm_align_mask = max_t(unsigned long,
				       c->dcache.sets * c->dcache.linesz - 1,
				       PAGE_SIZE - 1);
	else
		shm_align_mask = PAGE_SIZE - 1;

	__flush_cache_vmap	= r4k__flush_cache_vmap;
	__flush_cache_vunmap	= r4k__flush_cache_vunmap;

	flush_cache_all		= cache_noop;
	__flush_cache_all	= r4k___flush_cache_all;
	flush_cache_mm		= r4k_flush_cache_mm;
	flush_cache_page	= r4k_flush_cache_page;
	flush_cache_range	= r4k_flush_cache_range;

	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
	flush_icache_all	= r4k_flush_icache_all;
	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
	flush_data_cache_page	= r4k_flush_data_cache_page;
	flush_icache_range	= r4k_flush_icache_range;

#ifdef CONFIG_DMA_NONCOHERENT
	_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
	_dma_cache_wback	= r4k_dma_cache_wback_inv;
	_dma_cache_inv		= r4k_dma_cache_inv;
#endif

	build_clear_page();
	build_copy_page();
	local_r4k___flush_cache_all(NULL);
	coherency_setup();
}