xref: /openbmc/linux/arch/mips/mm/c-r4k.c (revision f7777dcc)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
#include <asm/traps.h>
#include <asm/dma-coherence.h>

/*
 * Special variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o Collapses to a normal function call on UP kernels
 *  o Collapses to a normal function call on systems with a single shared
 *    primary cache.
 *  o Doesn't disable interrupts on the local CPU
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
{
	preempt_disable();

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	smp_call_function(func, info, 1);
#endif
	func(info);
	preempt_enable();
}
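
/*
 * A note on cpu_has_safe_index_cacheops below: index-based cache ops
 * act on whatever line happens to sit at a given index and bypass the
 * coherence protocol, so on CMP systems another core may own the line
 * being hit.  That is, presumably, why indexed ops are only treated as
 * safe when this is not a coherent-multiprocessing (CMP) kernel.
 */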

#if defined(CONFIG_MIPS_CMP)
#define cpu_has_safe_index_cacheops 0
#else
#define cpu_has_safe_index_cacheops 1
#endif

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)
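
/*
 * A note on the workaround above: the R4600 v2.x erratum is dodged by a
 * dummy uncached (CKSEG1) load issued before the Hit cacheop, and the
 * v1.x variant by padding with nops; the WAR constants that enable
 * either path come from <asm/war.h>.
 */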

static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache64_page(addr);
}

static void r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
	else if (dc_lsize == 64)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
}

static void (*r4k_blast_dcache_page_indexed)(unsigned long addr);

static void r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page_indexed = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
	else if (dc_lsize == 64)
		r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
}

void (*r4k_blast_dcache)(void);
EXPORT_SYMBOL(r4k_blast_dcache);

static void r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
	else if (dc_lsize == 64)
		r4k_blast_dcache = blast_dcache64;
}
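
/*
 * All of the *_setup() routines in this file follow the same pattern:
 * probe the cache line size once at boot and bind a function pointer to
 * the matching blast_*() flavour, so the hot paths pay no per-call size
 * checks.  A hypothetical call sequence, for illustration only:
 *
 *	r4k_blast_dcache_page_setup();
 *	r4k_blast_dcache_page(addr);	(dispatches to e.g. blast_dcache32_page)
 */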

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order) \
	__asm__ __volatile__( \
		"b\t1f\n\t" \
		".align\t" #order "\n\t" \
		"1:\n\t" \
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
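
/*
 * Rationale for the TX49 workaround, as far as the code below goes: the
 * invalidation loop itself executes from the I-cache, so it must never
 * invalidate the 1kB chunk it is currently running in.  The alignment
 * macros above pin the loop into a known chunk, and each loop below then
 * blasts only the opposite (odd or even) chunks.
 */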

static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (*r4k_blast_icache_page)(unsigned long addr);

static void r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
}


static void (*r4k_blast_icache_page_indexed)(unsigned long addr);

static void r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page_indexed = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

void (*r4k_blast_icache)(void);
EXPORT_SYMBOL(r4k_blast_icache);

static void r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
}

static void (*r4k_blast_scache_page)(unsigned long addr);

static void r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}

static void (*r4k_blast_scache_page_indexed)(unsigned long addr);

static void r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (*r4k_blast_scache)(void);

static void r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}

static inline void local_r4k___flush_cache_all(void *args)
{
#if defined(CONFIG_CPU_LOONGSON2)
	r4k_blast_scache();
	return;
#endif
	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_type()) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		r4k_blast_scache();
	}
}

static void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
}

static inline int has_valid_asid(const struct mm_struct *mm)
{
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
	int i;

	for_each_online_cpu(i)
		if (cpu_context(i, mm))
			return 1;

	return 0;
#else
	return cpu_context(smp_processor_id(), mm);
#endif
}
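
/*
 * Note on has_valid_asid(): on MT SMP kernels the mm may be live on any
 * online CPU, hence the scan over every cpu_context() above; on other
 * configurations a nonzero context on the local CPU is evidence enough
 * that the mm has ever owned an ASID (and so may have lines cached).
 */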

static void r4k__flush_cache_vmap(void)
{
	r4k_blast_dcache();
}

static void r4k__flush_cache_vunmap(void)
{
	r4k_blast_dcache();
}

static inline void local_r4k_flush_cache_range(void *args)
{
	struct vm_area_struct *vma = args;
	int exec = vma->vm_flags & VM_EXEC;

	if (!(has_valid_asid(vma->vm_mm)))
		return;

	r4k_blast_dcache();
	if (exec)
		r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	int exec = vma->vm_flags & VM_EXEC;

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
		r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
}

static inline void local_r4k_flush_cache_mm(void *args)
{
	struct mm_struct *mm = args;

	if (!has_valid_asid(mm))
		return;

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if
	 * we only flush the primary caches, while R10000 and R12000 behave
	 * sanely.  R4000SC and R4400SC indexed S-cache ops also invalidate
	 * primary caches, so we can bail out early.
	 */
	if (current_cpu_type() == CPU_R4000SC ||
	    current_cpu_type() == CPU_R4000MC ||
	    current_cpu_type() == CPU_R4400SC ||
	    current_cpu_type() == CPU_R4400MC) {
		r4k_blast_scache();
		return;
	}

	r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
}

struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	struct page *page = pfn_to_page(fcp_args->pfn);
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	int map_coherent = 0;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	void *vaddr;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (!has_valid_asid(mm))
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_present(*ptep)))
		return;

	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
		vaddr = NULL;
	else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * an ASID other than the current one.
		 */
		map_coherent = (cpu_has_dc_aliases &&
				page_mapped(page) && !Page_dcache_dirty(page));
		if (map_coherent)
			vaddr = kmap_coherent(page, addr);
		else
			vaddr = kmap_atomic(page);
		addr = (unsigned long)vaddr;
	}

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		r4k_blast_dcache_page(addr);
		if (exec && !cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
	}
	if (exec) {
		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			r4k_blast_icache_page(addr);
	}

	if (vaddr) {
		if (map_coherent)
			kunmap_coherent();
		else
			kunmap_atomic(vaddr);
	}
}

static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;

	r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
}

static inline void local_r4k_flush_data_cache_page(void *addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	if (in_atomic())
		local_r4k_flush_data_cache_page((void *)addr);
	else
		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr);
}

struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};
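
/*
 * Ordering note for the range flush below: newly written instructions
 * sit in the D-cache, so unless I-cache fills snoop the D-cache
 * (cpu_has_ic_fills_f_dc) the D-cache lines must be written back first,
 * and only then is the I-cache range invalidated so that refetches see
 * the new code.
 */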

static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	if (!cpu_has_ic_fills_f_dc) {
		if (end - start >= dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			protected_blast_dcache_range(start, end);
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else
		protected_blast_icache_range(start, end);
}

static inline void local_r4k_flush_icache_range_ipi(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;

	local_r4k_flush_icache_range(start, end);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
	instruction_hazard();
}

#ifdef CONFIG_DMA_NONCOHERENT

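/*
 * Cache maintenance for non-coherent DMA.  By the usual convention (and,
 * presumably, the intent here): writeback+invalidate before a device
 * reads or writes a buffer, plain invalidate when only the device has
 * written and the CPU's stale lines merely need discarding.
 */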
static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	preempt_disable();
	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		preempt_enable();
		__sync();
		return;
	}

	/*
	 * Either there is no secondary cache, or the available caches don't
	 * have the subset property, so we have to flush the primary caches
	 * explicitly.
	 */
	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}
	preempt_enable();

	bc_wback_inv(addr, size);
	__sync();
}

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	preempt_disable();
	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else {
			/*
			 * There is no clearly documented alignment requirement
			 * for the cache instruction on MIPS processors, and
			 * some processors, among them the QED RM5200 and
			 * RM7000, will throw an address error for cache hit
			 * ops with insufficient alignment.  This is solved by
			 * aligning the address to the cache line size.
			 */
			blast_inv_scache_range(addr, addr + size);
		}
		preempt_enable();
		__sync();
		return;
	}

	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_inv_dcache_range(addr, addr + size);
	}
	preempt_enable();

	bc_inv(addr, size);
	__sync();
}
#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * While we're protected against bad userland addresses, we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void *arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	if (dc_lsize)
		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	if (ic_lsize)
		protected_flush_icache_line(addr & ~(ic_lsize - 1));
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la	$at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla	$at,1f\n\t"
#endif
			"cache	%0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr);
}
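
/*
 * A sketch of the expected call site, assuming the arch signal code:
 * the kernel writes a small trampoline to the user stack and then calls
 * flush_cache_sigtramp(addr) so the CPU cannot execute stale instruction
 * bytes left in the I-cache for that line.
 */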

static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}

struct flush_kernel_vmap_range_args {
	unsigned long	vaddr;
	int		size;
};

static inline void local_r4k_flush_kernel_vmap_range(void *args)
{
	struct flush_kernel_vmap_range_args *vmra = args;
	unsigned long vaddr = vmra->vaddr;
	int size = vmra->size;

	/*
	 * Aliases only affect the primary caches, so don't bother with
	 * S-caches or T-caches.
	 */
	if (cpu_has_safe_index_cacheops && size >= dcache_size)
		r4k_blast_dcache();
	else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(vaddr, vaddr + size);
	}
}

static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
	struct flush_kernel_vmap_range_args args;

	args.vaddr = (unsigned long) vaddr;
	args.size = size;

	r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
}

static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}

static inline void alias_74k_erratum(struct cpuinfo_mips *c)
{
	unsigned int imp = c->processor_id & PRID_IMP_MASK;
	unsigned int rev = c->processor_id & PRID_REV_MASK;

	/*
	 * Early versions of the 74K do not update the cache tags on a
	 * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
	 * aliases.  In this case it is better to treat the cache as always
	 * having aliases.
	 */
	switch (imp) {
	case PRID_IMP_74K:
		if (rev <= PRID_REV_ENCODE_332(2, 4, 0))
			c->dcache.flags |= MIPS_CACHE_VTAG;
		if (rev == PRID_REV_ENCODE_332(2, 4, 0))
			write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
		break;
	case PRID_IMP_1074K:
		if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) {
			c->dcache.flags |= MIPS_CACHE_VTAG;
			write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
		}
		break;
	default:
		BUG();
	}
}

static char *way_string[] = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};

static void probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;

	switch (current_cpu_type()) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_VR4133:
		write_c0_config(config & ~VR41_CONF_P4K);
		/* fall through */
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config |= 0x00400000U;
			if (c->processor_id == 0x0c80U)
				config |= VR41_CONF_BP;
			write_c0_config(config);
		} else
			c->options |= MIPS_CPU_CACHE_CDEX_P;

		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);
		break;

	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_RM7000:
		rm7k_erratum31();

		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_LOONGSON2:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		if (prid & 0x3)
			c->icache.ways = 4;
		else
			c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		if (prid & 0x3)
			c->dcache.ways = 4;
		else
			c->dcache.ways = 2;
		c->dcache.waybit = 0;
		break;

	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU.
		 * Let's probe the I-cache ...
		 */
		config1 = read_c0_config1();
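
		/*
		 * Worked example for the Config1 decode below (field layout
		 * per the MIPS32/64 PRA): IL (bits 21:19) gives the line
		 * size as 2 << IL, IS (bits 24:22) the sets per way, and
		 * IA (bits 18:16) the associativity minus one.  E.g.
		 * IL=4, IS=2, IA=3 means 32-byte lines, 32 << ((2+1) & 7)
		 * = 256 sets and 4 ways: a 32kB I-cache.
		 */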

		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
			      c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & 0x8)		/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = lsize;
		c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
			      c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}

	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * of getting a VCE exception anymore, so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping hardware in the "bad"
	 * configuration.
	 */
	if ((prid & PRID_IMP_MASK) == PRID_IMP_R4000 &&
	    (prid & PRID_REV_MASK) < PRID_REV_R4400 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = c->icache.linesz ?
		icache_size / (c->icache.linesz * c->icache.ways) : 0;
	c->dcache.sets = c->dcache.linesz ?
		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;

	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're 32kB
	 * 2-way virtually indexed, so they'd normally suffer from aliases,
	 * but magic in the hardware deals with that for us so we don't need
	 * to take care ourselves.
	 */
	switch (current_cpu_type()) {
	case CPU_20KC:
	case CPU_25KF:
	case CPU_SB1:
	case CPU_SB1A:
	case CPU_XLR:
		c->dcache.flags |= MIPS_CACHE_PINDEX;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		break;

	case CPU_M14KC:
	case CPU_M14KEC:
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
		if (current_cpu_type() == CPU_74K)
			alias_74k_erratum(c);
		if ((read_c0_config7() & (1 << 16))) {
			/* effectively physically indexed dcache,
			   thus no virtual aliases. */
			c->dcache.flags |= MIPS_CACHE_PINDEX;
			break;
		}
		/* fall through */
	default:
		if (c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}

	switch (current_cpu_type()) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_ALCHEMY:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}

#ifdef	CONFIG_CPU_LOONGSON2
	/*
	 * LOONGSON2 has a 4-way icache, but when using an indexed cache op,
	 * one op will act on all 4 ways.
	 */
	c->icache.ways = 1;
#endif

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
	       dcache_size >> 10, way_string[c->dcache.ways],
	       (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
	       (c->dcache.flags & MIPS_CACHE_ALIASES) ?
			"cache aliases" : "no aliases",
	       c->dcache.linesz);
}

/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int probe_scache(void)
{
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &_stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}
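
	/*
	 * The sizing trick, in brief: every power-of-two offset from
	 * 'begin' now holds a valid nonzero tag.  After planting a zero
	 * (invalid) tag at 'begin' below, the first offset whose tag reads
	 * back as zero is the one that wrapped around onto the same index,
	 * i.e. the S-cache size.
	 */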

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->scache.waybit = 0;		/* does not matter */

	return 1;
}

#if defined(CONFIG_CPU_LOONGSON2)
static void __init loongson2_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	scache_size = 512*1024;
	c->scache.linesz = 32;
	c->scache.ways = 4;
	c->scache.waybit = 0;
	c->scache.waysize = scache_size / (c->scache.ways);
	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
#endif

extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);

static void setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have an S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (current_cpu_type()) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

#if defined(CONFIG_CPU_LOONGSON2)
	case CPU_LOONGSON2:
		loongson2_sc_init();
		return;
#endif
	case CPU_XLP:
		/* don't need to worry about L2, fully coherent */
		return;

	default:
		if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
				    MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
#ifdef CONFIG_MIPS_CPU_SCACHE
			if (mips_sc_init()) {
				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
				       scache_size >> 10,
				       way_string[c->scache.ways], c->scache.linesz);
			}
#else
			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
			return;
		}
		sc_present = 0;
	}

	if (!sc_present)
		return;

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write-only (and read as 0) on the
	 * early revisions of Alchemy SOCs.  It disables the bus
	 * transaction overlapping and needs to be set to fix various errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * The Au1100 errata are actually silent about this bit, so we set it
	 * just in case for those revisions that require it to be set according
	 * to the (now gone) cpu table.
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}

/* CP0 hazard avoidance. */
#define NXP_BARRIER()							\
	 __asm__ __volatile__(						\
	".set noreorder\n\t"						\
	"nop; nop; nop; nop; nop; nop;\n\t"				\
	".set reorder\n\t")

static void nxp_pr4450_fixup_config(void)
{
	unsigned long config0;

	config0 = read_c0_config();

	/* clear all three cache coherency fields */
	config0 &= ~(0x7 | (7 << 25) | (7 << 28));
	config0 |= (((_page_cachable_default >> _CACHE_SHIFT) <<  0) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 28));
	write_c0_config(config0);
	NXP_BARRIER();
}

static int cca = -1;

static int __init cca_setup(char *str)
{
	get_option(&str, &cca);

	return 0;
}

early_param("cca", cca_setup);
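
/*
 * Usage example: booting with "cca=3" on the kernel command line forces
 * CCA 3, which on most implementations means cacheable, noncoherent
 * (write-back) pages.  The exact meaning of each CCA value is CPU
 * specific, which is why out-of-range values fall back to the hardware
 * default in coherency_setup() below.
 */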

static void coherency_setup(void)
{
	if (cca < 0 || cca > 7)
		cca = read_c0_config() & CONF_CM_CMASK;
	_page_cachable_default = cca << _CACHE_SHIFT;

	pr_debug("Using cache attribute %d\n", cca);
	change_c0_config(CONF_CM_CMASK, cca);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cacheable
	 * coherent update on write will be used.  Not all processors have
	 * this bit; some wire it to zero, others like Toshiba had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	/*
	 * We need to catch the early Alchemy SOCs with
	 * the write-only c0_config.od bit and set it back to one on:
	 * Au1000 rev DA, HA, HB;  Au1100 AB, BA, BC, Au1500 AB
	 */
	case CPU_ALCHEMY:
		au1x00_fixup_config_od();
		break;

	case PRID_IMP_PR4450:
		nxp_pr4450_fixup_config();
		break;
	}
}

static void r4k_cache_error_setup(void)
{
	extern char __weak except_vec2_generic;
	extern char __weak except_vec2_sb1;

	switch (current_cpu_type()) {
	case CPU_SB1:
	case CPU_SB1A:
		set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
		break;

	default:
		set_uncached_handler(0x100, &except_vec2_generic, 0x80);
		break;
	}
}

void r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	struct cpuinfo_mips *c = &current_cpu_data;

	probe_pcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	if (c->dcache.linesz)
		shm_align_mask = max_t(unsigned long,
				       c->dcache.sets * c->dcache.linesz - 1,
				       PAGE_SIZE - 1);
	else
		shm_align_mask = PAGE_SIZE - 1;
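
	/*
	 * Illustration, assuming a 32kB 4-way D-cache with 32-byte lines
	 * and 4kB pages: sets * linesz = 8kB of way size, so shm_align_mask
	 * becomes 0x1fff and shared mappings are colour-aligned to 8kB,
	 * preventing virtual aliases between them.
	 */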

	__flush_cache_vmap	= r4k__flush_cache_vmap;
	__flush_cache_vunmap	= r4k__flush_cache_vunmap;

	flush_cache_all		= cache_noop;
	__flush_cache_all	= r4k___flush_cache_all;
	flush_cache_mm		= r4k_flush_cache_mm;
	flush_cache_page	= r4k_flush_cache_page;
	flush_cache_range	= r4k_flush_cache_range;

	__flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;

	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
	flush_icache_all	= r4k_flush_icache_all;
	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
	flush_data_cache_page	= r4k_flush_data_cache_page;
	flush_icache_range	= r4k_flush_icache_range;
	local_flush_icache_range	= local_r4k_flush_icache_range;

#if defined(CONFIG_DMA_NONCOHERENT)
	if (coherentio) {
		_dma_cache_wback_inv	= (void *)cache_noop;
		_dma_cache_wback	= (void *)cache_noop;
		_dma_cache_inv		= (void *)cache_noop;
	} else {
		_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
		_dma_cache_wback	= r4k_dma_cache_wback_inv;
		_dma_cache_inv		= r4k_dma_cache_inv;
	}
#endif

	build_clear_page();
	build_copy_page();

	/*
	 * We want to run CMP kernels on cores with and without coherent
	 * caches.  Therefore, do not use CONFIG_MIPS_CMP to decide whether
	 * or not to flush caches.
	 */
	local_r4k___flush_cache_all(NULL);

	coherency_setup();
	board_cache_error_setup = r4k_cache_error_setup;
}