xref: /openbmc/linux/arch/mips/mm/c-r4k.c (revision 545e4006)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */

/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
                                   int wait)
{
	preempt_disable();

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	smp_call_function(func, info, wait);
#endif
	func(info);
	preempt_enable();
}

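/*
 * Index-type cacheops pick a cache line directly from address bits and
 * bypass the coherence protocol, so on CMP systems the line they hit may
 * be live in another core's cache; presumably that is why they are not
 * treated as safe there.
 */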
#if defined(CONFIG_MIPS_CMP)
#define cpu_has_safe_index_cacheops 0
#else
#define cpu_has_safe_index_cacheops 1
#endif

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

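/*
 * Per the R4600 errata (see <asm/war.h>): V2.x parts need a load from
 * uncached space to empty the cache refill buffer before a Hit-type
 * cacheop works reliably (hence the CKSEG1 read below), while V1.x parts
 * apparently just need a few cycles of padding (the nops).
 */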
#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)

static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static void __cpuinit r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
}

static void (*r4k_blast_dcache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page_indexed = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
}

static void (*r4k_blast_dcache)(void);

static void __cpuinit r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order) \
	__asm__ __volatile__( \
		"b\t1f\n\t" \
		".align\t" #order "\n\t" \
		"1:\n\t" \
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)
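/*
 * One cache32_unroll32() pass touches 32 lines of 32 bytes, i.e. one 1kB
 * chunk of the icache.  JUMP_TO_ALIGN() above pins the code that follows
 * into a known even or odd 1kB chunk, so the TX49 routines below can
 * always invalidate the chunks they are not executing from -- the indexed
 * invalidate apparently misbehaves when it hits the icache line holding
 * the code that issues it.
 */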

static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (*r4k_blast_icache_page)(unsigned long addr);

static void __cpuinit r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
}

static void (*r4k_blast_icache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page_indexed = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

static void (*r4k_blast_icache)(void);

static void __cpuinit r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
}

static void (*r4k_blast_scache_page)(unsigned long addr);

static void __cpuinit r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}

static void (*r4k_blast_scache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (*r4k_blast_scache)(void);

static void __cpuinit r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}

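/*
 * Loongson2 takes a shortcut: its S-cache ops also cover the primary
 * caches (loongson2_sc_init() sets MIPS_CPU_INCLUSIVE_CACHES), so
 * blasting the S-cache is enough.  The CPUs in the switch below have an
 * R4000-style S-cache that primary cacheops do not cover, so it gets
 * blasted separately after the primaries.
 */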
static inline void local_r4k___flush_cache_all(void *args)
{
#if defined(CONFIG_CPU_LOONGSON2)
	r4k_blast_scache();
	return;
#endif
	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_type()) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		r4k_blast_scache();
	}
}

static void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1);
}

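/*
 * cpu_context() is non-zero only once the mm has been given an ASID on
 * that CPU -- only then can it have pulled anything into the caches.
 * Under MT_SMP/SMTC the VPEs share their primary caches, so the mm
 * counts as live if it holds an ASID on any online CPU.
 */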
static inline int has_valid_asid(const struct mm_struct *mm)
{
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
	int i;

	for_each_online_cpu(i)
		if (cpu_context(i, mm))
			return 1;

	return 0;
#else
	return cpu_context(smp_processor_id(), mm);
#endif
}

static void r4k__flush_cache_vmap(void)
{
	r4k_blast_dcache();
}

static void r4k__flush_cache_vunmap(void)
{
	r4k_blast_dcache();
}

static inline void local_r4k_flush_cache_range(void *args)
{
	struct vm_area_struct *vma = args;
	int exec = vma->vm_flags & VM_EXEC;

	if (!has_valid_asid(vma->vm_mm))
		return;

	r4k_blast_dcache();
	if (exec)
		r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	int exec = vma->vm_flags & VM_EXEC;

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
		r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1);
}

static inline void local_r4k_flush_cache_mm(void *args)
{
	struct mm_struct *mm = args;

	if (!has_valid_asid(mm))
		return;

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if we
	 * only flush the primary caches but R10000 and R12000 behave sanely ...
	 * R4000SC and R4400SC indexed S-cache ops also invalidate primary
	 * caches, so we can bail out early.
	 */
	if (current_cpu_type() == CPU_R4000SC ||
	    current_cpu_type() == CPU_R4000MC ||
	    current_cpu_type() == CPU_R4400SC ||
	    current_cpu_type() == CPU_R4400MC) {
		r4k_blast_scache();
		return;
	}

	r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1);
}

struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	struct page *page = pfn_to_page(fcp_args->pfn);
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	int map_coherent = 0;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	void *vaddr;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (!has_valid_asid(mm))
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!pte_present(*ptep))
		return;

	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
		vaddr = NULL;
	else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * another ASID than the current one.
		 */
		map_coherent = (cpu_has_dc_aliases &&
				page_mapped(page) && !Page_dcache_dirty(page));
		if (map_coherent)
			vaddr = kmap_coherent(page, addr);
		else
			vaddr = kmap_atomic(page, KM_USER0);
		addr = (unsigned long)vaddr;
	}

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		r4k_blast_dcache_page(addr);
		if (exec && !cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
	}
	if (exec) {
		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			r4k_blast_icache_page(addr);
	}

	if (vaddr) {
		if (map_coherent)
			kunmap_coherent();
		else
			kunmap_atomic(vaddr, KM_USER0);
	}
}

static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;

	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1);
}

static inline void local_r4k_flush_data_cache_page(void *addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

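/*
 * smp_call_function() must not be called from atomic context (it may
 * deadlock with interrupts disabled), so when in_atomic() only the
 * local CPU's dcache is flushed.
 */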
static void r4k_flush_data_cache_page(unsigned long addr)
{
	if (in_atomic())
		local_r4k_flush_data_cache_page((void *)addr);
	else
		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
			        1);
}

struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

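/*
 * Unless the icache fills straight from the dcache (cpu_has_ic_fills_f_dc),
 * newly written instructions sit in the dcache and have to be written back
 * before the icache can refetch them; hence the dcache pass below precedes
 * the icache invalidation.
 */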
static inline void local_r4k_flush_icache_range(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;

	if (!cpu_has_ic_fills_f_dc) {
		if (end - start >= dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			protected_blast_dcache_range(start, end);
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else
		protected_blast_icache_range(start, end);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1);
	instruction_hazard();
}

#ifdef CONFIG_DMA_NONCOHERENT

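/*
 * Cache maintenance for non-coherent DMA: wback_inv pushes dirty lines
 * out to memory and discards them so a device sees current data; inv
 * only discards lines so the CPU rereads what a device wrote.  Where the
 * S-cache has the subset property (cpu_has_inclusive_pcaches), operating
 * on it covers the primaries as well.
 */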
static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly
	 */
	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_wback_inv(addr, size);
}

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_inv_scache_range(addr, addr + size);
		return;
	}

	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_inv_dcache_range(addr, addr + size);
	}

	bc_inv(addr, size);
}
#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void *arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	if (dc_lsize)
		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	if (ic_lsize)
		protected_flush_icache_line(addr & ~(ic_lsize - 1));
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la	$at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla	$at,1f\n\t"
#endif
			"cache	%0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1);
}

static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}

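/*
 * RM7000 erratum #31: the icache comes up in a random state.  The loop
 * below writes invalid (zero) tags into all four ways of every index,
 * forces a Fill of each line and invalidates again, leaving every line
 * in a known state.  The 0x1000 stride steps between the 4kB ways.
 */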
static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}

static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};

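/*
 * For each cache, waysize is size / ways and waybit is __ffs(waysize):
 * the lowest address bit that selects between ways.  A 16kB 2-way icache,
 * say, has an 8kB waysize and waybit 13; the indexed blast loops OR
 * multiples of (1 << waybit) into the address to step through the ways.
 */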
static void __cpuinit probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;

	switch (c->cputype) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0; 	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_VR4133:
		write_c0_config(config & ~VR41_CONF_P4K);
		/* fall through */
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config |= 0x00400000U;
			if (c->processor_id == 0x0c80U)
				config |= VR41_CONF_BP;
			write_c0_config(config);
		} else
			c->options |= MIPS_CPU_CACHE_CDEX_P;

		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);
		break;

	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0; 	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_RM7000:
		rm7k_erratum31();
		/* fall through */
	case CPU_RM9000:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
		c->options |= MIPS_CPU_CACHE_CDEX_P;
#endif
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_LOONGSON2:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		if (prid & 0x3)
			c->icache.ways = 4;
		else
			c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		if (prid & 0x3)
			c->dcache.ways = 4;
		else
			c->dcache.ways = 2;
		c->dcache.waybit = 0;
		break;

	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * We seem to be a MIPS32 or MIPS64 CPU, so let's probe
		 * the I-cache ...
		 */
		config1 = read_c0_config1();

		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
		              c->icache.ways *
		              c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & 0x8)		/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = lsize;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
		              c->dcache.ways *
		              c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}

	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping his hardware in the "bad"
	 * configuration.
	 */
	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = c->icache.linesz ?
		icache_size / (c->icache.linesz * c->icache.ways) : 0;
	c->dcache.sets = c->dcache.linesz ?
		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;

	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're
	 * 32kB 2-way virtually indexed so normally they'd suffer from
	 * aliases, but magic in the hardware deals with that for us so we
	 * don't need to take care ourselves.
	 */
	switch (c->cputype) {
	case CPU_20KC:
	case CPU_25KF:
	case CPU_SB1:
	case CPU_SB1A:
		c->dcache.flags |= MIPS_CACHE_PINDEX;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		break;

	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
		if ((read_c0_config7() & (1 << 16))) {
			/* effectively physically indexed dcache,
			   thus no virtual aliases. */
			c->dcache.flags |= MIPS_CACHE_PINDEX;
			break;
		}
		/* fall through */
	default:
		if (c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}

	switch (c->cputype) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_AU1000:
	case CPU_AU1500:
	case CPU_AU1100:
	case CPU_AU1550:
	case CPU_AU1200:
	case CPU_AU1210:
	case CPU_AU1250:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}

#ifdef CONFIG_CPU_LOONGSON2
	/*
	 * LOONGSON2 has a 4-way icache, but when using indexed cache ops,
	 * one op will act on all 4 ways.
	 */
	c->icache.ways = 1;
#endif

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       cpu_has_vtag_icache ? "VIVT" : "VIPT",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
	       dcache_size >> 10, way_string[c->dcache.ways],
	       (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
	       (c->dcache.flags & MIPS_CACHE_ALIASES) ?
			"cache aliases" : "no aliases",
	       c->dcache.linesz);
}

/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
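/*
 * The sizing trick: load one word at every doubling stride across a 4MB
 * window so those indices hold valid non-zero tags, plant a zero tag at
 * the first index, then probe Index_Load_Tag_SD at doubling offsets.
 * The first offset whose tag reads back as zero has wrapped around to
 * index 0 again, so that offset equals the S-cache size.
 */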
static int __cpuinit probe_scache(void)
{
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;
	int tmp;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &_stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	tmp = 0;
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->scache.waybit = 0;		/* does not matter */

	return 1;
}

#if defined(CONFIG_CPU_LOONGSON2)
static void __init loongson2_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	scache_size = 512*1024;
	c->scache.linesz = 32;
	c->scache.ways = 4;
	c->scache.waybit = 0;
	c->scache.waysize = scache_size / (c->scache.ways);
	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
#endif

extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);

static void __cpuinit setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have a S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (c->cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
	case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

#if defined(CONFIG_CPU_LOONGSON2)
	case CPU_LOONGSON2:
		loongson2_sc_init();
		return;
#endif

	default:
		if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
		    c->isa_level == MIPS_CPU_ISA_M32R2 ||
		    c->isa_level == MIPS_CPU_ISA_M64R1 ||
		    c->isa_level == MIPS_CPU_ISA_M64R2) {
#ifdef CONFIG_MIPS_CPU_SCACHE
			if (mips_sc_init()) {
				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
				       scache_size >> 10,
				       way_string[c->scache.ways], c->scache.linesz);
			}
#else
			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
			return;
		}
		sc_present = 0;
	}

	if (!sc_present)
		return;

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write only (and read as 0)
	 * on the early revisions of Alchemy SOCs.  It disables the bus
	 * transaction overlapping and needs to be set to fix various errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * The Au1100 errata are actually silent about this bit, so we set
	 * it just in case for those revisions that require it to be set
	 * according to arch/mips/au1000/common/cputable.c
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}

/* CP0 hazard avoidance. */
#define NXP_BARRIER()							\
	 __asm__ __volatile__(						\
	".set noreorder\n\t"						\
	"nop; nop; nop; nop; nop; nop;\n\t"				\
	".set reorder\n\t")

static void nxp_pr4450_fixup_config(void)
{
	unsigned long config0;

	config0 = read_c0_config();

	/* clear all three cache coherency fields */
	config0 &= ~(0x7 | (7 << 25) | (7 << 28));
	config0 |= (((_page_cachable_default >> _CACHE_SHIFT) <<  0) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 28));
	write_c0_config(config0);
	NXP_BARRIER();
}

static int __cpuinitdata cca = -1;

static int __init cca_setup(char *str)
{
	get_option(&str, &cca);

	return 1;
}

__setup("cca=", cca_setup);
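/*
 * "cca=" on the kernel command line overrides the cache coherency
 * attribute used for the kernel's cacheable pages -- e.g. "cca=3" for
 * plain cacheable, noncoherent write-back on typical MIPS32 cores.
 * Values outside 0-7 fall back to whatever c0_config already holds
 * (see coherency_setup() below).
 */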

static void __cpuinit coherency_setup(void)
{
	if (cca < 0 || cca > 7)
		cca = read_c0_config() & CONF_CM_CMASK;
	_page_cachable_default = cca << _CACHE_SHIFT;

	pr_debug("Using cache attribute %d\n", cca);
	change_c0_config(CONF_CM_CMASK, cca);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cachable
	 * coherent update on write will be used.  Not all processors have
	 * this bit; some wire it to zero, others like Toshiba had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	/*
	 * We need to catch the early Alchemy SOCs with
	 * the write-only c0_config.od bit and set it back to one...
	 */
	case CPU_AU1000: /* rev. DA, HA, HB */
	case CPU_AU1100: /* rev. AB, BA, BC ?? */
	case CPU_AU1500: /* rev. AB */
		au1x00_fixup_config_od();
		break;

	case PRID_IMP_PR4450:
		nxp_pr4450_fixup_config();
		break;
	}
}

#if defined(CONFIG_DMA_NONCOHERENT)

static int __cpuinitdata coherentio;

static int __init setcoherentio(char *str)
{
	coherentio = 1;

	return 1;
}

__setup("coherentio", setcoherentio);
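/*
 * Booting with "coherentio" declares DMA hardware-coherent; in that
 * case r4k_cache_init() below wires all three _dma_cache_* hooks to
 * cache_noop.
 */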
#endif

void __cpuinit r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	extern char __weak except_vec2_generic;
	extern char __weak except_vec2_sb1;
	struct cpuinfo_mips *c = &current_cpu_data;

	switch (c->cputype) {
	case CPU_SB1:
	case CPU_SB1A:
		set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
		break;

	default:
		set_uncached_handler(0x100, &except_vec2_generic, 0x80);
		break;
	}

	probe_pcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	if (c->dcache.linesz)
		shm_align_mask = max_t(unsigned long,
				       c->dcache.sets * c->dcache.linesz - 1,
				       PAGE_SIZE - 1);
	else
		shm_align_mask = PAGE_SIZE - 1;

	__flush_cache_vmap	= r4k__flush_cache_vmap;
	__flush_cache_vunmap	= r4k__flush_cache_vunmap;

	flush_cache_all		= cache_noop;
	__flush_cache_all	= r4k___flush_cache_all;
	flush_cache_mm		= r4k_flush_cache_mm;
	flush_cache_page	= r4k_flush_cache_page;
	flush_cache_range	= r4k_flush_cache_range;

	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
	flush_icache_all	= r4k_flush_icache_all;
	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
	flush_data_cache_page	= r4k_flush_data_cache_page;
	flush_icache_range	= r4k_flush_icache_range;

#if defined(CONFIG_DMA_NONCOHERENT)
	if (coherentio) {
		_dma_cache_wback_inv	= (void *)cache_noop;
		_dma_cache_wback	= (void *)cache_noop;
		_dma_cache_inv		= (void *)cache_noop;
	} else {
		_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
		_dma_cache_wback	= r4k_dma_cache_wback_inv;
		_dma_cache_inv		= r4k_dma_cache_inv;
	}
#endif

	build_clear_page();
	build_copy_page();
#if !defined(CONFIG_MIPS_CMP)
	local_r4k___flush_cache_all(NULL);
#endif
	coherency_setup();
}
1398