xref: /openbmc/linux/arch/mips/mm/cache.c (revision e868d61272caa648214046a096e5a6bfc068dc8c)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>

/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);

/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void * addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);

#ifdef CONFIG_DMA_NONCOHERENT

/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

EXPORT_SYMBOL(_dma_cache_wback_inv);
EXPORT_SYMBOL(_dma_cache_wback);
EXPORT_SYMBOL(_dma_cache_inv);

#endif /* CONFIG_DMA_NONCOHERENT */
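
/*
 * For context: on non-coherent platforms the main consumers of the three
 * hooks above are the DMA mapping routines, which pick one of them based on
 * the direction of the transfer.  A minimal sketch of such a caller follows;
 * the helper name is made up for illustration, and the real dispatch lives
 * in the MIPS DMA mapping code, not in this file:
 *
 *	static void example_dma_sync(unsigned long addr, size_t size,
 *		enum dma_data_direction direction)
 *	{
 *		switch (direction) {
 *		case DMA_TO_DEVICE:		// CPU wrote, device will read
 *			_dma_cache_wback(addr, size);
 *			break;
 *		case DMA_FROM_DEVICE:		// device wrote, CPU will read
 *			_dma_cache_inv(addr, size);
 *			break;
 *		case DMA_BIDIRECTIONAL:		// write back, then invalidate
 *			_dma_cache_wback_inv(addr, size);
 *			break;
 *		default:
 *			BUG();
 *		}
 *	}
 */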

/*
 * We could optimize the case where the cache argument is not BCACHE, but
 * that seems to be a very atypical use ...
 */
asmlinkage int sys_cacheflush(unsigned long addr,
	unsigned long bytes, unsigned int cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
		return -EFAULT;

	flush_icache_range(addr, addr + bytes);

	return 0;
}
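
/*
 * User space reaches this through the cacheflush(2) wrapper.  A minimal
 * usage sketch, assuming the C library wrapper and the ICACHE/DCACHE/BCACHE
 * constants from <sys/cachectl.h> (none of this is part of the kernel file):
 *
 *	#include <stdio.h>
 *	#include <sys/cachectl.h>
 *
 *	// Make freshly written machine code visible to instruction fetch.
 *	static void flush_generated_code(void *buf, int len)
 *	{
 *		// BCACHE flushes both the D-cache and the I-cache, which is
 *		// what JIT / self-modifying code normally wants.
 *		if (cacheflush(buf, len, BCACHE) < 0)
 *			perror("cacheflush");
 *	}
 */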

void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (PageHighMem(page))
		return;
	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certain to get
	 * faulted into the TLB (and thus flushed) anyway.
	 */
	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}

EXPORT_SYMBOL(__flush_dcache_page);

void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	if (pages_do_alias((unsigned long)page_address(page), vmaddr)) {
		void *kaddr;

		kaddr = kmap_coherent(page, vmaddr);
		flush_data_cache_page((unsigned long)kaddr);
		kunmap_coherent();
	}
}

EXPORT_SYMBOL(__flush_anon_page);
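
/*
 * Note that this is only the slow path: generic code calls the
 * flush_anon_page() inline from asm/cacheflush.h, which drops into
 * __flush_anon_page() only when the flush can actually matter.  A rough
 * sketch of that wrapper (the exact condition is in the header, which is
 * authoritative; this is from memory):
 *
 *	static inline void flush_anon_page(struct vm_area_struct *vma,
 *		struct page *page, unsigned long vmaddr)
 *	{
 *		if (cpu_has_dc_aliases && PageAnon(page))
 *			__flush_anon_page(page, vmaddr);
 *	}
 */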

void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (page_mapping(page) && Page_dcache_dirty(page)) {
		addr = (unsigned long) page_address(page);
		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);
		ClearPageDcacheDirty(page);
	}
}
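
/*
 * __update_cache() is not called directly by generic code; it is one half of
 * the MIPS update_mmu_cache() hook, which (roughly, see asm/pgtable.h for
 * the authoritative definition) pairs it with the TLB refill helper:
 *
 *	static inline void update_mmu_cache(struct vm_area_struct *vma,
 *		unsigned long address, pte_t pte)
 *	{
 *		__update_tlb(vma, address, pte);
 *		__update_cache(vma, address, pte);
 *	}
 */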

static char cache_panic[] __initdata = "Yeee, unsupported cache architecture.";

void __init cpu_cache_init(void)
{
	if (cpu_has_3k_cache) {
		extern void __weak r3k_cache_init(void);

		r3k_cache_init();
		return;
	}
	if (cpu_has_6k_cache) {
		extern void __weak r6k_cache_init(void);

		r6k_cache_init();
		return;
	}
	if (cpu_has_4k_cache) {
		extern void __weak r4k_cache_init(void);

		r4k_cache_init();
		return;
	}
	if (cpu_has_8k_cache) {
		extern void __weak r8k_cache_init(void);

		r8k_cache_init();
		return;
	}
	if (cpu_has_tx39_cache) {
		extern void __weak tx39_cache_init(void);

		tx39_cache_init();
		return;
	}
	if (cpu_has_sb1_cache) {
		extern void __weak sb1_cache_init(void);

		sb1_cache_init();
		return;
	}

	panic(cache_panic);
}
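
/*
 * Each of the per-core init routines above lives in its own file (c-r3k.c,
 * c-r4k.c, c-tx39.c, c-sb1.c, ...) and is responsible for probing the cache
 * geometry and then filling in the function pointers declared at the top of
 * this file.  A much simplified sketch of that pattern; the function names
 * below are illustrative, not the symbols any particular core actually uses:
 *
 *	void __init example_cache_init(void)
 *	{
 *		example_probe_caches();		// size/associativity detection
 *
 *		flush_cache_all		= example_flush_cache_all;
 *		__flush_cache_all	= example___flush_cache_all;
 *		flush_cache_mm		= example_flush_cache_mm;
 *		flush_cache_range	= example_flush_cache_range;
 *		flush_cache_page	= example_flush_cache_page;
 *		flush_icache_range	= example_flush_icache_range;
 *
 *		flush_cache_sigtramp	= example_flush_cache_sigtramp;
 *		local_flush_data_cache_page = example_local_flush_data_cache_page;
 *		flush_data_cache_page	= example_flush_data_cache_page;
 *		flush_icache_all	= example_flush_icache_all;
 *	}
 */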