/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/highmem.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
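
/*
 * MIPS has several incompatible cache implementations (R3000-style,
 * R4000-style, TX39, Octeon, ...), so the generic MM code calls
 * through the function pointers below.  The CPU-specific
 * *_cache_init() routine selected in cpu_cache_init() at the bottom
 * of this file fills them in at boot time.
 */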
/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(flush_icache_range);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(local_flush_icache_range);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);
void (*__invalidate_kernel_vmap_range)(unsigned long vaddr, int size);

/* MIPS-specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void *addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);

#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)

/* DMA cache operations. */
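/*
 * On DMA-incoherent systems the streaming DMA API has to write back
 * and/or invalidate buffers around each transfer; these hooks are
 * likewise installed by the CPU-specific cache init code.
 */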
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

EXPORT_SYMBOL(_dma_cache_wback_inv);

#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */

/*
 * We could optimize the case where the cache argument is not BCACHE,
 * but that seems to be a very atypical use.
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
		return -EFAULT;

	flush_icache_range(addr, addr + bytes);

	return 0;
}
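
/*
 * For illustration, user space typically reaches this syscall through
 * the libc wrapper after patching instructions.  A minimal sketch,
 * assuming <sys/cachectl.h> provides cacheflush() and BCACHE as it
 * does on glibc/MIPS, with emit_code() a hypothetical code generator:
 *
 *	#include <sys/cachectl.h>
 *
 *	emit_code(buf, len);
 *	if (cacheflush(buf, len, BCACHE) < 0)
 *		perror("cacheflush");
 *
 * As the comment above notes, the kernel flushes the I-cache range
 * whichever cache the caller requests.
 */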
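
/*
 * Flush a page cache page the kernel has just written to.  If no user
 * mapping of the file exists yet, the flush can be deferred: the page
 * is only tagged PG_dcache_dirty here, and __update_cache() below
 * completes the flush once the page is actually mapped into user
 * space.
 */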
void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certain to get
	 * faulted into the TLB (and thus flushed) anyway.
	 */
	if (PageHighMem(page))
		addr = (unsigned long)kmap_atomic(page);
	else
		addr = (unsigned long)page_address(page);

	flush_data_cache_page(addr);

	if (PageHighMem(page))
		__kunmap_atomic((void *)addr);
}

EXPORT_SYMBOL(__flush_dcache_page);
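
/*
 * An anonymous page may be mapped at a user address whose cache colour
 * differs from its kernel address.  When the two alias and the page is
 * still mapped, flush through a kmap_coherent() mapping established at
 * the user colour, so the flush hits the cache lines user space
 * actually sees; otherwise the direct kernel address suffices.
 */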
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapcount(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
}

EXPORT_SYMBOL(__flush_anon_page);
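
/*
 * Called when a PTE is installed: this is where a flush deferred by
 * __flush_dcache_page() is completed.  The flush is only needed if the
 * page will be executed (and the CPU cannot fill its I-cache from a
 * dirty D-cache) or if the new user address aliases the kernel
 * mapping of the page.
 */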
void __update_cache(unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (Page_dcache_dirty(page)) {
		if (PageHighMem(page))
			addr = (unsigned long)kmap_atomic(page);
		else
			addr = (unsigned long)page_address(page);

		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);

		if (PageHighMem(page))
			__kunmap_atomic((void *)addr);

		ClearPageDcacheDirty(page);
	}
}

unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);
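
/*
 * vm_get_page_prot() indexes protection_map[] with the low vm_flags
 * bits: bit 0 = VM_READ, bit 1 = VM_WRITE, bit 2 = VM_EXEC,
 * bit 3 = VM_SHARED.  E.g. a private PROT_READ|PROT_WRITE mapping
 * uses entry 3.  With RIXI the no-exec/no-read semantics become
 * explicit page bits; note that the private (copy-on-write) entries
 * 0-7 never get _PAGE_WRITE, matching the PAGE_COPY behaviour of the
 * !RIXI table below.
 */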
static inline void setup_protection_map(void)
{
	if (cpu_has_rixi) {
		protection_map[0]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[1]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[2]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[3]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[4]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[5]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[6]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[7]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);

		protection_map[8]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[9]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
		protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
		protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
		protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);

	} else {
		protection_map[0] = PAGE_NONE;
		protection_map[1] = PAGE_READONLY;
		protection_map[2] = PAGE_COPY;
		protection_map[3] = PAGE_COPY;
		protection_map[4] = PAGE_READONLY;
		protection_map[5] = PAGE_READONLY;
		protection_map[6] = PAGE_COPY;
		protection_map[7] = PAGE_COPY;
		protection_map[8] = PAGE_NONE;
		protection_map[9] = PAGE_READONLY;
		protection_map[10] = PAGE_SHARED;
		protection_map[11] = PAGE_SHARED;
		protection_map[12] = PAGE_READONLY;
		protection_map[13] = PAGE_READONLY;
		protection_map[14] = PAGE_SHARED;
		protection_map[15] = PAGE_SHARED;
	}
}
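
/*
 * Probe the CPU's cache flavour and run the matching init routine,
 * which sets up all of the function pointers above.  The cpu_has_*
 * tests are normally compile-time constants for a given platform, so
 * the unselected branches are optimised away; the __weak declarations
 * keep the references linkable when a variant is not built in.
 */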
void cpu_cache_init(void)
{
	if (cpu_has_3k_cache) {
		extern void __weak r3k_cache_init(void);

		r3k_cache_init();
	}
	if (cpu_has_6k_cache) {
		extern void __weak r6k_cache_init(void);

		r6k_cache_init();
	}
	if (cpu_has_4k_cache) {
		extern void __weak r4k_cache_init(void);

		r4k_cache_init();
	}
	if (cpu_has_8k_cache) {
		extern void __weak r8k_cache_init(void);

		r8k_cache_init();
	}
	if (cpu_has_tx39_cache) {
		extern void __weak tx39_cache_init(void);

		tx39_cache_init();
	}

	if (cpu_has_octeon_cache) {
		extern void __weak octeon_cache_init(void);

		octeon_cache_init();
	}

	setup_protection_map();
}
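
/*
 * Default heuristic for whether a mapping of physical memory (e.g.
 * through /dev/mem) must be uncached: files opened with O_DSYNC and
 * physical addresses at or above the top of low memory always bypass
 * the cache.  Platforms with different requirements override this
 * weak default.
 */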
int __weak __uncached_access(struct file *file, unsigned long addr)
{
	if (file->f_flags & O_DSYNC)
		return 1;

	return addr >= __pa(high_memory);
}