/*
 * arch/mips/mm/cache.c (revision d2999e1b)
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>

/*
 * Cache operations.  These function pointers are filled in at boot by the
 * CPU-specific cache init code (see cpu_cache_init() below).
 */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(flush_icache_range);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(local_flush_icache_range);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);
void (*__invalidate_kernel_vmap_range)(unsigned long vaddr, int size);

/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void *addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);

#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)

/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

EXPORT_SYMBOL(_dma_cache_wback_inv);

#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */

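/*
 * How these hooks are used, roughly (per the streaming DMA support in
 * arch/mips/mm/dma-default.c): on a non-coherent platform the DMA API
 * maintains coherence by hand, picking the cache operation from the
 * transfer direction:
 *
 *	DMA_TO_DEVICE:		_dma_cache_wback(start, size);
 *	DMA_FROM_DEVICE:	_dma_cache_inv(start, size);
 *	DMA_BIDIRECTIONAL:	_dma_cache_wback_inv(start, size);
 */
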
/*
 * We could optimize the case where the cache argument is not BCACHE, but
 * that seems to be a very atypical use ...
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
		return -EFAULT;

	flush_icache_range(addr, addr + bytes);

	return 0;
}

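/*
 * Example (a sketch of the userspace side, not part of this file): the
 * syscall above backs the MIPS cacheflush(2) wrapper declared in
 * <sys/cachectl.h>.  A JIT that has just emitted instructions would
 * typically do something like this ('code' and 'len' are hypothetical
 * names; BCACHE requests both the I- and D-cache):
 *
 *	#include <sys/cachectl.h>
 *
 *	if (cacheflush(code, len, BCACHE) < 0)
 *		perror("cacheflush");
 */
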
void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (PageHighMem(page))
		return;
	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certain to get
	 * faulted into the TLB (and thus flushed) anyway.
	 */
	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}

EXPORT_SYMBOL(__flush_dcache_page);

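/*
 * Illustrative caller (a sketch, not part of the original file): the
 * flush_dcache_page() wrapper from <asm/cacheflush.h> is the usual way
 * into the helper above.  Filesystems and drivers use it after writing a
 * page cache page through the kernel alias, so a differently coloured
 * user alias cannot see stale data:
 *
 *	void *dst = kmap(page);
 *	memcpy(dst, src, len);
 *	kunmap(page);
 *	flush_dcache_page(page);
 */
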
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapped(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			/*
			 * Flush through a temporary kernel mapping that has
			 * the same cache colour as the user's address.
			 */
			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
}

EXPORT_SYMBOL(__flush_anon_page);

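/*
 * A worked example of the aliasing test (assuming a VIPT D-cache with an
 * 8 KiB way size and 4 KiB pages): virtual bit 12 then selects the cache
 * colour, so a kernel alias at 0x...1000 and a user alias at 0x...3000
 * land in different cache sets and can each hold a stale copy of the same
 * physical page.  pages_do_alias(), defined elsewhere in the MIPS headers,
 * boils down to testing ((addr ^ vmaddr) & shm_align_mask) for exactly
 * this colour mismatch.
 */
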
void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (page_mapping(page) && Page_dcache_dirty(page)) {
		addr = (unsigned long) page_address(page);
		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);
		ClearPageDcacheDirty(page);
	}
}

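/*
 * A sketch of the usual call path (as wired up in <asm/pgtable.h> for
 * kernels of this generation): the generic fault code installs the PTE
 * and then calls update_mmu_cache(), which preloads the TLB and lands
 * here to perform any writeback deferred by __flush_dcache_page() above:
 *
 *	handle_mm_fault()
 *	  set_pte_at()
 *	  update_mmu_cache(vma, address, ptep)
 *	    __update_tlb(vma, address, pte)
 *	    __update_cache(vma, address, pte)
 */
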
/*
 * The default cacheability attribute (a _CACHE_* value probed at boot by
 * the CPU-specific cache init code) that gets OR'd into every page
 * protection below.
 */
unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);

static inline void setup_protection_map(void)
{
	if (cpu_has_rixi) {
		protection_map[0]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[1]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[2]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[3]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[4]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
		protection_map[5]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[6]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
		protection_map[7]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);

		protection_map[8]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[9]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
		protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
		protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
		protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ);
		protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);

	} else {
		protection_map[0] = PAGE_NONE;
		protection_map[1] = PAGE_READONLY;
		protection_map[2] = PAGE_COPY;
		protection_map[3] = PAGE_COPY;
		protection_map[4] = PAGE_READONLY;
		protection_map[5] = PAGE_READONLY;
		protection_map[6] = PAGE_COPY;
		protection_map[7] = PAGE_COPY;
		protection_map[8] = PAGE_NONE;
		protection_map[9] = PAGE_READONLY;
		protection_map[10] = PAGE_SHARED;
		protection_map[11] = PAGE_SHARED;
		protection_map[12] = PAGE_READONLY;
		protection_map[13] = PAGE_READONLY;
		protection_map[14] = PAGE_SHARED;
		protection_map[15] = PAGE_SHARED;
	}
}

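/*
 * The sixteen protection_map[] slots are indexed by the low vm_flags
 * bits: bit 0 = VM_READ, bit 1 = VM_WRITE, bit 2 = VM_EXEC and
 * bit 3 = VM_SHARED, so entries 0-7 cover private (copy-on-write)
 * mappings and entries 8-15 the shared ones.  A simplified sketch of how
 * the generic mm code consumes the table (the real helper lives in
 * mm/mmap.c):
 *
 *	pgprot_t vm_get_page_prot(unsigned long vm_flags)
 *	{
 *		return protection_map[vm_flags &
 *			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
 *	}
 */
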
void cpu_cache_init(void)
{
	if (cpu_has_3k_cache) {
		extern void __weak r3k_cache_init(void);

		r3k_cache_init();
	}
	if (cpu_has_6k_cache) {
		extern void __weak r6k_cache_init(void);

		r6k_cache_init();
	}
	if (cpu_has_4k_cache) {
		extern void __weak r4k_cache_init(void);

		r4k_cache_init();
	}
	if (cpu_has_8k_cache) {
		extern void __weak r8k_cache_init(void);

		r8k_cache_init();
	}
	if (cpu_has_tx39_cache) {
		extern void __weak tx39_cache_init(void);

		tx39_cache_init();
	}
	if (cpu_has_octeon_cache) {
		extern void __weak octeon_cache_init(void);

		octeon_cache_init();
	}

	setup_protection_map();
}

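/*
 * The "extern void __weak ..." declarations above let this file link even
 * though only the cache backends selected by the Makefile are built: an
 * unresolved weak function reference resolves to NULL instead of causing
 * a link error, and the cpu_has_* predicates ensure a missing variant is
 * never actually called.  The chosen init routine then fills in the
 * function pointers declared at the top of this file; a simplified sketch
 * of what c-r4k.c does:
 *
 *	void r4k_cache_init(void)
 *	{
 *		...
 *		flush_icache_range = r4k_flush_icache_range;
 *		flush_data_cache_page = r4k_flush_data_cache_page;
 *		...
 *	}
 */
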
int __weak __uncached_access(struct file *file, unsigned long addr)
{
	if (file->f_flags & O_DSYNC)
		return 1;

	return addr >= __pa(high_memory);
}
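
/*
 * Being __weak, the default policy above (O_DSYNC opens and anything past
 * the end of RAM get uncached mappings) can be overridden by platform
 * code.  A hypothetical override, for illustration only:
 *
 *	int __uncached_access(struct file *file, unsigned long addr)
 *	{
 *		return addr >= SOME_PLATFORM_MMIO_BASE;
 *	}
 */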