/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/highmem.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/setup.h>

/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
EXPORT_SYMBOL_GPL(__flush_cache_all);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(flush_icache_range);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(local_flush_icache_range);
void (*__flush_icache_user_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(__flush_icache_user_range);
void (*__local_flush_icache_user_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(__local_flush_icache_user_range);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);

/* MIPS specific cache operations */
void (*local_flush_data_cache_page)(void *addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);

#ifdef CONFIG_DMA_NONCOHERENT

/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

EXPORT_SYMBOL(_dma_cache_wback_inv);

#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * We could optimize the case where the cache argument is not BCACHE, but
 * that seems to be a very atypical use ...
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok((void __user *) addr, bytes))
		return -EFAULT;

	__flush_icache_user_range(addr, addr + bytes);

	return 0;
}
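
/*
 * Userspace usage sketch (illustrative, not part of this file): the libc
 * wrapper for this syscall is cacheflush(2), declared in <sys/cachectl.h>,
 * with the ICACHE/DCACHE/BCACHE selectors coming from <asm/cachectl.h>.
 * A JIT that has just emitted instructions would typically do
 *
 *	if (cacheflush(code, code_len, BCACHE) < 0)
 *		perror("cacheflush");
 *
 * before jumping to the generated code; "code" and "code_len" stand for
 * whatever buffer was just written.
 */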

/*
 * Flush a page the kernel has written through its kernel-space alias.
 * For file pages whose mapping is not currently visible in any user
 * address space we may defer the flush: mark the page dirty and let
 * __update_cache() below do the work when the page is faulted in.
 */
void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);
	unsigned long addr;

	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too. But that
	 * case is for exec env/arg pages and those are 99% certainly going to
	 * get faulted into the tlb (and thus flushed) anyways.
	 */
	if (PageHighMem(page))
		addr = (unsigned long)kmap_atomic(page);
	else
		addr = (unsigned long)page_address(page);

	flush_data_cache_page(addr);

	if (PageHighMem(page))
		__kunmap_atomic((void *)addr);
}

EXPORT_SYMBOL(__flush_dcache_page);

/*
 * An anonymous page may be mapped at a user virtual address whose cache
 * colour differs from that of the kernel alias.  Flush the alias that can
 * actually hold stale lines: a coherent kmap at the user colour if the
 * page is mapped and not marked dirty, the kernel alias otherwise.
 */
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapcount(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
}

EXPORT_SYMBOL(__flush_anon_page);

/*
 * Called once a translation for @address has been installed: perform any
 * dcache flush that __flush_dcache_page() deferred via the dcache-dirty
 * page flag.
 */
void __update_cache(unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (Page_dcache_dirty(page)) {
		if (PageHighMem(page))
			addr = (unsigned long)kmap_atomic(page);
		else
			addr = (unsigned long)page_address(page);

		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);

		if (PageHighMem(page))
			__kunmap_atomic((void *)addr);

		ClearPageDcacheDirty(page);
	}
}
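
/*
 * Aliasing arithmetic, informal sketch: on a virtually indexed dcache,
 * two mappings of the same physical page can land in different cache sets
 * whenever their virtual addresses differ in the index bits above
 * PAGE_SHIFT.  E.g. with an 8KB cache way and 4KB pages, bit 12 selects
 * the cache colour; a kernel alias at ...2000 and a user mapping at
 * ...3000 then alias because (0x2000 ^ 0x3000) & 0x1000 != 0.  This is,
 * roughly, the test pages_do_alias() performs above.
 */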

unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);

/*
 * protection_map[] maps the 16 VM_{READ,WRITE,EXEC,SHARED} combinations
 * to page protections.  With RIXI the CPU has explicit no-read/no-exec
 * bits, so build the entries from the _PAGE_* flags directly; without it,
 * fall back to the classic PAGE_* protections.
 */
static inline void setup_protection_map(void)
{
	if (cpu_has_rixi) {
		protection_map[0]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[1]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[2]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[3]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[4]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[5]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[6]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[7]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);

		protection_map[8]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[9]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
		protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
		protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
		protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);

	} else {
		protection_map[0]  = PAGE_NONE;
		protection_map[1]  = PAGE_READONLY;
		protection_map[2]  = PAGE_COPY;
		protection_map[3]  = PAGE_COPY;
		protection_map[4]  = PAGE_READONLY;
		protection_map[5]  = PAGE_READONLY;
		protection_map[6]  = PAGE_COPY;
		protection_map[7]  = PAGE_COPY;
		protection_map[8]  = PAGE_NONE;
		protection_map[9]  = PAGE_READONLY;
		protection_map[10] = PAGE_SHARED;
		protection_map[11] = PAGE_SHARED;
		protection_map[12] = PAGE_READONLY;
		protection_map[13] = PAGE_READONLY;
		protection_map[14] = PAGE_SHARED;
		protection_map[15] = PAGE_SHARED;
	}
}

/* Probe the CPU's cache type and hook up the matching implementation. */
void cpu_cache_init(void)
{
	if (cpu_has_3k_cache) {
		extern void __weak r3k_cache_init(void);

		r3k_cache_init();
	}
	if (cpu_has_6k_cache) {
		extern void __weak r6k_cache_init(void);

		r6k_cache_init();
	}
	if (cpu_has_4k_cache) {
		extern void __weak r4k_cache_init(void);

		r4k_cache_init();
	}
	if (cpu_has_8k_cache) {
		extern void __weak r8k_cache_init(void);

		r8k_cache_init();
	}
	if (cpu_has_tx39_cache) {
		extern void __weak tx39_cache_init(void);

		tx39_cache_init();
	}

	if (cpu_has_octeon_cache) {
		extern void __weak octeon_cache_init(void);

		octeon_cache_init();
	}

	setup_protection_map();
}

/*
 * Default policy for deciding whether a physical address must be accessed
 * uncached: O_DSYNC mappings and anything past the end of RAM go uncached.
 * Platforms may override this __weak default.
 */
int __weak __uncached_access(struct file *file, unsigned long addr)
{
	if (file->f_flags & O_DSYNC)
		return 1;

	return addr >= __pa(high_memory);
}
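
/*
 * Platform override sketch (hypothetical, not from this file): since
 * __uncached_access() is __weak, a platform may supply its own policy,
 * e.g. to force uncached access for a device window that lies below
 * high_memory.  MY_DEVICE_BASE and MY_DEVICE_END are made-up placeholders:
 *
 *	int __uncached_access(struct file *file, unsigned long addr)
 *	{
 *		if (file->f_flags & O_DSYNC)
 *			return 1;
 *		return addr >= __pa(high_memory) ||
 *		       (addr >= MY_DEVICE_BASE && addr < MY_DEVICE_END);
 *	}
 */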