/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

#include <asm/bcache.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/setup.h>
#include <asm/pgtable.h>

/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
EXPORT_SYMBOL_GPL(__flush_cache_all);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(flush_icache_range);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(local_flush_icache_range);
void (*__flush_icache_user_range)(unsigned long start, unsigned long end);
void (*__local_flush_icache_user_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(__local_flush_icache_user_range);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);

/* MIPS specific cache operations */
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);

/*
 * Dummy cache handling routine
 */

void cache_noop(void) {}

#ifdef CONFIG_BOARD_SCACHE

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;
#endif

#ifdef CONFIG_DMA_NONCOHERENT

/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * We could optimize the case where the cache argument is not BCACHE but
 * that seems very atypical use ...
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok((void __user *) addr, bytes))
		return -EFAULT;

	__flush_icache_user_range(addr, addr + bytes);

	return 0;
}

void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);
	unsigned long addr;

	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too. But that
	 * case is for exec env/arg pages and those are 99% certainly going to
	 * get faulted into the tlb (and thus flushed) anyway.
	 */
	if (PageHighMem(page))
		addr = (unsigned long)kmap_atomic(page);
	else
		addr = (unsigned long)page_address(page);

	flush_data_cache_page(addr);

	if (PageHighMem(page))
		kunmap_atomic((void *)addr);
}

EXPORT_SYMBOL(__flush_dcache_page);
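
/*
 * __flush_anon_page() runs when an anonymous page may alias a user
 * mapping at @vmaddr. If the page is still mapped in userspace and its
 * kernel alias is not marked dirty, the dirty lines sit at the user
 * colour, so flush through a temporary kmap_coherent() mapping with
 * that colour; otherwise the kernel alias holds the dirty data and is
 * flushed directly.
 */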
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapcount(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
}

EXPORT_SYMBOL(__flush_anon_page);

/*
 * Called when a PTE is installed for @address: if the page still has
 * dirty dcache lines left behind by __flush_dcache_page(), write them
 * back now so that an executable mapping (unless the CPU fills the
 * icache from the dcache) or a differently-coloured user alias cannot
 * see stale data.
 */
void __update_cache(unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (Page_dcache_dirty(page)) {
		if (PageHighMem(page))
			addr = (unsigned long)kmap_atomic(page);
		else
			addr = (unsigned long)page_address(page);

		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);

		if (PageHighMem(page))
			kunmap_atomic((void *)addr);

		ClearPageDcacheDirty(page);
	}
}

unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);

#define PM(p)	__pgprot(_page_cachable_default | (p))

static pgprot_t protection_map[16] __ro_after_init;
DECLARE_VM_GET_PAGE_PROT

/*
 * protection_map[] is indexed by a VMA's VM_READ/VM_WRITE/VM_EXEC bits,
 * with entries 8-15 being the VM_SHARED variants. Only shared writable
 * mappings get _PAGE_WRITE up front; private writable ones stay
 * read-only so copy-on-write can fault.
 */
static inline void setup_protection_map(void)
{
	protection_map[0] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
	protection_map[1] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
	protection_map[2] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
	protection_map[3] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
	protection_map[4] = PM(_PAGE_PRESENT);
	protection_map[5] = PM(_PAGE_PRESENT);
	protection_map[6] = PM(_PAGE_PRESENT);
	protection_map[7] = PM(_PAGE_PRESENT);

	protection_map[8] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
	protection_map[9] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
	protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
				_PAGE_NO_READ);
	protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
	protection_map[12] = PM(_PAGE_PRESENT);
	protection_map[13] = PM(_PAGE_PRESENT);
	protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
	protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
}

#undef PM

void cpu_cache_init(void)
{
	if (cpu_has_3k_cache) {
		extern void __weak r3k_cache_init(void);

		r3k_cache_init();
	}
	if (cpu_has_4k_cache) {
		extern void __weak r4k_cache_init(void);

		r4k_cache_init();
	}

	if (cpu_has_octeon_cache) {
		extern void __weak octeon_cache_init(void);

		octeon_cache_init();
	}

	setup_protection_map();
}
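
/*
 * Userspace usage sketch (illustration only, not part of this file):
 * sys_cacheflush() above is reached through the MIPS libc wrapper
 * declared in <sys/cachectl.h>, typically after writing JIT-generated
 * instructions. The ICACHE/DCACHE/BCACHE flags come from
 * <asm/cachectl.h>; as the comment above SYSCALL_DEFINE3() notes, this
 * implementation flushes the user icache range whichever cache is
 * named.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/cachectl.h>
 *	#include <sys/mman.h>
 *
 *	// MIPS "jr ra; nop": a function that returns immediately.
 *	static const unsigned int code[] = { 0x03e00008, 0x00000000 };
 *
 *	int main(void)
 *	{
 *		void *buf = mmap(NULL, sizeof(code),
 *				 PROT_READ | PROT_WRITE | PROT_EXEC,
 *				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		if (buf == MAP_FAILED)
 *			return 1;
 *		memcpy(buf, code, sizeof(code));
 *		// Write back dirty dcache lines and invalidate stale
 *		// icache lines before executing the fresh instructions.
 *		if (cacheflush(buf, sizeof(code), BCACHE))
 *			perror("cacheflush");
 *		((void (*)(void))buf)();
 *		return 0;
 *	}
 */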