/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/setup.h>
#include <asm/pgtable.h>

/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
EXPORT_SYMBOL_GPL(__flush_cache_all);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(flush_icache_range);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(local_flush_icache_range);
void (*__flush_icache_user_range)(unsigned long start, unsigned long end);
void (*__local_flush_icache_user_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(__local_flush_icache_user_range);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);

/* MIPS specific cache operations */
void (*local_flush_data_cache_page)(void *addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);

#ifdef CONFIG_DMA_NONCOHERENT

/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * We could optimize the case where the cache argument is not BCACHE,
 * but that seems to be a very atypical use ...
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok((void __user *) addr, bytes))
		return -EFAULT;

	__flush_icache_user_range(addr, addr + bytes);

	return 0;
}
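/*
 * Illustrative userspace usage (not part of this file): the syscall is
 * normally reached through the libc wrapper declared in <sys/cachectl.h>.
 * A JIT that has just written instructions into a buffer (the names
 * "code_buf" and "code_len" below are hypothetical) would make them
 * visible to instruction fetch with something like:
 *
 *	#include <sys/cachectl.h>
 *
 *	if (cacheflush(code_buf, code_len, BCACHE) < 0)
 *		perror("cacheflush");
 *
 * <asm/cachectl.h> defines BCACHE as ICACHE | DCACHE; as the comment
 * above notes, this implementation performs the same flush for all
 * three cache arguments.
 */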
void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);
	unsigned long addr;

	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certain to get
	 * faulted into the tlb (and thus flushed) anyway.
	 */
	if (PageHighMem(page))
		addr = (unsigned long)kmap_atomic(page);
	else
		addr = (unsigned long)page_address(page);

	flush_data_cache_page(addr);

	if (PageHighMem(page))
		kunmap_atomic((void *)addr);
}

EXPORT_SYMBOL(__flush_dcache_page);

void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapcount(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
}

EXPORT_SYMBOL(__flush_anon_page);

void __update_cache(unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (Page_dcache_dirty(page)) {
		if (PageHighMem(page))
			addr = (unsigned long)kmap_atomic(page);
		else
			addr = (unsigned long)page_address(page);

		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);

		if (PageHighMem(page))
			kunmap_atomic((void *)addr);

		ClearPageDcacheDirty(page);
	}
}

unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);

#define PM(p)	__pgprot(_page_cachable_default | (p))
#define PVA(p)	PM(_PAGE_VALID | _PAGE_ACCESSED | (p))

static inline void setup_protection_map(void)
{
	protection_map[0]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
	protection_map[1]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
	protection_map[2]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
	protection_map[3]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
	protection_map[4]  = PVA(_PAGE_PRESENT);
	protection_map[5]  = PVA(_PAGE_PRESENT);
	protection_map[6]  = PVA(_PAGE_PRESENT);
	protection_map[7]  = PVA(_PAGE_PRESENT);

	protection_map[8]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
	protection_map[9]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
	protection_map[10] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
				 _PAGE_NO_READ);
	protection_map[11] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
	protection_map[12] = PVA(_PAGE_PRESENT);
	protection_map[13] = PVA(_PAGE_PRESENT);
	/* Shared writable mappings must actually carry the write bit. */
	protection_map[14] = PVA(_PAGE_PRESENT | _PAGE_WRITE);
	protection_map[15] = PVA(_PAGE_PRESENT | _PAGE_WRITE);
}

#undef PVA
#undef PM

void cpu_cache_init(void)
{
	if (cpu_has_3k_cache) {
		extern void __weak r3k_cache_init(void);

		r3k_cache_init();
	}
	if (cpu_has_4k_cache) {
		extern void __weak r4k_cache_init(void);

		r4k_cache_init();
	}
	if (cpu_has_tx39_cache) {
		extern void __weak tx39_cache_init(void);

		tx39_cache_init();
	}

	if (cpu_has_octeon_cache) {
		extern void __weak octeon_cache_init(void);

		octeon_cache_init();
	}

	setup_protection_map();
}
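/*
 * A minimal sketch (hypothetical "foo" backend; none of these names exist
 * in the tree) of what the per-CPU init routines called from
 * cpu_cache_init() above are expected to do: fill in the function
 * pointers declared at the top of this file with the platform's real
 * implementations, along the lines of:
 *
 *	static void foo_flush_cache_all(void)
 *	{
 *		... write back and invalidate the primary caches ...
 *	}
 *
 *	void foo_cache_init(void)
 *	{
 *		flush_cache_all   = foo_flush_cache_all;
 *		__flush_cache_all = foo_flush_cache_all;
 *		... and so on for the remaining pointers ...
 *	}
 *
 * A pointer left NULL will oops on first use, so a backend must assign
 * every operation the platform can reach.
 */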