/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009, Wind River Systems Inc
 * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>

#include <asm/cacheflush.h>
#include <asm/cpuinfo.h>

static void __flush_dcache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.dcache_line_size - 1);
	end += (cpuinfo.dcache_line_size - 1);
	end &= ~(cpuinfo.dcache_line_size - 1);

	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
		__asm__ __volatile__ (" flushda 0(%0)\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
}

static void __flush_dcache_all(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.dcache_line_size - 1);
	end += (cpuinfo.dcache_line_size - 1);
	end &= ~(cpuinfo.dcache_line_size - 1);

	if (end > start + cpuinfo.dcache_size)
		end = start + cpuinfo.dcache_size;

	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
		__asm__ __volatile__ (" flushd 0(%0)\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
}

static void __invalidate_dcache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.dcache_line_size - 1);
	end += (cpuinfo.dcache_line_size - 1);
	end &= ~(cpuinfo.dcache_line_size - 1);

	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
		__asm__ __volatile__ (" initda 0(%0)\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
}

static void __flush_icache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.icache_line_size - 1);
	end += (cpuinfo.icache_line_size - 1);
	end &= ~(cpuinfo.icache_line_size - 1);

	if (end > start + cpuinfo.icache_size)
		end = start + cpuinfo.icache_size;

	for (addr = start; addr < end; addr += cpuinfo.icache_line_size) {
		__asm__ __volatile__ (" flushi %0\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
	__asm__ __volatile__(" flushp\n");
}
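
/*
 * Walk every shared user-space mapping of this page that belongs to the
 * current mm (via the mapping's i_mmap interval tree) and flush the
 * corresponding cache lines, so that aliases of the same physical page
 * do not retain stale data.
 */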
static void flush_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	pgoff_t pgoff;

	pgoff = page->index;

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;

		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset,
			page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

void flush_cache_all(void)
{
	__flush_dcache_all(0, cpuinfo.dcache_size);
	__flush_icache(0, cpuinfo.icache_size);
}

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_icache_range(unsigned long start, unsigned long end)
{
	__flush_dcache(start, end);
	__flush_icache(start, end);
}

void flush_dcache_range(unsigned long start, unsigned long end)
{
	__flush_dcache(start, end);
	__flush_icache(start, end);
}
EXPORT_SYMBOL(flush_dcache_range);

void invalidate_dcache_range(unsigned long start, unsigned long end)
{
	__invalidate_dcache(start, end);
}
EXPORT_SYMBOL(invalidate_dcache_range);

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	__flush_dcache(start, end);
	if (vma == NULL || (vma->vm_flags & VM_EXEC))
		__flush_icache(start, end);
}

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned long end = start + PAGE_SIZE;

	__flush_dcache(start, end);
	__flush_icache(start, end);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
			unsigned long pfn)
{
	unsigned long start = vmaddr;
	unsigned long end = start + PAGE_SIZE;

	__flush_dcache(start, end);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache(start, end);
}

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page. This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	unsigned long start = (unsigned long)page_address(page);

	__flush_dcache_all(start, start + PAGE_SIZE);
}
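
/*
 * If the page belongs to a mapping that is not currently mapped into any
 * user address space, the flush can be deferred: clearing PG_dcache_clean
 * records that the kernel copy may be dirty, and update_mmu_cache() will
 * perform the flush when a user mapping is actually established.
 * Otherwise flush immediately and take care of any user-space aliases.
 */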
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

	/* Flush this page if there are aliases. */
	if (mapping && !mapping_mapped(mapping)) {
		clear_bit(PG_dcache_clean, &page->flags);
	} else {
		__flush_dcache_page(mapping, page);
		if (mapping) {
			unsigned long start = (unsigned long)page_address(page);

			flush_aliases(mapping, page);
			flush_icache_range(start, start + PAGE_SIZE);
		}
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

void update_mmu_cache(struct vm_area_struct *vma,
		      unsigned long address, pte_t *pte)
{
	unsigned long pfn = pte_pfn(*pte);
	struct page *page;
	struct address_space *mapping;

	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);

	if (mapping) {
		flush_aliases(mapping, page);
		if (vma->vm_flags & VM_EXEC)
			flush_icache_page(vma, page);
	}
}

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *to)
{
	__flush_dcache(vaddr, vaddr + PAGE_SIZE);
	__flush_icache(vaddr, vaddr + PAGE_SIZE);
	copy_page(vto, vfrom);
	__flush_dcache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
	__flush_icache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
}

void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
	__flush_dcache(vaddr, vaddr + PAGE_SIZE);
	__flush_icache(vaddr, vaddr + PAGE_SIZE);
	clear_page(addr);
	__flush_dcache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
	__flush_icache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long user_vaddr,
			 void *dst, void *src, int len)
{
	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	__flush_dcache_all((unsigned long)src, (unsigned long)src + len);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache((unsigned long)src, (unsigned long)src + len);
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long user_vaddr,
		       void *dst, void *src, int len)
{
	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	__flush_dcache_all((unsigned long)dst, (unsigned long)dst + len);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache((unsigned long)dst, (unsigned long)dst + len);
}