/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/system.h>
#include <asm/tlbflush.h>

#include "mm.h"

#ifdef CONFIG_CPU_CACHE_VIPT

#define ALIAS_FLUSH_START	0xffff4000

/*
 * Clean and invalidate the D-cache lines of a page through a temporary
 * kernel mapping that has the same cache colour as the user address,
 * so that the aliased lines are the ones actually hit.
 */
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0);
	flush_tlb_kernel_page(to);

	asm(	"mcrr	p15, 0, %1, %0, c14\n"	/* clean+invalidate D-cache range */
	"	mcr	p15, 0, %2, c7, c10, 4"	/* drain write buffer */
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
	    : "cc");
}

/*
 * As above, but flush the I-cache for [vaddr, vaddr + len) through a
 * kernel alias of the same cache colour.
 */
static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
	unsigned long colour = CACHE_COLOUR(vaddr);
	unsigned long offset = vaddr & (PAGE_SIZE - 1);
	unsigned long to;

	set_pte_ext(TOP_PTE(ALIAS_FLUSH_START) + colour, pfn_pte(pfn, PAGE_KERNEL), 0);
	to = ALIAS_FLUSH_START + (colour << PAGE_SHIFT) + offset;
	flush_tlb_kernel_page(to);
	flush_icache_range(to, to + len);
}

void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"	/* clean+invalidate entire D-cache */
		"	mcr	p15, 0, %0, c7, c10, 4"		/* drain write buffer */
		    :
		    : "r" (0)
		    : "cc");
	}
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"	/* clean+invalidate entire D-cache */
		"	mcr	p15, 0, %0, c7, c10, 4"		/* drain write buffer */
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_page(vma, user_addr, pfn);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

#else
#define flush_pfn_alias(pfn,vaddr)		do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
#endif

static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}

static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	if (cache_is_vivt()) {
		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing D-cache */
	if (vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
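		/*
		 * An aliasing VIPT I-cache can still hold stale lines at
		 * the user colour; flushing the kernel mapping alone would
		 * miss them, so flush through a congruent alias of the
		 * user address instead.
		 */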
		if (icache_is_vipt_aliasing())
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	if (!PageHighMem(page)) {
		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
	} else {
		void *addr = kmap_high_get(page);
		if (addr) {
			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
			kunmap_high(page);
		} else if (cache_is_vipt()) {
			/* unmapped pages might still be cached */
			addr = kmap_atomic(page);
			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
			kunmap_atomic(addr);
		}
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_CACHE_SHIFT);
}

static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we also need to write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
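		 * Private (non-VM_MAYSHARE) mappings are skipped as well;
		 * for the VMAs that remain, (pgoff - vm_pgoff) gives the
		 * page's offset within the mapping, and hence the user
		 * address at which it is visible.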
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

#if __LINUX_ARM_ARCH__ >= 6
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct page *page;
	struct address_space *mapping;

	if (!pte_present_user(pteval))
		return;
	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (cache_is_vipt_aliasing())
		mapping = page_mapping(page);
	else
		mapping = NULL;

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);
	/* pte_exec() already checked above for non-aliasing VIPT cache */
	if (cache_is_vipt_nonaliasing() || pte_exec(pteval))
		__flush_icache_all();
}
#endif

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 * - VIPT non-aliasing cache: fully coherent so nothing required.
 * - VIVT: fully aliasing, so we need to handle every alias in our
 *   current VM view.
 * - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

	if (!cache_ops_need_broadcast() &&
	    mapping && !mapping_mapped(mapping))
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);
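/*
 * Illustrative usage of flush_dcache_page() (a sketch only; buf, off
 * and len are assumptions, not defined in this file).  Code writing to
 * a page cache page through its kernel mapping is expected to call
 * flush_dcache_page() afterwards so user mappings see the new data:
 *
 *	void *addr = kmap_atomic(page);
 *	memcpy(addr + off, buf, len);
 *	kunmap_atomic(addr);
 *	flush_dcache_page(page);
 */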
/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
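/*
 * Illustrative caller sequence for the hooks above (a sketch; tsk, mm,
 * vmaddr, buf, off and len are assumptions).  This mirrors how a
 * get_user_pages() user is expected to keep the caches coherent when
 * reading another process's anonymous page:
 *
 *	struct vm_area_struct *vma;
 *	struct page *page;
 *
 *	if (get_user_pages(tsk, mm, vmaddr, 1, 0, 0, &page, &vma) == 1) {
 *		flush_anon_page(vma, page, vmaddr);
 *		memcpy(buf, kmap(page) + off, len);
 *		kunmap(page);
 *		put_page(page);
 *	}
 */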