/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/system.h>

static void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	pgoff_t pgoff;

	/*
	 * Write back any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	__cpuc_flush_dcache_page(page_address(page));

	/*
	 * If there's no mapping pointer here, then this page isn't
	 * visible to userspace yet, so there are no cache lines
	 * associated with any other aliases.
	 */
	if (!mapping)
		return;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
		if (cache_is_vipt())
			break;
	}
	flush_dcache_mmap_unlock(mapping);
}

/*
 * Ensure cache coherency between the kernel mapping and the userspace
 * mapping of this page.
 *
 * We have three cases to consider:
 * - VIPT non-aliasing cache: fully coherent, so nothing required.
 * - VIVT: fully aliasing, so we need to handle every alias in our
 *   current VM view.
 * - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (cache_is_vipt_nonaliasing())
		return;

	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
		__flush_dcache_page(mapping, page);
}
EXPORT_SYMBOL(flush_dcache_page);
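
/*
 * Usage sketch (illustrative, not part of the original file): the
 * contract of flush_dcache_page() is that any code which modifies a
 * page cache page through its kernel mapping must flush before the
 * data can be observed through a user mapping.  A caller on an
 * aliasing cache would therefore look roughly like:
 *
 *	void *kaddr = kmap(page);
 *	memcpy(kaddr, src, len);	/\* write via the kernel alias *\/
 *	kunmap(page);
 *	flush_dcache_page(page);	/\* push dirty lines to memory so
 *					   user-space aliases see them *\/
 *
 * On a VIPT non-aliasing cache the call degenerates to a no-op, as
 * the early return in flush_dcache_page() above shows; on VIVT and
 * aliasing VIPT caches it triggers the walk in __flush_dcache_page().
 */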