xref: /openbmc/linux/arch/arm/mm/flush.c (revision a09d2831)
/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/system.h>
#include <asm/tlbflush.h>

#include "mm.h"

#ifdef CONFIG_CPU_CACHE_VIPT

#define ALIAS_FLUSH_START	0xffff4000

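/*
 * flush_pfn_alias() writes back and invalidates a user page through a
 * temporary kernel alias: the page is mapped at ALIAS_FLUSH_START plus
 * the cache-colour offset of the user address, so the alias occupies
 * the same cache sets as the user mapping, and that window is then
 * cleaned and invalidated from the D-cache.
 */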
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0);
	flush_tlb_kernel_page(to);

	/*
	 * Clean and invalidate the aliased page from the D-cache
	 * (CP15 c7/c14 range operation), then drain the write buffer.
	 */
	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
	    : "cc");
}

void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		/*
		 * Clean and invalidate the entire D-cache, then
		 * drain the write buffer.
		 */
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		/*
		 * Clean and invalidate the entire D-cache, then
		 * drain the write buffer.
		 */
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_page(vma, user_addr, pfn);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

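/*
 * flush_ptrace_access() is reached after the kernel has written into a
 * user-visible page through its kernel mapping, for example when ptrace
 * pokes another task's memory via the copy_to_user_page() helper.  It
 * makes the new data visible to the user mapping and, for executable
 * mappings, to the instruction stream.
 */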
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr,
			 unsigned long len, int write)
{
	if (cache_is_vivt()) {
		vivt_flush_ptrace_access(vma, page, uaddr, kaddr, len, write);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing cache */
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) &&
	    vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		/* only flushing the kernel mapping on non-aliasing VIPT */
		__cpuc_coherent_kern_range(addr, addr + len);
	}
}
#else
#define flush_pfn_alias(pfn,vaddr)	do { } while (0)
#endif

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	void *addr = page_address(page);

	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
#ifdef CONFIG_HIGHMEM
	/*
	 * kmap_atomic() doesn't set the page virtual address, and
	 * kunmap_atomic() takes care of cache flushing already.
	 */
	if (addr)
#endif
		__cpuc_flush_dcache_area(addr, PAGE_SIZE);

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_CACHE_SHIFT);
}

static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *          current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

#ifndef CONFIG_SMP
	if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
#endif
	{
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
	}
}
EXPORT_SYMBOL(flush_dcache_page);
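
/*
 * Illustrative usage sketch (not part of the original file): code that
 * fills a page cache page through the kernel mapping typically calls
 * flush_dcache_page() afterwards so that user space mappings observe
 * the new data.  The buffer name "src" below is hypothetical.
 *
 *	void *dst = kmap(page);
 *	memcpy(dst, src, PAGE_SIZE);
 *	kunmap(page);
 *	flush_dcache_page(page);
 */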

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
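
/*
 * Illustrative sketch (not part of the original file) of the sequence
 * described above __flush_anon_page(), e.g. a debugger writing into a
 * pinned anonymous page of another task.  The variable names (tsk, mm,
 * buf, offset, len) are hypothetical, and the eight-argument
 * get_user_pages() of this kernel version is assumed:
 *
 *	get_user_pages(tsk, mm, vmaddr, 1, 1, 0, &page, &vma);
 *	flush_anon_page(vma, page, vmaddr);
 *	memcpy(page_address(page) + offset, buf, len);
 *	flush_dcache_page(page);
 *	put_page(page);
 */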