/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>

#include "mm.h"

#ifdef CONFIG_CPU_CACHE_VIPT

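/*
 * Clean and invalidate one page of data cache through a temporary
 * kernel alias.  The alias is placed in the FLUSH_ALIAS_START window
 * at the same cache colour as the user address @vaddr, so on an
 * aliasing VIPT D-cache it indexes the same cache lines as the user
 * mapping.  The MCRR performs a clean+invalidate over the aliased
 * page; the final MCR (c7, c10, 4) drains the write buffer.
 */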
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
	    : "cc");
}

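/*
 * As above, but for the I-cache: map a colour-matched alias of the
 * page and flush @len bytes of it starting at @vaddr's page offset.
 */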
static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
	unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	unsigned long offset = vaddr & (PAGE_SIZE - 1);
	unsigned long to;

	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
	to = va + offset;
	flush_icache_range(to, to + len);
}

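/*
 * Flush all user-space cache state for an address space.  A VIVT
 * cache is tagged by virtual address, so the current VM view must be
 * flushed; for an aliasing VIPT cache we clean+invalidate the entire
 * D-cache (c7, c14, 0) and drain the write buffer (c7, c10, 4), as
 * any colour may hold dirty user data.
 */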
void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}

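/*
 * Flush user cache state for a range of a VMA.  Aliasing VIPT caches
 * have no per-address user flush here, so the whole D-cache is
 * cleaned and invalidated; the I-cache is flushed only when the
 * range may contain code (VM_EXEC).
 */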
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}

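/*
 * Flush a single user page.  Aliasing VIPT caches flush through a
 * colour-matched alias of the page; a VIVT ASID-tagged I-cache
 * cannot be flushed that way, so executable mappings invalidate the
 * whole I-cache instead.
 */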
void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_page(vma, user_addr, pfn);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

#else
#define flush_pfn_alias(pfn,vaddr)		do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
#endif

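/*
 * SMP cross-call helper: invalidate the local I-cache on CPUs that
 * do not see broadcast cache maintenance from other cores.
 */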
static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}

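/*
 * Make a kernel-side write at @kaddr visible to the user mapping of
 * @page at @uaddr (the flush half of copy_to_user_page() below).
 * VIVT: flush the kernel range directly, but only when this CPU has
 * the task's mm mapped in.  Aliasing VIPT: flush a colour-matched
 * alias plus the I-cache.  Non-aliasing VIPT: only executable
 * mappings need work, for I/D coherency.
 */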
static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	if (cache_is_vivt()) {
		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing D-cache */
	if (vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_vipt_aliasing())
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU: on SMP,
 * preemption is disabled around the copy so that the cache flush
 * happens on the same CPU that performed the memcpy().
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}

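/*
 * Write back the kernel mapping of @page and, on an aliasing VIPT
 * cache, the single user colour (derived from page->index) at which
 * a page cache page may be mapped.
 */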
void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Write back any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	if (!PageHighMem(page)) {
		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
	} else {
		void *addr = kmap_high_get(page);
		if (addr) {
			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
			kunmap_high(page);
		} else if (cache_is_vipt()) {
			/* unmapped pages might still be cached */
			addr = kmap_atomic(page);
			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
			kunmap_atomic(addr);
		}
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_CACHE_SHIFT);
}

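/*
 * Flush every shared user mapping of @page in the current address
 * space, so that a VIVT cache retains no dirty user aliases.
 */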
static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

#if __LINUX_ARM_ARCH__ >= 6
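/*
 * Called when a user PTE is established (e.g. from set_pte_at()) so
 * that instruction fetches see data written through other mappings:
 * lazily flush the D-cache page, then invalidate the I-cache for
 * executable mappings.
 */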
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct page *page;
	struct address_space *mapping;

	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (cache_is_vipt_aliasing())
		mapping = page_mapping(page);
	else
		mapping = NULL;

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);

	if (pte_exec(pteval))
		__flush_icache_all();
}
#endif

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *          current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcast.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

	if (!cache_ops_need_broadcast() &&
	    mapping && !mapping_mapped(mapping))
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
339