/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/system.h>
#include <asm/tlbflush.h>

#include "mm.h"

#ifdef CONFIG_ARM_ERRATA_411920
extern void v6_icache_inval_all(void);
#endif

#ifdef CONFIG_CPU_CACHE_VIPT

#define ALIAS_FLUSH_START	0xffff4000
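
/*
 * flush_pfn_alias() cleans and invalidates a user page through a
 * temporary kernel alias.  On an aliasing VIPT cache a physical page
 * can be cached at several virtual "colours"; mapping @pfn at a kernel
 * address congruent with @vaddr means the flush below hits the same
 * cache lines as the user mapping.  For example, assuming 4K pages and
 * four colours (SHMLBA of 16K), vaddr 0x8003000 has colour 3, so the
 * alias would be mapped at ALIAS_FLUSH_START + 0x3000 = 0xffff7000.
 */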
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	/* install the alias mapping and make sure the TLB sees it */
	set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0);
	flush_tlb_kernel_page(to);

	asm(	"mcrr	p15, 0, %1, %0, c14\n"	/* clean+invalidate D range */
	"	mcr	p15, 0, %2, c7, c10, 4\n"	/* drain write buffer */
#ifndef CONFIG_ARM_ERRATA_411920
	"	mcr	p15, 0, %2, c7, c5, 0\n"	/* invalidate whole I cache */
#endif
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
	    : "cc");
#ifdef CONFIG_ARM_ERRATA_411920
	/* erratum 411920: plain I-cache invalidate can fail, use workaround */
	v6_icache_inval_all();
#endif
}

void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
			__cpuc_flush_user_all();
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"	/* clean+invalidate whole D cache */
		"	mcr	p15, 0, %0, c7, c10, 4\n"	/* drain write buffer */
#ifndef CONFIG_ARM_ERRATA_411920
		"	mcr	p15, 0, %0, c7, c5, 0\n"	/* invalidate whole I cache */
#endif
		    :
		    : "r" (0)
		    : "cc");
#ifdef CONFIG_ARM_ERRATA_411920
		v6_icache_inval_all();
#endif
	}
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
			__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
						vma->vm_flags);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4\n"
#ifndef CONFIG_ARM_ERRATA_411920
		"	mcr	p15, 0, %0, c7, c5, 0\n"
#endif
		    :
		    : "r" (0)
		    : "cc");
#ifdef CONFIG_ARM_ERRATA_411920
		v6_icache_inval_all();
#endif
	}
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
			unsigned long addr = user_addr & PAGE_MASK;
			__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
		}
		return;
	}

	if (cache_is_vipt_aliasing())
		flush_pfn_alias(pfn, user_addr);
}

void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr,
			 unsigned long len, int write)
{
	if (cache_is_vivt()) {
		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		return;
	}

	/* VIPT non-aliasing cache */
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask) &&
	    vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		/* only flushing the kernel mapping on non-aliasing VIPT */
		__cpuc_coherent_kern_range(addr, addr + len);
	}
}
#else
#define flush_pfn_alias(pfn,vaddr)	do { } while (0)
#endif

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	__cpuc_flush_dcache_page(page_address(page));

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush: at the relevant userspace colour,
	 * which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_CACHE_SHIFT);
}

static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we must also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		/* translate the file offset back to a user virtual address */
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *          current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

#ifndef CONFIG_SMP
	if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
#endif
	{
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
	}
}
EXPORT_SYMBOL(flush_dcache_page);
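
/*
 * Illustrative only, not part of this file: the usual caller pattern
 * is a driver or filesystem that writes to a page cache page through
 * the kernel mapping and then calls flush_dcache_page() so that user
 * space mappings see the new data ("buf" and "len" are hypothetical):
 *
 *	void *kva = kmap(page);
 *	memcpy(kva, buf, len);		// dirty the kernel alias
 *	flush_dcache_page(page);	// make user aliases coherent
 *	kunmap(page);
 */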

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we need only flush a kernel alias
		 * of the userspace address.
		 */
		flush_pfn_alias(pfn, vmaddr);
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_page(page_address(page));
}
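
/*
 * A sketch of the sequence described above, as a hypothetical caller
 * would issue it (illustrative only; "tsk", "mm", "data" and "len"
 * stand in for the caller's context).  get_user_pages() invokes
 * flush_anon_page() internally; the caller flushes again only if it
 * wrote to the page through the kernel mapping:
 *
 *	ret = get_user_pages(tsk, mm, vmaddr, 1, 1, 0, &page, NULL);
 *	kva = kmap(page);
 *	memcpy(kva, data, len);		// write via the kernel alias
 *	flush_dcache_page(page);	// required after writing
 *	kunmap(page);
 *	put_page(page);
 */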