xref: /openbmc/linux/arch/arm/mm/flush.c (revision e868d61272caa648214046a096e5a6bfc068dc8c)
/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/system.h>
#include <asm/tlbflush.h>

#include "mm.h"

#ifdef CONFIG_CPU_CACHE_VIPT

/*
 * Virtual window used to set up a colour-matched kernel alias of a
 * physical page, so it can be flushed at the same cache colour as
 * its userspace mapping.
 */
#define ALIAS_FLUSH_START	0xffff4000

static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	/* Set up the colour-matched kernel alias of the page. */
	set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0);
	flush_tlb_kernel_page(to);

	/*
	 * Clean and invalidate the D-cache over the alias (MCRR c14:
	 * start address in %0, end address in %1), drain the write
	 * buffer, then invalidate the I-cache.
	 */
	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4\n"
	"	mcr	p15, 0, %2, c7, c5, 0\n"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
	    : "cc");
}
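
/*
 * Illustrative note (not part of the original source): on an aliasing
 * VIPT cache the "colour" of an address is the set of virtual address
 * bits above PAGE_SHIFT that index the cache.  CACHE_COLOUR() extracts
 * them, roughly:
 *
 *	colour = (vaddr & (SHMLBA - 1)) >> PAGE_SHIFT;
 *
 * so the alias mapped above at ALIAS_FLUSH_START + (colour << PAGE_SHIFT)
 * indexes the same cache sets as the user mapping at vaddr, and cleaning
 * the alias also cleans the user-visible data.
 */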

void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
			__cpuc_flush_user_all();
		return;
	}

	if (cache_is_vipt_aliasing()) {
		/*
		 * Clean and invalidate the entire D-cache, invalidate
		 * the I-cache, then drain the write buffer.
		 */
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c5, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
			__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
						vma->vm_flags);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		/* Same full clean/invalidate sequence as flush_cache_mm(). */
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c5, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
			unsigned long addr = user_addr & PAGE_MASK;
			__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
		}
		return;
	}

	if (cache_is_vipt_aliasing())
		flush_pfn_alias(pfn, user_addr);
}

void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr,
			 unsigned long len, int write)
{
	if (cache_is_vivt()) {
		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		return;
	}

	/* VIPT non-aliasing cache */
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask) &&
	    vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		/* only flushing the kernel mapping on non-aliasing VIPT */
		__cpuc_coherent_kern_range(addr, addr + len);
	}
}
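
/*
 * Call-path sketch (illustrative, taken from the generic ptrace write
 * path rather than this file): access_process_vm() kmaps the target
 * page and uses copy_to_user_page(), which on ARM expands to roughly
 *
 *	memcpy(dst, src, len);
 *	flush_ptrace_access(vma, page, uaddr, dst, len, 1);
 *
 * so "kaddr" above is the kernel-side kmap address that was just
 * written, which is why the non-aliasing paths clean that range.
 */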
#else
#define flush_pfn_alias(pfn,vaddr)	do { } while (0)
#endif

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	__cpuc_flush_dcache_page(page_address(page));

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_CACHE_SHIFT);
}
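
/*
 * Sketch of why one alias flush suffices (assumes the usual ARM mmap
 * colouring rules): on aliasing caches, arch_get_unmapped_area() lays
 * shared mappings out on SHMLBA boundaries, so every user address
 * "uaddr" mapping this page satisfies
 *
 *	CACHE_COLOUR(uaddr) == CACHE_COLOUR(page->index << PAGE_CACHE_SHIFT)
 *
 * and flushing a single alias at that colour covers all of them.
 */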

static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}
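
/*
 * Worked example (illustrative numbers): with 4K pages, a page at
 * pgoff 10 in a VMA that maps the file from vm_pgoff 8 at
 * vm_start 0x40000000 lives at
 *
 *	0x40000000 + ((10 - 8) << PAGE_SHIFT) == 0x40002000
 *
 * which is the user address handed to flush_cache_page() above.
 */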

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *          current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

#ifndef CONFIG_SMP
	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
#endif
	{
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
	}
}
EXPORT_SYMBOL(flush_dcache_page);
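
/*
 * Typical caller sketch (assumed driver pattern, not part of this
 * file): code that writes to a page cache page through the kernel
 * mapping must call flush_dcache_page() before userspace reads it:
 *
 *	void *dst = kmap(page);
 *	memcpy(dst, buf, len);		(dirty the kernel-mapping lines)
 *	flush_dcache_page(page);	(make them visible to user aliases)
 *	kunmap(page);
 */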

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_page(page_address(page));
}
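
/*
 * Caller-side sketch of the sequence documented above (illustrative;
 * flush_anon_page() is the arch hook that ends up here for anonymous
 * pages):
 *
 *	get_user_pages(tsk, mm, addr, 1, write, 1, &page, &vma);
 *	flush_anon_page(vma, page, addr);
 *	memcpy(maddr + offset, buf, len);	(maddr = kmap(page))
 *	flush_dcache_page(page);
 */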