/*
 * arch/xtensa/mm/cache.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001-2006 Tensilica Inc.
 *
 * Chris Zankel	<chris@zankel.net>
 * Joe Taylor
 * Marc Gauthier
 *
 */

#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/memblock.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/pgtable.h>

#include <asm/bootparam.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

/*
 * Note:
 * The kernel provides one architecture bit PG_arch_1 in the page flags that
 * can be used for cache coherency.
 *
 * I$-D$ coherency.
 *
 * The Xtensa architecture doesn't keep the instruction cache coherent with
 * the data cache. We use the architecture bit to indicate whether the two
 * caches are in sync for a page. The kernel clears this bit whenever a page
 * is added to the page cache; at that time, the caches might not be in sync.
 * We therefore define this flag as 'clean' if set.
 *
 * D-cache aliasing.
 *
 * With cache aliasing, we always have to flush the cache when pages are
 * unmapped (see tlb_start_vma()). So, in that configuration, we use this
 * flag to indicate a dirty page.
 */

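/*
 * Illustration (a hedged sketch, not part of the kernel API): two addresses
 * fall into the same D-cache "color" exactly when the bits covered by
 * DCACHE_ALIAS_MASK agree, which is what DCACHE_ALIAS_EQ() tests:
 *
 *	static inline bool same_dcache_color(unsigned long a, unsigned long b)
 *	{
 *		return (a & DCACHE_ALIAS_MASK) == (b & DCACHE_ALIAS_MASK);
 *	}
 *
 * A page whose kernel and user addresses share a color can be handled
 * through the kernel mapping directly; otherwise a temporary mapping with
 * the matching color is needed (see the helpers below).
 */
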
#if (DCACHE_WAY_SIZE > PAGE_SIZE)

/* Invalidate any D-cache lines of @page cached at a color other than @vaddr. */
static inline void kmap_invalidate_coherent(struct page *page,
					    unsigned long vaddr)
{
	if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
		unsigned long kvaddr;

		if (!PageHighMem(page)) {
			kvaddr = (unsigned long)page_to_virt(page);

			__invalidate_dcache_page(kvaddr);
		} else {
			kvaddr = TLBTEMP_BASE_1 +
				(page_to_phys(page) & DCACHE_ALIAS_MASK);

			__invalidate_dcache_page_alias(kvaddr,
						       page_to_phys(page));
		}
	}
}

/*
 * Return a kernel virtual address for @page with the same cache color as
 * @vaddr; *paddr is set to the physical address when a temporary alias
 * mapping must be used, and to 0 when the regular mapping suffices.
 */
static inline void *coherent_kvaddr(struct page *page, unsigned long base,
				    unsigned long vaddr, unsigned long *paddr)
{
	if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
		*paddr = page_to_phys(page);
		return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
	} else {
		*paddr = 0;
		return page_to_virt(page);
	}
}

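/*
 * Illustration (hedged sketch, not kernel API): the TLBTEMP windows give the
 * kernel a virtual address of any desired color. Taking the color bits from
 * the user address makes kernel accesses hit the same cache lines as the
 * user mapping:
 *
 *	unsigned long temp_va(unsigned long base, unsigned long uvaddr)
 *	{
 *		return base + (uvaddr & DCACHE_ALIAS_MASK);
 *	}
 *
 * clear_page_alias()/copy_page_alias() below then install and tear down the
 * temporary TLB entry for that address when a nonzero paddr is passed.
 */
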
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	unsigned long paddr;
	void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);

	preempt_disable();
	kmap_invalidate_coherent(page, vaddr);
	set_bit(PG_arch_1, &page->flags);
	clear_page_alias(kvaddr, paddr);
	preempt_enable();
}
EXPORT_SYMBOL(clear_user_highpage);

void copy_user_highpage(struct page *dst, struct page *src,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned long dst_paddr, src_paddr;
	void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr,
					  &dst_paddr);
	void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
					  &src_paddr);

	preempt_disable();
	kmap_invalidate_coherent(dst, vaddr);
	set_bit(PG_arch_1, &dst->flags);
	copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
	preempt_enable();
}
EXPORT_SYMBOL(copy_user_highpage);

/*
 * This routine is called any time the kernel writes to, or is about to
 * read from, a user page cache page.
 */

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);

	/*
	 * If we have a mapping but the page is not mapped to user-space
	 * yet, we simply mark this page dirty and defer flushing the
	 * caches until update_mmu_cache().
	 */

	if (mapping && !mapping_mapped(mapping)) {
		/* Test first to avoid a needless atomic operation. */
		if (!test_bit(PG_arch_1, &page->flags))
			set_bit(PG_arch_1, &page->flags);
		return;
	} else {
		unsigned long phys = page_to_phys(page);
		unsigned long temp = page->index << PAGE_SHIFT;
		unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
		unsigned long virt;

		/*
		 * Flush the page in kernel space and user space.
		 * Note that we can omit that step if aliasing is not
		 * an issue, but we do have to synchronize I$ and D$
		 * if we have a mapping.
		 */

		if (!alias && !mapping)
			return;

		virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(virt, phys);

		virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);

		if (alias)
			__flush_invalidate_dcache_page_alias(virt, phys);

		if (mapping)
			__invalidate_icache_page_alias(virt, phys);
	}

	/* There shouldn't be an entry in the cache for this page anymore. */
}
EXPORT_SYMBOL(flush_dcache_page);

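/*
 * Usage illustration (hypothetical caller, not code from this file): a
 * driver that fills a page cache page through the kernel mapping would pair
 * the store with flush_dcache_page() so that a user mapping of a different
 * cache color observes the new data:
 *
 *	memcpy(page_address(page), buf, len);
 *	flush_dcache_page(page);
 */
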
/*
 * For now, flush the whole cache. FIXME??
 */

void local_flush_cache_range(struct vm_area_struct *vma,
		       unsigned long start, unsigned long end)
{
	__flush_invalidate_dcache_all();
	__invalidate_icache_all();
}
EXPORT_SYMBOL(local_flush_cache_range);

/*
 * Remove any entry in the cache for this page.
 *
 * Note that this function is only called for user pages, so use the
 * alias versions of the cache flush functions.
 */

void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
	/* Note that we have to use the 'alias' address to avoid multi-hit. */

	unsigned long phys = page_to_phys(pfn_to_page(pfn));
	unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);

	__flush_invalidate_dcache_page_alias(virt, phys);
	__invalidate_icache_page_alias(virt, phys);
}
EXPORT_SYMBOL(local_flush_cache_page);

#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */

void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
		      pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);

	/* Invalidate old entry in TLBs */

	flush_tlb_page(vma, addr);

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
		unsigned long phys = page_to_phys(page);
		unsigned long tmp;

		tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(tmp, phys);
		tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(tmp, phys);
		__invalidate_icache_page_alias(tmp, phys);

		clear_bit(PG_arch_1, &page->flags);
	}
#else
	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
	    && (vma->vm_flags & VM_EXEC) != 0) {
		/* kmap_atomic() returns a kernel virtual address. */
		unsigned long vaddr = (unsigned long)kmap_atomic(page);
		__flush_dcache_page(vaddr);
		__invalidate_icache_page(vaddr);
		set_bit(PG_arch_1, &page->flags);
		kunmap_atomic((void *)vaddr);
	}
#endif
}

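/*
 * For orientation (simplified sketch of the generic mm side, not code from
 * this file): the fault path installs the PTE first and then invokes this
 * hook, which is what makes the deferred flush above safe:
 *
 *	set_pte_at(vma->vm_mm, addr, ptep, pte);
 *	update_mmu_cache(vma, addr, ptep);
 */
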
/*
 * access_process_vm() has called get_user_pages(), which has done a
 * flush_dcache_page() on the page.
 */

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long vaddr, void *dst, const void *src,
		unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/* Flush and invalidate user page if aliased. */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(t, phys);
	}

	/* Copy data */

	memcpy(dst, src, len);

	/*
	 * Flush and invalidate kernel page if aliased and synchronize
	 * data and instruction caches for executable pages.
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

		__flush_invalidate_dcache_range((unsigned long) dst, len);
		if ((vma->vm_flags & VM_EXEC) != 0)
			__invalidate_icache_page_alias(t, phys);

	} else if ((vma->vm_flags & VM_EXEC) != 0) {
		__flush_dcache_range((unsigned long) dst, len);
		__invalidate_icache_range((unsigned long) dst, len);
	}
}

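/*
 * Usage illustration (hypothetical, mirroring the generic access path, not
 * code from this file): a ptrace write reaches copy_to_user_page() via
 * access_process_vm(), roughly:
 *
 *	maddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, len);
 *	kunmap(page);
 */
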
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long vaddr, void *dst, const void *src,
		unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/*
	 * Flush user page if aliased.
	 * (Note: a simple flush would be sufficient.)
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(t, phys);
	}

	memcpy(dst, src, len);
}

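/*
 * Usage illustration (hypothetical, not code from this file): the matching
 * read direction of access_process_vm() lands here, e.g.:
 *
 *	maddr = kmap(page);
 *	copy_from_user_page(vma, page, addr, buf, maddr + offset, len);
 *	kunmap(page);
 */
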
#endif