/*
 * arch/xtensa/mm/cache.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001-2006 Tensilica Inc.
 *
 * Chris Zankel	<chris@zankel.net>
 * Joe Taylor
 * Marc Gauthier
 *
 */

#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/bootmem.h>
#include <linux/swap.h>
#include <linux/pagemap.h>

#include <asm/bootparam.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

//#define printd(x...) printk(x)
#define printd(x...) do { } while (0)

/*
 * Note:
 * The kernel provides one architecture bit PG_arch_1 in the page flags that
 * can be used for cache coherency.
 *
 * I$-D$ coherency.
 *
 * The Xtensa architecture doesn't keep the instruction cache coherent with
 * the data cache. We use the architecture bit to indicate if the caches
 * are coherent. The kernel clears this bit whenever a page is added to the
 * page cache. At that time, the caches might not be in sync. We, therefore,
 * define this flag as 'clean' if set.
 *
 * D-cache aliasing.
 *
 * With cache aliasing, we always have to flush the cache when pages are
 * unmapped (see tlb_start_vma()). So, we use this flag to indicate a dirty
 * page.
 */

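/*
 * Illustration only (a sketch, never compiled): the aliasing test that
 * DCACHE_ALIAS_EQ() is assumed to perform. Two mappings of the same
 * physical page index different cache lines ("colours") when the
 * virtual index bits above PAGE_SHIFT differ.
 */
#if 0
static inline bool dcache_alias_eq_sketch(unsigned long a, unsigned long b)
{
	/* Equal colour bits mean both addresses hit the same cache lines. */
	return ((a ^ b) & DCACHE_ALIAS_MASK) == 0;
}
#endif
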
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && defined(CONFIG_HIGHMEM)
#error "HIGHMEM is not supported on cores with aliasing cache."
#endif

#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK

/*
 * This routine is called whenever the kernel writes to a page-cache
 * page, or is about to read from one.
 */

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	/*
	 * If we have a mapping but the page is not mapped to user-space
	 * yet, we simply mark this page dirty and defer flushing the
	 * caches until update_mmu_cache().
	 */

	if (mapping && !mapping_mapped(mapping)) {
		if (!test_bit(PG_arch_1, &page->flags))
			set_bit(PG_arch_1, &page->flags);
		return;

	} else {

		unsigned long phys = page_to_phys(page);
		unsigned long temp = page->index << PAGE_SHIFT;
		unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
		unsigned long virt;

		/*
		 * Flush the page in kernel space and user space.
		 * Note that we can omit that step if aliasing is not
		 * an issue, but we do have to synchronize I$ and D$
		 * if we have a mapping.
		 */

		if (!alias && !mapping)
			return;

		__flush_invalidate_dcache_page((long)page_address(page));

		virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);

		if (alias)
			__flush_invalidate_dcache_page_alias(virt, phys);

		if (mapping)
			__invalidate_icache_page_alias(virt, phys);
	}

	/* There shouldn't be an entry in the cache for this page anymore. */
}

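/*
 * A worked example of the aliasing test above, with illustrative
 * numbers (assuming a 16 KiB way size and 4 KiB pages, so
 * DCACHE_ALIAS_MASK == 0x3000): a page at physical address 0x5000
 * whose file offset puts it at user colour 0x3000 has kernel colour
 * 0x5000 & 0x3000 == 0x1000. The colours differ, so DCACHE_ALIAS_EQ()
 * fails and both the kernel mapping and a temporary mapping at the
 * user colour (TLBTEMP_BASE_1 + 0x3000) must be flushed.
 */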

/*
 * For now, flush the whole cache. FIXME??
 */

void local_flush_cache_range(struct vm_area_struct *vma,
		       unsigned long start, unsigned long end)
{
	__flush_invalidate_dcache_all();
	__invalidate_icache_all();
}

/*
 * Remove any entry in the cache for this page.
 *
 * Note that this function is only called for user pages, so use the
 * alias versions of the cache flush functions.
 */

void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
	/* Note that we have to use the 'alias' address to avoid multi-hit. */

	unsigned long phys = page_to_phys(pfn_to_page(pfn));
	unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);

	__flush_invalidate_dcache_page_alias(virt, phys);
	__invalidate_icache_page_alias(virt, phys);
}

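/*
 * A sketch of the temporary-mapping trick used above (the assumption
 * here is that TLBTEMP_BASE_1 is a kernel virtual window that spans
 * every cache colour): adding the colour bits of the user address
 * yields a kernel address that is cache-congruent with the user
 * mapping and can therefore stand in for it.
 */
#if 0
static unsigned long tlbtemp_alias_vaddr(unsigned long uaddr)
{
	/* Same colour bits as uaddr, hence same cache lines. */
	return TLBTEMP_BASE_1 + (uaddr & DCACHE_ALIAS_MASK);
}
#endif
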
#endif

void
update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);

	/* Invalidate old entry in TLBs */

	flush_tlb_page(vma, addr);

#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK

	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {

		unsigned long paddr = (unsigned long) page_address(page);
		unsigned long phys = page_to_phys(page);
		unsigned long tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);

		__flush_invalidate_dcache_page(paddr);

		__flush_invalidate_dcache_page_alias(tmp, phys);
		__invalidate_icache_page_alias(tmp, phys);

		clear_bit(PG_arch_1, &page->flags);
	}
#else
	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
	    && (vma->vm_flags & VM_EXEC) != 0) {
		unsigned long paddr = (unsigned long)kmap_atomic(page);
		__flush_dcache_page(paddr);
		__invalidate_icache_page(paddr);
		set_bit(PG_arch_1, &page->flags);
		kunmap_atomic((void *)paddr);
	}
#endif
}

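/*
 * To summarize the PG_arch_1 dance on aliasing writeback caches (as
 * implemented above): flush_dcache_page() sets the bit when the page
 * has a mapping but no user mapping yet, deferring the flush; the next
 * update_mmu_cache() for that page then flushes the kernel alias and
 * the user-colour alias, synchronizes the I-cache, and clears the bit.
 */
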
/*
 * access_process_vm() has called get_user_pages(), which has done a
 * flush_dcache_page() on the page.
 */

#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long vaddr, void *dst, const void *src,
		unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/* Flush and invalidate user page if aliased. */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(t, phys);
	}

	/* Copy data */

	memcpy(dst, src, len);

	/*
	 * Flush and invalidate kernel page if aliased and synchronize
	 * data and instruction caches for executable pages.
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

		__flush_invalidate_dcache_range((unsigned long) dst, len);
		if ((vma->vm_flags & VM_EXEC) != 0)
			__invalidate_icache_page_alias(t, phys);

	} else if ((vma->vm_flags & VM_EXEC) != 0) {
		__flush_dcache_range((unsigned long) dst, len);
		__invalidate_icache_range((unsigned long) dst, len);
	}
}

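/*
 * Hedged usage sketch (the exact generic-mm call site may differ): the
 * expected caller of copy_to_user_page() is access_process_vm(), e.g.
 * from ptrace, which copies into a target process page roughly like
 * this:
 */
#if 0
	maddr = kmap(page);
	copy_to_user_page(vma, page, addr,	/* flushes the user alias */
			  maddr + offset, buf, bytes);
	set_page_dirty_lock(page);
	kunmap(page);
#endif
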
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long vaddr, void *dst, const void *src,
		unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/*
	 * Flush user page if aliased.
	 * (Note: a simple flush would be sufficient.)
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(t, phys);
	}

	memcpy(dst, src, len);
}

#endif