/*
 * arch/xtensa/mm/cache.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001-2006 Tensilica Inc.
 *
 * Chris Zankel	<chris@zankel.net>
 * Joe Taylor
 * Marc Gauthier
 *
 */

#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/bootmem.h>
#include <linux/swap.h>
#include <linux/pagemap.h>

#include <asm/pgtable.h>
#include <asm/bootparam.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>

/* #define printd(x...) printk(x) */
#define printd(x...) do { } while (0)

/*
 * Note:
 * The kernel provides one architecture bit, PG_arch_1, in the page flags
 * that can be used for cache coherency.
 *
 * I$-D$ coherency:
 *
 * The Xtensa architecture doesn't keep the instruction cache coherent with
 * the data cache. We use the architecture bit to indicate whether the
 * caches are coherent. The kernel clears this bit whenever a page is added
 * to the page cache; at that time, the caches might not be in sync. We
 * therefore define this flag as 'clean' if set.
 *
 * D-cache aliasing:
 *
 * With cache aliasing, we always have to flush the cache when pages are
 * unmapped (see tlb_start_vma()). So we use this flag to indicate a dirty
 * page.
 */
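
/*
 * Illustration (a sketch, not part of the original logic): with a
 * typical configuration of DCACHE_WAY_SIZE == 16K and PAGE_SIZE == 4K,
 * the cache colour of an address lives in bits [13:12], and the
 * <asm/page.h> helpers used below behave roughly like:
 *
 *	DCACHE_ALIAS_MASK    ~= PAGE_MASK & (DCACHE_WAY_SIZE - 1)
 *	DCACHE_ALIAS_EQ(a,b) ~= ((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)
 *
 * i.e. two virtual mappings of the same physical page land on the same
 * cache lines only if their colour bits agree.
 */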

#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK

/*
 * This routine is called any time the kernel writes to, or is about to
 * read from, a user page in the page cache.
 */
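
/*
 * A typical (hypothetical) caller is a filesystem or driver that has
 * just written into a page-cache page through the kernel mapping:
 *
 *	void *kaddr = kmap(page);
 *	memcpy(kaddr, src, len);	  kernel writes via its own alias
 *	kunmap(page);
 *	flush_dcache_page(page);	  make the data visible everywhere
 */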

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	/*
	 * If we have a mapping but the page is not mapped to user-space
	 * yet, we simply mark this page dirty and defer flushing the
	 * caches until update_mmu_cache().
	 */

	if (mapping && !mapping_mapped(mapping)) {
		if (!test_bit(PG_arch_1, &page->flags))
			set_bit(PG_arch_1, &page->flags);
		return;

	} else {

		unsigned long phys = page_to_phys(page);
		unsigned long temp = page->index << PAGE_SHIFT;
		unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
		unsigned long virt;

		/*
		 * Flush the page in kernel space and user space.
		 * Note that we can omit that step if aliasing is not
		 * an issue, but we do have to synchronize I$ and D$
		 * if we have a mapping.
		 */

		if (!alias && !mapping)
			return;

		__flush_invalidate_dcache_page((long)page_address(page));

		virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);

		if (alias)
			__flush_invalidate_dcache_page_alias(virt, phys);

		if (mapping)
			__invalidate_icache_page_alias(virt, phys);
	}

	/* There shouldn't be an entry in the cache for this page anymore. */
}

/*
 * For now, flush the whole cache. FIXME??
 */

void flush_cache_range(struct vm_area_struct *vma,
		       unsigned long start, unsigned long end)
{
	__flush_invalidate_dcache_all();
	__invalidate_icache_all();
}

/*
 * Remove any entry in the cache for this page.
 *
 * Note that this function is only called for user pages, so use the
 * alias versions of the cache flush functions.
 */

void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
	/* Note that we have to use the 'alias' address to avoid multi-hit */

	unsigned long phys = page_to_phys(pfn_to_page(pfn));
	unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);

	__flush_invalidate_dcache_page_alias(virt, phys);
	__invalidate_icache_page_alias(virt, phys);
}
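
/*
 * Why the 'alias' address works (a sketch): 'virt' is chosen so that
 * DCACHE_ALIAS(virt) == DCACHE_ALIAS(address), i.e. the temporary
 * TLBTEMP_BASE_1 mapping has the same colour as the user mapping and
 * therefore indexes exactly the cache lines the user mapping may have
 * populated or dirtied.
 */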

#endif

void
update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);

	/* Invalidate old entries in the TLBs */

	invalidate_itlb_mapping(addr);
	invalidate_dtlb_mapping(addr);

#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK

	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {

		unsigned long vaddr = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
		/* kernel virtual address of the page (direct mapping) */
		unsigned long paddr = (unsigned long) page_address(page);
		unsigned long phys = page_to_phys(page);

		__flush_invalidate_dcache_page(paddr);

		__flush_invalidate_dcache_page_alias(vaddr, phys);
		__invalidate_icache_page_alias(vaddr, phys);

		clear_bit(PG_arch_1, &page->flags);
	}
#else
	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
	    && (vma->vm_flags & VM_EXEC) != 0) {
		unsigned long vaddr = addr & PAGE_MASK;
		__flush_dcache_page(vaddr);
		__invalidate_icache_page(vaddr);
		set_bit(PG_arch_1, &page->flags);
	}
#endif
}
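
/*
 * The #else (non-aliasing) branch above implements a lazy I$/D$ sync;
 * an informal trace:
 *
 *	1. an executable page is faulted in with PG_arch_1 clear
 *	2. update_mmu_cache() writes back D$, invalidates I$, and sets
 *	   PG_arch_1
 *	3. later faults on the same page find PG_arch_1 set and skip the
 *	   cache maintenance
 */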

/*
 * access_process_vm() has called get_user_pages(), which has done a
 * flush_dcache_page() on the page.
 */
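
/*
 * A (hypothetical) ptrace-style write through access_process_vm() would
 * thus look like:
 *
 *	get_user_pages(...);				  D$ already flushed
 *	copy_to_user_page(vma, page, addr, maddr, buf, len);
 *	  -> the new bytes are now visible to the user mapping and,
 *	     for VM_EXEC mappings, to the instruction cache as well
 */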

#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long vaddr, void *dst, const void *src,
		unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/* Flush and invalidate user page if aliased. */

	if (alias) {
		unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(temp, phys);
	}

	/* Copy data */

	memcpy(dst, src, len);

	/*
	 * Flush and invalidate kernel page if aliased and synchronize
	 * data and instruction caches for executable pages.
	 */

	if (alias) {
		unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

		__flush_invalidate_dcache_range((unsigned long) dst, len);
		if ((vma->vm_flags & VM_EXEC) != 0)
			__invalidate_icache_page_alias(temp, phys);

	} else if ((vma->vm_flags & VM_EXEC) != 0) {
		__flush_dcache_range((unsigned long) dst, len);
		__invalidate_icache_range((unsigned long) dst, len);
	}
}
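
/*
 * Design note: in the non-aliased VM_EXEC case above, the write through
 * 'dst' already hits the cache lines the user mapping sees, so writing
 * back that range and invalidating the matching I$ range is sufficient;
 * no temporary alias mapping is needed.
 */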

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long vaddr, void *dst, const void *src,
		unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/*
	 * Flush user page if aliased.
	 * (Note: a simple flush would be sufficient.)
	 */

	if (alias) {
		unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(temp, phys);
	}

	memcpy(dst, src, len);
}

#endif