/*
 * include/asm-xtensa/cacheflush.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) 2001 - 2007 Tensilica Inc.
 */

#ifndef _XTENSA_CACHEFLUSH_H
#define _XTENSA_CACHEFLUSH_H

#ifdef __KERNEL__

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
 * Low-level routines for cache flushing.
 *
 * invalidate data or instruction cache:
 *
 * __invalidate_icache_all()
 * __invalidate_dcache_all()
 * __invalidate_icache_page(adr)
 * __invalidate_dcache_page(adr)
 * __invalidate_icache_range(from,size)
 * __invalidate_dcache_range(from,size)
 *
 * flush data cache:
 *
 * __flush_dcache_page(adr)
 * __flush_dcache_range(from,size)
 *
 * flush and invalidate data cache:
 *
 * __flush_invalidate_dcache_all()
 * __flush_invalidate_dcache_page(adr)
 * __flush_invalidate_dcache_range(from,size)
 *
 * special routines for configurations with cache aliasing:
 *
 * __flush_invalidate_dcache_page_alias(vaddr,paddr)
 * __invalidate_icache_page_alias(vaddr,paddr)
 */

extern void __invalidate_dcache_all(void);
extern void __invalidate_icache_all(void);
extern void __invalidate_dcache_page(unsigned long);
extern void __invalidate_icache_page(unsigned long);
extern void __invalidate_icache_range(unsigned long, unsigned long);
extern void __invalidate_dcache_range(unsigned long, unsigned long);

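/*
 * With a write-back data cache, flushing writes dirty lines out to memory.
 * On a write-through configuration there is nothing to write back, so a
 * plain flush is a no-op and flush+invalidate reduces to an invalidate.
 */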
#if XCHAL_DCACHE_IS_WRITEBACK
extern void __flush_invalidate_dcache_all(void);
extern void __flush_dcache_page(unsigned long);
extern void __flush_dcache_range(unsigned long, unsigned long);
extern void __flush_invalidate_dcache_page(unsigned long);
extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
#else
# define __flush_dcache_range(p,s)		do { } while(0)
# define __flush_dcache_page(p)			do { } while(0)
# define __flush_invalidate_dcache_page(p)	__invalidate_dcache_page(p)
# define __flush_invalidate_dcache_range(p,s)	__invalidate_dcache_range(p,s)
#endif

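/*
 * The *_alias variants take both the virtual and the physical address so
 * that, on configurations where a cache way is larger than a page, the
 * operation can be performed through an alias mapping that has the same
 * cache color as the user mapping.
 */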
#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
#else
static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
							unsigned long phys) { }
#endif

#if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
#else
static inline void __invalidate_icache_page_alias(unsigned long virt,
						  unsigned long phys) { }
#endif

/*
 * The Xtensa caches are physically tagged, so there is normally nothing
 * to do here, unless a cache way is larger than the page size and the
 * same physical page can therefore alias in the cache.
 *
 * Pages can get remapped.  Because this might change the 'color' of that
 * page, we have to flush the cache before the PTE is changed.
 * (see also Documentation/cachetlb.txt)
 */

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

#define flush_cache_all()						\
	do {								\
		__flush_invalidate_dcache_all();			\
		__invalidate_icache_all();				\
	} while (0)

#define flush_cache_mm(mm)		flush_cache_all()
#define flush_cache_dup_mm(mm)		flush_cache_mm(mm)

#define flush_cache_vmap(start,end)	flush_cache_all()
#define flush_cache_vunmap(start,end)	flush_cache_all()

extern void flush_dcache_page(struct page*);
extern void flush_cache_range(struct vm_area_struct*, unsigned long,
		unsigned long);
extern void flush_cache_page(struct vm_area_struct*, unsigned long,
		unsigned long);

#else

#define flush_cache_all()				do { } while (0)
#define flush_cache_mm(mm)				do { } while (0)
#define flush_cache_dup_mm(mm)				do { } while (0)

#define flush_cache_vmap(start,end)			do { } while (0)
#define flush_cache_vunmap(start,end)			do { } while (0)

#define flush_dcache_page(page)				do { } while (0)

#define flush_cache_page(vma,addr,pfn)			do { } while (0)
#define flush_cache_range(vma,start,end)		do { } while (0)

#endif

/* Ensure consistency between data and instruction cache. */
#define flush_icache_range(start,end)					\
	do {								\
		__flush_dcache_range((start), (end) - (start));		\
		__invalidate_icache_range((start), (end) - (start));	\
	} while (0)

/* This is not required, see Documentation/cachetlb.txt */
#define flush_icache_page(vma,page)			do { } while (0)

#define flush_dcache_mmap_lock(mapping)			do { } while (0)
#define flush_dcache_mmap_unlock(mapping)		do { } while (0)

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

extern void copy_to_user_page(struct vm_area_struct*, struct page*,
		unsigned long, void*, const void*, unsigned long);
extern void copy_from_user_page(struct vm_area_struct*, struct page*,
		unsigned long, void*, const void*, unsigned long);

#else

#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
	do {								\
		memcpy(dst, src, len);					\
		__flush_dcache_range((unsigned long) dst, len);		\
		__invalidate_icache_range((unsigned long) dst, len);	\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

#endif

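/*
 * CACHEATTR divides the 4GB address space into eight 512MB regions, each
 * controlled by a 4-bit attribute field.  The constants below describe
 * that region geometry: LOG2 is the region size shift, and MASK selects
 * the top three address bits that index the region.
 */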
#define XTENSA_CACHEBLK_LOG2	29
#define XTENSA_CACHEBLK_SIZE	(1 << XTENSA_CACHEBLK_LOG2)
#define XTENSA_CACHEBLK_MASK	(7 << XTENSA_CACHEBLK_LOG2)

#if XCHAL_HAVE_CACHEATTR
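/* Read the CACHEATTR special register directly. */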
static inline u32 xtensa_get_cacheattr(void)
{
	u32 r;
	asm volatile("	rsr %0, CACHEATTR" : "=a"(r));
	return r;
}

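/*
 * Emulate the attribute part of a DTLB lookup: combine the region base of
 * 'addr' with the 4-bit attribute nibble that CACHEATTR holds for that
 * 512MB region.
 */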
static inline u32 xtensa_get_dtlb1(u32 addr)
{
	u32 r = addr & XTENSA_CACHEBLK_MASK;
	return r | ((xtensa_get_cacheattr() >> (r >> (XTENSA_CACHEBLK_LOG2-2)))
			& 0xF);
}
#else
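/*
 * Read the translation word of the DTLB entry that maps 'addr'; the low
 * bits of the result hold the cache attribute (CA) field of the mapping.
 */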
static inline u32 xtensa_get_dtlb1(u32 addr)
{
	u32 r;
	asm volatile("	rdtlb1 %0, %1" : "=a"(r) : "a"(addr));
	asm volatile("	dsync");
	return r;
}

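/*
 * Reconstruct a CACHEATTR-style value on configurations that have no
 * CACHEATTR register: walk the eight 512MB regions from the top of the
 * address space down, collecting the 4-bit attribute of each one, so
 * that region 0 ends up in the low nibble of the result.
 */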
static inline u32 xtensa_get_cacheattr(void)
{
	u32 r = 0;
	u32 a = 0;
	do {
		a -= XTENSA_CACHEBLK_SIZE;
		r = (r << 4) | (xtensa_get_dtlb1(a) & 0xF);
	} while (a);
	return r;
}
#endif

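/*
 * DMA is not cache-coherent on Xtensa.  These helpers inspect the cache
 * attribute of the region containing 'addr' to decide whether a source
 * buffer must be written back before a DMA read, or a destination buffer
 * invalidated after a DMA write.
 */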
static inline int xtensa_need_flush_dma_source(u32 addr)
{
	return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) >= 4;
}

static inline int xtensa_need_invalidate_dma_destination(u32 addr)
{
	return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) != 2;
}

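/*
 * Write back every cache line touched by the (possibly unaligned) buffer
 * [addr, addr + size).  The line count rounds the start down and the end
 * up to cache-line boundaries; dhwb operates on the line containing the
 * given address, so 'addr' itself need not be aligned.
 */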
static inline void flush_dcache_unaligned(u32 addr, u32 size)
{
	u32 cnt;
	if (size) {
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			+ XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		while (cnt--) {
			asm volatile("	dhwb %0, 0" : : "a"(addr));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		asm volatile("	dsync");
	}
}

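/*
 * Invalidate the cache lines of [addr, addr + size).  The first and last
 * lines may be shared with neighboring data, so they are written back
 * before being invalidated (dhwbi); the interior lines are simply
 * discarded (dhi).
 */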
static inline void invalidate_dcache_unaligned(u32 addr, u32 size)
{
	int cnt;
	if (size) {
		asm volatile("	dhwbi %0, 0" : : "a"(addr));
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			- XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		while (cnt-- > 0) {
			asm volatile("	dhi %0, %1" : : "a"(addr),
						"n"(XCHAL_DCACHE_LINESIZE));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		asm volatile("	dhwbi %0, %1" : : "a"(addr),
						"n"(XCHAL_DCACHE_LINESIZE));
		asm volatile("	dsync");
	}
}

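/*
 * Write back and invalidate every cache line touched by [addr, addr +
 * size), using the same rounding as flush_dcache_unaligned() above.
 */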
static inline void flush_invalidate_dcache_unaligned(u32 addr, u32 size)
{
	u32 cnt;
	if (size) {
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			+ XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		while (cnt--) {
			asm volatile("	dhwbi %0, 0" : : "a"(addr));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		asm volatile("	dsync");
	}
}

#endif /* __KERNEL__ */
#endif /* _XTENSA_CACHEFLUSH_H */