#ifndef _M68K_CACHEFLUSH_H
#define _M68K_CACHEFLUSH_H

#include <linux/mm.h>
#ifdef CONFIG_COLDFIRE
#include <asm/mcfsim.h>
#endif

/* cache code */
#define FLUSH_I_AND_D	(0x00000808)	/* CACR: clear data and instruction caches (CD | CI) */
#define FLUSH_I		(0x00000008)	/* CACR: clear instruction cache (CI) */

#ifndef ICACHE_MAX_ADDR
#define ICACHE_MAX_ADDR	0
#define ICACHE_SET_MASK	0
#define DCACHE_MAX_ADDR	0
#define DCACHE_SET_MASK	0
#endif

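/*
 * ColdFire set/way cache flushing.  CPUSHL pushes and invalidates one
 * cache line, addressed by a set index plus a way number carried in the
 * low bits of the address register.  Each loop pass below issues four
 * CPUSHLs, stepping the way with the addq increments; those three
 * in-asm increments plus the (0x10 - 3) loop stride advance the set
 * pointer by a net 0x10 per pass, until the final set (the *_MAX_ADDR
 * bound supplied by the callers) has been flushed.
 */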
static inline void flush_cf_icache(unsigned long start, unsigned long end)
{
	unsigned long set;

	for (set = start; set <= end; set += (0x10 - 3)) {
		__asm__ __volatile__ (
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)"
			: "=a" (set)
			: "a" (set));
	}
}

static inline void flush_cf_dcache(unsigned long start, unsigned long end)
{
	unsigned long set;

	for (set = start; set <= end; set += (0x10 - 3)) {
		__asm__ __volatile__ (
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)"
			: "=a" (set)
			: "a" (set));
	}
}

static inline void flush_cf_bcache(unsigned long start, unsigned long end)
{
	unsigned long set;

	for (set = start; set <= end; set += (0x10 - 3)) {
		__asm__ __volatile__ (
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)"
			: "=a" (set)
			: "a" (set));
	}
}

/*
 * Cache handling functions
 */

static inline void flush_icache(void)
{
	if (CPU_IS_COLDFIRE) {
		flush_cf_icache(0, ICACHE_MAX_ADDR);
	} else if (CPU_IS_040_OR_060) {
		asm volatile (	"nop\n"
			"	.chip	68040\n"
			"	cpusha	%bc\n"
			"	.chip	68k");
	} else {
		unsigned long tmp;
		asm volatile (	"movec	%%cacr,%0\n"
			"	or.w	%1,%0\n"
			"	movec	%0,%%cacr"
			: "=&d" (tmp)
			: "id" (FLUSH_I));
	}
}

/*
 * Invalidate the cache for the specified memory range: the range
 * starts at the physical address paddr and covers len bytes.
 */
extern void cache_clear(unsigned long paddr, int len);
/*
 * Push any dirty cached data in the specified memory range: the range
 * starts at the physical address paddr and covers len bytes.
 */
extern void cache_push(unsigned long paddr, int len);

/*
 * Push and invalidate pages in the specified user virtual
 * memory range.
 */
extern void cache_push_v(unsigned long vaddr, int len);
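
/*
 * Illustrative sketch (not taken from any in-tree driver): code doing
 * streaming DMA on a cache-incoherent system could bracket a transfer
 * with these helpers; buf and len here are hypothetical:
 *
 *	cache_push(__pa(buf), len);	// write back dirty lines before
 *					// the device reads the buffer
 *	// ... run the DMA transfer ...
 *	cache_clear(__pa(buf), len);	// drop now-stale lines before the
 *					// CPU reads device-written data
 */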

/* This is needed whenever the virtual mapping of the current
   process changes.  */
#define __flush_cache_all()					\
({								\
	if (CPU_IS_COLDFIRE) {					\
		flush_cf_dcache(0, DCACHE_MAX_ADDR);		\
	} else if (CPU_IS_040_OR_060) {				\
		__asm__ __volatile__("nop\n\t"			\
				     ".chip 68040\n\t"		\
				     "cpusha %dc\n\t"		\
				     ".chip 68k");		\
	} else {						\
		unsigned long _tmp;				\
		__asm__ __volatile__("movec %%cacr,%0\n\t"	\
				     "orw %1,%0\n\t"		\
				     "movec %0,%%cacr"		\
				     : "=&d" (_tmp)		\
				     : "di" (FLUSH_I_AND_D));	\
	}							\
})

#define __flush_cache_030()					\
({								\
	if (CPU_IS_020_OR_030) {				\
		unsigned long _tmp;				\
		__asm__ __volatile__("movec %%cacr,%0\n\t"	\
				     "orw %1,%0\n\t"		\
				     "movec %0,%%cacr"		\
				     : "=&d" (_tmp)		\
				     : "di" (FLUSH_I_AND_D));	\
	}							\
})

#define flush_cache_all() __flush_cache_all()

#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (mm == current->mm)
		__flush_cache_030();
}

#define flush_cache_dup_mm(mm)			flush_cache_mm(mm)

/* flush_cache_range/flush_cache_page no longer need to be macros to
   avoid a circular dependency on linux/mm.h (this header now includes
   it directly), so they are ordinary inline functions.  */
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
	if (vma->vm_mm == current->mm)
		__flush_cache_030();
}

static inline void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	if (vma->vm_mm == current->mm)
		__flush_cache_030();
}

/* Push the page at kernel virtual address and clear the icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
static inline void __flush_page_to_ram(void *vaddr)
{
	if (CPU_IS_COLDFIRE) {
		unsigned long addr, start, end;
		addr = ((unsigned long) vaddr) & ~(PAGE_SIZE - 1);
		start = addr & ICACHE_SET_MASK;
		end = (addr + PAGE_SIZE - 1) & ICACHE_SET_MASK;
		if (start > end) {
			/* The page's set indexes wrap past the top of the
			   cache: flush from set 0 up to the end index, then
			   flush from the start index to the highest set. */
			flush_cf_bcache(0, end);
			end = ICACHE_MAX_ADDR;
		}
		flush_cf_bcache(start, end);
	} else if (CPU_IS_040_OR_060) {
		__asm__ __volatile__("nop\n\t"
				     ".chip 68040\n\t"
				     "cpushp %%bc,(%0)\n\t"
				     ".chip 68k"
				     : : "a" (__pa(vaddr)));
	} else {
		unsigned long _tmp;
		__asm__ __volatile__("movec %%cacr,%0\n\t"
				     "orw %1,%0\n\t"
				     "movec %0,%%cacr"
				     : "=&d" (_tmp)
				     : "di" (FLUSH_I));
	}
}

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
#define flush_dcache_page(page)		__flush_page_to_ram(page_address(page))
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_icache_page(vma, page)	__flush_page_to_ram(page_address(page))

extern void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
				    unsigned long addr, int len);
extern void flush_icache_range(unsigned long address, unsigned long endaddr);

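/*
 * copy_to_user_page() backs access_process_vm() style writes (for
 * example, ptrace poking a breakpoint into another task's text): flush
 * the target page, copy the bytes through the kernel mapping, then
 * flush the icache so no stale instructions survive.
 */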
static inline void copy_to_user_page(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, void *src, int len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	flush_icache_user_range(vma, page, vaddr, len);
}
static inline void copy_from_user_page(struct vm_area_struct *vma,
				       struct page *page, unsigned long vaddr,
				       void *dst, void *src, int len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
}

#endif /* _M68K_CACHEFLUSH_H */