/*
 * arch/sh/mm/cache.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2010  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

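/*
 * Per-CPU-family flush primitives. Each pointer defaults to cache_noop
 * and is re-pointed at the real implementation by the family-specific
 * *_cache_init() routine invoked from cpu_cache_init() below, roughly
 * (illustrative sketch, not the exact cache-sh4.c code):
 *
 *	void __init sh4_cache_init(void)
 *	{
 *		local_flush_icache_range = sh4_flush_icache_range;
 *		local_flush_dcache_page  = sh4_flush_dcache_page;
 *		...
 *	}
 */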
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_page)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;

void (*__flush_wback_region)(void *start, int size);
EXPORT_SYMBOL(__flush_wback_region);
void (*__flush_purge_region)(void *start, int size);
EXPORT_SYMBOL(__flush_purge_region);
void (*__flush_invalidate_region)(void *start, int size);
EXPORT_SYMBOL(__flush_invalidate_region);

static inline void noop__flush_region(void *start, int size)
{
}

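/*
 * Run a cache operation on every online CPU, including the caller.
 */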
static inline void cacheop_on_each_cpu(void (*func)(void *info), void *info,
				       int wait)
{
	preempt_disable();

	/*
	 * It's possible that this gets called early on when IRQs are
	 * still disabled due to ioremapping by the boot CPU, so don't
	 * even attempt IPIs unless there are other CPUs online.
	 */
	if (num_online_cpus() > 1)
		smp_call_function(func, info, wait);

	func(info);

	preempt_enable();
}

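/*
 * Write into a user page through a kernel mapping (ptrace et al). On
 * aliasing D-caches, go through a coherent kernel mapping when the page
 * is mapped and clean; otherwise write through the regular kernel
 * address and mark the page PG_dcache_dirty so the flush is deferred
 * to __update_cache().
 */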
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent(vto);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

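/*
 * Read from a user page through a kernel mapping, with the same
 * aliasing considerations as copy_to_user_page() above.
 */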
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent(vfrom);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}
}

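/*
 * Copy-on-write page copy. The source is mapped via kmap_coherent()
 * when its kernel alias may be stale; the destination's kernel alias
 * is purged afterwards if it could alias the target user address.
 */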
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);

	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
	    !test_bit(PG_dcache_dirty, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent(vfrom);
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		__flush_purge_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);
	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

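/*
 * Zero a page destined for userspace, purging the kernel alias if it
 * could conflict with the user mapping.
 */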
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	clear_page(kaddr);

	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_purge_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);

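/*
 * Called when a PTE is installed: write back any data left dirty in
 * the kernel alias by the deferred-flush paths above.
 */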
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	if (pfn_valid(pfn)) {
		page = pfn_to_page(pfn);
		if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
			__flush_purge_region(page_address(page), PAGE_SIZE);
	}
}

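/*
 * Resolve a user/kernel alias on an anonymous page before the kernel
 * accesses it through its linear mapping (get_user_pages() and friends).
 */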
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
		    !test_bit(PG_dcache_dirty, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			/* XXX.. For now kunmap_coherent() does a purge */
			/* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
			kunmap_coherent(kaddr);
		} else
			__flush_purge_region((void *)addr, PAGE_SIZE);
	}
}

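/*
 * The flush_*() entry points below simply broadcast the corresponding
 * local_flush_*() hook to all online CPUs via cacheop_on_each_cpu().
 */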
void flush_cache_all(void)
{
	cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}
EXPORT_SYMBOL(flush_cache_all);

void flush_cache_mm(struct mm_struct *mm)
{
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
		      unsigned long pfn)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = addr;
	data.addr2 = pfn;

	cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = start;
	data.addr2 = end;

	cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_cache_range);

void flush_dcache_page(struct page *page)
{
	cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flusher_data data;

	data.vma = NULL;
	data.addr1 = start;
	data.addr2 = end;

	cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
}

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	/* Nothing uses the VMA, so just pass the struct page along */
	cacheop_on_each_cpu(local_flush_icache_page, page, 1);
}

void flush_cache_sigtramp(unsigned long address)
{
	cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}

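/*
 * Compute how many page-sized colours one cache way spans. For example,
 * a 16KiB direct-mapped cache with 32-byte lines (512 sets, entry_shift
 * of 5) and 4KiB pages yields alias_mask = 0x3000 and n_aliases = 4.
 */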
static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

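/* Dump the probed cache geometry at boot. */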
static void __init emit_cache_params(void)
{
	printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);
	printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);

	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}
}

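/*
 * Probe the cache configuration at boot: leave the no-op handlers in
 * place if the caches are disabled in CCR, otherwise hand off to the
 * CPU-family-specific initializer to install the real flush routines.
 */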
void __init cpu_cache_init(void)
{
	unsigned int cache_disabled = 0;

#ifdef CCR
	cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
#endif

	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	__flush_wback_region		= noop__flush_region;
	__flush_purge_region		= noop__flush_region;
	__flush_invalidate_region	= noop__flush_region;

	/*
	 * No flushing is necessary in the disabled cache case so we can
	 * just keep the noop functions in local_flush_..() and __flush_..()
	 */
	if (unlikely(cache_disabled))
		goto skip;

	if (boot_cpu_data.family == CPU_FAMILY_SH2) {
		extern void __weak sh2_cache_init(void);

		sh2_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
		extern void __weak sh2a_cache_init(void);

		sh2a_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH3) {
		extern void __weak sh3_cache_init(void);

		sh3_cache_init();

		if ((boot_cpu_data.type == CPU_SH7705) &&
		    (boot_cpu_data.dcache.sets == 512)) {
			extern void __weak sh7705_cache_init(void);

			sh7705_cache_init();
		}
	}

	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
		extern void __weak sh4_cache_init(void);

		sh4_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH5) {
		extern void __weak sh5_cache_init(void);

		sh5_cache_init();
	}

skip:
	emit_cache_params();
}
348