/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2001 - 2009  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 * Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <asm/mmu_context.h>
#include <asm/cache_insns.h>
#include <asm/cacheflush.h>

/*
 * The maximum number of pages we support when doing a ranged cache
 * flush. Anything exceeding this will simply flush the caches in
 * their entirety.
 */
#define MAX_ICACHE_PAGES	32

static void __flush_cache_one(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);

/*
 * Write back the given range of the D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module, the a.out format routines,
 * signal handler code and kprobes code.
 */
static void sh4_flush_icache_range(void *args)
{
	struct flusher_data *data = args;
	unsigned long start, end;
	unsigned long flags, v;
	int i;

	start = data->addr1;
	end = data->addr2;

	/* If there are too many pages then just blow away the caches */
	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
		local_flush_cache_all(NULL);
		return;
	}

	/*
	 * Selectively flush d-cache then invalidate the i-cache.
	 * This is inefficient, so only use this for small ranges.
	 */
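	/* Round the range out to L1 cache line boundaries. */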
	start &= ~(L1_CACHE_BYTES-1);
	end += L1_CACHE_BYTES-1;
	end &= ~(L1_CACHE_BYTES-1);

	local_irq_save(flags);
	jump_to_uncached();

	for (v = start; v < end; v += L1_CACHE_BYTES) {
		unsigned long icacheaddr;
		int j, n;

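		/* Write back the D-cache line containing this address. */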
		__ocbwb(v);

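		/* Locate this line's entry in the memory-mapped I-cache address array. */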
		icacheaddr = CACHE_IC_ADDRESS_ARRAY | (v &
				cpu_data->icache.entry_mask);

		/* Clear i-cache line valid-bit */
		n = boot_cpu_data.icache.n_aliases;
		for (i = 0; i < cpu_data->icache.ways; i++) {
			for (j = 0; j < n; j++)
				__raw_writel(0, icacheaddr + (j * PAGE_SIZE));
			icacheaddr += cpu_data->icache.way_incr;
		}
	}

	back_to_cached();
	local_irq_restore(flags);
}

static inline void flush_cache_one(unsigned long start, unsigned long phys)
{
	unsigned long flags, exec_offset = 0;

	/*
	 * All types of SH-4 require PC to be uncached to operate on the I-cache.
	 * Some types of SH-4 require PC to be uncached to operate on the D-cache.
	 */
	if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
	    (start < CACHE_OC_ADDRESS_ARRAY))
		exec_offset = cached_to_uncached;

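	/* Keep the address-array writes atomic with respect to interrupts. */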
	local_irq_save(flags);
	__flush_cache_one(start, phys, exec_offset);
	local_irq_restore(flags);
}

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
static void sh4_flush_dcache_page(void *arg)
{
	struct page *page = arg;
	unsigned long addr = (unsigned long)page_address(page);
#ifndef CONFIG_SMP
	struct address_space *mapping = page_mapping_file(page);

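	/*
	 * For pagecache pages with no user mappings, defer the flush:
	 * clearing PG_dcache_clean flags the page so that it gets flushed
	 * before a user mapping is next established.
	 */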
	if (mapping && !mapping_mapped(mapping))
		clear_bit(PG_dcache_clean, &page->flags);
	else
#endif
		flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
				(addr & shm_align_mask), page_to_phys(page));

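	/* Order the cache-array writes before any subsequent stores. */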
	wmb();
}

/* TODO: Selective icache invalidation through IC address array... */
static void flush_icache_all(void)
{
	unsigned long flags, ccr;

	local_irq_save(flags);
	jump_to_uncached();

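	/*
	 * The CCR must be manipulated while executing from the uncached
	 * (P2) mapping, hence the jump above.
	 */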
	/* Flush I-cache */
	ccr = __raw_readl(SH_CCR);
	ccr |= CCR_CACHE_ICI;
	__raw_writel(ccr, SH_CCR);

	/*
	 * back_to_cached() will take care of the barrier for us, don't add
	 * another one!
	 */

	back_to_cached();
	local_irq_restore(flags);
}

static void flush_dcache_all(void)
{
	unsigned long addr, end_addr, entry_offset;

	end_addr = CACHE_OC_ADDRESS_ARRAY +
		(current_cpu_data.dcache.sets <<
		 current_cpu_data.dcache.entry_shift) *
			current_cpu_data.dcache.ways;

	entry_offset = 1 << current_cpu_data.dcache.entry_shift;

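	/*
	 * Writing 0 to an OC address array entry clears its valid and
	 * dirty bits, invalidating the line (a dirty line is written back
	 * to memory first). The loop is unrolled 8x by hand.
	 */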
	for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr; ) {
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
	}
}

static void sh4_flush_cache_all(void *unused)
{
	flush_dcache_all();
	flush_icache_all();
}

/*
 * Note: (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache.  The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag).  It's no different here.
 *
 * Caller takes mm->mmap_lock.
 */
static void sh4_flush_cache_mm(void *arg)
{
	struct mm_struct *mm = arg;

	if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
		return;

	flush_dcache_all();
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
static void sh4_flush_cache_page(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	struct page *page;
	unsigned long address, pfn, phys;
	int map_coherent = 0;
	pmd_t *pmd;
	pte_t *pte;
	void *vaddr;

	vma = data->vma;
	address = data->addr1 & PAGE_MASK;
	pfn = data->addr2;
	phys = pfn << PAGE_SHIFT;
	page = pfn_to_page(pfn);

	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
		return;

	pmd = pmd_off(vma->vm_mm, address);
	pte = pte_offset_kernel(pmd, address);

	/* If the page isn't present, there is nothing to do here. */
	if (!(pte_val(*pte) & _PAGE_PRESENT))
		return;

	if (vma->vm_mm == current->active_mm)
		vaddr = NULL;
	else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * another ASID than the current one.
		 */
		map_coherent = (current_cpu_data.dcache.n_aliases &&
			test_bit(PG_dcache_clean, &page->flags) &&
			page_mapcount(page));
		if (map_coherent)
			vaddr = kmap_coherent(page, address);
		else
			vaddr = kmap_atomic(page);

		address = (unsigned long)vaddr;
	}

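	/*
	 * Flush the page's D-cache lines through the OC address array; the
	 * cache colour is selected by the (possibly kmapped) virtual address.
	 */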
	flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
			(address & shm_align_mask), phys);

	if (vma->vm_flags & VM_EXEC)
		flush_icache_all();

	if (vaddr) {
		if (map_coherent)
			kunmap_coherent(vaddr);
		else
			kunmap_atomic(vaddr);
	}
}

/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
static void sh4_flush_cache_range(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	unsigned long start, end;

	vma = data->vma;
	start = data->addr1;
	end = data->addr2;

	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
		return;

	/*
	 * If the cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	flush_dcache_all();

	if (vma->vm_flags & VM_EXEC)
		flush_icache_all();
}

/**
 * __flush_cache_one
 *
 * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
 *         set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region else 0x0
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed.  The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_one(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset)
{
	int way_count;
	unsigned long base_addr = addr;
	struct cache_info *dcache;
	unsigned long way_incr;
	unsigned long a, ea, p;
	unsigned long temp_pc;

	dcache = &boot_cpu_data.dcache;
	/* Write this way for better assembly. */
	way_count = dcache->ways;
	way_incr = dcache->way_incr;

	/*
	 * Apply exec_offset (i.e. branch to P2 if required.).
	 *
	 * FIXME:
	 *
	 *	If I write "=r" for the (temp_pc), it puts this in r6 hence
	 *	trashing exec_offset before it's been added on - why?  Hence
	 *	"=&r" as a 'workaround'
	 */
	asm volatile("mov.l 1f, %0\n\t"
		     "add   %1, %0\n\t"
		     "jmp   @%0\n\t"
		     "nop\n\t"
		     ".balign 4\n\t"
		     "1:  .long 2f\n\t"
		     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));
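	/* From here on we may be executing from the uncached (P2) mirror. */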

	/*
	 * We know there will be >=1 iteration, so write as do-while to avoid
	 * pointless head-of-loop check for 0 iterations.
	 */
	do {
		ea = base_addr + PAGE_SIZE;
		a = base_addr;
		p = phys;

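		/*
		 * Each store below writes the physical tag into an OC
		 * address array entry, flushing the matching line. The
		 * inner loop walks one page within one way, two cache
		 * lines per iteration.
		 */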
		do {
			*(volatile unsigned long *)a = p;
			/*
			 * Next line: intentionally not p+32, saves an add, p
			 * will do since only the cache tag bits need to
			 * match.
			 */
			*(volatile unsigned long *)(a+32) = p;
			a += 64;
			p += 64;
		} while (a < ea);

		base_addr += way_incr;
	} while (--way_count != 0);
}

extern void __weak sh4__flush_region_init(void);

/*
 * SH-4 has a virtually indexed and physically tagged cache.
 */
void __init sh4_cache_init(void)
{
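	/* Report the CPU version registers (PVR/CVR/PRR). */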
	printk("PVR=%08x CVR=%08x PRR=%08x\n",
		__raw_readl(CCN_PVR),
		__raw_readl(CCN_CVR),
		__raw_readl(CCN_PRR));

	local_flush_icache_range	= sh4_flush_icache_range;
	local_flush_dcache_page		= sh4_flush_dcache_page;
	local_flush_cache_all		= sh4_flush_cache_all;
	local_flush_cache_mm		= sh4_flush_cache_mm;
	local_flush_cache_dup_mm	= sh4_flush_cache_mm;
	local_flush_cache_page		= sh4_flush_cache_page;
	local_flush_cache_range		= sh4_flush_cache_range;

	sh4__flush_region_init();
}