xref: /openbmc/linux/arch/sh/mm/cache-sh4.c (revision 157efa29)
/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2001 - 2009  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 * Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <asm/mmu_context.h>
#include <asm/cache_insns.h>
#include <asm/cacheflush.h>

/*
 * The maximum number of pages we support when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_ICACHE_PAGES	32
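/*
 * Worked example, assuming 4 KiB pages: 32 pages * 4 KiB = 128 KiB, so
 * ranges of 128 KiB and up take the flush-everything path in
 * sh4_flush_icache_range() below.
 */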

static void __flush_cache_one(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);

/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module, the a.out format routines,
 * signal handler code and kprobes code.
 */
static void sh4_flush_icache_range(void *args)
{
	struct flusher_data *data = args;
	unsigned long start, end;
	unsigned long flags, v;
	int i;

	start = data->addr1;
	end = data->addr2;

	/* If there are too many pages then just blow away the caches */
	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
		local_flush_cache_all(NULL);
		return;
	}

	/*
	 * Selectively flush d-cache then invalidate the i-cache.
	 * This is inefficient, so only use this for small ranges.
	 */
	start &= ~(L1_CACHE_BYTES-1);
	end += L1_CACHE_BYTES-1;
	end &= ~(L1_CACHE_BYTES-1);
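	/*
	 * Alignment sketch (SH-4 has 32-byte lines): a start of 0x1005
	 * rounds down to 0x1000 and an end of 0x1043 rounds up to 0x1060,
	 * so every line overlapping the range is covered.
	 */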

	local_irq_save(flags);
	jump_to_uncached();

	for (v = start; v < end; v += L1_CACHE_BYTES) {
		unsigned long icacheaddr;
		int j, n;

		__ocbwb(v);

		icacheaddr = CACHE_IC_ADDRESS_ARRAY | (v &
				cpu_data->icache.entry_mask);

		/* Clear i-cache line valid-bit */
		n = boot_cpu_data.icache.n_aliases;
		for (i = 0; i < cpu_data->icache.ways; i++) {
			for (j = 0; j < n; j++)
				__raw_writel(0, icacheaddr + (j * PAGE_SIZE));
			icacheaddr += cpu_data->icache.way_incr;
		}
	}

	back_to_cached();
	local_irq_restore(flags);
}

static inline void flush_cache_one(unsigned long start, unsigned long phys)
{
	unsigned long flags, exec_offset = 0;

	/*
	 * All types of SH-4 require PC to be uncached to operate on the I-cache.
	 * Some types of SH-4 require PC to be uncached to operate on the D-cache.
	 */
	if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
	    (start < CACHE_OC_ADDRESS_ARRAY))
		exec_offset = cached_to_uncached;
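	/*
	 * cached_to_uncached is the P1->P2 distance (0x20000000 in the
	 * typical 29-bit layout), so __flush_cache_one() ends up running
	 * from the uncached mirror while it pokes the cache arrays.
	 */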

	local_irq_save(flags);
	__flush_cache_one(start, phys, exec_offset);
	local_irq_restore(flags);
}

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
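/*
 * Alias sketch: with a 16 KiB direct-mapped dcache and 4 KiB pages there
 * are four page colours, so one physical page can occupy up to four
 * distinct cache sets depending on the virtual address used to touch it;
 * the (addr & shm_align_mask) term below selects the matching colour.
 */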
static void sh4_flush_dcache_folio(void *arg)
{
	struct folio *folio = arg;
#ifndef CONFIG_SMP
	struct address_space *mapping = folio_flush_mapping(folio);

	if (mapping && !mapping_mapped(mapping))
		clear_bit(PG_dcache_clean, &folio->flags);
	else
#endif
	{
		unsigned long pfn = folio_pfn(folio);
		unsigned long addr = (unsigned long)folio_address(folio);
		unsigned int i, nr = folio_nr_pages(folio);

		for (i = 0; i < nr; i++) {
			flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
						(addr & shm_align_mask),
					pfn * PAGE_SIZE);
			addr += PAGE_SIZE;
			pfn++;
		}
	}

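	/* Ensure the cache-array writes above complete before we return. */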
	wmb();
}

/* TODO: Selective icache invalidation through IC address array.. */
static void flush_icache_all(void)
{
	unsigned long flags, ccr;

	local_irq_save(flags);
	jump_to_uncached();

	/* Flush I-cache */
	ccr = __raw_readl(SH_CCR);
	ccr |= CCR_CACHE_ICI;
	__raw_writel(ccr, SH_CCR);
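	/*
	 * CCR is only safely written while executing uncached, which is
	 * what the jump_to_uncached() above arranges.
	 */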

	/*
	 * back_to_cached() will take care of the barrier for us, don't add
	 * another one!
	 */

	back_to_cached();
	local_irq_restore(flags);
}

static void flush_dcache_all(void)
{
	unsigned long addr, end_addr, entry_offset;

	end_addr = CACHE_OC_ADDRESS_ARRAY +
		(current_cpu_data.dcache.sets <<
		 current_cpu_data.dcache.entry_shift) *
			current_cpu_data.dcache.ways;

	entry_offset = 1 << current_cpu_data.dcache.entry_shift;
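	/*
	 * Size sketch: 512 sets * 32-byte entries * 1 way would give
	 * 16 KiB of address-array writes; the loop below is unrolled
	 * eight deep to trim the loop overhead.
	 */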

	for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr; ) {
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
	}
}

static void sh4_flush_cache_all(void *unused)
{
	flush_dcache_all();
	flush_icache_all();
}

/*
 * Note: (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache.  The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag).  It's no different here.
 *
 * Caller takes mm->mmap_lock.
 */
static void sh4_flush_cache_mm(void *arg)
{
	struct mm_struct *mm = arg;

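	/* No context on this CPU means the mm never ran here: nothing cached. */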
	if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
		return;

	flush_dcache_all();
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
static void sh4_flush_cache_page(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	struct page *page;
	unsigned long address, pfn, phys;
	int map_coherent = 0;
	pmd_t *pmd;
	pte_t *pte;
	void *vaddr;

	vma = data->vma;
	address = data->addr1 & PAGE_MASK;
	pfn = data->addr2;
	phys = pfn << PAGE_SHIFT;
	page = pfn_to_page(pfn);

	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
		return;

	pmd = pmd_off(vma->vm_mm, address);
	pte = pte_offset_kernel(pmd, address);

	/* If the page isn't present, there is nothing to do here. */
	if (!(pte_val(*pte) & _PAGE_PRESENT))
		return;

	if (vma->vm_mm == current->active_mm)
		vaddr = NULL;
	else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * another ASID than the current one.
		 */
		map_coherent = (current_cpu_data.dcache.n_aliases &&
			test_bit(PG_dcache_clean, &page->flags) &&
			page_mapcount(page));
		if (map_coherent)
			vaddr = kmap_coherent(page, address);
		else
			vaddr = kmap_atomic(page);

		address = (unsigned long)vaddr;
	}

	flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
			(address & shm_align_mask), phys);

	if (vma->vm_flags & VM_EXEC)
		flush_icache_all();

	if (vaddr) {
		if (map_coherent)
			kunmap_coherent(vaddr);
		else
			kunmap_atomic(vaddr);
	}
}

/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
static void sh4_flush_cache_range(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	unsigned long start, end;

	vma = data->vma;
	start = data->addr1;
	end = data->addr2;

	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
		return;

	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
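	/* E.g. a 4 KiB way equals the page size: one colour, no aliases. */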
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	flush_dcache_all();

	if (vma->vm_flags & VM_EXEC)
		flush_icache_all();
}

/**
 * __flush_cache_one
 *
 * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
 *         set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region else 0x0
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed.  The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
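/*
 * On SH-4, writing the OC address array writes back the entry first if it
 * is dirty; writing 0 to the U and V bits (the low bits of 'phys' here)
 * then leaves the line invalid, giving a combined write-back + purge.
 */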
static void __flush_cache_one(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset)
{
	int way_count;
	unsigned long base_addr = addr;
	struct cache_info *dcache;
	unsigned long way_incr;
	unsigned long a, ea, p;
	unsigned long temp_pc;

	dcache = &boot_cpu_data.dcache;
	/* Write this way for better assembly. */
	way_count = dcache->ways;
	way_incr = dcache->way_incr;

	/*
	 * Apply exec_offset (i.e. branch to P2 if required.).
	 *
	 * FIXME:
	 *
	 *	If I write "=r" for the (temp_pc), it puts this in r6 hence
	 *	trashing exec_offset before it's been added on - why?  Hence
	 *	"=&r" as a 'workaround'
	 */
	asm volatile("mov.l 1f, %0\n\t"
		     "add   %1, %0\n\t"
		     "jmp   @%0\n\t"
		     "nop\n\t"
		     ".balign 4\n\t"
		     "1:  .long 2f\n\t"
		     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));

	/*
	 * We know there will be >=1 iteration, so write as do-while to
	 * avoid a pointless head-of-loop check for 0 iterations.
	 */
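	/*
	 * Iteration sketch: with 4 KiB pages the inner loop covers two
	 * 32-byte lines per 64-byte step, i.e. 64 passes per way.
	 */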
	do {
		ea = base_addr + PAGE_SIZE;
		a = base_addr;
		p = phys;

		do {
			*(volatile unsigned long *)a = p;
			/*
			 * Next line: intentionally not p+32, saves an add, p
			 * will do since only the cache tag bits need to
			 * match.
			 */
			*(volatile unsigned long *)(a+32) = p;
			a += 64;
			p += 64;
		} while (a < ea);

		base_addr += way_incr;
	} while (--way_count != 0);
}

extern void __weak sh4__flush_region_init(void);

/*
 * SH-4 has a virtually indexed and physically tagged cache.
 */
void __init sh4_cache_init(void)
{
	printk("PVR=%08x CVR=%08x PRR=%08x\n",
		__raw_readl(CCN_PVR),
		__raw_readl(CCN_CVR),
		__raw_readl(CCN_PRR));

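	/* Install the SH-4 variants behind the generic local_* flush hooks. */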
	local_flush_icache_range	= sh4_flush_icache_range;
	local_flush_dcache_folio	= sh4_flush_dcache_folio;
	local_flush_cache_all		= sh4_flush_cache_all;
	local_flush_cache_mm		= sh4_flush_cache_mm;
	local_flush_cache_dup_mm	= sh4_flush_cache_mm;
	local_flush_cache_page		= sh4_flush_cache_page;
	local_flush_cache_range		= sh4_flush_cache_range;

	sh4__flush_region_init();
}