xref: /openbmc/linux/arch/sh/mm/cache.c (revision 3cf6fa1e)
1cbbe2f68SPaul Mundt /*
2f26b2a56SPaul Mundt  * arch/sh/mm/cache.c
3cbbe2f68SPaul Mundt  *
4cbbe2f68SPaul Mundt  * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
5a6198a23SPaul Mundt  * Copyright (C) 2002 - 2010  Paul Mundt
6cbbe2f68SPaul Mundt  *
7cbbe2f68SPaul Mundt  * Released under the terms of the GNU GPL v2.0.
8cbbe2f68SPaul Mundt  */
9cbbe2f68SPaul Mundt #include <linux/mm.h>
10cbbe2f68SPaul Mundt #include <linux/init.h>
11cbbe2f68SPaul Mundt #include <linux/mutex.h>
12cbbe2f68SPaul Mundt #include <linux/fs.h>
13f26b2a56SPaul Mundt #include <linux/smp.h>
14cbbe2f68SPaul Mundt #include <linux/highmem.h>
15cbbe2f68SPaul Mundt #include <linux/module.h>
16cbbe2f68SPaul Mundt #include <asm/mmu_context.h>
17cbbe2f68SPaul Mundt #include <asm/cacheflush.h>
18cbbe2f68SPaul Mundt 
/*
 * Per-CPU cache maintenance operations, installed by the CPU-family
 * cache init code from cpu_cache_init().  All default to cache_noop
 * so that a disabled (or not-yet-probed) cache safely does nothing.
 * The SMP wrappers below fan these out via cacheop_on_each_cpu().
 */
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_page)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;
28f26b2a56SPaul Mundt 
/*
 * Region-based flush primitives, likewise swapped in per CPU family
 * during cpu_cache_init().  Exported since drivers/modules call them
 * for DMA coherency.  They start out as noop__flush_region (below)
 * and are only replaced when a real cache is present and enabled.
 */
void (*__flush_wback_region)(void *start, int size);
EXPORT_SYMBOL(__flush_wback_region);
void (*__flush_purge_region)(void *start, int size);
EXPORT_SYMBOL(__flush_purge_region);
void (*__flush_invalidate_region)(void *start, int size);
EXPORT_SYMBOL(__flush_invalidate_region);
3537443ef3SPaul Mundt 
/*
 * Default region flush: used when the cache is disabled or has not
 * been probed yet, so a flush is simply a no-op.
 */
static inline void noop__flush_region(void *start, int size)
{
	/* Intentionally empty; silence unused-parameter warnings. */
	(void)start;
	(void)size;
}
3937443ef3SPaul Mundt 
/*
 * Run a cache operation on every online CPU: IPI the others (when any
 * exist) and then invoke it locally.  Preemption is disabled for the
 * duration so we do not migrate between issuing the IPIs and doing
 * the local call.
 */
static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
                                   int wait)
{
	preempt_disable();

	/*
	 * It's possible that this gets called early on when IRQs are
	 * still disabled due to ioremapping by the boot CPU, so don't
	 * even attempt IPIs unless there are other CPUs online.
	 */
	if (num_online_cpus() > 1)
		smp_call_function(func, info, wait);

	func(info);

	preempt_enable();
}
576f379578SPaul Mundt 
/*
 * Copy data into a user-mapped page (e.g. for ptrace writes) while
 * keeping an aliasing dcache coherent.  If the page is mapped and has
 * no deferred flush pending, write through a kernel mapping congruent
 * to the user address so both views hit the same cache lines;
 * otherwise copy via the normal kernel address and mark the page
 * PG_dcache_dirty so the flush happens later in __update_cache().
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		/* Map at a colour-matched kernel address, copy, unmap. */
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent(vto);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}

	/* New instructions may have been written: keep icache coherent. */
	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}
76cbbe2f68SPaul Mundt 
/*
 * Copy data out of a user-mapped page, the read-side counterpart of
 * copy_to_user_page().  When the page is mapped with no deferred
 * flush pending, read through a colour-matched kernel mapping so we
 * observe the user's view of the aliasing dcache.
 */
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent(vfrom);
	} else {
		/*
		 * NOTE(review): this mirrors the write path, including
		 * setting PG_dcache_dirty after a read-side copy —
		 * presumably to force a later flush of the kernel
		 * alias; confirm against copy_to_user_page().
		 */
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}
}
92cbbe2f68SPaul Mundt 
/*
 * Copy a page destined for userspace (e.g. COW fault), honouring
 * dcache aliasing: the source is read via a colour-matched mapping
 * when its user view may be more recent than the kernel view, and the
 * destination is purged when the kernel mapping aliases the eventual
 * user address.
 */
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);

	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
	    !test_bit(PG_dcache_dirty, &from->flags)) {
		/* User view may be dirty in another colour: read it there. */
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent(vfrom);
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}

	/* Push the copy out if the kernel address aliases the user one. */
	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		__flush_purge_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);
	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);
119cbbe2f68SPaul Mundt 
/*
 * Zero a page destined for a user mapping at @vaddr, purging the
 * kernel alias when its cache colour differs from the user address so
 * the user view cannot see stale (non-zero) lines.
 */
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	clear_page(kaddr);

	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_purge_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);
132cbbe2f68SPaul Mundt 
133cbbe2f68SPaul Mundt void __update_cache(struct vm_area_struct *vma,
134cbbe2f68SPaul Mundt 		    unsigned long address, pte_t pte)
135cbbe2f68SPaul Mundt {
136cbbe2f68SPaul Mundt 	struct page *page;
137cbbe2f68SPaul Mundt 	unsigned long pfn = pte_pfn(pte);
138cbbe2f68SPaul Mundt 
139cbbe2f68SPaul Mundt 	if (!boot_cpu_data.dcache.n_aliases)
140cbbe2f68SPaul Mundt 		return;
141cbbe2f68SPaul Mundt 
142cbbe2f68SPaul Mundt 	page = pfn_to_page(pfn);
143964f7e5aSPaul Mundt 	if (pfn_valid(pfn)) {
144cbbe2f68SPaul Mundt 		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
14576382b5bSMarkus Pietrek 		if (dirty)
14676382b5bSMarkus Pietrek 			__flush_purge_region(page_address(page), PAGE_SIZE);
147cbbe2f68SPaul Mundt 	}
148cbbe2f68SPaul Mundt }
149cbbe2f68SPaul Mundt 
/*
 * Make an anonymous page coherent before the kernel reads it through
 * its linear mapping (get_user_pages() path): only needed when the
 * kernel address and the user address @vmaddr fall in different cache
 * colours.
 */
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
		    !test_bit(PG_dcache_dirty, &page->flags)) {
			void *kaddr;

			/* Map/unmap at the user's colour to purge its lines. */
			kaddr = kmap_coherent(page, vmaddr);
			/* XXX.. For now kunmap_coherent() does a purge */
			/* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
			kunmap_coherent(kaddr);
		} else
			__flush_purge_region((void *)addr, PAGE_SIZE);
	}
}
167ecba1060SPaul Mundt 
/* Flush the entire cache hierarchy on all online CPUs. */
void flush_cache_all(void)
{
	cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}
EXPORT_SYMBOL(flush_cache_all);
173f26b2a56SPaul Mundt 
/*
 * Flush all cachelines belonging to @mm.  With no dcache aliases the
 * cache is effectively PIPT for this purpose and nothing needs doing.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
}
181f26b2a56SPaul Mundt 
/*
 * Cache flush at fork() time, before the child duplicates @mm's page
 * tables.  Same no-alias early-out as flush_cache_mm().
 */
void flush_cache_dup_mm(struct mm_struct *mm)
{
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
}
189f26b2a56SPaul Mundt 
190f26b2a56SPaul Mundt void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
191f26b2a56SPaul Mundt 		      unsigned long pfn)
192f26b2a56SPaul Mundt {
193f26b2a56SPaul Mundt 	struct flusher_data data;
194f26b2a56SPaul Mundt 
195f26b2a56SPaul Mundt 	data.vma = vma;
196f26b2a56SPaul Mundt 	data.addr1 = addr;
197f26b2a56SPaul Mundt 	data.addr2 = pfn;
198f26b2a56SPaul Mundt 
1996f379578SPaul Mundt 	cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
200f26b2a56SPaul Mundt }
201f26b2a56SPaul Mundt 
202f26b2a56SPaul Mundt void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
203f26b2a56SPaul Mundt 		       unsigned long end)
204f26b2a56SPaul Mundt {
205f26b2a56SPaul Mundt 	struct flusher_data data;
206f26b2a56SPaul Mundt 
207f26b2a56SPaul Mundt 	data.vma = vma;
208f26b2a56SPaul Mundt 	data.addr1 = start;
209f26b2a56SPaul Mundt 	data.addr2 = end;
210f26b2a56SPaul Mundt 
2116f379578SPaul Mundt 	cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
212f26b2a56SPaul Mundt }
2130a993b0aSPaul Mundt EXPORT_SYMBOL(flush_cache_range);
214f26b2a56SPaul Mundt 
/* Write back and invalidate the dcache lines of @page on all CPUs. */
void flush_dcache_page(struct page *page)
{
	cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
}
EXPORT_SYMBOL(flush_dcache_page);
220f26b2a56SPaul Mundt 
221f26b2a56SPaul Mundt void flush_icache_range(unsigned long start, unsigned long end)
222f26b2a56SPaul Mundt {
223f26b2a56SPaul Mundt 	struct flusher_data data;
224f26b2a56SPaul Mundt 
225f26b2a56SPaul Mundt 	data.vma = NULL;
226f26b2a56SPaul Mundt 	data.addr1 = start;
227f26b2a56SPaul Mundt 	data.addr2 = end;
228f26b2a56SPaul Mundt 
2296f379578SPaul Mundt 	cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
230f26b2a56SPaul Mundt }
231f26b2a56SPaul Mundt 
/* Flush the icache lines of a single page on all online CPUs. */
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	/* Nothing uses the VMA, so just pass the struct page along */
	cacheop_on_each_cpu(local_flush_icache_page, page, 1);
}
237f26b2a56SPaul Mundt 
/*
 * Flush caches for the signal trampoline at @address so the freshly
 * written trampoline instructions are visible to the icache.
 */
void flush_cache_sigtramp(unsigned long address)
{
	cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}
242f26b2a56SPaul Mundt 
24327d59ec1SPaul Mundt static void compute_alias(struct cache_info *c)
24427d59ec1SPaul Mundt {
24527d59ec1SPaul Mundt 	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
24627d59ec1SPaul Mundt 	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
24727d59ec1SPaul Mundt }
24827d59ec1SPaul Mundt 
/*
 * Log the probed I/D (and optional L2) cache geometry at boot, after
 * compute_alias() has filled in the alias fields.
 */
static void __init emit_cache_params(void)
{
	printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);
	printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);

	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}
}
28227d59ec1SPaul Mundt 
/*
 * Boot-time cache setup: compute aliasing parameters, install the
 * noop flush defaults, then let the matching CPU-family init hook
 * (declared __weak so absent families link away) replace them with
 * real implementations.  If the cache is disabled in CCR the noops
 * are kept as-is.
 */
void __init cpu_cache_init(void)
{
	unsigned int cache_disabled = 0;

#ifdef CCR
	cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
#endif

	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	/* Safe defaults until a family hook overrides them. */
	__flush_wback_region		= noop__flush_region;
	__flush_purge_region		= noop__flush_region;
	__flush_invalidate_region	= noop__flush_region;

	/*
	 * No flushing is necessary in the disabled cache case so we can
	 * just keep the noop functions in local_flush_..() and __flush_..()
	 */
	if (unlikely(cache_disabled))
		goto skip;

	if (boot_cpu_data.family == CPU_FAMILY_SH2) {
		extern void __weak sh2_cache_init(void);

		sh2_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
		extern void __weak sh2a_cache_init(void);

		sh2a_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH3) {
		extern void __weak sh3_cache_init(void);

		sh3_cache_init();

		/* SH7705 with a 32KB (512-set) dcache needs its own handling. */
		if ((boot_cpu_data.type == CPU_SH7705) &&
		    (boot_cpu_data.dcache.sets == 512)) {
			extern void __weak sh7705_cache_init(void);

			sh7705_cache_init();
		}
	}

	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
		extern void __weak sh4_cache_init(void);

		sh4_cache_init();

		/* SH-X3 parts layer extra handling on top of the SH-4 base. */
		if ((boot_cpu_data.type == CPU_SH7786) ||
		    (boot_cpu_data.type == CPU_SHX3)) {
			extern void __weak shx3_cache_init(void);

			shx3_cache_init();
		}
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH5) {
		extern void __weak sh5_cache_init(void);

		sh5_cache_init();
	}

skip:
	emit_cache_params();
}
355