xref: /openbmc/linux/arch/sh/mm/cache-sh2a.c (revision 75bf465f0bc33e9b776a46d6a1b9b990f5fb7c37)
1*c456cfc2SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2cce2d453SYoshinori Sato /*
3cce2d453SYoshinori Sato  * arch/sh/mm/cache-sh2a.c
4cce2d453SYoshinori Sato  *
5cce2d453SYoshinori Sato  * Copyright (C) 2008 Yoshinori Sato
6cce2d453SYoshinori Sato  */
7cce2d453SYoshinori Sato 
8cce2d453SYoshinori Sato #include <linux/init.h>
9cce2d453SYoshinori Sato #include <linux/mm.h>
10cce2d453SYoshinori Sato 
11cce2d453SYoshinori Sato #include <asm/cache.h>
12cce2d453SYoshinori Sato #include <asm/addrspace.h>
13cce2d453SYoshinori Sato #include <asm/processor.h>
14cce2d453SYoshinori Sato #include <asm/cacheflush.h>
15cce2d453SYoshinori Sato #include <asm/io.h>
16cce2d453SYoshinori Sato 
17c1537b48SPhil Edworthy /*
18c1537b48SPhil Edworthy  * The maximum number of pages we support up to when doing ranged dcache
19c1537b48SPhil Edworthy  * flushing. Anything exceeding this will simply flush the dcache in its
20c1537b48SPhil Edworthy  * entirety.
21c1537b48SPhil Edworthy  */
22c1537b48SPhil Edworthy #define MAX_OCACHE_PAGES	32
23c1537b48SPhil Edworthy #define MAX_ICACHE_PAGES	32
24c1537b48SPhil Edworthy 
#ifdef CONFIG_CACHE_WRITEBACK
/*
 * Write back a single operand-cache line, if it holds address @v in
 * way @way.  Clearing the "updated" bit via the address array causes
 * the line to be written back without being invalidated.
 */
static void sh2a_flush_oc_line(unsigned long v, int way)
{
	unsigned long entry = (v & 0x000007f0) | (way << 11);
	unsigned long tag;

	tag = __raw_readl(CACHE_OC_ADDRESS_ARRAY | entry);
	if ((tag & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) {
		tag &= ~SH_CACHE_UPDATED;
		__raw_writel(tag, CACHE_OC_ADDRESS_ARRAY | entry);
	}
}
#endif
38c1537b48SPhil Edworthy 
sh2a_invalidate_line(unsigned long cache_addr,unsigned long v)39c1537b48SPhil Edworthy static void sh2a_invalidate_line(unsigned long cache_addr, unsigned long v)
40c1537b48SPhil Edworthy {
41c1537b48SPhil Edworthy 	/* Set associative bit to hit all ways */
42c1537b48SPhil Edworthy 	unsigned long addr = (v & 0x000007f0) | SH_CACHE_ASSOC;
43c1537b48SPhil Edworthy 	__raw_writel((addr & CACHE_PHYSADDR_MASK), cache_addr | addr);
44c1537b48SPhil Edworthy }
45c1537b48SPhil Edworthy 
/*
 * sh2a__flush_wback_region - write back, but do not invalidate, the
 * D-cache lines covering [start, start + size).
 *
 * With a write-through cache (CONFIG_CACHE_WRITEBACK unset) there is
 * nothing to write back, so the function is empty.
 */
static void sh2a__flush_wback_region(void *start, int size)
{
#ifdef CONFIG_CACHE_WRITEBACK
	unsigned long begin = (unsigned long)start & ~(L1_CACHE_BYTES - 1);
	unsigned long end = ((unsigned long)start + size + L1_CACHE_BYTES - 1)
			    & ~(L1_CACHE_BYTES - 1);
	int nr_ways = current_cpu_data.dcache.ways;
	unsigned long v, flags;

	local_irq_save(flags);
	jump_to_uncached();

	if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
		/*
		 * Region too large for per-line flushing: walk the whole
		 * address array and clear every dirty line instead.
		 */
		begin = CACHE_OC_ADDRESS_ARRAY;
		end = begin + nr_ways * current_cpu_data.dcache.way_size;

		for (v = begin; v < end; v += L1_CACHE_BYTES) {
			unsigned long tag = __raw_readl(v);

			if (tag & SH_CACHE_UPDATED)
				__raw_writel(tag & ~SH_CACHE_UPDATED, v);
		}
	} else {
		int way;

		for (way = 0; way < nr_ways; way++)
			for (v = begin; v < end; v += L1_CACHE_BYTES)
				sh2a_flush_oc_line(v, way);
	}

	back_to_cached();
	local_irq_restore(flags);
#endif
}
87cce2d453SYoshinori Sato 
88c1537b48SPhil Edworthy /*
89c1537b48SPhil Edworthy  * Write back the dirty D-caches and invalidate them.
90c1537b48SPhil Edworthy  */
sh2a__flush_purge_region(void * start,int size)91a58e1a2aSPaul Mundt static void sh2a__flush_purge_region(void *start, int size)
92cce2d453SYoshinori Sato {
93cce2d453SYoshinori Sato 	unsigned long v;
94cce2d453SYoshinori Sato 	unsigned long begin, end;
95cce2d453SYoshinori Sato 	unsigned long flags;
96cce2d453SYoshinori Sato 
97cce2d453SYoshinori Sato 	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
98cce2d453SYoshinori Sato 	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
99cce2d453SYoshinori Sato 		& ~(L1_CACHE_BYTES-1);
100cce2d453SYoshinori Sato 
101cce2d453SYoshinori Sato 	local_irq_save(flags);
102cce2d453SYoshinori Sato 	jump_to_uncached();
103cce2d453SYoshinori Sato 
104cce2d453SYoshinori Sato 	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
105c1537b48SPhil Edworthy #ifdef CONFIG_CACHE_WRITEBACK
106c1537b48SPhil Edworthy 		int way;
107c1537b48SPhil Edworthy 		int nr_ways = current_cpu_data.dcache.ways;
108c1537b48SPhil Edworthy 		for (way = 0; way < nr_ways; way++)
109c1537b48SPhil Edworthy 			sh2a_flush_oc_line(v, way);
110c1537b48SPhil Edworthy #endif
111c1537b48SPhil Edworthy 		sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
112cce2d453SYoshinori Sato 	}
113c1537b48SPhil Edworthy 
114cce2d453SYoshinori Sato 	back_to_cached();
115cce2d453SYoshinori Sato 	local_irq_restore(flags);
116cce2d453SYoshinori Sato }
117cce2d453SYoshinori Sato 
118c1537b48SPhil Edworthy /*
119c1537b48SPhil Edworthy  * Invalidate the D-caches, but no write back please
120c1537b48SPhil Edworthy  */
sh2a__flush_invalidate_region(void * start,int size)121a58e1a2aSPaul Mundt static void sh2a__flush_invalidate_region(void *start, int size)
122cce2d453SYoshinori Sato {
123cce2d453SYoshinori Sato 	unsigned long v;
124cce2d453SYoshinori Sato 	unsigned long begin, end;
125cce2d453SYoshinori Sato 	unsigned long flags;
126cce2d453SYoshinori Sato 
127cce2d453SYoshinori Sato 	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
128cce2d453SYoshinori Sato 	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
129cce2d453SYoshinori Sato 		& ~(L1_CACHE_BYTES-1);
130c1537b48SPhil Edworthy 
131cce2d453SYoshinori Sato 	local_irq_save(flags);
132cce2d453SYoshinori Sato 	jump_to_uncached();
133cce2d453SYoshinori Sato 
134c1537b48SPhil Edworthy 	/* If there are too many pages then just blow the cache */
135c1537b48SPhil Edworthy 	if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
136a5f6ea29SGeert Uytterhoeven 		__raw_writel(__raw_readl(SH_CCR) | CCR_OCACHE_INVALIDATE,
137a5f6ea29SGeert Uytterhoeven 			     SH_CCR);
138c1537b48SPhil Edworthy 	} else {
139c1537b48SPhil Edworthy 		for (v = begin; v < end; v += L1_CACHE_BYTES)
140c1537b48SPhil Edworthy 			sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
141cce2d453SYoshinori Sato 	}
142c1537b48SPhil Edworthy 
143cce2d453SYoshinori Sato 	back_to_cached();
144cce2d453SYoshinori Sato 	local_irq_restore(flags);
145cce2d453SYoshinori Sato }
146cce2d453SYoshinori Sato 
147c1537b48SPhil Edworthy /*
148c1537b48SPhil Edworthy  * Write back the range of D-cache, and purge the I-cache.
149c1537b48SPhil Edworthy  */
sh2a_flush_icache_range(void * args)150f26b2a56SPaul Mundt static void sh2a_flush_icache_range(void *args)
151cce2d453SYoshinori Sato {
152f26b2a56SPaul Mundt 	struct flusher_data *data = args;
153f26b2a56SPaul Mundt 	unsigned long start, end;
154cce2d453SYoshinori Sato 	unsigned long v;
155983f4c51SPaul Mundt 	unsigned long flags;
156cce2d453SYoshinori Sato 
157f26b2a56SPaul Mundt 	start = data->addr1 & ~(L1_CACHE_BYTES-1);
158f26b2a56SPaul Mundt 	end = (data->addr2 + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1);
159cce2d453SYoshinori Sato 
160c1537b48SPhil Edworthy #ifdef CONFIG_CACHE_WRITEBACK
161c1537b48SPhil Edworthy 	sh2a__flush_wback_region((void *)start, end-start);
162c1537b48SPhil Edworthy #endif
163c1537b48SPhil Edworthy 
164983f4c51SPaul Mundt 	local_irq_save(flags);
165cce2d453SYoshinori Sato 	jump_to_uncached();
166cce2d453SYoshinori Sato 
167cce2d453SYoshinori Sato 	/* I-Cache invalidate */
168c1537b48SPhil Edworthy 	/* If there are too many pages then just blow the cache */
169c1537b48SPhil Edworthy 	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
170a5f6ea29SGeert Uytterhoeven 		__raw_writel(__raw_readl(SH_CCR) | CCR_ICACHE_INVALIDATE,
171a5f6ea29SGeert Uytterhoeven 			     SH_CCR);
172c1537b48SPhil Edworthy 	} else {
173c1537b48SPhil Edworthy 		for (v = start; v < end; v += L1_CACHE_BYTES)
174c1537b48SPhil Edworthy 			sh2a_invalidate_line(CACHE_IC_ADDRESS_ARRAY, v);
175cce2d453SYoshinori Sato 	}
176cce2d453SYoshinori Sato 
177cce2d453SYoshinori Sato 	back_to_cached();
178983f4c51SPaul Mundt 	local_irq_restore(flags);
179cce2d453SYoshinori Sato }
180a58e1a2aSPaul Mundt 
sh2a_cache_init(void)181a58e1a2aSPaul Mundt void __init sh2a_cache_init(void)
182a58e1a2aSPaul Mundt {
183f26b2a56SPaul Mundt 	local_flush_icache_range	= sh2a_flush_icache_range;
184a58e1a2aSPaul Mundt 
185a58e1a2aSPaul Mundt 	__flush_wback_region		= sh2a__flush_wback_region;
186a58e1a2aSPaul Mundt 	__flush_purge_region		= sh2a__flush_purge_region;
187a58e1a2aSPaul Mundt 	__flush_invalidate_region	= sh2a__flush_invalidate_region;
188a58e1a2aSPaul Mundt }
189