// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mm/cache-xsc3l2.c - XScale3 L2 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 */
#include <linux/init.h>
#include <linux/highmem.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>

#define CR_L2	(1 << 26)

#define CACHE_LINE_SIZE		32
#define CACHE_LINE_SHIFT	5
#define CACHE_WAY_PER_SET	8

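/*
 * The L2 cache type register (CP15 c0, c0, 1 with opc1 = 1) encodes the
 * way size in bits 11:8 as 8 KiB << field; with 32-byte lines, the
 * number of sets follows by dividing the way size by the line size.
 */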
#define CACHE_WAY_SIZE(l2ctype)	(8192 << (((l2ctype) >> 8) & 0xf))
#define CACHE_SET_SIZE(l2ctype)	(CACHE_WAY_SIZE(l2ctype) >> CACHE_LINE_SHIFT)

static inline int xsc3_l2_present(void)
{
	unsigned long l2ctype;

	__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

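	/* bits 7:3 are non-zero only when an L2 cache is fitted */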
	return !!(l2ctype & 0xf8);
}

static inline void xsc3_l2_clean_mva(unsigned long addr)
{
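	/* clean (write back) a single L2 cache line by MVA */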
	__asm__("mcr p15, 1, %0, c7, c11, 1" : : "r" (addr));
}

static inline void xsc3_l2_inv_mva(unsigned long addr)
{
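	/* invalidate a single L2 cache line by MVA, discarding its contents */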
	__asm__("mcr p15, 1, %0, c7, c7, 1" : : "r" (addr));
}

static inline void xsc3_l2_inv_all(void)
{
	unsigned long l2ctype, set_way;
	int set, way;

	__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

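	/*
	 * Walk every line: the set/way operand carries the way number in
	 * bits 31:29 (8 ways) and the set index at the line-size shift.
	 */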
	for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) {
		for (way = 0; way < CACHE_WAY_PER_SET; way++) {
			set_way = (way << 29) | (set << 5);
			__asm__("mcr p15, 1, %0, c7, c11, 2" : : "r"(set_way));
		}
	}

	dsb();
}

static inline void l2_unmap_va(unsigned long va)
{
#ifdef CONFIG_HIGHMEM
	if (va != -1)
		kunmap_atomic((void *)va);
#endif
}

static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va)
{
#ifdef CONFIG_HIGHMEM
	unsigned long va = prev_va & PAGE_MASK;
	unsigned long pa_offset = pa << (32 - PAGE_SHIFT);
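	/*
	 * The shifts keep only the offset-within-page bits.  Since the
	 * walk moves forward a line at a time, the new offset is smaller
	 * than the previous one exactly when a page boundary has been
	 * crossed (and the initial prev_va of -1 forces a first mapping).
	 */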
	if (unlikely(pa_offset < (prev_va << (32 - PAGE_SHIFT)))) {
		/*
		 * Switching to a new page.  Because the cache ops use
		 * virtual addresses only, we must put a mapping in
		 * place for it.
		 */
		l2_unmap_va(prev_va);
		va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT);
	}
	return va + (pa_offset >> (32 - PAGE_SHIFT));
#else
	return __phys_to_virt(pa);
#endif
}

static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
{
	unsigned long vaddr;

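	/* an all-inclusive address range means "invalidate everything" */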
	if (start == 0 && end == -1ul) {
		xsc3_l2_inv_all();
		return;
	}

	vaddr = -1;  /* to force the first mapping */

	/*
	 * Clean and invalidate partial first cache line.
	 */
	if (start & (CACHE_LINE_SIZE - 1)) {
		vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr);
		xsc3_l2_clean_mva(vaddr);
		xsc3_l2_inv_mva(vaddr);
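		/* round start up to the next cache line boundary */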
		start = (start | (CACHE_LINE_SIZE - 1)) + 1;
	}

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < (end & ~(CACHE_LINE_SIZE - 1))) {
		vaddr = l2_map_va(start, vaddr);
		xsc3_l2_inv_mva(vaddr);
		start += CACHE_LINE_SIZE;
	}

	/*
	 * Clean and invalidate partial last cache line.
	 */
	if (start < end) {
		vaddr = l2_map_va(start, vaddr);
		xsc3_l2_clean_mva(vaddr);
		xsc3_l2_inv_mva(vaddr);
	}

	l2_unmap_va(vaddr);

	dsb();
}

static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
{
	unsigned long vaddr;

	vaddr = -1;  /* to force the first mapping */

	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		vaddr = l2_map_va(start, vaddr);
		xsc3_l2_clean_mva(vaddr);
		start += CACHE_LINE_SIZE;
	}

	l2_unmap_va(vaddr);

	dsb();
}

/*
 * Optimized L2 flush-all operation using the set/way format.
 */
static inline void xsc3_l2_flush_all(void)
{
	unsigned long l2ctype, set_way;
	int set, way;

	__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

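	/* clean and invalidate each line in turn by set/way */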
	for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) {
		for (way = 0; way < CACHE_WAY_PER_SET; way++) {
			set_way = (way << 29) | (set << 5);
			__asm__("mcr p15, 1, %0, c7, c15, 2" : : "r"(set_way));
		}
	}

	dsb();
}

static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
{
	unsigned long vaddr;

	if (start == 0 && end == -1ul) {
		xsc3_l2_flush_all();
		return;
	}

	vaddr = -1;  /* to force the first mapping */

	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		vaddr = l2_map_va(start, vaddr);
		xsc3_l2_clean_mva(vaddr);
		xsc3_l2_inv_mva(vaddr);
		start += CACHE_LINE_SIZE;
	}

	l2_unmap_va(vaddr);

	dsb();
}

static int __init xsc3_l2_init(void)
{
	if (!cpu_is_xsc3() || !xsc3_l2_present())
		return 0;

	if (get_cr() & CR_L2) {
		pr_info("XScale3 L2 cache enabled.\n");
		xsc3_l2_inv_all();

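		/*
		 * Hook the range operations into the generic outer cache
		 * interface; callers reach them through outer_inv_range()
		 * and friends.
		 */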
		outer_cache.inv_range = xsc3_l2_inv_range;
		outer_cache.clean_range = xsc3_l2_clean_range;
		outer_cache.flush_range = xsc3_l2_flush_range;
	}

	return 0;
}
core_initcall(xsc3_l2_init);