xref: /openbmc/linux/arch/arm/mm/cache-tauros2.c (revision d0b73b48)
/*
 * arch/arm/mm/cache-tauros2.c - Tauros2 L2 cache controller support
 *
 * Copyright (C) 2008 Marvell Semiconductor
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * References:
 * - PJ1 CPU Core Datasheet,
 *   Document ID MV-S104837-01, Rev 0.7, January 24 2008.
 * - PJ4 CPU Core Datasheet,
 *   Document ID MV-S105190-00, Rev 0.7, March 14 2008.
 */

#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/hardware/cache-tauros2.h>


/*
 * When Tauros2 is used on a CPU that supports the v7 hierarchical
 * cache operations, the cache handling code in proc-v7.S takes care
 * of everything, including handling DMA coherency.
 *
 * So, we only need to register outer cache operations here if we're
 * being used on a pre-v7 CPU, and we only need to build support for
 * outer cache operations into the kernel image if the kernel has been
 * configured to support a pre-v7 CPU.
 */
#if __LINUX_ARM_ARCH__ < 7
/*
 * Low-level cache maintenance operations.
 */
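/* Write back (clean) a single L2 cache line, identified by physical address. */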
static inline void tauros2_clean_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c11, 3" : : "r" (addr));
}

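/* Write back and invalidate a single L2 cache line by physical address. */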
static inline void tauros2_clean_inv_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c15, 3" : : "r" (addr));
}

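/* Invalidate a single L2 cache line by physical address, without writing it back. */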
static inline void tauros2_inv_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c7, 3" : : "r" (addr));
}


/*
 * Linux primitives.
 *
 * Note that the end addresses passed to Linux primitives are
 * noninclusive.
 */
#define CACHE_LINE_SIZE		32

static void tauros2_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * Clean and invalidate partial first cache line.
	 */
	if (start & (CACHE_LINE_SIZE - 1)) {
		tauros2_clean_inv_pa(start & ~(CACHE_LINE_SIZE - 1));
		start = (start | (CACHE_LINE_SIZE - 1)) + 1;
	}

	/*
	 * Clean and invalidate partial last cache line.
	 */
	if (end & (CACHE_LINE_SIZE - 1)) {
		tauros2_clean_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
		end &= ~(CACHE_LINE_SIZE - 1);
	}

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		tauros2_inv_pa(start);
		start += CACHE_LINE_SIZE;
	}

	dsb();
}

static void tauros2_clean_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		tauros2_clean_pa(start);
		start += CACHE_LINE_SIZE;
	}

	dsb();
}

static void tauros2_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		tauros2_clean_inv_pa(start);
		start += CACHE_LINE_SIZE;
	}

	dsb();
}

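/*
 * Clean the entire L2 before clearing the L2 enable bit in the System
 * Control Register, so that no dirty data is lost when the cache is
 * turned off.
 */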
static void tauros2_disable(void)
{
	__asm__ __volatile__ (
	"mcr	p15, 1, %0, c7, c11, 0 @L2 Cache Clean All\n\t"
	"mrc	p15, 0, %0, c1, c0, 0\n\t"
	"bic	%0, %0, #(1 << 26)\n\t"
	"mcr	p15, 0, %0, c1, c0, 0  @Disable L2 Cache\n\t"
	: : "r" (0x0));
}

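/*
 * On resume the L2 contents are stale, so invalidate everything before
 * setting the L2 enable bit again.
 */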
static void tauros2_resume(void)
{
	__asm__ __volatile__ (
	"mcr	p15, 1, %0, c7, c7, 0 @L2 Cache Invalidate All\n\t"
	"mrc	p15, 0, %0, c1, c0, 0\n\t"
	"orr	%0, %0, #(1 << 26)\n\t"
	"mcr	p15, 0, %0, c1, c0, 0 @Enable L2 Cache\n\t"
	: : "r" (0x0));
}
#endif

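/* Accessors for the Marvell CPU Extra Features register (CP15 c15, c1, 0). */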
static inline u32 __init read_extra_features(void)
{
	u32 u;

	__asm__("mrc p15, 1, %0, c15, c1, 0" : "=r" (u));

	return u;
}

static inline void __init write_extra_features(u32 u)
{
	__asm__("mcr p15, 1, %0, c15, c1, 0" : : "r" (u));
}

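/*
 * Return true if the CPU uses the new CPUID scheme, i.e. the architecture
 * field of the Main ID register reads 0xF.
 */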
static inline int __init cpuid_scheme(void)
{
	return !!((processor_id & 0x000f0000) == 0x000f0000);
}

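/*
 * Read ID_MMFR3; its low nibble indicates whether the CPU implements the
 * v7 hierarchical cache maintenance operations.
 */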
static inline u32 __init read_mmfr3(void)
{
	u32 mmfr3;

	__asm__("mrc p15, 0, %0, c0, c1, 7\n" : "=r" (mmfr3));

	return mmfr3;
}

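/* Accessors for the Auxiliary Control Register (CP15 c1, c0, 1). */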
static inline u32 __init read_actlr(void)
{
	u32 actlr;

	__asm__("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));

	return actlr;
}

static inline void __init write_actlr(u32 actlr)
{
	__asm__("mcr p15, 0, %0, c1, c0, 1\n" : : "r" (actlr));
}

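/*
 * In the Extra Features register, a set bit [24] disables L2 prefetch and
 * a set bit [20] selects burst-of-8 line fills.
 */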
static void enable_extra_feature(unsigned int features)
{
	u32 u;

	u = read_extra_features();

	if (features & CACHE_TAUROS2_PREFETCH_ON)
		u &= ~0x01000000;
	else
		u |= 0x01000000;
	printk(KERN_INFO "Tauros2: %s L2 prefetch.\n",
			(features & CACHE_TAUROS2_PREFETCH_ON)
			? "Enabling" : "Disabling");

	if (features & CACHE_TAUROS2_LINEFILL_BURST8)
		u |= 0x00100000;
	else
		u &= ~0x00100000;
	printk(KERN_INFO "Tauros2: %s burst-of-8 line fill.\n",
			(features & CACHE_TAUROS2_LINEFILL_BURST8)
			? "Enabling" : "Disabling");

	write_extra_features(u);
}

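/*
 * Probe which CPU personality we are running under, enable the L2 cache
 * if it is not already on, and hook up the outer-cache callbacks for
 * pre-v7 parts.
 */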
static void __init tauros2_internal_init(unsigned int features)
{
	char *mode = NULL;

	enable_extra_feature(features);

#ifdef CONFIG_CPU_32v5
	if ((processor_id & 0xff0f0000) == 0x56050000) {
		u32 feat;

		/*
		 * v5 CPUs with Tauros2 have the L2 cache enable bit
		 * located in the CPU Extra Features register.
		 */
		feat = read_extra_features();
		if (!(feat & 0x00400000)) {
			printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
			write_extra_features(feat | 0x00400000);
		}

		mode = "ARMv5";
		outer_cache.inv_range = tauros2_inv_range;
		outer_cache.clean_range = tauros2_clean_range;
		outer_cache.flush_range = tauros2_flush_range;
		outer_cache.disable = tauros2_disable;
		outer_cache.resume = tauros2_resume;
	}
#endif

#ifdef CONFIG_CPU_32v6
	/*
	 * Check whether this CPU lacks support for the v7 hierarchical
	 * cache ops.  (PJ4 is in its v6 personality mode if the MMFR3
	 * register indicates no support for the v7 hierarchical cache
	 * ops.)
	 */
	if (cpuid_scheme() && (read_mmfr3() & 0xf) == 0) {
		/*
		 * When Tauros2 is used in an ARMv6 system, the L2
		 * enable bit is in the ARMv6 ARM-mandated position
		 * (bit [26] of the System Control Register).
		 */
		if (!(get_cr() & 0x04000000)) {
			printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
			adjust_cr(0x04000000, 0x04000000);
		}

		mode = "ARMv6";
		outer_cache.inv_range = tauros2_inv_range;
		outer_cache.clean_range = tauros2_clean_range;
		outer_cache.flush_range = tauros2_flush_range;
		outer_cache.disable = tauros2_disable;
		outer_cache.resume = tauros2_resume;
	}
#endif

#ifdef CONFIG_CPU_32v7
	/*
	 * Check whether this CPU has support for the v7 hierarchical
	 * cache ops.  (PJ4 is in its v7 personality mode if the MMFR3
	 * register indicates support for the v7 hierarchical cache
	 * ops.)
	 *
	 * (Although strictly speaking there may exist CPUs that
	 * implement the v7 cache ops but are only ARMv6 CPUs (due to
	 * not complying with all of the other ARMv7 requirements),
	 * there are no real-life examples of Tauros2 being used on
	 * such CPUs as of yet.)
	 */
	if (cpuid_scheme() && (read_mmfr3() & 0xf) == 1) {
		u32 actlr;

		/*
		 * When Tauros2 is used in an ARMv7 system, the L2
		 * enable bit is located in the Auxiliary System Control
		 * Register (which is the only register allowed by the
		 * ARMv7 spec to contain fine-grained cache control bits).
		 */
		actlr = read_actlr();
		if (!(actlr & 0x00000002)) {
			printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
			write_actlr(actlr | 0x00000002);
		}

		mode = "ARMv7";
	}
#endif

	if (mode == NULL) {
		printk(KERN_CRIT "Tauros2: Unable to detect CPU mode.\n");
		return;
	}

	printk(KERN_INFO "Tauros2: L2 cache support initialised "
			 "in %s mode.\n", mode);
}

#ifdef CONFIG_OF
static const struct of_device_id tauros2_ids[] __initconst = {
	{ .compatible = "marvell,tauros2-cache"},
	{}
};
#endif

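/*
 * Entry point.  With CONFIG_OF the feature mask is taken from the
 * "marvell,tauros2-cache-features" property of the matching device tree
 * node; otherwise the mask passed in by the caller is used as-is.
 */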
void __init tauros2_init(unsigned int features)
{
#ifdef CONFIG_OF
	struct device_node *node;
	int ret;
	unsigned int f;

	node = of_find_matching_node(NULL, tauros2_ids);
	if (!node) {
		pr_info("Tauros2: marvell,tauros2-cache node not found, not enabling L2 cache\n");
		return;
	}

	ret = of_property_read_u32(node, "marvell,tauros2-cache-features", &f);
	if (ret) {
		pr_info("Tauros2: marvell,tauros2-cache-features property not found, disabling extra features\n");
		features = 0;
	} else
		features = f;
#endif
	tauros2_internal_init(features);
}