xref: /openbmc/u-boot/arch/arc/lib/cache.c (revision 5d7a24d6)
1 /*
2  * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
3  *
4  * SPDX-License-Identifier:	GPL-2.0+
5  */
6 
7 #include <config.h>
8 #include <common.h>
9 #include <linux/compiler.h>
10 #include <linux/kernel.h>
11 #include <linux/log2.h>
12 #include <asm/arcregs.h>
13 #include <asm/cache.h>
14 
15 /* Bit values in IC_CTRL */
16 #define IC_CTRL_CACHE_DISABLE	BIT(0)
17 
18 /* Bit values in DC_CTRL */
19 #define DC_CTRL_CACHE_DISABLE	BIT(0)
20 #define DC_CTRL_INV_MODE_FLUSH	BIT(6)
21 #define DC_CTRL_FLUSH_STATUS	BIT(8)
22 #define CACHE_VER_NUM_MASK	0xF
23 
24 #define OP_INV			BIT(0)
25 #define OP_FLUSH		BIT(1)
26 #define OP_FLUSH_N_INV		(OP_FLUSH | OP_INV)
27 
28 /* Bit val in SLC_CONTROL */
29 #define SLC_CTRL_DIS		0x001
30 #define SLC_CTRL_IM		0x040
31 #define SLC_CTRL_BUSY		0x100
32 #define SLC_CTRL_RGN_OP_INV	0x200
33 
34 /*
35  * By default that variable will fall into .bss section.
 * But .bss section is not relocated and so it will be initialized before
37  * relocation but will be used after being zeroed.
38  */
39 int l1_line_sz __section(".data");
40 bool dcache_exists __section(".data") = false;
41 bool icache_exists __section(".data") = false;
42 
43 #define CACHE_LINE_MASK		(~(l1_line_sz - 1))
44 
45 #ifdef CONFIG_ISA_ARCV2
46 int slc_line_sz __section(".data");
47 bool slc_exists __section(".data") = false;
48 bool ioc_exists __section(".data") = false;
49 bool pae_exists __section(".data") = false;
50 
51 /* To force enable IOC set ioc_enable to 'true' */
52 bool ioc_enable __section(".data") = false;
53 
/*
 * Decode the MMU build configuration register (BCR).
 *
 * For MMU v4 and newer the BCR carries a PAE40 capability bit; record it
 * in pae_exists so cache_init() can tell whether the upper SLC region
 * registers exist.  For older MMU versions this is a no-op.
 */
void read_decode_mmu_bcr(void)
{
	/* TODO: should we compare mmu version from BCR and from CONFIG? */
#if (CONFIG_ARC_MMU_VER >= 4)
	u32 tmp;

	tmp = read_aux_reg(ARC_AUX_MMU_BCR);

	/* Bitfield layout of the v4 MMU BCR; only 'pae' is consumed here */
	struct bcr_mmu_4 {
#ifdef CONFIG_CPU_BIG_ENDIAN
	unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
		     n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
#else
	/*           DTLB      ITLB      JES        JE         JA      */
	unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
		     pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
#endif /* CONFIG_CPU_BIG_ENDIAN */
	} *mmu4;

	/* Reinterpret the raw register value through the bitfield layout */
	mmu4 = (struct bcr_mmu_4 *)&tmp;

	pae_exists = !!mmu4->pae;
#endif /* (CONFIG_ARC_MMU_VER >= 4) */
}
78 
/*
 * Perform an operation (OP_INV, OP_FLUSH or OP_FLUSH_N_INV) on the whole
 * system level cache: program the IM bit (flush-before-invalidate mode),
 * kick the matching command register, then busy-wait until done.
 */
static void __slc_entire_op(const int op)
{
	unsigned int ctrl;

	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		write_aux_reg(ARC_AUX_SLC_INVALIDATE, 0x1);
	else
		write_aux_reg(ARC_AUX_SLC_FLUSH, 0x1);

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_AUX_SLC_CTRL);

	/* Important to wait for flush to complete */
	while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);
}
103 
/*
 * Zero the upper-address SLC region registers.  These registers exist
 * only when PAE40 is present (see the pae_exists check in cache_init());
 * U-Boot never uses PAE40, so both stay pinned to zero.
 */
static void slc_upper_region_init(void)
{
	/*
	 * ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1 are always == 0
	 * as we don't use PAE40.
	 */
	write_aux_reg(ARC_AUX_SLC_RGN_END1, 0);
	write_aux_reg(ARC_AUX_SLC_RGN_START1, 0);
}
113 
/*
 * Perform a region operation (OP_INV, OP_FLUSH or OP_FLUSH_N_INV) on the
 * system level cache for [paddr, paddr + sz).  The register write order
 * below is significant: writing RGN_START triggers the operation, so END
 * must be programmed first.
 */
static void __slc_rgn_op(unsigned long paddr, unsigned long sz, const int op)
{
	unsigned int ctrl;
	unsigned long end;

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip
	 * END needs to be setup before START (latter triggers the operation)
	 * END can't be same as START, so add (slc_line_sz - 1) to sz
	 */
	end = paddr + sz + slc_line_sz - 1;

	/*
	 * Upper addresses (ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1)
	 * are always == 0 as we don't use PAE40, so we only setup lower ones
	 * (ARC_AUX_SLC_RGN_END and ARC_AUX_SLC_RGN_START)
	 */
	write_aux_reg(ARC_AUX_SLC_RGN_END, end);
	write_aux_reg(ARC_AUX_SLC_RGN_START, paddr);

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_AUX_SLC_CTRL);

	while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);
}
160 #endif /* CONFIG_ISA_ARCV2 */
161 
162 #ifdef CONFIG_ISA_ARCV2
/*
 * Probe the ARCv2-only cache build configuration registers.
 *
 * A non-zero SLC BCR version means a system level cache is present; its
 * line size is then decoded from SLC_CONFIG (lsz == 0 -> 128 bytes,
 * otherwise 64).  The cluster BCR's 'c' bit reports an IO-Coherency
 * unit; ioc_exists is set only when the hardware has one AND the user
 * opted in via ioc_enable.
 */
static void read_decode_cache_bcr_arcv2(void)
{
	/* SLC_CONFIG layout; only 'lsz' is consumed here */
	union {
		struct {
#ifdef CONFIG_CPU_BIG_ENDIAN
			unsigned int pad:24, way:2, lsz:2, sz:4;
#else
			unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
		} fields;
		unsigned int word;
	} slc_cfg;

	/* SLC build register; ver == 0 means no SLC at all */
	union {
		struct {
#ifdef CONFIG_CPU_BIG_ENDIAN
			unsigned int pad:24, ver:8;
#else
			unsigned int ver:8, pad:24;
#endif
		} fields;
		unsigned int word;
	} sbcr;

	sbcr.word = read_aux_reg(ARC_BCR_SLC);
	if (sbcr.fields.ver) {
		slc_cfg.word = read_aux_reg(ARC_AUX_SLC_CONFIG);
		slc_exists = true;
		slc_line_sz = (slc_cfg.fields.lsz == 0) ? 128 : 64;
	}

	/* Cluster build register; 'c' flags an IO-Coherency unit */
	union {
		struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
			unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
			unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif
		} fields;
		unsigned int word;
	} cbcr;

	cbcr.word = read_aux_reg(ARC_BCR_CLUSTER);
	if (cbcr.fields.c && ioc_enable)
		ioc_exists = true;
}
209 #endif
210 
211 void read_decode_cache_bcr(void)
212 {
213 	int dc_line_sz = 0, ic_line_sz = 0;
214 
215 	union {
216 		struct {
217 #ifdef CONFIG_CPU_BIG_ENDIAN
218 			unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
219 #else
220 			unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
221 #endif
222 		} fields;
223 		unsigned int word;
224 	} ibcr, dbcr;
225 
226 	ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
227 	if (ibcr.fields.ver) {
228 		icache_exists = true;
229 		l1_line_sz = ic_line_sz = 8 << ibcr.fields.line_len;
230 		if (!ic_line_sz)
231 			panic("Instruction exists but line length is 0\n");
232 	}
233 
234 	dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
235 	if (dbcr.fields.ver) {
236 		dcache_exists = true;
237 		l1_line_sz = dc_line_sz = 16 << dbcr.fields.line_len;
238 		if (!dc_line_sz)
239 			panic("Data cache exists but line length is 0\n");
240 	}
241 
242 	if (ic_line_sz && dc_line_sz && (ic_line_sz != dc_line_sz))
243 		panic("Instruction and data cache line lengths differ\n");
244 }
245 
/*
 * Top-level cache probe/init entry point.
 *
 * Decodes the L1 cache BCRs and, on ARCv2, the SLC/cluster BCRs; then
 * optionally programs the IO-Coherency aperture to cover all of DDR and
 * zeroes the upper (PAE40) SLC region registers when the MMU reports PAE.
 */
void cache_init(void)
{
	read_decode_cache_bcr();

#ifdef CONFIG_ISA_ARCV2
	read_decode_cache_bcr_arcv2();

	if (ioc_exists) {
		/* IOC Aperture start is equal to DDR start */
		unsigned int ap_base = CONFIG_SYS_SDRAM_BASE;
		/* IOC Aperture size is equal to DDR size */
		long ap_size = CONFIG_SYS_SDRAM_SIZE;

		/* Drain dirty lines before handing the region to the IOC */
		flush_dcache_all();
		invalidate_dcache_all();

		/* Hardware requires the size be a power of 2, >= 4 KiB */
		if (!is_power_of_2(ap_size) || ap_size < 4096)
			panic("IOC Aperture size must be power of 2 and bigger 4Kib");

		/*
		 * IOC Aperture size decoded as 2 ^ (SIZE + 2) KB,
		 * so setting 0x11 implies 512M, 0x12 implies 1G...
		 */
		write_aux_reg(ARC_AUX_IO_COH_AP0_SIZE,
			      order_base_2(ap_size / 1024) - 2);

		/* IOC Aperture start must be aligned to the size of the aperture */
		if (ap_base % ap_size != 0)
			panic("IOC Aperture start must be aligned to the size of the aperture");

		/* Base is programmed in 4 KiB units, hence the >> 12 */
		write_aux_reg(ARC_AUX_IO_COH_AP0_BASE, ap_base >> 12);
		write_aux_reg(ARC_AUX_IO_COH_PARTIAL, 1);
		write_aux_reg(ARC_AUX_IO_COH_ENABLE, 1);
	}

	read_decode_mmu_bcr();

	/*
	 * ARC_AUX_SLC_RGN_START1 and ARC_AUX_SLC_RGN_END1 register exist
	 * only if PAE exists in current HW. So we had to check pae_exist
	 * before using them.
	 */
	if (slc_exists && pae_exists)
		slc_upper_region_init();
#endif /* CONFIG_ISA_ARCV2 */
}
292 
293 int icache_status(void)
294 {
295 	if (!icache_exists)
296 		return 0;
297 
298 	if (read_aux_reg(ARC_AUX_IC_CTRL) & IC_CTRL_CACHE_DISABLE)
299 		return 0;
300 	else
301 		return 1;
302 }
303 
304 void icache_enable(void)
305 {
306 	if (icache_exists)
307 		write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) &
308 			      ~IC_CTRL_CACHE_DISABLE);
309 }
310 
311 void icache_disable(void)
312 {
313 	if (icache_exists)
314 		write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) |
315 			      IC_CTRL_CACHE_DISABLE);
316 }
317 
/* IC supports only invalidation */
static inline void __ic_entire_invalidate(void)
{
	/* Nothing to do when the I$ is absent or disabled */
	if (!icache_status())
		return;

	/* Any write to IC_IVIC register triggers invalidation of entire I$ */
	write_aux_reg(ARC_AUX_IC_IVIC, 1);
	/*
	 * As per ARC HS databook (see chapter 5.3.3.2)
	 * it is required to add 3 NOPs after each write to IC_IVIC.
	 */
	__builtin_arc_nop();
	__builtin_arc_nop();
	__builtin_arc_nop();
	read_aux_reg(ARC_AUX_IC_CTRL);  /* blocks until invalidate completes */
}
335 
/*
 * Invalidate the whole instruction-side hierarchy:
 * L1 I$ first, then the SLC (ARCv2 only, when present).
 */
void invalidate_icache_all(void)
{
	__ic_entire_invalidate();

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists)
		__slc_entire_op(OP_INV);
#endif
}
345 
346 int dcache_status(void)
347 {
348 	if (!dcache_exists)
349 		return 0;
350 
351 	if (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_CACHE_DISABLE)
352 		return 0;
353 	else
354 		return 1;
355 }
356 
357 void dcache_enable(void)
358 {
359 	if (!dcache_exists)
360 		return;
361 
362 	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) &
363 		      ~(DC_CTRL_INV_MODE_FLUSH | DC_CTRL_CACHE_DISABLE));
364 }
365 
366 void dcache_disable(void)
367 {
368 	if (!dcache_exists)
369 		return;
370 
371 	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) |
372 		      DC_CTRL_CACHE_DISABLE);
373 }
374 
375 #ifndef CONFIG_SYS_DCACHE_OFF
/* Common Helper for Line Operations on D-cache */
static inline void __dcache_line_loop(unsigned long paddr, unsigned long sz,
				      const int cacheop)
{
	unsigned int aux_cmd;
	int num_lines;

	/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
	aux_cmd = cacheop & OP_INV ? ARC_AUX_DC_IVDL : ARC_AUX_DC_FLDL;

	/*
	 * Round the start down to a line boundary and grow sz by the same
	 * amount so partially covered lines at both ends are included.
	 */
	sz += paddr & ~CACHE_LINE_MASK;
	paddr &= CACHE_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, l1_line_sz);

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER == 3)
		/* MMUv3: program the physical tag before each line command */
		write_aux_reg(ARC_AUX_DC_PTAG, paddr);
#endif
		write_aux_reg(aux_cmd, paddr);
		paddr += l1_line_sz;
	}
}
399 
/*
 * Prepare DC_CTRL for the requested operation.  The IM bit turns a plain
 * invalidate into flush-n-invalidate, so it is cleared only for a pure
 * OP_INV — note the exact '==' compare: OP_FLUSH_N_INV must set IM.
 */
static void __before_dc_op(const int op)
{
	unsigned int ctrl;

	ctrl = read_aux_reg(ARC_AUX_DC_CTRL);

	/* IM bit implies flush-n-inv, instead of vanilla inv */
	if (op == OP_INV)
		ctrl &= ~DC_CTRL_INV_MODE_FLUSH;
	else
		ctrl |= DC_CTRL_INV_MODE_FLUSH;

	write_aux_reg(ARC_AUX_DC_CTRL, ctrl);
}
414 
/*
 * Wait for a D$ operation to finish.  Only flush and flush-n-inv are
 * waited on (busy-poll of FLUSH_STATUS); a pure invalidate is not
 * waited for here.
 */
static void __after_dc_op(const int op)
{
	if (op & OP_FLUSH)	/* flush / flush-n-inv both wait */
		while (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_FLUSH_STATUS);
}
420 
421 static inline void __dc_entire_op(const int cacheop)
422 {
423 	int aux;
424 
425 	__before_dc_op(cacheop);
426 
427 	if (cacheop & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
428 		aux = ARC_AUX_DC_IVDC;
429 	else
430 		aux = ARC_AUX_DC_FLSH;
431 
432 	write_aux_reg(aux, 0x1);
433 
434 	__after_dc_op(cacheop);
435 }
436 
/*
 * Line-granular D$ operation on [paddr, paddr + sz): program DC_CTRL,
 * walk the affected lines, then wait for any writeback to drain.
 */
static inline void __dc_line_op(unsigned long paddr, unsigned long sz,
				const int cacheop)
{
	__before_dc_op(cacheop);
	__dcache_line_loop(paddr, sz, cacheop);
	__after_dc_op(cacheop);
}
444 #else
445 #define __dc_entire_op(cacheop)
446 #define __dc_line_op(paddr, sz, cacheop)
447 #endif /* !CONFIG_SYS_DCACHE_OFF */
448 
449 void invalidate_dcache_range(unsigned long start, unsigned long end)
450 {
451 	if (start >= end)
452 		return;
453 
454 #ifdef CONFIG_ISA_ARCV2
455 	if (!ioc_exists)
456 #endif
457 		__dc_line_op(start, end - start, OP_INV);
458 
459 #ifdef CONFIG_ISA_ARCV2
460 	if (slc_exists && !ioc_exists)
461 		__slc_rgn_op(start, end - start, OP_INV);
462 #endif
463 }
464 
465 void flush_dcache_range(unsigned long start, unsigned long end)
466 {
467 	if (start >= end)
468 		return;
469 
470 #ifdef CONFIG_ISA_ARCV2
471 	if (!ioc_exists)
472 #endif
473 		__dc_line_op(start, end - start, OP_FLUSH);
474 
475 #ifdef CONFIG_ISA_ARCV2
476 	if (slc_exists && !ioc_exists)
477 		__slc_rgn_op(start, end - start, OP_FLUSH);
478 #endif
479 }
480 
/* Generic U-Boot hook: write back the size-byte region starting at start. */
void flush_cache(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;

	flush_dcache_range(start, end);
}
485 
/*
 * Invalidate the whole data-side hierarchy:
 * L1 D$ first, then the SLC (ARCv2 only, when present).
 */
void invalidate_dcache_all(void)
{
	__dc_entire_op(OP_INV);

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists)
		__slc_entire_op(OP_INV);
#endif
}
495 
/*
 * Write back the whole data-side hierarchy:
 * L1 D$ first, then the SLC (ARCv2 only, when present).
 */
void flush_dcache_all(void)
{
	__dc_entire_op(OP_FLUSH);

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists)
		__slc_entire_op(OP_FLUSH);
#endif
}
505