/*
 * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <config.h>
#include <common.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <asm/arcregs.h>
#include <asm/cache.h>

/* Bit values in IC_CTRL */
#define IC_CTRL_CACHE_DISABLE	BIT(0)

/* Bit values in DC_CTRL */
#define DC_CTRL_CACHE_DISABLE	BIT(0)
#define DC_CTRL_INV_MODE_FLUSH	BIT(6)
#define DC_CTRL_FLUSH_STATUS	BIT(8)
#define CACHE_VER_NUM_MASK	0xF

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_INV_IC	0x3

/* Bit values in SLC_CTRL (ARC_AUX_SLC_CTRL) */
#define SLC_CTRL_DIS		0x001
#define SLC_CTRL_IM		0x040
#define SLC_CTRL_BUSY		0x100
#define SLC_CTRL_RGN_OP_INV	0x200

/*
 * These variables would fall into the .bss section by default.
 * But .bss is not relocated: the variables are initialized before
 * relocation yet used after .bss has been zeroed, so force them
 * into .data instead.
 */
int l1_line_sz __section(".data");
bool dcache_exists __section(".data") = false;
bool icache_exists __section(".data") = false;

#define CACHE_LINE_MASK		(~(l1_line_sz - 1))

#ifdef CONFIG_ISA_ARCV2
int slc_line_sz __section(".data");
bool slc_exists __section(".data") = false;
bool ioc_exists __section(".data") = false;
bool pae_exists __section(".data") = false;

/* To force enable IOC set ioc_enable to 'true' */
bool ioc_enable __section(".data") = false;

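/*
 * Read the MMU build configuration register and record whether the
 * hardware supports PAE40. This is used later to decide if the upper
 * SLC region registers need to be programmed.
 */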
void read_decode_mmu_bcr(void)
{
	/* TODO: should we compare mmu version from BCR and from CONFIG? */
#if (CONFIG_ARC_MMU_VER >= 4)
	u32 tmp;

	tmp = read_aux_reg(ARC_AUX_MMU_BCR);

	struct bcr_mmu_4 {
#ifdef CONFIG_CPU_BIG_ENDIAN
	unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
		     n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
#else
	/*           DTLB      ITLB      JES        JE         JA      */
	unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
		     pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
#endif /* CONFIG_CPU_BIG_ENDIAN */
	} *mmu4;

	mmu4 = (struct bcr_mmu_4 *)&tmp;

	pae_exists = !!mmu4->pae;
#endif /* (CONFIG_ARC_MMU_VER >= 4) */
}

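/*
 * Perform an entire-cache operation (invalidate, flush or flush-n-inv)
 * on the System Level Cache (SLC) and wait for it to complete.
 */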
static void __slc_entire_op(const int op)
{
	unsigned int ctrl;

	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		write_aux_reg(ARC_AUX_SLC_INVALIDATE, 0x1);
	else
		write_aux_reg(ARC_AUX_SLC_FLUSH, 0x1);

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_AUX_SLC_CTRL);

	/* Important to wait for flush to complete */
	while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);
}

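/*
 * Zero the upper (bits 32+) SLC region boundary registers once so that
 * region operations only need to program the lower registers.
 */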
static void slc_upper_region_init(void)
{
	/*
	 * ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1 are always == 0
	 * as we don't use PAE40.
	 */
	write_aux_reg(ARC_AUX_SLC_RGN_END1, 0);
	write_aux_reg(ARC_AUX_SLC_RGN_START1, 0);
}

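/*
 * Perform a region operation (invalidate, flush or flush-n-inv) on the
 * SLC for the range [paddr, paddr + sz) and wait for completion.
 */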
static void __slc_rgn_op(unsigned long paddr, unsigned long sz, const int op)
{
	unsigned int ctrl;
	unsigned long end;

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip.
	 * END needs to be set up before START (the latter triggers the
	 * operation). END can't be the same as START, so add
	 * (slc_line_sz - 1) to sz.
	 */
	end = paddr + sz + slc_line_sz - 1;

	/*
	 * Upper addresses (ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1)
	 * are always == 0 as we don't use PAE40, so we only set up the lower
	 * ones (ARC_AUX_SLC_RGN_END and ARC_AUX_SLC_RGN_START)
	 */
	write_aux_reg(ARC_AUX_SLC_RGN_END, end);
	write_aux_reg(ARC_AUX_SLC_RGN_START, paddr);

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_AUX_SLC_CTRL);

	while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);
}
#endif /* CONFIG_ISA_ARCV2 */

#ifdef CONFIG_ISA_ARCV2
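/*
 * Decode the ARCv2-only build configuration registers: detect the SLC
 * and its line size, and detect the IO Coherency (IOC) block via the
 * cluster BCR (honoured only when ioc_enable is set).
 */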
static void read_decode_cache_bcr_arcv2(void)
{
	union {
		struct {
#ifdef CONFIG_CPU_BIG_ENDIAN
			unsigned int pad:24, way:2, lsz:2, sz:4;
#else
			unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
		} fields;
		unsigned int word;
	} slc_cfg;

	union {
		struct {
#ifdef CONFIG_CPU_BIG_ENDIAN
			unsigned int pad:24, ver:8;
#else
			unsigned int ver:8, pad:24;
#endif
		} fields;
		unsigned int word;
	} sbcr;

	sbcr.word = read_aux_reg(ARC_BCR_SLC);
	if (sbcr.fields.ver) {
		slc_cfg.word = read_aux_reg(ARC_AUX_SLC_CONFIG);
		slc_exists = true;
		slc_line_sz = (slc_cfg.fields.lsz == 0) ? 128 : 64;
	}

	union {
		struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
			unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
			unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif
		} fields;
		unsigned int word;
	} cbcr;

	cbcr.word = read_aux_reg(ARC_BCR_CLUSTER);
	if (cbcr.fields.c && ioc_enable)
		ioc_exists = true;
}
#endif

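/*
 * Decode the L1 I$ and D$ build configuration registers: record which
 * caches exist and their line sizes, and sanity-check the configuration.
 */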
void read_decode_cache_bcr(void)
{
	int dc_line_sz = 0, ic_line_sz = 0;

	union {
		struct {
#ifdef CONFIG_CPU_BIG_ENDIAN
			unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
			unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
		} fields;
		unsigned int word;
	} ibcr, dbcr;

	ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
	if (ibcr.fields.ver) {
		icache_exists = true;
		l1_line_sz = ic_line_sz = 8 << ibcr.fields.line_len;
		if (!ic_line_sz)
			panic("Instruction cache exists but line length is 0\n");
	}

	dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
	if (dbcr.fields.ver) {
		dcache_exists = true;
		l1_line_sz = dc_line_sz = 16 << dbcr.fields.line_len;
		if (!dc_line_sz)
			panic("Data cache exists but line length is 0\n");
	}

	if (ic_line_sz && dc_line_sz && (ic_line_sz != dc_line_sz))
		panic("Instruction and data cache line lengths differ\n");
}

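/*
 * Top-level cache init: decode the cache and MMU BCRs and, on ARCv2,
 * program the IOC aperture to cover DDR and prepare the SLC upper
 * region registers when PAE40 is present.
 */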
void cache_init(void)
{
	read_decode_cache_bcr();

#ifdef CONFIG_ISA_ARCV2
	read_decode_cache_bcr_arcv2();

	if (ioc_exists) {
		/* IOC Aperture start is equal to DDR start */
		unsigned int ap_base = CONFIG_SYS_SDRAM_BASE;
		/* IOC Aperture size is equal to DDR size */
		long ap_size = CONFIG_SYS_SDRAM_SIZE;

		flush_dcache_all();
		invalidate_dcache_all();

		if (!is_power_of_2(ap_size) || ap_size < 4096)
			panic("IOC Aperture size must be a power of 2 and at least 4KiB");

		/*
		 * IOC Aperture size decoded as 2 ^ (SIZE + 2) KB,
		 * so setting 0x11 implies 512M, 0x12 implies 1G...
		 */
		write_aux_reg(ARC_AUX_IO_COH_AP0_SIZE,
			      order_base_2(ap_size / 1024) - 2);

		/* IOC Aperture start must be aligned to the size of the aperture */
		if (ap_base % ap_size != 0)
			panic("IOC Aperture start must be aligned to the size of the aperture");

		write_aux_reg(ARC_AUX_IO_COH_AP0_BASE, ap_base >> 12);
		write_aux_reg(ARC_AUX_IO_COH_PARTIAL, 1);
		write_aux_reg(ARC_AUX_IO_COH_ENABLE, 1);
	}

	read_decode_mmu_bcr();

	/*
	 * The ARC_AUX_SLC_RGN_START1 and ARC_AUX_SLC_RGN_END1 registers exist
	 * only if PAE exists in the current HW, so we have to check
	 * pae_exists before using them.
	 */
	if (slc_exists && pae_exists)
		slc_upper_region_init();
#endif /* CONFIG_ISA_ARCV2 */
}

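/* Return 1 if the L1 instruction cache is present and enabled, 0 otherwise */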
int icache_status(void)
{
	if (!icache_exists)
		return 0;

	if (read_aux_reg(ARC_AUX_IC_CTRL) & IC_CTRL_CACHE_DISABLE)
		return 0;
	else
		return 1;
}

void icache_enable(void)
{
	if (icache_exists)
		write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) &
			      ~IC_CTRL_CACHE_DISABLE);
}

void icache_disable(void)
{
	if (icache_exists)
		write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) |
			      IC_CTRL_CACHE_DISABLE);
}

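/*
 * Invalidate the entire L1 I$ (if it is enabled) and, on ARCv2, the SLC
 * as well.
 */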
void invalidate_icache_all(void)
{
	/* Any write to IC_IVIC register triggers invalidation of entire I$ */
	if (icache_status()) {
		write_aux_reg(ARC_AUX_IC_IVIC, 1);
		/*
		 * As per ARC HS databook (see chapter 5.3.3.2)
		 * it is required to add 3 NOPs after each write to IC_IVIC.
		 */
		__builtin_arc_nop();
		__builtin_arc_nop();
		__builtin_arc_nop();
		read_aux_reg(ARC_AUX_IC_CTRL);	/* blocks */
	}

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists)
		__slc_entire_op(OP_INV);
#endif
}

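/* Return 1 if the L1 data cache is present and enabled, 0 otherwise */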
int dcache_status(void)
{
	if (!dcache_exists)
		return 0;

	if (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_CACHE_DISABLE)
		return 0;
	else
		return 1;
}

void dcache_enable(void)
{
	if (!dcache_exists)
		return;

	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) &
		      ~(DC_CTRL_INV_MODE_FLUSH | DC_CTRL_CACHE_DISABLE));
}

void dcache_disable(void)
{
	if (!dcache_exists)
		return;

	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) |
		      DC_CTRL_CACHE_DISABLE);
}

#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * Common Helper for Line Operations on {I,D}-Cache
 */
static inline void __cache_line_loop(unsigned long paddr, unsigned long sz,
				     const int cacheop)
{
	unsigned int aux_cmd;
#if (CONFIG_ARC_MMU_VER == 3)
	unsigned int aux_tag;
#endif
	int num_lines;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_AUX_IC_IVIL;
#if (CONFIG_ARC_MMU_VER == 3)
		aux_tag = ARC_AUX_IC_PTAG;
#endif
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_AUX_DC_IVDL : ARC_AUX_DC_FLDL;
#if (CONFIG_ARC_MMU_VER == 3)
		aux_tag = ARC_AUX_DC_PTAG;
#endif
	}

	sz += paddr & ~CACHE_LINE_MASK;
	paddr &= CACHE_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, l1_line_sz);

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER == 3)
		write_aux_reg(aux_tag, paddr);
#endif
		write_aux_reg(aux_cmd, paddr);
		paddr += l1_line_sz;
	}
}

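/*
 * Prepare DC_CTRL for the requested operation: for a plain invalidate,
 * clear the "invalidate also flushes" (IM) mode. Returns the original
 * DC_CTRL value so __after_dc_op() can restore it.
 */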
static unsigned int __before_dc_op(const int op)
{
	unsigned int reg;

	/* Always read DC_CTRL so the caller gets a defined value back */
	reg = read_aux_reg(ARC_AUX_DC_CTRL);

	if (op == OP_INV) {
		/*
		 * IM is set by default and implies Flush-n-inv
		 * Clear it here for vanilla inv
		 */
		write_aux_reg(ARC_AUX_DC_CTRL, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}

	return reg;
}

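/*
 * Wait for a flush (or flush-n-inv) to complete and, after a plain
 * invalidate, restore the default flush-n-inv (IM) mode that was cleared
 * by __before_dc_op().
 */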
static void __after_dc_op(const int op, unsigned int reg)
{
	if (op & OP_FLUSH)	/* flush / flush-n-inv both wait */
		while (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_FLUSH_STATUS);

	/* Switch back to default Invalidate mode */
	if (op == OP_INV)
		write_aux_reg(ARC_AUX_DC_CTRL, reg | DC_CTRL_INV_MODE_FLUSH);
}

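/* Operate on the entire L1 D$: invalidate, flush or flush-n-inv */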
static inline void __dc_entire_op(const int cacheop)
{
	int aux;
	unsigned int ctrl_reg = __before_dc_op(cacheop);

	if (cacheop & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_AUX_DC_IVDC;
	else
		aux = ARC_AUX_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(cacheop, ctrl_reg);
}

static inline void __dc_line_op(unsigned long paddr, unsigned long sz,
				const int cacheop)
{
	unsigned int ctrl_reg = __before_dc_op(cacheop);

	__cache_line_loop(paddr, sz, cacheop);
	__after_dc_op(cacheop, ctrl_reg);
}
#else
#define __dc_entire_op(cacheop)
#define __dc_line_op(paddr, sz, cacheop)
#endif /* !CONFIG_SYS_DCACHE_OFF */

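/*
 * Invalidate [start, end) in the data caches. When the IOC is enabled it
 * keeps DMA coherent, so both the L1 and SLC maintenance are skipped.
 */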
void invalidate_dcache_range(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;

#ifdef CONFIG_ISA_ARCV2
	if (!ioc_exists)
#endif
		__dc_line_op(start, end - start, OP_INV);

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists && !ioc_exists)
		__slc_rgn_op(start, end - start, OP_INV);
#endif
}

void flush_dcache_range(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;

#ifdef CONFIG_ISA_ARCV2
	if (!ioc_exists)
#endif
		__dc_line_op(start, end - start, OP_FLUSH);

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists && !ioc_exists)
		__slc_rgn_op(start, end - start, OP_FLUSH);
#endif
}

void flush_cache(unsigned long start, unsigned long size)
{
	flush_dcache_range(start, start + size);
}

void invalidate_dcache_all(void)
{
	__dc_entire_op(OP_INV);

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists)
		__slc_entire_op(OP_INV);
#endif
}

void flush_dcache_all(void)
{
	__dc_entire_op(OP_FLUSH);

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists)
		__slc_entire_op(OP_FLUSH);
#endif
}