xref: /openbmc/u-boot/arch/arc/lib/cache.c (revision 41cada4d)
1 /*
2  * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
3  *
4  * SPDX-License-Identifier:	GPL-2.0+
5  */
6 
7 #include <config.h>
8 #include <common.h>
9 #include <linux/compiler.h>
10 #include <linux/kernel.h>
11 #include <linux/log2.h>
12 #include <asm/arcregs.h>
13 #include <asm/cache.h>
14 
15 /* Bit values in IC_CTRL */
16 #define IC_CTRL_CACHE_DISABLE	(1 << 0)
17 
18 /* Bit values in DC_CTRL */
19 #define DC_CTRL_CACHE_DISABLE	(1 << 0)
20 #define DC_CTRL_INV_MODE_FLUSH	(1 << 6)
21 #define DC_CTRL_FLUSH_STATUS	(1 << 8)
22 #define CACHE_VER_NUM_MASK	0xF
23 
24 #define OP_INV		0x1
25 #define OP_FLUSH	0x2
26 #define OP_INV_IC	0x3
27 
28 /* Bit val in SLC_CONTROL */
29 #define SLC_CTRL_DIS		0x001
30 #define SLC_CTRL_IM		0x040
31 #define SLC_CTRL_BUSY		0x100
32 #define SLC_CTRL_RGN_OP_INV	0x200
33 
/*
 * By default these variables would fall into the .bss section.
 * But the .bss section is not relocated, so they would be initialized
 * before relocation and then zeroed (losing their values) when actually
 * used afterwards. Placing them in .data preserves them across relocation.
 */
39 int l1_line_sz __section(".data");
40 bool dcache_exists __section(".data") = false;
41 bool icache_exists __section(".data") = false;
42 
43 #define CACHE_LINE_MASK		(~(l1_line_sz - 1))
44 
45 #ifdef CONFIG_ISA_ARCV2
46 int slc_line_sz __section(".data");
47 bool slc_exists __section(".data") = false;
48 bool ioc_exists __section(".data") = false;
49 bool pae_exists __section(".data") = false;
50 
/*
 * Probe the MMU Build Configuration Register and record whether the
 * hardware supports PAE40 by setting the .data-resident 'pae_exists'
 * flag. Only MMUv4-and-later BCR layouts carry the PAE bit, hence the
 * compile-time guard; on older MMUs this function is a no-op.
 */
void read_decode_mmu_bcr(void)
{
	/* TODO: should we compare mmu version from BCR and from CONFIG? */
#if (CONFIG_ARC_MMU_VER >= 4)
	u32 tmp;

	tmp = read_aux_reg(ARC_AUX_MMU_BCR);

	/*
	 * Bitfield view of the MMUv4 BCR; the two variants keep the same
	 * field names addressable on either endianness.
	 */
	struct bcr_mmu_4 {
#ifdef CONFIG_CPU_BIG_ENDIAN
	unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
		     n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
#else
	/*           DTLB      ITLB      JES        JE         JA      */
	unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
		     pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
#endif /* CONFIG_CPU_BIG_ENDIAN */
	} *mmu4;

	/* Overlay the bitfield layout on the raw register value */
	mmu4 = (struct bcr_mmu_4 *)&tmp;

	pae_exists = !!mmu4->pae;
#endif /* (CONFIG_ARC_MMU_VER >= 4) */
}
75 
76 static void __slc_entire_op(const int op)
77 {
78 	unsigned int ctrl;
79 
80 	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);
81 
82 	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
83 		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
84 	else
85 		ctrl |= SLC_CTRL_IM;
86 
87 	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);
88 
89 	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
90 		write_aux_reg(ARC_AUX_SLC_INVALIDATE, 0x1);
91 	else
92 		write_aux_reg(ARC_AUX_SLC_FLUSH, 0x1);
93 
94 	/* Make sure "busy" bit reports correct stataus, see STAR 9001165532 */
95 	read_aux_reg(ARC_AUX_SLC_CTRL);
96 
97 	/* Important to wait for flush to complete */
98 	while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);
99 }
100 
/*
 * Program the upper-32-bit halves of the SLC region-operation address
 * registers. These exist only when the hardware has PAE40 support
 * (caller checks 'pae_exists'); since U-Boot never uses PAE40 addresses
 * they are pinned to zero once here so __slc_rgn_op() only has to set up
 * the lower address registers.
 */
static void slc_upper_region_init(void)
{
	/*
	 * ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1 are always == 0
	 * as we don't use PAE40.
	 */
	write_aux_reg(ARC_AUX_SLC_RGN_END1, 0);
	write_aux_reg(ARC_AUX_SLC_RGN_START1, 0);
}
110 
/*
 * Perform a region operation (invalidate, flush, or flush-n-invalidate)
 * on the System Level Cache for [paddr, paddr + sz) and wait for it to
 * complete. Statement order is significant: CTRL must be programmed
 * before the addresses, and END before START (writing START triggers
 * the operation).
 */
static void __slc_rgn_op(unsigned long paddr, unsigned long sz, const int op)
{
	unsigned int ctrl;
	unsigned long end;

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip
	 * END needs to be setup before START (latter triggers the operation)
	 * END can't be same as START, so add (slc_line_sz - 1) to sz
	 */
	end = paddr + sz + slc_line_sz - 1;

	/*
	 * Upper addresses (ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1)
	 * are always == 0 as we don't use PAE40, so we only setup lower ones
	 * (ARC_AUX_SLC_RGN_END and ARC_AUX_SLC_RGN_START)
	 */
	write_aux_reg(ARC_AUX_SLC_RGN_END, end);
	write_aux_reg(ARC_AUX_SLC_RGN_START, paddr);

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_AUX_SLC_CTRL);

	/* Wait for the region operation to complete */
	while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY)
		;
}
157 #endif /* CONFIG_ISA_ARCV2 */
158 
159 #ifdef CONFIG_ISA_ARCV2
160 static void read_decode_cache_bcr_arcv2(void)
161 {
162 	union {
163 		struct {
164 #ifdef CONFIG_CPU_BIG_ENDIAN
165 			unsigned int pad:24, way:2, lsz:2, sz:4;
166 #else
167 			unsigned int sz:4, lsz:2, way:2, pad:24;
168 #endif
169 		} fields;
170 		unsigned int word;
171 	} slc_cfg;
172 
173 	union {
174 		struct {
175 #ifdef CONFIG_CPU_BIG_ENDIAN
176 			unsigned int pad:24, ver:8;
177 #else
178 			unsigned int ver:8, pad:24;
179 #endif
180 		} fields;
181 		unsigned int word;
182 	} sbcr;
183 
184 	sbcr.word = read_aux_reg(ARC_BCR_SLC);
185 	if (sbcr.fields.ver) {
186 		slc_cfg.word = read_aux_reg(ARC_AUX_SLC_CONFIG);
187 		slc_exists = true;
188 		slc_line_sz = (slc_cfg.fields.lsz == 0) ? 128 : 64;
189 	}
190 
191 	union {
192 		struct bcr_clust_cfg {
193 #ifdef CONFIG_CPU_BIG_ENDIAN
194 			unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
195 #else
196 			unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
197 #endif
198 		} fields;
199 		unsigned int word;
200 	} cbcr;
201 
202 	cbcr.word = read_aux_reg(ARC_BCR_CLUSTER);
203 	if (cbcr.fields.c)
204 		ioc_exists = true;
205 }
206 #endif
207 
208 void read_decode_cache_bcr(void)
209 {
210 	int dc_line_sz = 0, ic_line_sz = 0;
211 
212 	union {
213 		struct {
214 #ifdef CONFIG_CPU_BIG_ENDIAN
215 			unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
216 #else
217 			unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
218 #endif
219 		} fields;
220 		unsigned int word;
221 	} ibcr, dbcr;
222 
223 	ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
224 	if (ibcr.fields.ver) {
225 		icache_exists = true;
226 		l1_line_sz = ic_line_sz = 8 << ibcr.fields.line_len;
227 		if (!ic_line_sz)
228 			panic("Instruction exists but line length is 0\n");
229 	}
230 
231 	dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
232 	if (dbcr.fields.ver){
233 		dcache_exists = true;
234 		l1_line_sz = dc_line_sz = 16 << dbcr.fields.line_len;
235 		if (!dc_line_sz)
236 			panic("Data cache exists but line length is 0\n");
237 	}
238 
239 	if (ic_line_sz && dc_line_sz && (ic_line_sz != dc_line_sz))
240 		panic("Instruction and data cache line lengths differ\n");
241 }
242 
/*
 * Top-level cache probe and setup, called early during boot.
 *
 * Decodes the L1 cache geometry; on ARCv2 additionally probes the SLC
 * and IO-Coherency unit, configures the IOC aperture over DDR when one
 * is present, and prepares the SLC upper region registers when PAE40
 * hardware exists.
 */
void cache_init(void)
{
	read_decode_cache_bcr();

#ifdef CONFIG_ISA_ARCV2
	read_decode_cache_bcr_arcv2();

	if (ioc_exists) {
		/* IOC Aperture start is equal to DDR start */
		unsigned int ap_base = CONFIG_SYS_SDRAM_BASE;
		/* IOC Aperture size is equal to DDR size */
		long ap_size = CONFIG_SYS_SDRAM_SIZE;

		/*
		 * Caches must not hold stale data for the region the IOC
		 * is about to manage, so write back and discard everything.
		 */
		flush_dcache_all();
		invalidate_dcache_all();

		if (!is_power_of_2(ap_size) || ap_size < 4096)
			panic("IOC Aperture size must be power of 2 and at least 4KiB");

		/*
		 * IOC Aperture size decoded as 2 ^ (SIZE + 2) KB,
		 * so setting 0x11 implies 512M, 0x12 implies 1G...
		 */
		write_aux_reg(ARC_AUX_IO_COH_AP0_SIZE,
			      order_base_2(ap_size / 1024) - 2);

		/* IOC Aperture start must be aligned to the size of the aperture */
		if (ap_base % ap_size != 0)
			panic("IOC Aperture start must be aligned to the size of the aperture");

		write_aux_reg(ARC_AUX_IO_COH_AP0_BASE, ap_base >> 12);
		write_aux_reg(ARC_AUX_IO_COH_PARTIAL, 1);
		write_aux_reg(ARC_AUX_IO_COH_ENABLE, 1);
	}

	read_decode_mmu_bcr();

	/*
	 * ARC_AUX_SLC_RGN_START1 and ARC_AUX_SLC_RGN_END1 registers exist
	 * only if PAE exists in current HW. So we have to check pae_exists
	 * before using them.
	 */
	if (slc_exists && pae_exists)
		slc_upper_region_init();
#endif /* CONFIG_ISA_ARCV2 */
}
291 
292 int icache_status(void)
293 {
294 	if (!icache_exists)
295 		return 0;
296 
297 	if (read_aux_reg(ARC_AUX_IC_CTRL) & IC_CTRL_CACHE_DISABLE)
298 		return 0;
299 	else
300 		return 1;
301 }
302 
303 void icache_enable(void)
304 {
305 	if (icache_exists)
306 		write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) &
307 			      ~IC_CTRL_CACHE_DISABLE);
308 }
309 
310 void icache_disable(void)
311 {
312 	if (icache_exists)
313 		write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) |
314 			      IC_CTRL_CACHE_DISABLE);
315 }
316 
/*
 * Invalidate the entire instruction cache, and on ARCv2 also invalidate
 * the SLC (instructions may be fetched through it). No-op on the I$
 * side when the I$ is absent or disabled.
 */
void invalidate_icache_all(void)
{
	/* Any write to IC_IVIC register triggers invalidation of entire I$ */
	if (icache_status()) {
		write_aux_reg(ARC_AUX_IC_IVIC, 1);
		/*
		 * As per ARC HS databook (see chapter 5.3.3.2)
		 * it is required to add 3 NOPs after each write to IC_IVIC.
		 */
		__builtin_arc_nop();
		__builtin_arc_nop();
		__builtin_arc_nop();
		read_aux_reg(ARC_AUX_IC_CTRL);	/* blocks */
	}

#ifdef CONFIG_ISA_ARCV2
	/* Keep the shared L2 consistent as well */
	if (slc_exists)
		__slc_entire_op(OP_INV);
#endif
}
337 
338 int dcache_status(void)
339 {
340 	if (!dcache_exists)
341 		return 0;
342 
343 	if (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_CACHE_DISABLE)
344 		return 0;
345 	else
346 		return 1;
347 }
348 
349 void dcache_enable(void)
350 {
351 	if (!dcache_exists)
352 		return;
353 
354 	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) &
355 		      ~(DC_CTRL_INV_MODE_FLUSH | DC_CTRL_CACHE_DISABLE));
356 }
357 
358 void dcache_disable(void)
359 {
360 	if (!dcache_exists)
361 		return;
362 
363 	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) |
364 		      DC_CTRL_CACHE_DISABLE);
365 }
366 
367 #ifndef CONFIG_SYS_DCACHE_OFF
368 /*
369  * Common Helper for Line Operations on {I,D}-Cache
370  */
/*
 * Common Helper for Line Operations on {I,D}-Cache
 *
 * Issues one per-line aux-register command for every cache line covering
 * [paddr, paddr + sz). 'cacheop' selects the command: OP_INV_IC for I$
 * invalidate, otherwise a D$ invalidate (OP_INV set) or flush. On MMUv3
 * the physical tag register must be written before each line command.
 * Relies on the runtime-probed l1_line_sz (via CACHE_LINE_MASK).
 */
static inline void __cache_line_loop(unsigned long paddr, unsigned long sz,
				     const int cacheop)
{
	unsigned int aux_cmd;
#if (CONFIG_ARC_MMU_VER == 3)
	unsigned int aux_tag;
#endif
	int num_lines;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_AUX_IC_IVIL;
#if (CONFIG_ARC_MMU_VER == 3)
		aux_tag = ARC_AUX_IC_PTAG;
#endif
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_AUX_DC_IVDL : ARC_AUX_DC_FLDL;
#if (CONFIG_ARC_MMU_VER == 3)
		aux_tag = ARC_AUX_DC_PTAG;
#endif
	}

	/* Align start down to a line boundary; grow sz to compensate */
	sz += paddr & ~CACHE_LINE_MASK;
	paddr &= CACHE_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, l1_line_sz);

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER == 3)
		/* MMUv3: physical tag must be set up before each line op */
		write_aux_reg(aux_tag, paddr);
#endif
		write_aux_reg(aux_cmd, paddr);
		paddr += l1_line_sz;
	}
}
406 
407 static unsigned int __before_dc_op(const int op)
408 {
409 	unsigned int reg;
410 
411 	if (op == OP_INV) {
412 		/*
413 		 * IM is set by default and implies Flush-n-inv
414 		 * Clear it here for vanilla inv
415 		 */
416 		reg = read_aux_reg(ARC_AUX_DC_CTRL);
417 		write_aux_reg(ARC_AUX_DC_CTRL, reg & ~DC_CTRL_INV_MODE_FLUSH);
418 	}
419 
420 	return reg;
421 }
422 
423 static void __after_dc_op(const int op, unsigned int reg)
424 {
425 	if (op & OP_FLUSH)	/* flush / flush-n-inv both wait */
426 		while (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_FLUSH_STATUS)
427 			;
428 
429 	/* Switch back to default Invalidate mode */
430 	if (op == OP_INV)
431 		write_aux_reg(ARC_AUX_DC_CTRL, reg | DC_CTRL_INV_MODE_FLUSH);
432 }
433 
434 static inline void __dc_entire_op(const int cacheop)
435 {
436 	int aux;
437 	unsigned int ctrl_reg = __before_dc_op(cacheop);
438 
439 	if (cacheop & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
440 		aux = ARC_AUX_DC_IVDC;
441 	else
442 		aux = ARC_AUX_DC_FLSH;
443 
444 	write_aux_reg(aux, 0x1);
445 
446 	__after_dc_op(cacheop, ctrl_reg);
447 }
448 
/*
 * Operate on the data-cache lines covering [paddr, paddr + sz),
 * bracketed by the usual before/after CTRL handling.
 */
static inline void __dc_line_op(unsigned long paddr, unsigned long sz,
				const int cacheop)
{
	unsigned int saved_ctrl = __before_dc_op(cacheop);

	__cache_line_loop(paddr, sz, cacheop);
	__after_dc_op(cacheop, saved_ctrl);
}
456 #else
457 #define __dc_entire_op(cacheop)
458 #define __dc_line_op(paddr, sz, cacheop)
459 #endif /* !CONFIG_SYS_DCACHE_OFF */
460 
/*
 * Invalidate (discard without writeback) the data-cache lines covering
 * [start, end). Empty or inverted ranges are a no-op.
 *
 * On ARCv2 with an IO-Coherency unit the hardware keeps caches coherent,
 * so both L1 and SLC maintenance are skipped; the SLC is only maintained
 * manually when it exists and no IOC is present.
 */
void invalidate_dcache_range(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;

#ifdef CONFIG_ISA_ARCV2
	if (!ioc_exists)
#endif
		__dc_line_op(start, end - start, OP_INV);

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists && !ioc_exists)
		__slc_rgn_op(start, end - start, OP_INV);
#endif
}
476 
/*
 * Flush (write back) the data-cache lines covering [start, end).
 * Empty or inverted ranges are a no-op.
 *
 * On ARCv2 with an IO-Coherency unit the hardware keeps caches coherent,
 * so both L1 and SLC maintenance are skipped; the SLC is only maintained
 * manually when it exists and no IOC is present.
 */
void flush_dcache_range(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;

#ifdef CONFIG_ISA_ARCV2
	if (!ioc_exists)
#endif
		__dc_line_op(start, end - start, OP_FLUSH);

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists && !ioc_exists)
		__slc_rgn_op(start, end - start, OP_FLUSH);
#endif
}
492 
/*
 * Generic U-Boot cache hook: write back the data cache for the
 * 'size'-byte region starting at 'start'.
 */
void flush_cache(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;

	flush_dcache_range(start, end);
}
497 
/*
 * Invalidate the entire data cache (and the SLC on ARCv2 parts that
 * have one), discarding any dirty lines without writing them back.
 */
void invalidate_dcache_all(void)
{
	__dc_entire_op(OP_INV);

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists)
		__slc_entire_op(OP_INV);
#endif
}
507 
/*
 * Flush (write back) the entire data cache, and the SLC on ARCv2 parts
 * that have one. Cached contents remain valid afterwards.
 */
void flush_dcache_all(void)
{
	__dc_entire_op(OP_FLUSH);

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists)
		__slc_entire_op(OP_FLUSH);
#endif
}
517