xref: /openbmc/u-boot/arch/arc/lib/cache.c (revision c4ef14d2)
1 /*
2  * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
3  *
4  * SPDX-License-Identifier:	GPL-2.0+
5  */
6 
7 #include <config.h>
8 #include <common.h>
9 #include <linux/compiler.h>
10 #include <linux/kernel.h>
11 #include <linux/log2.h>
12 #include <asm/arcregs.h>
13 #include <asm/cache.h>
14 
15 /* Bit values in IC_CTRL */
16 #define IC_CTRL_CACHE_DISABLE	BIT(0)
17 
18 /* Bit values in DC_CTRL */
19 #define DC_CTRL_CACHE_DISABLE	BIT(0)
20 #define DC_CTRL_INV_MODE_FLUSH	BIT(6)
21 #define DC_CTRL_FLUSH_STATUS	BIT(8)
22 #define CACHE_VER_NUM_MASK	0xF
23 
24 #define OP_INV		0x1
25 #define OP_FLUSH	0x2
26 
27 /* Bit val in SLC_CONTROL */
28 #define SLC_CTRL_DIS		0x001
29 #define SLC_CTRL_IM		0x040
30 #define SLC_CTRL_BUSY		0x100
31 #define SLC_CTRL_RGN_OP_INV	0x200
32 
/*
 * By default these variables would fall into the .bss section.
 * But .bss is not relocated and is zeroed after relocation, so values
 * written before relocation would be lost. Placing them in .data
 * keeps them valid both before and after relocation.
 */
int l1_line_sz __section(".data");
bool dcache_exists __section(".data") = false;
bool icache_exists __section(".data") = false;

/* Mask that rounds an address down to an L1 cache-line boundary */
#define CACHE_LINE_MASK		(~(l1_line_sz - 1))

#ifdef CONFIG_ISA_ARCV2
/* System Level Cache (SLC, i.e. L2) properties probed at runtime */
int slc_line_sz __section(".data");
bool slc_exists __section(".data") = false;
/* IO-Coherency unit: enabled in cache_init() if present and requested */
bool ioc_exists __section(".data") = false;
/* PAE40 capability reported by the MMU BCR, see read_decode_mmu_bcr() */
bool pae_exists __section(".data") = false;

/* To force enable IOC set ioc_enable to 'true' */
bool ioc_enable __section(".data") = false;
52 
/*
 * Probe the MMU Build Configuration Register and record in pae_exists
 * whether the hardware supports PAE40. No-op for MMU versions < 4.
 */
void read_decode_mmu_bcr(void)
{
	/* TODO: should we compare mmu version from BCR and from CONFIG? */
#if (CONFIG_ARC_MMU_VER >= 4)
	u32 tmp;

	tmp = read_aux_reg(ARC_AUX_MMU_BCR);

	/* MMUv4 BCR layout; bitfield order depends on endianness */
	struct bcr_mmu_4 {
#ifdef CONFIG_CPU_BIG_ENDIAN
	unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
		     n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
#else
	/*           DTLB      ITLB      JES        JE         JA      */
	unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
		     pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
#endif /* CONFIG_CPU_BIG_ENDIAN */
	} *mmu4;

	mmu4 = (struct bcr_mmu_4 *)&tmp;

	/* Non-zero PAE field means PAE40 is supported by this MMU */
	pae_exists = !!mmu4->pae;
#endif /* (CONFIG_ARC_MMU_VER >= 4) */
}
77 
78 static void __slc_entire_op(const int op)
79 {
80 	unsigned int ctrl;
81 
82 	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);
83 
84 	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
85 		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
86 	else
87 		ctrl |= SLC_CTRL_IM;
88 
89 	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);
90 
91 	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
92 		write_aux_reg(ARC_AUX_SLC_INVALIDATE, 0x1);
93 	else
94 		write_aux_reg(ARC_AUX_SLC_FLUSH, 0x1);
95 
96 	/* Make sure "busy" bit reports correct stataus, see STAR 9001165532 */
97 	read_aux_reg(ARC_AUX_SLC_CTRL);
98 
99 	/* Important to wait for flush to complete */
100 	while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);
101 }
102 
103 static void slc_upper_region_init(void)
104 {
105 	/*
106 	 * ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1 are always == 0
107 	 * as we don't use PAE40.
108 	 */
109 	write_aux_reg(ARC_AUX_SLC_RGN_END1, 0);
110 	write_aux_reg(ARC_AUX_SLC_RGN_START1, 0);
111 }
112 
/*
 * Perform an SLC operation on a physical address region.
 * @paddr: region start (physical address)
 * @sz:    region size in bytes
 * @op:    OP_INV, OP_FLUSH or OP_FLUSH|OP_INV (flush-n-invalidate)
 */
static void __slc_rgn_op(unsigned long paddr, unsigned long sz, const int op)
{
	unsigned int ctrl;
	unsigned long end;

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip
	 * END needs to be setup before START (latter triggers the operation)
	 * END can't be same as START, so add (l2_line_sz - 1) to sz
	 */
	end = paddr + sz + slc_line_sz - 1;

	/*
	 * Upper addresses (ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1)
	 * are always == 0 as we don't use PAE40, so we only setup lower ones
	 * (ARC_AUX_SLC_RGN_END and ARC_AUX_SLC_RGN_START)
	 */
	write_aux_reg(ARC_AUX_SLC_RGN_END, end);
	write_aux_reg(ARC_AUX_SLC_RGN_START, paddr);

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_AUX_SLC_CTRL);

	/* Wait for the region operation to complete */
	while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);
}
159 #endif /* CONFIG_ISA_ARCV2 */
160 
161 #ifdef CONFIG_ISA_ARCV2
162 static void read_decode_cache_bcr_arcv2(void)
163 {
164 	union {
165 		struct {
166 #ifdef CONFIG_CPU_BIG_ENDIAN
167 			unsigned int pad:24, way:2, lsz:2, sz:4;
168 #else
169 			unsigned int sz:4, lsz:2, way:2, pad:24;
170 #endif
171 		} fields;
172 		unsigned int word;
173 	} slc_cfg;
174 
175 	union {
176 		struct {
177 #ifdef CONFIG_CPU_BIG_ENDIAN
178 			unsigned int pad:24, ver:8;
179 #else
180 			unsigned int ver:8, pad:24;
181 #endif
182 		} fields;
183 		unsigned int word;
184 	} sbcr;
185 
186 	sbcr.word = read_aux_reg(ARC_BCR_SLC);
187 	if (sbcr.fields.ver) {
188 		slc_cfg.word = read_aux_reg(ARC_AUX_SLC_CONFIG);
189 		slc_exists = true;
190 		slc_line_sz = (slc_cfg.fields.lsz == 0) ? 128 : 64;
191 	}
192 
193 	union {
194 		struct bcr_clust_cfg {
195 #ifdef CONFIG_CPU_BIG_ENDIAN
196 			unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
197 #else
198 			unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
199 #endif
200 		} fields;
201 		unsigned int word;
202 	} cbcr;
203 
204 	cbcr.word = read_aux_reg(ARC_BCR_CLUSTER);
205 	if (cbcr.fields.c && ioc_enable)
206 		ioc_exists = true;
207 }
208 #endif
209 
210 void read_decode_cache_bcr(void)
211 {
212 	int dc_line_sz = 0, ic_line_sz = 0;
213 
214 	union {
215 		struct {
216 #ifdef CONFIG_CPU_BIG_ENDIAN
217 			unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
218 #else
219 			unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
220 #endif
221 		} fields;
222 		unsigned int word;
223 	} ibcr, dbcr;
224 
225 	ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
226 	if (ibcr.fields.ver) {
227 		icache_exists = true;
228 		l1_line_sz = ic_line_sz = 8 << ibcr.fields.line_len;
229 		if (!ic_line_sz)
230 			panic("Instruction exists but line length is 0\n");
231 	}
232 
233 	dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
234 	if (dbcr.fields.ver) {
235 		dcache_exists = true;
236 		l1_line_sz = dc_line_sz = 16 << dbcr.fields.line_len;
237 		if (!dc_line_sz)
238 			panic("Data cache exists but line length is 0\n");
239 	}
240 
241 	if (ic_line_sz && dc_line_sz && (ic_line_sz != dc_line_sz))
242 		panic("Instruction and data cache line lengths differ\n");
243 }
244 
/*
 * One-time cache subsystem initialization: probe L1 caches and, on
 * ARCv2, the SLC/IOC/MMU. If an IO-Coherency unit is present and
 * enabled, program its DDR-covering aperture and switch it on.
 */
void cache_init(void)
{
	read_decode_cache_bcr();

#ifdef CONFIG_ISA_ARCV2
	read_decode_cache_bcr_arcv2();

	if (ioc_exists) {
		/* IOC Aperture start is equal to DDR start */
		unsigned int ap_base = CONFIG_SYS_SDRAM_BASE;
		/* IOC Aperture size is equal to DDR size */
		long ap_size = CONFIG_SYS_SDRAM_SIZE;

		/* Caches must be clean before coherency is switched on */
		flush_dcache_all();
		invalidate_dcache_all();

		if (!is_power_of_2(ap_size) || ap_size < 4096)
			panic("IOC Aperture size must be power of 2 and bigger 4Kib");

		/*
		 * IOC Aperture size decoded as 2 ^ (SIZE + 2) KB,
		 * so setting 0x11 implies 512M, 0x12 implies 1G...
		 */
		write_aux_reg(ARC_AUX_IO_COH_AP0_SIZE,
			      order_base_2(ap_size / 1024) - 2);

		/* IOC Aperture start must be aligned to the size of the aperture */
		if (ap_base % ap_size != 0)
			panic("IOC Aperture start must be aligned to the size of the aperture");

		/* Aperture base is programmed in 4KB units, hence >> 12 */
		write_aux_reg(ARC_AUX_IO_COH_AP0_BASE, ap_base >> 12);
		write_aux_reg(ARC_AUX_IO_COH_PARTIAL, 1);
		write_aux_reg(ARC_AUX_IO_COH_ENABLE, 1);
	}

	read_decode_mmu_bcr();

	/*
	 * ARC_AUX_SLC_RGN_START1 and ARC_AUX_SLC_RGN_END1 register exist
	 * only if PAE exists in current HW. So we had to check pae_exist
	 * before using them.
	 */
	if (slc_exists && pae_exists)
		slc_upper_region_init();
#endif /* CONFIG_ISA_ARCV2 */
}
291 
292 int icache_status(void)
293 {
294 	if (!icache_exists)
295 		return 0;
296 
297 	if (read_aux_reg(ARC_AUX_IC_CTRL) & IC_CTRL_CACHE_DISABLE)
298 		return 0;
299 	else
300 		return 1;
301 }
302 
303 void icache_enable(void)
304 {
305 	if (icache_exists)
306 		write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) &
307 			      ~IC_CTRL_CACHE_DISABLE);
308 }
309 
310 void icache_disable(void)
311 {
312 	if (icache_exists)
313 		write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) |
314 			      IC_CTRL_CACHE_DISABLE);
315 }
316 
/* IC supports only invalidation */
static inline void __ic_entire_invalidate(void)
{
	/* Nothing to do when the I$ is absent or disabled */
	if (!icache_status())
		return;

	/* Any write to IC_IVIC register triggers invalidation of entire I$ */
	write_aux_reg(ARC_AUX_IC_IVIC, 1);
	/*
	 * As per ARC HS databook (see chapter 5.3.3.2)
	 * it is required to add 3 NOPs after each write to IC_IVIC.
	 */
	__builtin_arc_nop();
	__builtin_arc_nop();
	__builtin_arc_nop();
	read_aux_reg(ARC_AUX_IC_CTRL);  /* blocks */
}
334 
/*
 * Invalidate the whole instruction path: L1 I$ first, then the SLC
 * on ARCv2 parts that have one.
 */
void invalidate_icache_all(void)
{
	__ic_entire_invalidate();

#ifdef CONFIG_ISA_ARCV2
	if (!slc_exists)
		return;

	__slc_entire_op(OP_INV);
#endif
}
344 
345 int dcache_status(void)
346 {
347 	if (!dcache_exists)
348 		return 0;
349 
350 	if (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_CACHE_DISABLE)
351 		return 0;
352 	else
353 		return 1;
354 }
355 
356 void dcache_enable(void)
357 {
358 	if (!dcache_exists)
359 		return;
360 
361 	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) &
362 		      ~(DC_CTRL_INV_MODE_FLUSH | DC_CTRL_CACHE_DISABLE));
363 }
364 
365 void dcache_disable(void)
366 {
367 	if (!dcache_exists)
368 		return;
369 
370 	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) |
371 		      DC_CTRL_CACHE_DISABLE);
372 }
373 
374 #ifndef CONFIG_SYS_DCACHE_OFF
375 /* Common Helper for Line Operations on D-cache */
376 static inline void __dcache_line_loop(unsigned long paddr, unsigned long sz,
377 				      const int cacheop)
378 {
379 	unsigned int aux_cmd;
380 	int num_lines;
381 
382 	/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
383 	aux_cmd = cacheop & OP_INV ? ARC_AUX_DC_IVDL : ARC_AUX_DC_FLDL;
384 
385 	sz += paddr & ~CACHE_LINE_MASK;
386 	paddr &= CACHE_LINE_MASK;
387 
388 	num_lines = DIV_ROUND_UP(sz, l1_line_sz);
389 
390 	while (num_lines-- > 0) {
391 #if (CONFIG_ARC_MMU_VER == 3)
392 		write_aux_reg(ARC_AUX_DC_PTAG, paddr);
393 #endif
394 		write_aux_reg(aux_cmd, paddr);
395 		paddr += l1_line_sz;
396 	}
397 }
398 
399 static unsigned int __before_dc_op(const int op)
400 {
401 	unsigned int reg;
402 
403 	if (op == OP_INV) {
404 		/*
405 		 * IM is set by default and implies Flush-n-inv
406 		 * Clear it here for vanilla inv
407 		 */
408 		reg = read_aux_reg(ARC_AUX_DC_CTRL);
409 		write_aux_reg(ARC_AUX_DC_CTRL, reg & ~DC_CTRL_INV_MODE_FLUSH);
410 	}
411 
412 	return reg;
413 }
414 
415 static void __after_dc_op(const int op, unsigned int reg)
416 {
417 	if (op & OP_FLUSH)	/* flush / flush-n-inv both wait */
418 		while (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_FLUSH_STATUS);
419 
420 	/* Switch back to default Invalidate mode */
421 	if (op == OP_INV)
422 		write_aux_reg(ARC_AUX_DC_CTRL, reg | DC_CTRL_INV_MODE_FLUSH);
423 }
424 
425 static inline void __dc_entire_op(const int cacheop)
426 {
427 	int aux;
428 	unsigned int ctrl_reg = __before_dc_op(cacheop);
429 
430 	if (cacheop & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
431 		aux = ARC_AUX_DC_IVDC;
432 	else
433 		aux = ARC_AUX_DC_FLSH;
434 
435 	write_aux_reg(aux, 0x1);
436 
437 	__after_dc_op(cacheop, ctrl_reg);
438 }
439 
/*
 * Perform a line-granular operation on the L1 D$ for [paddr, paddr+sz).
 */
static inline void __dc_line_op(unsigned long paddr, unsigned long sz,
				const int cacheop)
{
	/* Save DC_CTRL, run the per-line loop, then wait/restore */
	unsigned int saved_ctrl = __before_dc_op(cacheop);

	__dcache_line_loop(paddr, sz, cacheop);

	__after_dc_op(cacheop, saved_ctrl);
}
448 #else
449 #define __dc_entire_op(cacheop)
450 #define __dc_line_op(paddr, sz, cacheop)
451 #endif /* !CONFIG_SYS_DCACHE_OFF */
452 
/*
 * Invalidate (discard) cached data for [start, end).
 * On ARCv2 with IOC enabled the hardware keeps DMA coherent, so both
 * the L1 and SLC maintenance are skipped entirely.
 */
void invalidate_dcache_range(unsigned long start, unsigned long end)
{
	/* Empty or inverted range: nothing to do */
	if (start >= end)
		return;

#ifdef CONFIG_ISA_ARCV2
	if (!ioc_exists)
#endif
		__dc_line_op(start, end - start, OP_INV);

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists && !ioc_exists)
		__slc_rgn_op(start, end - start, OP_INV);
#endif
}
468 
/*
 * Write back cached data for [start, end) to memory.
 * On ARCv2 with IOC enabled the hardware keeps DMA coherent, so both
 * the L1 and SLC maintenance are skipped entirely.
 */
void flush_dcache_range(unsigned long start, unsigned long end)
{
	/* Empty or inverted range: nothing to do */
	if (start >= end)
		return;

#ifdef CONFIG_ISA_ARCV2
	if (!ioc_exists)
#endif
		__dc_line_op(start, end - start, OP_FLUSH);

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists && !ioc_exists)
		__slc_rgn_op(start, end - start, OP_FLUSH);
#endif
}
484 
/*
 * Flush a (start, size) region — thin adapter onto the
 * (start, end) interface of flush_dcache_range().
 */
void flush_cache(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;

	flush_dcache_range(start, end);
}
489 
490 void invalidate_dcache_all(void)
491 {
492 	__dc_entire_op(OP_INV);
493 
494 #ifdef CONFIG_ISA_ARCV2
495 	if (slc_exists)
496 		__slc_entire_op(OP_INV);
497 #endif
498 }
499 
500 void flush_dcache_all(void)
501 {
502 	__dc_entire_op(OP_FLUSH);
503 
504 #ifdef CONFIG_ISA_ARCV2
505 	if (slc_exists)
506 		__slc_entire_op(OP_FLUSH);
507 #endif
508 }
509