// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
 */

#include <config.h>
#include <common.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <asm/arcregs.h>
#include <asm/arc-bcr.h>
#include <asm/cache.h>

/*
 * [ NOTE 1 ]:
 * An entire-invalidate operation on the data cache (L1 D$ or SL$), or a data
 * cache disable operation, may result in unexpected behavior and data loss
 * even if we flush the data cache right before invalidation. That may happen
 * if we store any context on the stack (as we do when we store the BLINK
 * register on the stack before a function call). BLINK is the register where
 * the return address is automatically saved when we do a function call with
 * instructions like 'bl'.
 *
 * Here is a real example:
 * we may hang in the code below because we store the BLINK register on the
 * stack in the invalidate_dcache_all() function.
 *
 * void flush_dcache_all() {
 *     __dc_entire_op(OP_FLUSH);
 *     // Other code //
 * }
 *
 * void invalidate_dcache_all() {
 *     __dc_entire_op(OP_INV);
 *     // Other code //
 * }
 *
 * void foo(void) {
 *     flush_dcache_all();
 *     invalidate_dcache_all();
 * }
 *
 * Now let's see what really happens during that code's execution:
 *
 * foo()
 *   |->> call flush_dcache_all
 *     [return address is saved to BLINK register]
 *     [push BLINK] (save to stack)              ![point 1]
 *     |->> call __dc_entire_op(OP_FLUSH)
 *         [return address is saved to BLINK register]
 *         [flush L1 D$]
 *         return [jump to BLINK]
 *     <<------
 *     [other flush_dcache_all code]
 *     [pop BLINK] (get from stack)
 *     return [jump to BLINK]
 *   <<------
 *   |->> call invalidate_dcache_all
 *     [return address is saved to BLINK register]
 *     [push BLINK] (save to stack)               ![point 2]
 *     |->> call __dc_entire_op(OP_INV)
 *         [return address is saved to BLINK register]
 *         [invalidate L1 D$]                 ![point 3]
 *         // Oops!!!
 *         // We lose the return address of the invalidate_dcache_all
 *         // function: we saved it to the stack and invalidated L1 D$
 *         // after that!
 *         return [jump to BLINK]
 *     <<------
 *     [other invalidate_dcache_all code]
 *     [pop BLINK] (get from stack)
 *     // We don't have this data in the L1 dcache as we invalidated it in
 *     // [point 3], so we get it from the next memory level (for example
 *     // DDR memory), but in memory we have the value saved in [point 1],
 *     // which is the return address of the flush_dcache_all function
 *     // (instead of the address of the current invalidate_dcache_all
 *     // function which we saved in [point 2]!)
 *     return [jump to BLINK]
 *   <<------
 *   // As BLINK points to invalidate_dcache_all, we call it again and
 *   // loop forever.
 *
 * Fortunately we can fix that by doing the flush and invalidation of D$ with
 * a single instruction (instead of a pair of flush and invalidation
 * instructions) and by forcing function inlining with the
 * '__attribute__((always_inline))' gcc attribute, to avoid any function call
 * (and BLINK store) between the cache flush and the disable, as sketched
 * below.
 *
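 * A minimal sketch of the resulting pattern (illustrative only, reusing
 * names defined later in this file):
 *
 * static inlined_cachefunc void __dc_entire_op(const int cacheop);
 *
 * void flush_n_invalidate_dcache_all(void) {
 *     // a single combined flush-n-inv operation; no separate flush + inv
 *     // pair and no function call between the flush and the invalidate
 *     __dc_entire_op(OP_FLUSH_N_INV);
 * }
 *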
 * [ NOTE 2 ]:
 * As of today we only support the following cache configurations on ARC.
 * Other configurations may exist in HW (for example, since version 3.0 HS
 * supports SL$ (L2 system level cache) disable) but we don't support them
 * in SW.
 * Configuration 1:
 *        ______________________
 *       |                      |
 *       |   ARC CPU            |
 *       |______________________|
 *        ___|___        ___|___
 *       |       |      |       |
 *       | L1 I$ |      | L1 D$ |
 *       |_______|      |_______|
 *        on/off         on/off
 *        ___|______________|____
 *       |                      |
 *       |   main memory        |
 *       |______________________|
 *
 * Configuration 2:
 *        ______________________
 *       |                      |
 *       |   ARC CPU            |
 *       |______________________|
 *        ___|___        ___|___
 *       |       |      |       |
 *       | L1 I$ |      | L1 D$ |
 *       |_______|      |_______|
 *        on/off         on/off
 *        ___|______________|____
 *       |                      |
 *       |   L2 (SL$)           |
 *       |______________________|
 *          must always be on
 *        ___|______________|____
 *       |                      |
 *       |   main memory        |
 *       |______________________|
 *
 * Configuration 3:
 *        ______________________
 *       |                      |
 *       |   ARC CPU            |
 *       |______________________|
 *        ___|___        ___|___
 *       |       |      |       |
 *       | L1 I$ |      | L1 D$ |
 *       |_______|      |_______|
 *        on/off        must be on
 *        ___|______________|____      _______
 *       |                      |     |       |
 *       |   L2 (SL$)           |-----|  IOC  |
 *       |______________________|     |_______|
 *          must always be on          on/off
 *        ___|______________|____
 *       |                      |
 *       |   main memory        |
 *       |______________________|
 */
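/*
 * An informal sketch of how the constraints above map to the run-time checks
 * below (condition -> effect, simplified):
 *
 *     IOC enabled && no SL$              -> panic in arc_ioc_setup()
 *     IOC enabled && L1 D$ disabled      -> panic in arc_ioc_setup()
 *     SL$ present && missing L1 I$ or D$ -> panic in read_decode_cache_bcr_arcv2()
 */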

DECLARE_GLOBAL_DATA_PTR;

/* Bit values in IC_CTRL */
#define IC_CTRL_CACHE_DISABLE	BIT(0)

/* Bit values in DC_CTRL */
#define DC_CTRL_CACHE_DISABLE	BIT(0)
#define DC_CTRL_INV_MODE_FLUSH	BIT(6)
#define DC_CTRL_FLUSH_STATUS	BIT(8)

#define OP_INV			BIT(0)
#define OP_FLUSH		BIT(1)
#define OP_FLUSH_N_INV		(OP_FLUSH | OP_INV)

/* Bit values in SLC_CTRL */
#define SLC_CTRL_DIS		0x001
#define SLC_CTRL_IM		0x040
#define SLC_CTRL_BUSY		0x100
#define SLC_CTRL_RGN_OP_INV	0x200

#define CACHE_LINE_MASK		(~(gd->arch.l1_line_sz - 1))

/*
 * We don't want to use the '__always_inline' macro here as it can be
 * redefined to plain 'inline' in some cases, which breaks things. See
 * [ NOTE 1 ] for details on why we need always-inlined functions here.
 */
#define inlined_cachefunc	 inline __attribute__((always_inline))

static inlined_cachefunc void __ic_entire_invalidate(void);
static inlined_cachefunc void __dc_entire_op(const int cacheop);

static inline bool pae_exists(void)
{
	/* TODO: should we compare mmu version from BCR and from CONFIG? */
#if (CONFIG_ARC_MMU_VER >= 4)
	union bcr_mmu_4 mmu4;

	mmu4.word = read_aux_reg(ARC_AUX_MMU_BCR);

	if (mmu4.fields.pae)
		return true;
#endif /* (CONFIG_ARC_MMU_VER >= 4) */

	return false;
}

static inlined_cachefunc bool icache_exists(void)
{
	union bcr_di_cache ibcr;

	ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
	return !!ibcr.fields.ver;
}

static inlined_cachefunc bool icache_enabled(void)
{
	if (!icache_exists())
		return false;

	return !(read_aux_reg(ARC_AUX_IC_CTRL) & IC_CTRL_CACHE_DISABLE);
}

static inlined_cachefunc bool dcache_exists(void)
{
	union bcr_di_cache dbcr;

	dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
	return !!dbcr.fields.ver;
}

static inlined_cachefunc bool dcache_enabled(void)
{
	if (!dcache_exists())
		return false;

	return !(read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_CACHE_DISABLE);
}

static inlined_cachefunc bool slc_exists(void)
{
	if (is_isa_arcv2()) {
		union bcr_generic sbcr;

		sbcr.word = read_aux_reg(ARC_BCR_SLC);
		return !!sbcr.fields.ver;
	}

	return false;
}

static inlined_cachefunc bool slc_data_bypass(void)
{
	/*
	 * If L1 data cache is disabled SL$ is bypassed and all load/store
	 * requests are sent directly to main memory.
	 */
	return !dcache_enabled();
}

static inline bool ioc_exists(void)
{
	if (is_isa_arcv2()) {
		union bcr_clust_cfg cbcr;

		cbcr.word = read_aux_reg(ARC_BCR_CLUSTER);
		return cbcr.fields.c;
	}

	return false;
}

static inline bool ioc_enabled(void)
{
	/*
	 * We only check the CONFIG option instead of the IOC HW state, as the
	 * IOC must be disabled by default.
	 */
	if (is_ioc_enabled())
		return ioc_exists();

	return false;
}

static inlined_cachefunc void __slc_entire_op(const int op)
{
	unsigned int ctrl;

	if (!slc_exists())
		return;

	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		write_aux_reg(ARC_AUX_SLC_INVALIDATE, 0x1);
	else
		write_aux_reg(ARC_AUX_SLC_FLUSH, 0x1);

	/* Make sure the "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_AUX_SLC_CTRL);

	/* Important to wait for flush to complete */
	while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);
}

static void slc_upper_region_init(void)
{
	/*
	 * The ARC_AUX_SLC_RGN_START1 and ARC_AUX_SLC_RGN_END1 registers exist
	 * only if PAE exists in the current HW, so we have to check
	 * pae_exists() before using them.
	 */
	if (!pae_exists())
		return;

	/*
	 * ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1 are always == 0
	 * as we don't use PAE40.
	 */
	write_aux_reg(ARC_AUX_SLC_RGN_END1, 0);
	write_aux_reg(ARC_AUX_SLC_RGN_START1, 0);
}

static void __slc_rgn_op(unsigned long paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2

	unsigned int ctrl;
	unsigned long end;

	if (!slc_exists())
		return;

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]:
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip.
	 * END needs to be set up before START (the latter triggers the
	 * operation). END can't be the same as START, so add
	 * (slc_line_sz - 1) to sz.
	 */
	end = paddr + sz + gd->arch.slc_line_sz - 1;
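	/*
	 * Worked example (illustrative numbers): with a 128-byte SLC line,
	 * paddr = 0x80000040 and sz = 0x100 give
	 * end = 0x80000040 + 0x100 + 0x7f = 0x800001bf, so END lands in a
	 * later cache line than START, as the hardware requires.
	 */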

	/*
	 * Upper addresses (ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1)
	 * are always == 0 as we don't use PAE40, so we only set up the lower
	 * ones (ARC_AUX_SLC_RGN_END and ARC_AUX_SLC_RGN_START).
	 */
	write_aux_reg(ARC_AUX_SLC_RGN_END, end);
	write_aux_reg(ARC_AUX_SLC_RGN_START, paddr);

	/* Make sure the "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_AUX_SLC_CTRL);

	while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);

#endif /* CONFIG_ISA_ARCV2 */
}

static void arc_ioc_setup(void)
{
	/* IOC Aperture start is equal to DDR start */
	unsigned int ap_base = CONFIG_SYS_SDRAM_BASE;
	/* IOC Aperture size is equal to DDR size */
	long ap_size = CONFIG_SYS_SDRAM_SIZE;

	/* Unsupported configuration. See [ NOTE 2 ] for more details. */
	if (!slc_exists())
		panic("Tried to enable IOC but SLC is not present");

	/* Unsupported configuration. See [ NOTE 2 ] for more details. */
	if (!dcache_enabled())
		panic("Tried to enable IOC but L1 D$ is disabled");

	if (!is_power_of_2(ap_size) || ap_size < 4096)
		panic("IOC Aperture size must be a power of 2 and at least 4 KiB");

	/* IOC Aperture start must be aligned to the size of the aperture */
	if (ap_base % ap_size != 0)
		panic("IOC Aperture start must be aligned to the size of the aperture");

	flush_n_invalidate_dcache_all();

	/*
	 * IOC Aperture size is decoded as 2 ^ (SIZE + 2) KiB,
	 * so setting 0x11 implies 512 MiB, 0x12 implies 1 GiB, ...
	 */
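	/*
	 * A worked check of the encoding (illustrative value): for a 512 MiB
	 * aperture, ap_size / 1024 = 524288 = 2^19, order_base_2() gives 19,
	 * and 19 - 2 = 17 = 0x11, matching the example above.
	 */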
	write_aux_reg(ARC_AUX_IO_COH_AP0_SIZE,
		      order_base_2(ap_size / 1024) - 2);

	write_aux_reg(ARC_AUX_IO_COH_AP0_BASE, ap_base >> 12);
	write_aux_reg(ARC_AUX_IO_COH_PARTIAL, 1);
	write_aux_reg(ARC_AUX_IO_COH_ENABLE, 1);
}

static void read_decode_cache_bcr_arcv2(void)
{
#ifdef CONFIG_ISA_ARCV2

	union bcr_slc_cfg slc_cfg;

	if (slc_exists()) {
		slc_cfg.word = read_aux_reg(ARC_AUX_SLC_CONFIG);
		gd->arch.slc_line_sz = (slc_cfg.fields.lsz == 0) ? 128 : 64;

		/*
		 * We don't support configurations where L1 I$ or L1 D$ is
		 * absent but SL$ exists. See [ NOTE 2 ] for more details.
		 */
		if (!icache_exists() || !dcache_exists())
			panic("Unsupported cache configuration: SLC exists but one of L1 caches is absent");
	}

#endif /* CONFIG_ISA_ARCV2 */
}

void read_decode_cache_bcr(void)
{
	int dc_line_sz = 0, ic_line_sz = 0;
	union bcr_di_cache ibcr, dbcr;

	/*
	 * We don't care much about the I$ line length really, as there are no
	 * per-line ops on I$; instead we only do a full invalidation of it on
	 * relocation and right before jumping to the OS. Still, we check for
	 * the insane config of a zero-encoded line length in the presence of
	 * a version field in the I$ BCR. Just in case.
	 */
	ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
	if (ibcr.fields.ver) {
		ic_line_sz = 8 << ibcr.fields.line_len;
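		/* e.g. (illustrative value) a line_len field of 3 decodes to 8 << 3 = 64 bytes */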
		if (!ic_line_sz)
			panic("Instruction cache exists but line length is 0\n");
	}

	dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
	if (dbcr.fields.ver) {
		gd->arch.l1_line_sz = dc_line_sz = 16 << dbcr.fields.line_len;
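		/* e.g. (illustrative value) a line_len field of 2 decodes to 16 << 2 = 64 bytes */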
		if (!dc_line_sz)
			panic("Data cache exists but line length is 0\n");
	}
}

void cache_init(void)
{
	read_decode_cache_bcr();

	if (is_isa_arcv2())
		read_decode_cache_bcr_arcv2();

	if (is_isa_arcv2() && ioc_enabled())
		arc_ioc_setup();

	if (is_isa_arcv2() && slc_exists())
		slc_upper_region_init();
}

int icache_status(void)
{
	return icache_enabled();
}

void icache_enable(void)
{
	if (icache_exists())
		write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) &
			      ~IC_CTRL_CACHE_DISABLE);
}

void icache_disable(void)
{
	if (!icache_exists())
		return;

	__ic_entire_invalidate();

	write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) |
		      IC_CTRL_CACHE_DISABLE);
}

/* IC supports only invalidation */
static inlined_cachefunc void __ic_entire_invalidate(void)
{
	if (!icache_enabled())
		return;

	/* Any write to IC_IVIC register triggers invalidation of entire I$ */
	write_aux_reg(ARC_AUX_IC_IVIC, 1);
	/*
	 * As per ARC HS databook (see chapter 5.3.3.2)
	 * it is required to add 3 NOPs after each write to IC_IVIC.
	 */
	__builtin_arc_nop();
	__builtin_arc_nop();
	__builtin_arc_nop();
	read_aux_reg(ARC_AUX_IC_CTRL);  /* blocks */
}

void invalidate_icache_all(void)
{
	__ic_entire_invalidate();

	/*
	 * If SL$ is bypassed for data, it is used only for instructions,
	 * so we need to invalidate it too.
	 * TODO: HS 3.0 supports SLC disable, so we need to check the SLC
	 * enable/disable status here.
	 */
	if (is_isa_arcv2() && slc_data_bypass())
		__slc_entire_op(OP_INV);
}

int dcache_status(void)
{
	return dcache_enabled();
}

void dcache_enable(void)
{
	if (!dcache_exists())
		return;

	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) &
		      ~(DC_CTRL_INV_MODE_FLUSH | DC_CTRL_CACHE_DISABLE));
}

void dcache_disable(void)
{
	if (!dcache_exists())
		return;

	__dc_entire_op(OP_FLUSH_N_INV);

	/*
	 * As the SLC will be bypassed for data after the L1 D$ is disabled,
	 * we need to flush it before disabling the L1 D$. We also invalidate
	 * the SLC to avoid inconsistent-data problems after enabling the
	 * L1 D$ again with the dcache_enable() function.
	 */
	if (is_isa_arcv2())
		__slc_entire_op(OP_FLUSH_N_INV);

	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) |
		      DC_CTRL_CACHE_DISABLE);
}

/* Common Helper for Line Operations on D-cache */
static inline void __dcache_line_loop(unsigned long paddr, unsigned long sz,
				      const int cacheop)
{
	unsigned int aux_cmd;
	int num_lines;

	/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
	aux_cmd = cacheop & OP_INV ? ARC_AUX_DC_IVDL : ARC_AUX_DC_FLDL;

	sz += paddr & ~CACHE_LINE_MASK;
	paddr &= CACHE_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, gd->arch.l1_line_sz);
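	/*
	 * Worked example (illustrative numbers): with a 64-byte line,
	 * paddr = 0x1234 and sz = 0x10 become paddr = 0x1200 and sz = 0x44,
	 * so num_lines = DIV_ROUND_UP(0x44, 0x40) = 2.
	 */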

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER == 3)
		write_aux_reg(ARC_AUX_DC_PTAG, paddr);
#endif
		write_aux_reg(aux_cmd, paddr);
		paddr += gd->arch.l1_line_sz;
	}
}

static inlined_cachefunc void __before_dc_op(const int op)
{
	unsigned int ctrl;

	ctrl = read_aux_reg(ARC_AUX_DC_CTRL);

	/* IM bit implies flush-n-inv, instead of vanilla inv */
	if (op == OP_INV)
		ctrl &= ~DC_CTRL_INV_MODE_FLUSH;
	else
		ctrl |= DC_CTRL_INV_MODE_FLUSH;

	write_aux_reg(ARC_AUX_DC_CTRL, ctrl);
}

static inlined_cachefunc void __after_dc_op(const int op)
{
	if (op & OP_FLUSH)	/* flush / flush-n-inv both wait */
		while (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_FLUSH_STATUS);
}

static inlined_cachefunc void __dc_entire_op(const int cacheop)
{
	int aux;

	if (!dcache_enabled())
		return;

	__before_dc_op(cacheop);

	if (cacheop & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_AUX_DC_IVDC;
	else
		aux = ARC_AUX_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(cacheop);
}

static inline void __dc_line_op(unsigned long paddr, unsigned long sz,
				const int cacheop)
{
	if (!dcache_enabled())
		return;

	__before_dc_op(cacheop);
	__dcache_line_loop(paddr, sz, cacheop);
	__after_dc_op(cacheop);
}

void invalidate_dcache_range(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;

	/*
	 * ARCv1                                 -> call __dc_line_op
	 * ARCv2 && L1 D$ disabled               -> nothing
	 * ARCv2 && L1 D$ enabled && IOC enabled -> nothing
	 * ARCv2 && L1 D$ enabled && no IOC      -> call __dc_line_op; call __slc_rgn_op
	 */
	if (!is_isa_arcv2() || !ioc_enabled())
		__dc_line_op(start, end - start, OP_INV);

	if (is_isa_arcv2() && !ioc_enabled() && !slc_data_bypass())
		__slc_rgn_op(start, end - start, OP_INV);
}

void flush_dcache_range(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;

	/*
	 * ARCv1                                 -> call __dc_line_op
	 * ARCv2 && L1 D$ disabled               -> nothing
	 * ARCv2 && L1 D$ enabled && IOC enabled -> nothing
	 * ARCv2 && L1 D$ enabled && no IOC      -> call __dc_line_op; call __slc_rgn_op
	 */
	if (!is_isa_arcv2() || !ioc_enabled())
		__dc_line_op(start, end - start, OP_FLUSH);

	if (is_isa_arcv2() && !ioc_enabled() && !slc_data_bypass())
		__slc_rgn_op(start, end - start, OP_FLUSH);
}
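
/*
 * Typical (illustrative) usage around a DMA transfer with no IOC; "buf" and
 * "len" are hypothetical names:
 *
 *     flush_dcache_range(buf, buf + len);       // before a device reads buf
 *     invalidate_dcache_range(buf, buf + len);  // before the CPU reads data
 *                                               // a device wrote into buf
 */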

void flush_cache(unsigned long start, unsigned long size)
{
	flush_dcache_range(start, start + size);
}

/*
 * As invalidate_dcache_all() is not used in generic U-Boot code, and as we
 * don't need a pure invalidate (without flush) in arch/arc code either, we
 * implement flush_n_invalidate_dcache_all() (flush and invalidate in one
 * operation) instead because it's much safer. See [ NOTE 1 ] for more
 * details.
 */
void flush_n_invalidate_dcache_all(void)
{
	__dc_entire_op(OP_FLUSH_N_INV);

	if (is_isa_arcv2() && !slc_data_bypass())
		__slc_entire_op(OP_FLUSH_N_INV);
}

void flush_dcache_all(void)
{
	__dc_entire_op(OP_FLUSH);

	if (is_isa_arcv2() && !slc_data_bypass())
		__slc_entire_op(OP_FLUSH);
}

/*
 * This function cleans up all caches (and therefore syncs the I/D caches).
 * It can be used for cleanup before launching Linux or to sync the caches
 * during relocation.
 */
void sync_n_cleanup_cache_all(void)
{
	__dc_entire_op(OP_FLUSH_N_INV);

	/*
	 * If SL$ is bypassed for data, it is used only for instructions,
	 * and we shouldn't flush it. So we invalidate it instead of doing
	 * a flush-n-inv.
	 */
	if (is_isa_arcv2()) {
		if (slc_data_bypass())
			__slc_entire_op(OP_INV);
		else
			__slc_entire_op(OP_FLUSH_N_INV);
	}

	__ic_entire_invalidate();
}