/*
 * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <config.h>
#include <common.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <asm/arcregs.h>
#include <asm/arc-bcr.h>
#include <asm/cache.h>

/*
 * [ NOTE 1 ]:
 * A data cache (L1 D$ or SL$) entire invalidate operation or data cache
 * disable operation may result in unexpected behavior and data loss even if
 * we flush the data cache right before invalidation. That may happen if we
 * store any context on the stack (as we store the BLINK register on the stack
 * before a function call). BLINK is the register where the return address is
 * automatically saved when we do a function call with instructions like 'bl'.
 *
 * Here is a real example:
 * we may hang in the code below because we store the BLINK register on the
 * stack in the invalidate_dcache_all() function.
 *
 * void flush_dcache_all() {
 *     __dc_entire_op(OP_FLUSH);
 *     // Other code //
 * }
 *
 * void invalidate_dcache_all() {
 *     __dc_entire_op(OP_INV);
 *     // Other code //
 * }
 *
 * void foo(void) {
 *     flush_dcache_all();
 *     invalidate_dcache_all();
 * }
 *
 * Now let's see what really happens during that code execution:
 *
 * foo()
 *   |->> call flush_dcache_all
 *     [return address is saved to BLINK register]
 *     [push BLINK] (save to stack)                 ![point 1]
 *       |->> call __dc_entire_op(OP_FLUSH)
 *         [return address is saved to BLINK register]
 *         [flush L1 D$]
 *         return [jump to BLINK]
 *       <<------
 *     [other flush_dcache_all code]
 *     [pop BLINK] (get from stack)
 *     return [jump to BLINK]
 *   <<------
 *   |->> call invalidate_dcache_all
 *     [return address is saved to BLINK register]
 *     [push BLINK] (save to stack)                 ![point 2]
 *       |->> call __dc_entire_op(OP_INV)
 *         [return address is saved to BLINK register]
 *         [invalidate L1 D$]                       ![point 3]
 *         // Oops!!!
 *         // We lose the return address of the invalidate_dcache_all
 *         // function: we saved it to the stack and invalidated L1 D$
 *         // after that!
 *         return [jump to BLINK]
 *       <<------
 *     [other invalidate_dcache_all code]
 *     [pop BLINK] (get from stack)
 *     // We don't have this data in the L1 dcache as we invalidated it at
 *     // [point 3], so we get it from the next memory level (for example
 *     // DDR memory), but in memory we have the value saved at [point 1],
 *     // which is the return address of the flush_dcache_all function
 *     // (instead of the address of the current invalidate_dcache_all
 *     // function which we saved at [point 2]!)
 *     return [jump to BLINK]
 *   <<------
 *   // As BLINK points to invalidate_dcache_all, we call it again and
 *   // loop forever.
 *
 * Fortunately we can fix that by doing the flush & invalidation of D$ with
 * one single instruction (instead of a flush and invalidate instruction pair)
 * and by forcing function inlining with the '__attribute__((always_inline))'
 * gcc attribute, to avoid any function call (and BLINK store) between the
 * cache flush and the invalidate/disable.
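 *
 * For illustration, a safe counterpart of foo() from the example above is
 * sketched below; it relies on the combined flush-n-inv operation that this
 * file implements in flush_n_invalidate_dcache_all() via the always-inlined
 * __dc_entire_op():
 *
 * void foo(void) {
 *     // Single flush-n-inv pass: nothing is pushed onto the stack between
 *     // the flush and the invalidate, so the BLINK value spilled at
 *     // [point 1] is never lost.
 *     flush_n_invalidate_dcache_all();
 * }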
 *
 *
 * [ NOTE 2 ]:
 * As of today we only support the following cache configurations on ARC.
 * Other configurations may exist in HW (for example, since version 3.0 HS
 * supports SL$ (L2 system level cache) disable) but we don't support them
 * in SW.
 *
 * Configuration 1:
 *          ______________________
 *         |                      |
 *         |       ARC CPU        |
 *         |______________________|
 *          ___|___        ___|___
 *         |       |      |       |
 *         | L1 I$ |      | L1 D$ |
 *         |_______|      |_______|
 *          on/off         on/off
 *          ___|______________|____
 *         |                      |
 *         |      main memory     |
 *         |______________________|
 *
 * Configuration 2:
 *          ______________________
 *         |                      |
 *         |       ARC CPU        |
 *         |______________________|
 *          ___|___        ___|___
 *         |       |      |       |
 *         | L1 I$ |      | L1 D$ |
 *         |_______|      |_______|
 *          on/off         on/off
 *          ___|______________|____
 *         |                      |
 *         |       L2 (SL$)       |
 *         |______________________|
 *          always must be on
 *          ___|______________|____
 *         |                      |
 *         |      main memory     |
 *         |______________________|
 *
 * Configuration 3:
 *          ______________________
 *         |                      |
 *         |       ARC CPU        |
 *         |______________________|
 *          ___|___        ___|___
 *         |       |      |       |
 *         | L1 I$ |      | L1 D$ |
 *         |_______|      |_______|
 *          on/off        must be on
 *          ___|______________|____       _______
 *         |                      |      |       |
 *         |       L2 (SL$)       |------|  IOC  |
 *         |______________________|      |_______|
 *          always must be on             on/off
 *          ___|______________|____
 *         |                      |
 *         |      main memory     |
 *         |______________________|
 */

DECLARE_GLOBAL_DATA_PTR;

/* Bit values in IC_CTRL */
#define IC_CTRL_CACHE_DISABLE	BIT(0)

/* Bit values in DC_CTRL */
#define DC_CTRL_CACHE_DISABLE	BIT(0)
#define DC_CTRL_INV_MODE_FLUSH	BIT(6)
#define DC_CTRL_FLUSH_STATUS	BIT(8)

#define OP_INV			BIT(0)
#define OP_FLUSH		BIT(1)
#define OP_FLUSH_N_INV		(OP_FLUSH | OP_INV)

/* Bit values in SLC_CTRL */
#define SLC_CTRL_DIS		0x001
#define SLC_CTRL_IM		0x040
#define SLC_CTRL_BUSY		0x100
#define SLC_CTRL_RGN_OP_INV	0x200

#define CACHE_LINE_MASK		(~(gd->arch.l1_line_sz - 1))
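/*
 * Worked example (illustrative values, not read from real HW): with
 * gd->arch.l1_line_sz == 64, CACHE_LINE_MASK == ~0x3f, so in
 * __dcache_line_loop() below an address such as 0x80001234 is rounded down
 * to the line boundary 0x80001200, while the dropped offset (0x34) is added
 * back to 'sz' so the last byte of the region is still covered.
 */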
/*
 * We don't want to use the '__always_inline' macro here as it can be
 * redefined to plain 'inline' in some cases, which breaks stuff. See
 * [ NOTE 1 ] for more details about the reasons we need always-inlined
 * functions.
 */
#define inlined_cachefunc	inline __attribute__((always_inline))

static inlined_cachefunc void __ic_entire_invalidate(void);
static inlined_cachefunc void __dc_entire_op(const int cacheop);

static inline bool pae_exists(void)
{
	/* TODO: should we compare mmu version from BCR and from CONFIG? */
#if (CONFIG_ARC_MMU_VER >= 4)
	union bcr_mmu_4 mmu4;

	mmu4.word = read_aux_reg(ARC_AUX_MMU_BCR);

	if (mmu4.fields.pae)
		return true;
#endif /* (CONFIG_ARC_MMU_VER >= 4) */

	return false;
}

static inlined_cachefunc bool icache_exists(void)
{
	union bcr_di_cache ibcr;

	ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
	return !!ibcr.fields.ver;
}

static inlined_cachefunc bool icache_enabled(void)
{
	if (!icache_exists())
		return false;

	return !(read_aux_reg(ARC_AUX_IC_CTRL) & IC_CTRL_CACHE_DISABLE);
}

static inlined_cachefunc bool dcache_exists(void)
{
	union bcr_di_cache dbcr;

	dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
	return !!dbcr.fields.ver;
}

static inlined_cachefunc bool dcache_enabled(void)
{
	if (!dcache_exists())
		return false;

	return !(read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_CACHE_DISABLE);
}

static inlined_cachefunc bool slc_exists(void)
{
	if (is_isa_arcv2()) {
		union bcr_generic sbcr;

		sbcr.word = read_aux_reg(ARC_BCR_SLC);
		return !!sbcr.fields.ver;
	}

	return false;
}

static inlined_cachefunc bool slc_data_bypass(void)
{
	/*
	 * If the L1 data cache is disabled, SL$ is bypassed and all load/store
	 * requests are sent directly to main memory.
	 */
	return !dcache_enabled();
}

static inline bool ioc_exists(void)
{
	if (is_isa_arcv2()) {
		union bcr_clust_cfg cbcr;

		cbcr.word = read_aux_reg(ARC_BCR_CLUSTER);
		return cbcr.fields.c;
	}

	return false;
}

static inline bool ioc_enabled(void)
{
	/*
	 * We only check the CONFIG option instead of the IOC HW state as the
	 * IOC must be disabled by default.
	 */
	if (is_ioc_enabled())
		return ioc_exists();

	return false;
}

static inlined_cachefunc void __slc_entire_op(const int op)
{
	unsigned int ctrl;

	if (!slc_exists())
		return;

	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		write_aux_reg(ARC_AUX_SLC_INVALIDATE, 0x1);
	else
		write_aux_reg(ARC_AUX_SLC_FLUSH, 0x1);

	/* Make sure the "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_AUX_SLC_CTRL);

	/* Important to wait for flush to complete */
	while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);
}

static void slc_upper_region_init(void)
{
	/*
	 * The ARC_AUX_SLC_RGN_START1 and ARC_AUX_SLC_RGN_END1 registers exist
	 * only if PAE exists in the current HW, so we have to check
	 * pae_exists() before using them.
	 */
	if (!pae_exists())
		return;

	/*
	 * ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1 are always == 0
	 * as we don't use PAE40.
	 */
	write_aux_reg(ARC_AUX_SLC_RGN_END1, 0);
	write_aux_reg(ARC_AUX_SLC_RGN_START1, 0);
}

static void __slc_rgn_op(unsigned long paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2

	unsigned int ctrl;
	unsigned long end;

	if (!slc_exists())
		return;

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 * - b'000 (default) is Flush,
	 * - b'001 is Invalidate if CTRL.IM == 0
	 * - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

	/* Don't rely on the default value of the IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip.
	 * END needs to be set up before START (the latter triggers the
	 * operation). END can't be the same as START, so add
	 * (slc_line_sz - 1) to sz.
	 */
	end = paddr + sz + gd->arch.slc_line_sz - 1;

	/*
	 * Upper addresses (ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1)
	 * are always == 0 as we don't use PAE40, so we only set up the lower
	 * ones (ARC_AUX_SLC_RGN_END and ARC_AUX_SLC_RGN_START).
	 */
	write_aux_reg(ARC_AUX_SLC_RGN_END, end);
	write_aux_reg(ARC_AUX_SLC_RGN_START, paddr);

	/* Make sure the "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_AUX_SLC_CTRL);

	while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);

#endif /* CONFIG_ISA_ARCV2 */
}
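/*
 * Worked example for the END/START programming above (illustrative numbers):
 * with gd->arch.slc_line_sz == 128, flushing sz == 64 bytes at
 * paddr == 0x80000000 programs ARC_AUX_SLC_RGN_END with
 * 0x80000000 + 64 + 127 == 0x800000bf before ARC_AUX_SLC_RGN_START is
 * written with 0x80000000, so END never equals START even for a region
 * smaller than one SLC line.
 */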
static void arc_ioc_setup(void)
{
	/* IOC Aperture start is equal to DDR start */
	unsigned int ap_base = CONFIG_SYS_SDRAM_BASE;
	/* IOC Aperture size is equal to DDR size */
	long ap_size = CONFIG_SYS_SDRAM_SIZE;

	/* Unsupported configuration. See [ NOTE 2 ] for more details. */
	if (!slc_exists())
		panic("Trying to enable IOC but SLC is not present");

	/* Unsupported configuration. See [ NOTE 2 ] for more details. */
	if (!dcache_enabled())
		panic("Trying to enable IOC but L1 D$ is disabled");

	if (!is_power_of_2(ap_size) || ap_size < 4096)
		panic("IOC Aperture size must be power of 2 and bigger than 4KiB");

	/* IOC Aperture start must be aligned to the size of the aperture */
	if (ap_base % ap_size != 0)
		panic("IOC Aperture start must be aligned to the size of the aperture");

	flush_n_invalidate_dcache_all();

	/*
	 * IOC Aperture size is decoded as 2 ^ (SIZE + 2) KB,
	 * so setting 0x11 implies 512M, 0x12 implies 1G...
	 */
	write_aux_reg(ARC_AUX_IO_COH_AP0_SIZE,
		      order_base_2(ap_size / 1024) - 2);

	write_aux_reg(ARC_AUX_IO_COH_AP0_BASE, ap_base >> 12);
	write_aux_reg(ARC_AUX_IO_COH_PARTIAL, 1);
	write_aux_reg(ARC_AUX_IO_COH_ENABLE, 1);
}
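/*
 * Worked example of the AP0_SIZE encoding above (illustrative board values):
 * for 512 MiB of DDR (ap_size == 0x20000000), ap_size / 1024 == 524288 == 2^19,
 * order_base_2() returns 19, and 19 - 2 == 17 == 0x11, which the IOC decodes
 * back as 2 ^ (17 + 2) KB == 512 MiB. With CONFIG_SYS_SDRAM_BASE ==
 * 0x80000000, AP0_BASE is programmed with 0x80000000 >> 12 == 0x80000.
 */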
static void read_decode_cache_bcr_arcv2(void)
{
#ifdef CONFIG_ISA_ARCV2

	union bcr_slc_cfg slc_cfg;

	if (slc_exists()) {
		slc_cfg.word = read_aux_reg(ARC_AUX_SLC_CONFIG);
		gd->arch.slc_line_sz = (slc_cfg.fields.lsz == 0) ? 128 : 64;

		/*
		 * We don't support configurations where L1 I$ or L1 D$ is
		 * absent but SL$ exists. See [ NOTE 2 ] for more details.
		 */
		if (!icache_exists() || !dcache_exists())
			panic("Unsupported cache configuration: SLC exists but one of L1 caches is absent");
	}

#endif /* CONFIG_ISA_ARCV2 */
}

void read_decode_cache_bcr(void)
{
	int dc_line_sz = 0, ic_line_sz = 0;
	union bcr_di_cache ibcr, dbcr;

	ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
	if (ibcr.fields.ver) {
		gd->arch.l1_line_sz = ic_line_sz = 8 << ibcr.fields.line_len;
		if (!ic_line_sz)
			panic("Instruction cache exists but line length is 0\n");
	}

	dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
	if (dbcr.fields.ver) {
		gd->arch.l1_line_sz = dc_line_sz = 16 << dbcr.fields.line_len;
		if (!dc_line_sz)
			panic("Data cache exists but line length is 0\n");
	}

	if (ic_line_sz && dc_line_sz && (ic_line_sz != dc_line_sz))
		panic("Instruction and data cache line lengths differ\n");
}
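/*
 * Worked example of the BCR line length decoding above (illustrative BCR
 * values): ibcr.fields.line_len == 3 gives an I$ line of 8 << 3 == 64 bytes,
 * and dbcr.fields.line_len == 2 gives a D$ line of 16 << 2 == 64 bytes, so
 * the two line lengths match and the final sanity check passes.
 */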
void cache_init(void)
{
	read_decode_cache_bcr();

	if (is_isa_arcv2())
		read_decode_cache_bcr_arcv2();

	if (is_isa_arcv2() && ioc_enabled())
		arc_ioc_setup();

	if (is_isa_arcv2() && slc_exists())
		slc_upper_region_init();
}

int icache_status(void)
{
	return icache_enabled();
}

void icache_enable(void)
{
	if (icache_exists())
		write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) &
			      ~IC_CTRL_CACHE_DISABLE);
}

void icache_disable(void)
{
	if (!icache_exists())
		return;

	__ic_entire_invalidate();

	write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) |
		      IC_CTRL_CACHE_DISABLE);
}

/* IC supports only invalidation */
static inlined_cachefunc void __ic_entire_invalidate(void)
{
	if (!icache_enabled())
		return;

	/* Any write to the IC_IVIC register triggers invalidation of the entire I$ */
	write_aux_reg(ARC_AUX_IC_IVIC, 1);
	/*
	 * As per the ARC HS databook (see chapter 5.3.3.2)
	 * it is required to add 3 NOPs after each write to IC_IVIC.
	 */
	__builtin_arc_nop();
	__builtin_arc_nop();
	__builtin_arc_nop();
	read_aux_reg(ARC_AUX_IC_CTRL);	/* blocks */
}

void invalidate_icache_all(void)
{
	__ic_entire_invalidate();

	/*
	 * If SL$ is bypassed for data it is used only for instructions,
	 * so we need to invalidate it too.
	 * TODO: HS 3.0 supports SLC disable, so we need to check the SLC
	 * enable/disable status here.
	 */
	if (is_isa_arcv2() && slc_data_bypass())
		__slc_entire_op(OP_INV);
}

int dcache_status(void)
{
	return dcache_enabled();
}

void dcache_enable(void)
{
	if (!dcache_exists())
		return;

	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) &
		      ~(DC_CTRL_INV_MODE_FLUSH | DC_CTRL_CACHE_DISABLE));
}

void dcache_disable(void)
{
	if (!dcache_exists())
		return;

	__dc_entire_op(OP_FLUSH_N_INV);

	/*
	 * As the SLC will be bypassed for data once the L1 D$ is disabled, we
	 * need to flush it before disabling the L1 D$. We also invalidate the
	 * SLC to avoid any inconsistent data problems after enabling the
	 * L1 D$ again with the dcache_enable function.
	 */
	if (is_isa_arcv2())
		__slc_entire_op(OP_FLUSH_N_INV);

	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) |
		      DC_CTRL_CACHE_DISABLE);
}

/* Common Helper for Line Operations on D-cache */
static inline void __dcache_line_loop(unsigned long paddr, unsigned long sz,
				      const int cacheop)
{
	unsigned int aux_cmd;
	int num_lines;

	/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
	aux_cmd = cacheop & OP_INV ? ARC_AUX_DC_IVDL : ARC_AUX_DC_FLDL;

	sz += paddr & ~CACHE_LINE_MASK;
	paddr &= CACHE_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, gd->arch.l1_line_sz);

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER == 3)
		write_aux_reg(ARC_AUX_DC_PTAG, paddr);
#endif
		write_aux_reg(aux_cmd, paddr);
		paddr += gd->arch.l1_line_sz;
	}
}

static inlined_cachefunc void __before_dc_op(const int op)
{
	unsigned int ctrl;

	ctrl = read_aux_reg(ARC_AUX_DC_CTRL);

	/* IM bit implies flush-n-inv, instead of vanilla inv */
	if (op == OP_INV)
		ctrl &= ~DC_CTRL_INV_MODE_FLUSH;
	else
		ctrl |= DC_CTRL_INV_MODE_FLUSH;

	write_aux_reg(ARC_AUX_DC_CTRL, ctrl);
}

static inlined_cachefunc void __after_dc_op(const int op)
{
	if (op & OP_FLUSH)	/* flush / flush-n-inv both wait */
		while (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_FLUSH_STATUS);
}

static inlined_cachefunc void __dc_entire_op(const int cacheop)
{
	int aux;

	if (!dcache_enabled())
		return;

	__before_dc_op(cacheop);

	if (cacheop & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_AUX_DC_IVDC;
	else
		aux = ARC_AUX_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(cacheop);
}

static inline void __dc_line_op(unsigned long paddr, unsigned long sz,
				const int cacheop)
{
	if (!dcache_enabled())
		return;

	__before_dc_op(cacheop);
	__dcache_line_loop(paddr, sz, cacheop);
	__after_dc_op(cacheop);
}

void invalidate_dcache_range(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;

	/*
	 * ARCv1                                 -> call __dc_line_op
	 * ARCv2 && L1 D$ disabled               -> nothing
	 * ARCv2 && L1 D$ enabled && IOC enabled -> nothing
	 * ARCv2 && L1 D$ enabled && no IOC      -> call __dc_line_op; call __slc_rgn_op
	 */
	if (!is_isa_arcv2() || !ioc_enabled())
		__dc_line_op(start, end - start, OP_INV);

	if (is_isa_arcv2() && !ioc_enabled() && !slc_data_bypass())
		__slc_rgn_op(start, end - start, OP_INV);
}

void flush_dcache_range(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;

	/*
	 * ARCv1                                 -> call __dc_line_op
	 * ARCv2 && L1 D$ disabled               -> nothing
	 * ARCv2 && L1 D$ enabled && IOC enabled -> nothing
	 * ARCv2 && L1 D$ enabled && no IOC      -> call __dc_line_op; call __slc_rgn_op
	 */
	if (!is_isa_arcv2() || !ioc_enabled())
		__dc_line_op(start, end - start, OP_FLUSH);

	if (is_isa_arcv2() && !ioc_enabled() && !slc_data_bypass())
		__slc_rgn_op(start, end - start, OP_FLUSH);
}

void flush_cache(unsigned long start, unsigned long size)
{
	flush_dcache_range(start, start + size);
}
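/*
 * A typical caller-side use of the range functions above (illustrative
 * sketch, not part of this file): drivers flush a buffer before a device
 * reads it via DMA and invalidate it before the CPU reads data a device has
 * DMA-ed in, with the region rounded up to ARCH_DMA_MINALIGN:
 *
 *	flush_dcache_range((unsigned long)buf,
 *			   (unsigned long)buf + roundup(len, ARCH_DMA_MINALIGN));
 *	// ... device reads buf via DMA, then writes a response into buf ...
 *	invalidate_dcache_range((unsigned long)buf,
 *				(unsigned long)buf + roundup(len, ARCH_DMA_MINALIGN));
 */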
/*
 * As invalidate_dcache_all() is not used in generic U-Boot code and we don't
 * need a pure invalidate (without flush) in arch/arc code either, we implement
 * flush_n_invalidate_dcache_all() (flush and invalidate in one operation)
 * instead, because it's much safer. See [ NOTE 1 ] for more details.
 */
void flush_n_invalidate_dcache_all(void)
{
	__dc_entire_op(OP_FLUSH_N_INV);

	if (is_isa_arcv2() && !slc_data_bypass())
		__slc_entire_op(OP_FLUSH_N_INV);
}

void flush_dcache_all(void)
{
	__dc_entire_op(OP_FLUSH);

	if (is_isa_arcv2() && !slc_data_bypass())
		__slc_entire_op(OP_FLUSH);
}

/*
 * This function cleans up all caches (and therefore syncs the I/D caches). It
 * can be used for cleanup before launching Linux or to sync caches during
 * relocation.
 */
void sync_n_cleanup_cache_all(void)
{
	__dc_entire_op(OP_FLUSH_N_INV);

	/*
	 * If SL$ is bypassed for data it is used only for instructions,
	 * and we shouldn't flush it. So invalidate it instead of flush-n-inv.
	 */
	if (is_isa_arcv2()) {
		if (slc_data_bypass())
			__slc_entire_op(OP_INV);
		else
			__slc_entire_op(OP_FLUSH_N_INV);
	}

	__ic_entire_invalidate();
}