/*
 * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <config.h>
#include <common.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <asm/arcregs.h>
#include <asm/arc-bcr.h>
#include <asm/cache.h>

/*
 * [ NOTE 1 ]:
 * Data cache (L1 D$ or SL$) entire invalidate operation or data cache disable
 * operation may result in unexpected behavior and data loss even if we flush
 * data cache right before invalidation. That may happen if we store any
 * context on the stack (like we store the BLINK register on the stack before
 * a function call). BLINK is the register where the return address is
 * automatically saved when we do a function call with instructions like 'bl'.
 *
 * Here is a real example:
 * We may hang in the code below as we store the BLINK register on the stack
 * in the invalidate_dcache_all() function.
 *
 * void flush_dcache_all() {
 *     __dc_entire_op(OP_FLUSH);
 *     // Other code //
 * }
 *
 * void invalidate_dcache_all() {
 *     __dc_entire_op(OP_INV);
 *     // Other code //
 * }
 *
 * void foo(void) {
 *     flush_dcache_all();
 *     invalidate_dcache_all();
 * }
 *
 * Now let's see what really happens during that code execution:
 *
 * foo()
 *   |->> call flush_dcache_all
 *     [return address is saved to BLINK register]
 *     [push BLINK] (save to stack)              ![point 1]
 *     |->> call __dc_entire_op(OP_FLUSH)
 *         [return address is saved to BLINK register]
 *         [flush L1 D$]
 *         return [jump to BLINK]
 *     <<------
 *     [other flush_dcache_all code]
 *     [pop BLINK] (get from stack)
 *     return [jump to BLINK]
 *   <<------
 *   |->> call invalidate_dcache_all
 *     [return address is saved to BLINK register]
 *     [push BLINK] (save to stack)              ![point 2]
 *     |->> call __dc_entire_op(OP_INV)
 *         [return address is saved to BLINK register]
 *         [invalidate L1 D$]                    ![point 3]
 *         // Oops!!!
 *         // We lose the return address from the invalidate_dcache_all
 *         // function: we saved it to the stack and invalidated L1 D$
 *         // after that!
 *         return [jump to BLINK]
 *     <<------
 *     [other invalidate_dcache_all code]
 *     [pop BLINK] (get from stack)
 *     // we don't have this data in L1 dcache as we invalidated it in
 *     // [point 3], so we get it from the next memory level (for example
 *     // DDR memory). But in memory we have the value saved at [point 1],
 *     // which is the return address of the flush_dcache_all function
 *     // (instead of the address of the current invalidate_dcache_all
 *     // function which we saved at [point 2]!)
 *     return [jump to BLINK]
 *   <<------
 *   // As BLINK points to invalidate_dcache_all, we call it again and
 *   // loop forever.
 *
 * Fortunately we may fix that by doing flush & invalidation of D$ with a
 * single instruction (instead of a flush and invalidation instruction pair)
 * and by forcing functions inline with the '__attribute__((always_inline))'
 * gcc attribute to avoid any function call (and BLINK store) between the
 * cache flush and disable.
 */
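/*
 * In caller terms the fix described in [ NOTE 1 ] boils down to the
 * following sketch (illustrative only; the real helper is
 * flush_n_invalidate_dcache_all() at the bottom of this file):
 *
 *	void foo(void) {
 *		// NOT: flush_dcache_all(); invalidate_dcache_all();
 *		flush_n_invalidate_dcache_all();  // single flush-n-inv op
 *	}
 */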
/* Bit values in IC_CTRL */
#define IC_CTRL_CACHE_DISABLE	BIT(0)

/* Bit values in DC_CTRL */
#define DC_CTRL_CACHE_DISABLE	BIT(0)
#define DC_CTRL_INV_MODE_FLUSH	BIT(6)
#define DC_CTRL_FLUSH_STATUS	BIT(8)
#define CACHE_VER_NUM_MASK	0xF

#define OP_INV			BIT(0)
#define OP_FLUSH		BIT(1)
#define OP_FLUSH_N_INV		(OP_FLUSH | OP_INV)

/* Bit values in SLC_CONTROL */
#define SLC_CTRL_DIS		0x001
#define SLC_CTRL_IM		0x040
#define SLC_CTRL_BUSY		0x100
#define SLC_CTRL_RGN_OP_INV	0x200

/*
 * By default these variables would fall into the .bss section.
 * But the .bss section is not relocated, so they would be initialized
 * before relocation but then used after being zeroed.
 */
int l1_line_sz __section(".data");
bool dcache_exists __section(".data") = false;
bool icache_exists __section(".data") = false;

#define CACHE_LINE_MASK		(~(l1_line_sz - 1))

int slc_line_sz __section(".data");
bool slc_exists __section(".data") = false;
bool ioc_exists __section(".data") = false;
bool pae_exists __section(".data") = false;

/* To force enable IOC set ioc_enable to 'true' */
bool ioc_enable __section(".data") = false;

void read_decode_mmu_bcr(void)
{
	/* TODO: should we compare mmu version from BCR and from CONFIG? */
#if (CONFIG_ARC_MMU_VER >= 4)
	union bcr_mmu_4 mmu4;

	mmu4.word = read_aux_reg(ARC_AUX_MMU_BCR);

	pae_exists = !!mmu4.fields.pae;
#endif /* (CONFIG_ARC_MMU_VER >= 4) */
}

static void __slc_entire_op(const int op)
{
	unsigned int ctrl;

	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		write_aux_reg(ARC_AUX_SLC_INVALIDATE, 0x1);
	else
		write_aux_reg(ARC_AUX_SLC_FLUSH, 0x1);

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_AUX_SLC_CTRL);

	/* Important to wait for flush to complete */
	while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY)
		;
}

static void slc_upper_region_init(void)
{
	/*
	 * ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1 are always == 0
	 * as we don't use PAE40.
	 */
	write_aux_reg(ARC_AUX_SLC_RGN_END1, 0);
	write_aux_reg(ARC_AUX_SLC_RGN_START1, 0);
}
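/*
 * Usage sketch for __slc_entire_op() above (illustrative, mirrors the
 * callers at the bottom of this file): whole-SLC maintenance is only
 * requested on ARCv2 parts that actually report an SLC, e.g.
 *
 *	if (is_isa_arcv2() && slc_exists)
 *		__slc_entire_op(OP_FLUSH_N_INV);
 */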
static void __slc_rgn_op(unsigned long paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2

	unsigned int ctrl;
	unsigned long end;

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip
	 * END needs to be setup before START (latter triggers the operation)
	 * END can't be same as START, so add (slc_line_sz - 1) to sz
	 */
	end = paddr + sz + slc_line_sz - 1;

	/*
	 * Upper addresses (ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1)
	 * are always == 0 as we don't use PAE40, so we only setup lower ones
	 * (ARC_AUX_SLC_RGN_END and ARC_AUX_SLC_RGN_START)
	 */
	write_aux_reg(ARC_AUX_SLC_RGN_END, end);
	write_aux_reg(ARC_AUX_SLC_RGN_START, paddr);

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_AUX_SLC_CTRL);

	while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY)
		;

#endif /* CONFIG_ISA_ARCV2 */
}

static void arc_ioc_setup(void)
{
	/* IOC Aperture start is equal to DDR start */
	unsigned int ap_base = CONFIG_SYS_SDRAM_BASE;
	/* IOC Aperture size is equal to DDR size */
	long ap_size = CONFIG_SYS_SDRAM_SIZE;

	flush_n_invalidate_dcache_all();

	if (!is_power_of_2(ap_size) || ap_size < 4096)
		panic("IOC Aperture size must be power of 2 and bigger than 4KiB");

	/*
	 * IOC Aperture size is decoded as 2 ^ (SIZE + 2) KB,
	 * so setting 0x11 implies 512M, 0x12 implies 1G...
	 */
	write_aux_reg(ARC_AUX_IO_COH_AP0_SIZE,
		      order_base_2(ap_size / 1024) - 2);

	/* IOC Aperture start must be aligned to the size of the aperture */
	if (ap_base % ap_size != 0)
		panic("IOC Aperture start must be aligned to the size of the aperture");

	write_aux_reg(ARC_AUX_IO_COH_AP0_BASE, ap_base >> 12);
	write_aux_reg(ARC_AUX_IO_COH_PARTIAL, 1);
	write_aux_reg(ARC_AUX_IO_COH_ENABLE, 1);
}
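/*
 * Worked example for the AP0_SIZE encoding in arc_ioc_setup() (numbers are
 * illustrative): for a 512 MiB DDR, ap_size / 1024 = 524288 KiB and
 * order_base_2(524288) = 19, so the register gets 19 - 2 = 17 = 0x11,
 * i.e. 2 ^ (17 + 2) KiB = 512M, matching the comment above.
 */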
static void read_decode_cache_bcr_arcv2(void)
{
#ifdef CONFIG_ISA_ARCV2

	union bcr_slc_cfg slc_cfg;
	union bcr_clust_cfg cbcr;
	union bcr_generic sbcr;

	sbcr.word = read_aux_reg(ARC_BCR_SLC);
	if (sbcr.fields.ver) {
		slc_cfg.word = read_aux_reg(ARC_AUX_SLC_CONFIG);
		slc_exists = true;
		slc_line_sz = (slc_cfg.fields.lsz == 0) ? 128 : 64;
	}

	cbcr.word = read_aux_reg(ARC_BCR_CLUSTER);
	if (cbcr.fields.c && ioc_enable)
		ioc_exists = true;

#endif /* CONFIG_ISA_ARCV2 */
}

void read_decode_cache_bcr(void)
{
	int dc_line_sz = 0, ic_line_sz = 0;
	union bcr_di_cache ibcr, dbcr;

	ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
	if (ibcr.fields.ver) {
		icache_exists = true;
		l1_line_sz = ic_line_sz = 8 << ibcr.fields.line_len;
		if (!ic_line_sz)
			panic("Instruction cache exists but line length is 0\n");
	}

	dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
	if (dbcr.fields.ver) {
		dcache_exists = true;
		l1_line_sz = dc_line_sz = 16 << dbcr.fields.line_len;
		if (!dc_line_sz)
			panic("Data cache exists but line length is 0\n");
	}

	if (ic_line_sz && dc_line_sz && (ic_line_sz != dc_line_sz))
		panic("Instruction and data cache line lengths differ\n");
}

void cache_init(void)
{
	read_decode_cache_bcr();

	if (is_isa_arcv2())
		read_decode_cache_bcr_arcv2();

	if (is_isa_arcv2() && ioc_exists)
		arc_ioc_setup();

	read_decode_mmu_bcr();

	/*
	 * ARC_AUX_SLC_RGN_START1 and ARC_AUX_SLC_RGN_END1 registers exist
	 * only if PAE exists in the current HW, so we have to check
	 * pae_exists before using them.
	 */
	if (is_isa_arcv2() && slc_exists && pae_exists)
		slc_upper_region_init();
}

int icache_status(void)
{
	if (!icache_exists)
		return 0;

	if (read_aux_reg(ARC_AUX_IC_CTRL) & IC_CTRL_CACHE_DISABLE)
		return 0;
	else
		return 1;
}

void icache_enable(void)
{
	if (icache_exists)
		write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) &
			      ~IC_CTRL_CACHE_DISABLE);
}

void icache_disable(void)
{
	if (icache_exists)
		write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) |
			      IC_CTRL_CACHE_DISABLE);
}

/* IC supports only invalidation */
static inline void __ic_entire_invalidate(void)
{
	if (!icache_status())
		return;

	/* Any write to IC_IVIC register triggers invalidation of entire I$ */
	write_aux_reg(ARC_AUX_IC_IVIC, 1);
	/*
	 * As per ARC HS databook (see chapter 5.3.3.2)
	 * it is required to add 3 NOPs after each write to IC_IVIC.
	 */
	__builtin_arc_nop();
	__builtin_arc_nop();
	__builtin_arc_nop();
	read_aux_reg(ARC_AUX_IC_CTRL);	/* blocks */
}

void invalidate_icache_all(void)
{
	__ic_entire_invalidate();

	if (is_isa_arcv2() && slc_exists)
		__slc_entire_op(OP_INV);
}

int dcache_status(void)
{
	if (!dcache_exists)
		return 0;

	if (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_CACHE_DISABLE)
		return 0;
	else
		return 1;
}

void dcache_enable(void)
{
	if (!dcache_exists)
		return;

	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) &
		      ~(DC_CTRL_INV_MODE_FLUSH | DC_CTRL_CACHE_DISABLE));
}

void dcache_disable(void)
{
	if (!dcache_exists)
		return;

	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) |
		      DC_CTRL_CACHE_DISABLE);
}
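/*
 * Worked example for __dcache_line_loop() below (hypothetical numbers):
 * with l1_line_sz = 32, a request for paddr = 0x80000005, sz = 0x40 first
 * grows sz by the misalignment (sz = 0x45), rounds paddr down to the line
 * boundary 0x80000000, and then touches DIV_ROUND_UP(0x45, 32) = 3 lines:
 * 0x80000000, 0x80000020 and 0x80000040.
 */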
/* Common Helper for Line Operations on D-cache */
static inline void __dcache_line_loop(unsigned long paddr, unsigned long sz,
				      const int cacheop)
{
	unsigned int aux_cmd;
	int num_lines;

	/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
	aux_cmd = cacheop & OP_INV ? ARC_AUX_DC_IVDL : ARC_AUX_DC_FLDL;

	sz += paddr & ~CACHE_LINE_MASK;
	paddr &= CACHE_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, l1_line_sz);

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER == 3)
		write_aux_reg(ARC_AUX_DC_PTAG, paddr);
#endif
		write_aux_reg(aux_cmd, paddr);
		paddr += l1_line_sz;
	}
}

static void __before_dc_op(const int op)
{
	unsigned int ctrl;

	ctrl = read_aux_reg(ARC_AUX_DC_CTRL);

	/* IM bit implies flush-n-inv, instead of vanilla inv */
	if (op == OP_INV)
		ctrl &= ~DC_CTRL_INV_MODE_FLUSH;
	else
		ctrl |= DC_CTRL_INV_MODE_FLUSH;

	write_aux_reg(ARC_AUX_DC_CTRL, ctrl);
}

static void __after_dc_op(const int op)
{
	if (op & OP_FLUSH)	/* flush / flush-n-inv both wait */
		while (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_FLUSH_STATUS)
			;
}

static inline void __dc_entire_op(const int cacheop)
{
	int aux;

	if (!dcache_status())
		return;

	__before_dc_op(cacheop);

	if (cacheop & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_AUX_DC_IVDC;
	else
		aux = ARC_AUX_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(cacheop);
}

static inline void __dc_line_op(unsigned long paddr, unsigned long sz,
				const int cacheop)
{
	if (!dcache_status())
		return;

	__before_dc_op(cacheop);
	__dcache_line_loop(paddr, sz, cacheop);
	__after_dc_op(cacheop);
}

void invalidate_dcache_range(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;

	/*
	 * ARCv1                  -> call __dc_line_op
	 * ARCv2 && no IOC        -> call __dc_line_op; call __slc_rgn_op
	 * ARCv2 && IOC enabled   -> nothing
	 */
	if (!is_isa_arcv2() || !ioc_exists)
		__dc_line_op(start, end - start, OP_INV);

	if (is_isa_arcv2() && slc_exists && !ioc_exists)
		__slc_rgn_op(start, end - start, OP_INV);
}

void flush_dcache_range(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;

	/*
	 * ARCv1                  -> call __dc_line_op
	 * ARCv2 && no IOC        -> call __dc_line_op; call __slc_rgn_op
	 * ARCv2 && IOC enabled   -> nothing
	 */
	if (!is_isa_arcv2() || !ioc_exists)
		__dc_line_op(start, end - start, OP_FLUSH);

	if (is_isa_arcv2() && slc_exists && !ioc_exists)
		__slc_rgn_op(start, end - start, OP_FLUSH);
}

void flush_cache(unsigned long start, unsigned long size)
{
	flush_dcache_range(start, start + size);
}

/*
 * As invalidate_dcache_all() is not used in generic U-Boot code and as we
 * don't need an invalidate-without-flush operation in arch/arc code either,
 * we implement flush_n_invalidate_dcache_all() (flush and invalidate in one
 * operation) instead, because it's much safer. See [ NOTE 1 ] for more
 * details.
 */
void flush_n_invalidate_dcache_all(void)
{
	__dc_entire_op(OP_FLUSH_N_INV);

	if (is_isa_arcv2() && slc_exists)
		__slc_entire_op(OP_FLUSH_N_INV);
}

void flush_dcache_all(void)
{
	__dc_entire_op(OP_FLUSH);

	if (is_isa_arcv2() && slc_exists)
		__slc_entire_op(OP_FLUSH);
}
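/*
 * Usage sketch for the public API above (driver-side; 'buf' and 'len' are
 * hypothetical):
 *
 *	// CPU -> device: make CPU writes visible to a DMA master
 *	flush_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 *
 *	// device -> CPU: drop stale lines before reading a DMA buffer
 *	invalidate_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 *
 * On ARCv2 with IOC enabled both calls are intentionally no-ops, as the IO
 * Coherency unit keeps DMA traffic coherent with the caches.
 */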