/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"

#define CACHE_LINE_SIZE         32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;       /* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

/*
 * Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree.
 */
static u32 cache_id_part_number_from_dt;

struct l2x0_regs l2x0_saved_regs;

struct l2x0_of_data {
        void (*setup)(const struct device_node *, u32 *, u32 *);
        void (*save)(void);
        struct outer_cache_fns outer_cache;
};

static bool of_init = false;

static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
        /* wait for cache operation by line or way to complete */
        while (readl_relaxed(reg) & mask)
                cpu_relax();
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
        /* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait      cache_wait_way
#endif

static inline void cache_sync(void)
{
        void __iomem *base = l2x0_base;

        writel_relaxed(0, base + sync_reg_offset);
        cache_wait(base + L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_clean_line(unsigned long addr)
{
        void __iomem *base = l2x0_base;
        cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
        writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
        void __iomem *base = l2x0_base;
        cache_wait(base + L2X0_INV_LINE_PA, 1);
        writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
        if (outer_cache.set_debug)
                outer_cache.set_debug(val);
}

static void pl310_set_debug(unsigned long val)
{
        writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define pl310_set_debug NULL
#endif

#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
        void __iomem *base = l2x0_base;

        /* Clean by PA followed by Invalidate by PA */
        cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
        writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
        cache_wait(base + L2X0_INV_LINE_PA, 1);
        writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

static inline void l2x0_flush_line(unsigned long addr)
{
        void __iomem *base = l2x0_base;
        cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
        writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif
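/*
 * Terminology for the per-line helpers above (standard ARM cache
 * maintenance semantics): "clean" writes a dirty line back to memory and
 * leaves it valid, "invalidate" discards a line without writing it back,
 * and "flush" is a clean followed by an invalidate.
 */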
static void l2x0_cache_sync(void)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&l2x0_lock, flags);
        cache_sync();
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
        debug_writel(0x03);
        writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
        cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
        cache_sync();
        debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
        unsigned long flags;

        /* clean all ways */
        raw_spin_lock_irqsave(&l2x0_lock, flags);
        __l2x0_flush_all();
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
        unsigned long flags;

        /* clean all ways */
        raw_spin_lock_irqsave(&l2x0_lock, flags);
        writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
        cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
        cache_sync();
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
        unsigned long flags;

        /* invalidate all ways */
        raw_spin_lock_irqsave(&l2x0_lock, flags);
        /* Invalidating while the L2 is enabled is a no-no */
        BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
        writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
        cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
        cache_sync();
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_range(unsigned long start, unsigned long end)
{
        void __iomem *base = l2x0_base;
        unsigned long flags;

        raw_spin_lock_irqsave(&l2x0_lock, flags);
        if (start & (CACHE_LINE_SIZE - 1)) {
                start &= ~(CACHE_LINE_SIZE - 1);
                debug_writel(0x03);
                l2x0_flush_line(start);
                debug_writel(0x00);
                start += CACHE_LINE_SIZE;
        }

        if (end & (CACHE_LINE_SIZE - 1)) {
                end &= ~(CACHE_LINE_SIZE - 1);
                debug_writel(0x03);
                l2x0_flush_line(end);
                debug_writel(0x00);
        }

        while (start < end) {
                unsigned long blk_end = start + min(end - start, 4096UL);

                while (start < blk_end) {
                        l2x0_inv_line(start);
                        start += CACHE_LINE_SIZE;
                }

                if (blk_end < end) {
                        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
                        raw_spin_lock_irqsave(&l2x0_lock, flags);
                }
        }
        cache_wait(base + L2X0_INV_LINE_PA, 1);
        cache_sync();
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_range(unsigned long start, unsigned long end)
{
        void __iomem *base = l2x0_base;
        unsigned long flags;

        if ((end - start) >= l2x0_size) {
                l2x0_clean_all();
                return;
        }

        raw_spin_lock_irqsave(&l2x0_lock, flags);
        start &= ~(CACHE_LINE_SIZE - 1);
        while (start < end) {
                unsigned long blk_end = start + min(end - start, 4096UL);

                while (start < blk_end) {
                        l2x0_clean_line(start);
                        start += CACHE_LINE_SIZE;
                }

                if (blk_end < end) {
                        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
                        raw_spin_lock_irqsave(&l2x0_lock, flags);
                }
        }
        cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
        cache_sync();
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
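/*
 * Note on the min(..., 4096UL) blocking used by the range operations
 * above and below: the lock is dropped and immediately retaken between
 * 4 KB blocks (128 lines at a CACHE_LINE_SIZE of 32), which bounds how
 * long interrupts stay disabled when a caller passes a very large range.
 */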
static void l2x0_flush_range(unsigned long start, unsigned long end)
{
        void __iomem *base = l2x0_base;
        unsigned long flags;

        if ((end - start) >= l2x0_size) {
                l2x0_flush_all();
                return;
        }

        raw_spin_lock_irqsave(&l2x0_lock, flags);
        start &= ~(CACHE_LINE_SIZE - 1);
        while (start < end) {
                unsigned long blk_end = start + min(end - start, 4096UL);

                debug_writel(0x03);
                while (start < blk_end) {
                        l2x0_flush_line(start);
                        start += CACHE_LINE_SIZE;
                }
                debug_writel(0x00);

                if (blk_end < end) {
                        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
                        raw_spin_lock_irqsave(&l2x0_lock, flags);
                }
        }
        cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
        cache_sync();
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&l2x0_lock, flags);
        __l2x0_flush_all();
        writel_relaxed(0, l2x0_base + L2X0_CTRL);
        dsb(st);
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_unlock(u32 cache_id)
{
        int lockregs;
        int i;

        switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
        case L2X0_CACHE_ID_PART_L310:
                lockregs = 8;
                break;
        case AURORA_CACHE_ID:
                lockregs = 4;
                break;
        default:
                /* L210 and unknown types */
                lockregs = 1;
                break;
        }

        for (i = 0; i < lockregs; i++) {
                writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
                               i * L2X0_LOCKDOWN_STRIDE);
                writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
                               i * L2X0_LOCKDOWN_STRIDE);
        }
}

void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
        u32 aux;
        u32 cache_id;
        u32 way_size = 0;
        int ways;
        int way_size_shift = L2X0_WAY_SIZE_SHIFT;
        const char *type;

        l2x0_base = base;
        if (cache_id_part_number_from_dt)
                cache_id = cache_id_part_number_from_dt;
        else
                cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
        aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

        aux &= aux_mask;
        aux |= aux_val;

        /* Determine the number of ways */
        switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
        case L2X0_CACHE_ID_PART_L310:
                if (aux & (1 << 16))
                        ways = 16;
                else
                        ways = 8;
                type = "L310";
#ifdef CONFIG_PL310_ERRATA_753970
                /* Unmapped register. */
                sync_reg_offset = L2X0_DUMMY_REG;
#endif
                if ((cache_id & L2X0_CACHE_ID_RTL_MASK) <= L2X0_CACHE_ID_RTL_R3P0)
                        outer_cache.set_debug = pl310_set_debug;
                break;
        case L2X0_CACHE_ID_PART_L210:
                ways = (aux >> 13) & 0xf;
                type = "L210";
                break;

        case AURORA_CACHE_ID:
                sync_reg_offset = AURORA_SYNC_REG;
                ways = (aux >> 13) & 0xf;
                ways = 2 << ((ways + 1) >> 2);
                way_size_shift = AURORA_WAY_SIZE_SHIFT;
                type = "Aurora";
                break;
        default:
                /* Assume unknown chips have 8 ways */
                ways = 8;
                type = "L2x0 series";
                break;
        }

        l2x0_way_mask = (1 << ways) - 1;

        /*
         * L2 cache size = way size * number of ways
         */
        way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
        way_size = 1 << (way_size + way_size_shift);

        l2x0_size = ways * way_size * SZ_1K;

        /*
         * Check whether the l2x0 controller is already enabled.  If we
         * are booting in non-secure mode, accessing the registers below
         * will fault.
         */
        if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
                /* Make sure that I&D is not locked down when starting */
                l2x0_unlock(cache_id);

                /* l2x0 controller is disabled */
                writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

                l2x0_inv_all();

                /* enable L2X0 */
                writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
        }

        /* Re-read it in case some bits are reserved. */
        aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

        /* Save the value for resuming. */
        l2x0_saved_regs.aux_ctrl = aux;

        if (!of_init) {
                outer_cache.inv_range = l2x0_inv_range;
                outer_cache.clean_range = l2x0_clean_range;
                outer_cache.flush_range = l2x0_flush_range;
                outer_cache.sync = l2x0_cache_sync;
                outer_cache.flush_all = l2x0_flush_all;
                outer_cache.disable = l2x0_disable;
        }

        pr_info("%s cache controller enabled\n", type);
        pr_info("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d kB\n",
                ways, cache_id, aux, l2x0_size >> 10);
}
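/*
 * Worked example of the sizing arithmetic in l2x0_init() above, assuming
 * the usual L2X0_WAY_SIZE_SHIFT of 3: a PL310 whose AUX_CTRL way-size
 * field reads 3 has way_size = 1 << (3 + 3) = 64 KB per way, so with the
 * 16-way associativity bit set, l2x0_size = 16 * 64 * SZ_1K = 1 MB.
 */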
#ifdef CONFIG_OF
static int l2_wt_override;

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
        /*
         * Limit the number of cache lines processed at once,
         * since cache range operations stall the CPU pipeline
         * until completion.
         */
        if (end > start + MAX_RANGE_SIZE)
                end = start + MAX_RANGE_SIZE;

        /*
         * Cache range operations can't straddle a page boundary.
         */
        if (end > PAGE_ALIGN(start+1))
                end = PAGE_ALIGN(start+1);

        return end;
}

/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
                            unsigned long offset)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&l2x0_lock, flags);
        writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
        writel_relaxed(end, l2x0_base + offset);
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);

        cache_sync();
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
        /*
         * Align 'start' down and 'end' up to cache line boundaries.
         */
        start &= ~(CACHE_LINE_SIZE - 1);
        end = ALIGN(end, CACHE_LINE_SIZE);

        /*
         * Invalidate all full cache lines between 'start' and 'end'.
         */
        while (start < end) {
                unsigned long range_end = calc_range_end(start, end);
                aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
                                AURORA_INVAL_RANGE_REG);
                start = range_end;
        }
}
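/*
 * Worked example for calc_range_end() above, assuming 4 KB pages: for a
 * range starting at 0x40000fe0 and ending at 0x40002000, the first pass
 * is clamped to PAGE_ALIGN(0x40000fe1) = 0x40001000, so a single hardware
 * range operation never straddles a page boundary.
 */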
static void aurora_clean_range(unsigned long start, unsigned long end)
{
        /*
         * If L2 is forced to WT, the L2 will always be clean and we
         * don't need to do anything here.
         */
        if (!l2_wt_override) {
                start &= ~(CACHE_LINE_SIZE - 1);
                end = ALIGN(end, CACHE_LINE_SIZE);
                while (start != end) {
                        unsigned long range_end = calc_range_end(start, end);
                        aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
                                        AURORA_CLEAN_RANGE_REG);
                        start = range_end;
                }
        }
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
        start &= ~(CACHE_LINE_SIZE - 1);
        end = ALIGN(end, CACHE_LINE_SIZE);
        while (start != end) {
                unsigned long range_end = calc_range_end(start, end);
                /*
                 * If L2 is forced to WT, the L2 will always be clean and
                 * we just need to invalidate.
                 */
                if (l2_wt_override)
                        aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
                                        AURORA_INVAL_RANGE_REG);
                else
                        aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
                                        AURORA_FLUSH_RANGE_REG);
                start = range_end;
        }
}

/*
 * For certain Broadcom SoCs, depending on the address range, different
 * offsets need to be added to the address before passing it to L2 for
 * invalidation/clean/flush.
 *
 * Section  Address Range              Offset      EMI
 *   1      0x00000000 - 0x3FFFFFFF    0x80000000  VC
 *   2      0x40000000 - 0xBFFFFFFF    0x40000000  SYS
 *   3      0xC0000000 - 0xFFFFFFFF    0x80000000  VC
 *
 * When the start and end addresses cross two different sections, we need
 * to break the L2 operation into two, each within its own section.  For
 * example, to invalidate a range starting at 0xBFFF0000 and ending at
 * 0xC0001000, we invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and
 * 2) 0xC0000000 - 0xC0001000.
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer
 * some performance hit, but keep in mind that the cross-section case is
 * very rare.
 *
 * Note 2:
 * We do not need to handle the case where the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid
 * use case.
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2.  Because
 * of that, the code does not need to handle Section 1 at all.
 */
#define BCM_SYS_EMI_START_ADDR          0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR      0xC0000000UL

#define BCM_SYS_EMI_OFFSET              0x40000000UL
#define BCM_VC_EMI_OFFSET               0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
        return (addr >= BCM_SYS_EMI_START_ADDR) &&
                (addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
        if (bcm_addr_is_sys_emi(addr))
                return addr + BCM_SYS_EMI_OFFSET;
        else
                return addr + BCM_VC_EMI_OFFSET;
}
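/*
 * Worked example for bcm_l2_phys_addr() above: 0x40001000 lies in
 * Section 2 (SYS EMI), so the L2 is handed 0x40001000 +
 * BCM_SYS_EMI_OFFSET = 0x80001000; an address at or above 0xC0000000
 * falls in Section 3 (VC EMI) and gets BCM_VC_EMI_OFFSET added instead.
 */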
static void bcm_inv_range(unsigned long start, unsigned long end)
{
        unsigned long new_start, new_end;

        BUG_ON(start < BCM_SYS_EMI_START_ADDR);

        if (unlikely(end <= start))
                return;

        new_start = bcm_l2_phys_addr(start);
        new_end = bcm_l2_phys_addr(end);

        /* normal case, no section crossing between start and end */
        if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
                l2x0_inv_range(new_start, new_end);
                return;
        }

        /*
         * They cross sections, so it can only be a cross from Section 2
         * to Section 3.
         */
        l2x0_inv_range(new_start,
                       bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
        l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
                       new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
        unsigned long new_start, new_end;

        BUG_ON(start < BCM_SYS_EMI_START_ADDR);

        if (unlikely(end <= start))
                return;

        if ((end - start) >= l2x0_size) {
                l2x0_clean_all();
                return;
        }

        new_start = bcm_l2_phys_addr(start);
        new_end = bcm_l2_phys_addr(end);

        /* normal case, no section crossing between start and end */
        if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
                l2x0_clean_range(new_start, new_end);
                return;
        }

        /*
         * They cross sections, so it can only be a cross from Section 2
         * to Section 3.
         */
        l2x0_clean_range(new_start,
                         bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
        l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
                         new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
        unsigned long new_start, new_end;

        BUG_ON(start < BCM_SYS_EMI_START_ADDR);

        if (unlikely(end <= start))
                return;

        if ((end - start) >= l2x0_size) {
                l2x0_flush_all();
                return;
        }

        new_start = bcm_l2_phys_addr(start);
        new_end = bcm_l2_phys_addr(end);

        /* normal case, no section crossing between start and end */
        if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
                l2x0_flush_range(new_start, new_end);
                return;
        }

        /*
         * They cross sections, so it can only be a cross from Section 2
         * to Section 3.
         */
        l2x0_flush_range(new_start,
                         bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
        l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
                         new_end);
}
static void __init l2x0_of_setup(const struct device_node *np,
                                 u32 *aux_val, u32 *aux_mask)
{
        u32 data[2] = { 0, 0 };
        u32 tag = 0;
        u32 dirty = 0;
        u32 val = 0, mask = 0;

        of_property_read_u32(np, "arm,tag-latency", &tag);
        if (tag) {
                mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
                val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
        }

        of_property_read_u32_array(np, "arm,data-latency",
                                   data, ARRAY_SIZE(data));
        if (data[0] && data[1]) {
                mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
                        L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
                val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
                       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
        }

        of_property_read_u32(np, "arm,dirty-latency", &dirty);
        if (dirty) {
                mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
                val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
        }

        *aux_val &= ~mask;
        *aux_val |= val;
        *aux_mask &= ~mask;
}

static void __init pl310_of_setup(const struct device_node *np,
                                  u32 *aux_val, u32 *aux_mask)
{
        u32 data[3] = { 0, 0, 0 };
        u32 tag[3] = { 0, 0, 0 };
        u32 filter[2] = { 0, 0 };

        of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
        if (tag[0] && tag[1] && tag[2])
                writel_relaxed(
                        ((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
                        ((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
                        ((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
                        l2x0_base + L2X0_TAG_LATENCY_CTRL);

        of_property_read_u32_array(np, "arm,data-latency",
                                   data, ARRAY_SIZE(data));
        if (data[0] && data[1] && data[2])
                writel_relaxed(
                        ((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
                        ((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
                        ((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
                        l2x0_base + L2X0_DATA_LATENCY_CTRL);

        of_property_read_u32_array(np, "arm,filter-ranges",
                                   filter, ARRAY_SIZE(filter));
        if (filter[1]) {
                writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
                               l2x0_base + L2X0_ADDR_FILTER_END);
                writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
                               l2x0_base + L2X0_ADDR_FILTER_START);
        }
}
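/*
 * Illustrative device-tree node consumed by pl310_of_setup() above.  The
 * unit address and latency numbers are made up for the example; each
 * latency is specified in cycles and programmed into the hardware as
 * value - 1:
 *
 *      L2: cache-controller@fff12000 {
 *              compatible = "arm,pl310-cache";
 *              reg = <0xfff12000 0x1000>;
 *              cache-unified;
 *              cache-level = <2>;
 *              arm,tag-latency = <1 1 1>;
 *              arm,data-latency = <2 2 1>;
 *              arm,filter-ranges = <0x80000000 0x20000000>;
 *      };
 */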
static void __init pl310_save(void)
{
        u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
                L2X0_CACHE_ID_RTL_MASK;

        l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
                L2X0_TAG_LATENCY_CTRL);
        l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
                L2X0_DATA_LATENCY_CTRL);
        l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
                L2X0_ADDR_FILTER_END);
        l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
                L2X0_ADDR_FILTER_START);

        if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
                /*
                 * From r2p0, there is a Prefetch offset/control register
                 */
                l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
                        L2X0_PREFETCH_CTRL);
                /*
                 * From r3p0, there is a Power control register
                 */
                if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
                        l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
                                L2X0_POWER_CTRL);
        }
}

static void aurora_save(void)
{
        l2x0_saved_regs.ctrl = readl_relaxed(l2x0_base + L2X0_CTRL);
        l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
}

static void __init tauros3_save(void)
{
        l2x0_saved_regs.aux2_ctrl =
                readl_relaxed(l2x0_base + TAUROS3_AUX2_CTRL);
        l2x0_saved_regs.prefetch_ctrl =
                readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL);
}

static void l2x0_resume(void)
{
        if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
                /* restore aux ctrl and enable l2 */
                l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));

                writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
                        L2X0_AUX_CTRL);

                l2x0_inv_all();

                writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
        }
}

static void pl310_resume(void)
{
        u32 l2x0_revision;

        if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
                /* restore pl310 setup */
                writel_relaxed(l2x0_saved_regs.tag_latency,
                        l2x0_base + L2X0_TAG_LATENCY_CTRL);
                writel_relaxed(l2x0_saved_regs.data_latency,
                        l2x0_base + L2X0_DATA_LATENCY_CTRL);
                writel_relaxed(l2x0_saved_regs.filter_end,
                        l2x0_base + L2X0_ADDR_FILTER_END);
                writel_relaxed(l2x0_saved_regs.filter_start,
                        l2x0_base + L2X0_ADDR_FILTER_START);

                l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
                        L2X0_CACHE_ID_RTL_MASK;

                if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
                        writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
                                l2x0_base + L2X0_PREFETCH_CTRL);
                        if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
                                writel_relaxed(l2x0_saved_regs.pwr_ctrl,
                                        l2x0_base + L2X0_POWER_CTRL);
                }
        }

        l2x0_resume();
}

static void aurora_resume(void)
{
        if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
                writel_relaxed(l2x0_saved_regs.aux_ctrl,
                               l2x0_base + L2X0_AUX_CTRL);
                writel_relaxed(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL);
        }
}

static void tauros3_resume(void)
{
        if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
                writel_relaxed(l2x0_saved_regs.aux2_ctrl,
                               l2x0_base + TAUROS3_AUX2_CTRL);
                writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
                               l2x0_base + L2X0_PREFETCH_CTRL);
        }

        l2x0_resume();
}

static void __init aurora_broadcast_l2_commands(void)
{
        __u32 u;
        /* Enable broadcasting of cache commands to L2 */
        __asm__ __volatile__("mrc p15, 1, %0, c15, c2, 0" : "=r"(u));
        u |= AURORA_CTRL_FW;            /* Set the FW bit */
        __asm__ __volatile__("mcr p15, 1, %0, c15, c2, 0\n" : : "r"(u));
        isb();
}

static void __init aurora_of_setup(const struct device_node *np,
                                   u32 *aux_val, u32 *aux_mask)
{
        u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
        u32 mask = AURORA_ACR_REPLACEMENT_MASK;

        of_property_read_u32(np, "cache-id-part",
                             &cache_id_part_number_from_dt);

        /* Determine and save the write policy */
        l2_wt_override = of_property_read_bool(np, "wt-override");

        if (l2_wt_override) {
                val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
                mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
        }

        *aux_val &= ~mask;
        *aux_val |= val;
        *aux_mask &= ~mask;
}

static const struct l2x0_of_data pl310_data = {
        .setup = pl310_of_setup,
        .save = pl310_save,
        .outer_cache = {
                .resume = pl310_resume,
                .inv_range = l2x0_inv_range,
                .clean_range = l2x0_clean_range,
                .flush_range = l2x0_flush_range,
                .sync = l2x0_cache_sync,
                .flush_all = l2x0_flush_all,
                .disable = l2x0_disable,
        },
};

static const struct l2x0_of_data l2x0_data = {
        .setup = l2x0_of_setup,
        .save = NULL,
        .outer_cache = {
                .resume = l2x0_resume,
                .inv_range = l2x0_inv_range,
                .clean_range = l2x0_clean_range,
                .flush_range = l2x0_flush_range,
                .sync = l2x0_cache_sync,
                .flush_all = l2x0_flush_all,
                .disable = l2x0_disable,
        },
};

static const struct l2x0_of_data aurora_with_outer_data = {
        .setup = aurora_of_setup,
        .save = aurora_save,
        .outer_cache = {
                .resume = aurora_resume,
                .inv_range = aurora_inv_range,
                .clean_range = aurora_clean_range,
                .flush_range = aurora_flush_range,
                .sync = l2x0_cache_sync,
                .flush_all = l2x0_flush_all,
                .disable = l2x0_disable,
        },
};

static const struct l2x0_of_data aurora_no_outer_data = {
        .setup = aurora_of_setup,
        .save = aurora_save,
        .outer_cache = {
                .resume = aurora_resume,
        },
};

static const struct l2x0_of_data tauros3_data = {
        .setup = NULL,
        .save = tauros3_save,
        /* Tauros3 broadcasts L1 cache operations to L2 */
        .outer_cache = {
                .resume = tauros3_resume,
        },
};

static const struct l2x0_of_data bcm_l2x0_data = {
        .setup = pl310_of_setup,
        .save = pl310_save,
        .outer_cache = {
                .resume = pl310_resume,
                .inv_range = bcm_inv_range,
                .clean_range = bcm_clean_range,
                .flush_range = bcm_flush_range,
                .sync = l2x0_cache_sync,
                .flush_all = l2x0_flush_all,
                .disable = l2x0_disable,
        },
};

#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
        L2C_ID("arm,l210-cache", l2x0_data),
        L2C_ID("arm,l220-cache", l2x0_data),
        L2C_ID("arm,pl310-cache", pl310_data),
        L2C_ID("brcm,bcm11351-a2-pl310-cache", bcm_l2x0_data),
        L2C_ID("marvell,aurora-outer-cache", aurora_with_outer_data),
        L2C_ID("marvell,aurora-system-cache", aurora_no_outer_data),
        L2C_ID("marvell,tauros3-cache", tauros3_data),
        /* Deprecated IDs */
        L2C_ID("bcm,bcm11351-a2-pl310-cache", bcm_l2x0_data),
        {}
};
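/*
 * Typical call from platform code (illustrative): a machine that wants
 * the hardware's default auxiliary control settings left untouched can
 * pass a zero value with an all-ones mask:
 *
 *      l2x0_of_init(0, ~0UL);
 */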
int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
        struct device_node *np;
        const struct l2x0_of_data *data;
        struct resource res;

        np = of_find_matching_node(NULL, l2x0_ids);
        if (!np)
                return -ENODEV;

        if (of_address_to_resource(np, 0, &res))
                return -ENODEV;

        l2x0_base = ioremap(res.start, resource_size(&res));
        if (!l2x0_base)
                return -ENOMEM;

        l2x0_saved_regs.phy_base = res.start;

        data = of_match_node(l2x0_ids, np)->data;

        /* L2 configuration can only be changed if the cache is disabled */
        if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
                if (data->setup)
                        data->setup(np, &aux_val, &aux_mask);

                /*
                 * For the Aurora cache in no-outer mode, select the
                 * correct mode using the coprocessor.
                 */
                if (data == &aurora_no_outer_data)
                        aurora_broadcast_l2_commands();
        }

        if (data->save)
                data->save();

        of_init = true;
        memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
        l2x0_init(l2x0_base, aux_val, aux_mask);

        return 0;
}
#endif