/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"

struct l2c_init_data {
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*save)(void);
	struct outer_cache_fns outer_cache;
};

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

/*
 * Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree.
 */
static u32 cache_id_part_number_from_dt;

struct l2x0_regs l2x0_saved_regs;

static bool of_init = false;

/*
 * Common code for all cache controllers.
 */
static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

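/*
 * All of the line operations below follow the same protocol: poll the
 * operation register until any previous operation has drained, then write
 * the physical address to start the next one, roughly:
 *
 *	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);      (wait for bit 0 to clear)
 *	writel_relaxed(pa, base + L2X0_CLEAN_LINE_PA); (kick off the clean)
 *
 * The relaxed accessors carry no barriers; ordering against memory is
 * provided explicitly by cache_sync() and the dsb() in l2x0_disable().
 */
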
/*
 * This should only be called when we have a requirement that the
 * register be written due to a work-around, as platforms running
 * in non-secure mode may not be able to access this register.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	outer_cache.set_debug(val);
}

static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	cache_wait_way(reg, l2x0_way_mask);
}

static inline void l2c_unlock(void __iomem *base, unsigned num)
{
	unsigned i;

	for (i = 0; i < num; i++) {
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	cache_wait_way
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug)
		l2c_set_debug(l2x0_base, val);
}

static void pl310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define pl310_set_debug	NULL
#endif

#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean and invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_WAY);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

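/*
 * Terminology reminder: a "clean" writes dirty lines back to memory but
 * keeps them valid, an "invalidate" discards lines without writing them
 * back, and a "flush" is a clean followed by an invalidate.  That is why
 * l2x0_inv_all() below may only run while the controller is disabled:
 * invalidating a live L2 would silently discard dirty data.
 */
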
static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a no-no */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
	__l2c_op_way(l2x0_base + L2X0_INV_WAY);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_unlock(u32 cache_id)
{
	int lockregs;

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		lockregs = 8;
		break;
	case AURORA_CACHE_ID:
		lockregs = 4;
		break;
	default:
		/* L210 and unknown types */
		lockregs = 1;
		break;
	}

	l2c_unlock(l2x0_base, lockregs);
}

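/*
 * l2x0_init() merges the caller's (aux_val, aux_mask) pair into the
 * auxiliary control register as (AUX_CTRL & aux_mask) | aux_val.  A
 * hypothetical platform call (constants are illustrative only, not taken
 * from any real board) could look like:
 *
 *	l2x0_init(l2cache_base, 2 << 17, ~(7 << 17));
 *
 * which forces the way-size field (bits 19:17) while preserving every
 * other bit of the hardware value.
 */
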
void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	u32 aux;
	u32 cache_id;
	u32 way_size = 0;
	int ways;
	int way_size_shift = L2X0_WAY_SIZE_SHIFT;
	const char *type;

	l2x0_base = base;
	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
#ifdef CONFIG_PL310_ERRATA_753970
		/* Unmapped register. */
		sync_reg_offset = L2X0_DUMMY_REG;
#endif
		if ((cache_id & L2X0_CACHE_ID_RTL_MASK) <= L2X0_CACHE_ID_RTL_R3P0)
			outer_cache.set_debug = pl310_set_debug;
		break;
	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;

	case AURORA_CACHE_ID:
		sync_reg_offset = AURORA_SYNC_REG;
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
		way_size_shift = AURORA_WAY_SIZE_SHIFT;
		type = "Aurora";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache size = way size * number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + way_size_shift);

	l2x0_size = ways * way_size * SZ_1K;

	/*
	 * Check if the l2x0 controller is already enabled.  If the system
	 * is booted in non-secure mode, accessing the registers below
	 * will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* Make sure that I&D is not locked down when starting */
		l2x0_unlock(cache_id);

		/* l2x0 controller is disabled */
		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

		l2x0_inv_all();

		/* enable L2X0 */
		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
	}

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	/* Save the value for resuming. */
	l2x0_saved_regs.aux_ctrl = aux;

	if (!of_init) {
		outer_cache.inv_range = l2x0_inv_range;
		outer_cache.clean_range = l2x0_clean_range;
		outer_cache.flush_range = l2x0_flush_range;
		outer_cache.sync = l2x0_cache_sync;
		outer_cache.flush_all = l2x0_flush_all;
		outer_cache.disable = l2x0_disable;
	}

	pr_info("%s cache controller enabled\n", type);
	pr_info("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d kB\n",
		ways, cache_id, aux, l2x0_size >> 10);
}

#ifdef CONFIG_OF
static int l2_wt_override;

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}

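/*
 * Worked example (assuming 4K pages and, purely for illustration, a 1KB
 * MAX_RANGE_SIZE): for start = 0x100e00 and end = 0x101400,
 * calc_range_end() first clips end to 0x101200 (start + 1KB) and then to
 * the page boundary at 0x101000, so the callers below walk a large range
 * in page-bounded chunks.
 */
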
/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			    unsigned long offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
	writel_relaxed(end, l2x0_base + offset);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	cache_sync();
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * Round start down and end up to cache line boundaries.
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
				AURORA_INVAL_RANGE_REG);
		start = range_end;
	}
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}

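/*
 * Note that aurora_pa_range() takes an inclusive end address, which is
 * why the loops above pass range_end - CACHE_LINE_SIZE: for a half-open
 * range [start, range_end) of whole cache lines, the last line starts at
 * range_end - CACHE_LINE_SIZE.
 */
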
/*
 * For certain Broadcom SoCs, depending on the address range, different
 * offsets need to be added to the address before passing it to L2 for
 * invalidation/clean/flush.
 *
 * Section	Address Range			Offset		EMI
 *   1		0x00000000 - 0x3FFFFFFF		0x80000000	VC
 *   2		0x40000000 - 0xBFFFFFFF		0x40000000	SYS
 *   3		0xC0000000 - 0xFFFFFFFF		0x80000000	VC
 *
 * When the start and end addresses fall in two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, to invalidate addresses starting at 0xBFFF0000 and ending
 * at 0xC0001000, we invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and
 * 2) 0xC0000000 - 0xC0001000.
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer
 * some performance hit, but keep in mind the cross-section case is very
 * rare.
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid
 * use case.
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2.  Because
 * of that, the code does not need to handle Section 1 at all.
 */
#define BCM_SYS_EMI_START_ADDR		0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR	0xC0000000UL

#define BCM_SYS_EMI_OFFSET		0x40000000UL
#define BCM_VC_EMI_OFFSET		0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}

static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_inv_range(new_start, new_end);
		return;
	}

	/*
	 * They cross sections, so it can only be a cross from section
	 * 2 to section 3.
	 */
	l2x0_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_clean_range(new_start, new_end);
		return;
	}

	/*
	 * They cross sections, so it can only be a cross from section
	 * 2 to section 3.
	 */
	l2x0_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_flush_range(new_start, new_end);
		return;
	}

	/*
	 * They cross sections, so it can only be a cross from section
	 * 2 to section 3.
	 */
	l2x0_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

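/*
 * Worked example of the section-crossing case described above:
 * invalidating [0xBFFF0000, 0xC0001000) is split into
 *
 *	l2x0_inv_range(0xBFFF0000 + BCM_SYS_EMI_OFFSET,
 *		       0xBFFFFFFF + BCM_SYS_EMI_OFFSET);
 *	l2x0_inv_range(0xC0000000 + BCM_VC_EMI_OFFSET,
 *		       0xC0001000 + BCM_VC_EMI_OFFSET);
 *
 * i.e. the SYS EMI part and the VC EMI part are each remapped with their
 * own offset before being handed to the generic range operation.
 */
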
static void __init l2x0_of_parse(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static void __init pl310_of_parse(const struct device_node *np,
				  u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}

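/*
 * For reference, a device-tree node consumed by the parsers above might
 * look like the following sketch (property values are illustrative only):
 *
 *	L2: cache-controller@fff12000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xfff12000 0x1000>;
 *		arm,tag-latency = <2 2 2>;
 *		arm,data-latency = <3 3 3>;
 *		arm,filter-ranges = <0x80000000 0x20000000>;
 *		cache-unified;
 *		cache-level = <2>;
 *	};
 */
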
static void __init pl310_save(void)
{
	u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
		L2X0_CACHE_ID_RTL_MASK;

	l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_START);

	if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
		/*
		 * From r2p0, there is a Prefetch offset/control register
		 */
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
			L2X0_PREFETCH_CTRL);
		/*
		 * From r3p0, there is a Power control register
		 */
		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
			l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
				L2X0_POWER_CTRL);
	}
}

static void aurora_save(void)
{
	l2x0_saved_regs.ctrl = readl_relaxed(l2x0_base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
}

static void __init tauros3_save(void)
{
	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(l2x0_base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL);
}

static void l2x0_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore aux ctrl and enable l2 */
		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));

		writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
			L2X0_AUX_CTRL);

		l2x0_inv_all();

		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
	}
}

static void pl310_resume(void)
{
	u32 l2x0_revision;

	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			l2x0_base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			l2x0_base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			l2x0_base + L2X0_ADDR_FILTER_START);

		l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				l2x0_base + L2X0_PREFETCH_CTRL);
			if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
				writel_relaxed(l2x0_saved_regs.pwr_ctrl,
					l2x0_base + L2X0_POWER_CTRL);
		}
	}

	l2x0_resume();
}

static void aurora_resume(void)
{
	if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl,
			       l2x0_base + L2X0_AUX_CTRL);
		writel_relaxed(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL);
	}
}

static void tauros3_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
			       l2x0_base + TAUROS3_AUX2_CTRL);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       l2x0_base + L2X0_PREFETCH_CTRL);
	}

	l2x0_resume();
}

static void __init aurora_broadcast_l2_commands(void)
{
	__u32 u;
	/* Enable broadcasting of cache commands to L2 */
	__asm__ __volatile__("mrc p15, 1, %0, c15, c2, 0" : "=r"(u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	__asm__ __volatile__("mcr p15, 1, %0, c15, c2, 0\n" : : "r"(u));
	isb();
}

static void __init aurora_of_parse(const struct device_node *np,
				   u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			&cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_pl310_data __initconst = {
	.of_parse = pl310_of_parse,
	.save = pl310_save,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = pl310_resume,
	},
};

static const struct l2c_init_data of_l2x0_data __initconst = {
	.of_parse = l2x0_of_parse,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = l2x0_resume,
	},
};

static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.of_parse = aurora_of_parse,
	.save = aurora_save,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = aurora_resume,
	},
};

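/*
 * In "system cache" mode the Aurora hardware broadcasts L1 maintenance
 * operations to the L2 itself (see aurora_broadcast_l2_commands()), so no
 * outer_cache range operations are installed below; only resume is needed.
 */
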
static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.of_parse = aurora_of_parse,
	.save = aurora_save,
	.outer_cache = {
		.resume = aurora_resume,
	},
};

static const struct l2c_init_data of_tauros3_data __initconst = {
	.save = tauros3_save,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = tauros3_resume,
	},
};

static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.of_parse = pl310_of_parse,
	.save = pl310_save,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = pl310_resume,
	},
};

#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2x0_data),
	L2C_ID("arm,l220-cache", of_l2x0_data),
	L2C_ID("arm,pl310-cache", of_pl310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};

int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

		/*
		 * For the Aurora cache in no-outer mode, select the
		 * correct mode via the coprocessor.
		 */
		if (data == &of_aurora_no_outer_data)
			aurora_broadcast_l2_commands();
	}

	if (data->save)
		data->save();

	of_init = true;
	memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
	l2x0_init(l2x0_base, aux_val, aux_mask);

	return 0;
}
#endif

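/*
 * Typical use from platform code, as a sketch: passing (0, ~0) applies
 * only the DT-derived auxiliary control fixups and otherwise leaves the
 * hardware value unchanged:
 *
 *	if (IS_ENABLED(CONFIG_CACHE_L2X0))
 *		l2x0_of_init(0, ~0);
 */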