1 /* 2 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support 3 * 4 * Copyright (C) 2007 ARM Limited 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program; if not, write to the Free Software 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 */ 19 #include <linux/err.h> 20 #include <linux/init.h> 21 #include <linux/spinlock.h> 22 #include <linux/io.h> 23 #include <linux/of.h> 24 #include <linux/of_address.h> 25 26 #include <asm/cacheflush.h> 27 #include <asm/cputype.h> 28 #include <asm/hardware/cache-l2x0.h> 29 #include "cache-tauros3.h" 30 #include "cache-aurora-l2.h" 31 32 struct l2c_init_data { 33 const char *type; 34 unsigned way_size_0; 35 unsigned num_lock; 36 void (*of_parse)(const struct device_node *, u32 *, u32 *); 37 void (*enable)(void __iomem *, u32, unsigned); 38 void (*fixup)(void __iomem *, u32, struct outer_cache_fns *); 39 void (*save)(void __iomem *); 40 struct outer_cache_fns outer_cache; 41 }; 42 43 #define CACHE_LINE_SIZE 32 44 45 static void __iomem *l2x0_base; 46 static DEFINE_RAW_SPINLOCK(l2x0_lock); 47 static u32 l2x0_way_mask; /* Bitmask of active ways */ 48 static u32 l2x0_size; 49 static unsigned long sync_reg_offset = L2X0_CACHE_SYNC; 50 51 struct l2x0_regs l2x0_saved_regs; 52 53 /* 54 * Common code for all cache controllers. 55 */ 56 static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask) 57 { 58 /* wait for cache operation by line or way to complete */ 59 while (readl_relaxed(reg) & mask) 60 cpu_relax(); 61 } 62 63 /* 64 * By default, we write directly to secure registers. Platforms must 65 * override this if they are running non-secure. 66 */ 67 static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg) 68 { 69 if (val == readl_relaxed(base + reg)) 70 return; 71 if (outer_cache.write_sec) 72 outer_cache.write_sec(val, reg); 73 else 74 writel_relaxed(val, base + reg); 75 } 76 77 /* 78 * This should only be called when we have a requirement that the 79 * register be written due to a work-around, as platforms running 80 * in non-secure mode may not be able to access this register. 81 */ 82 static inline void l2c_set_debug(void __iomem *base, unsigned long val) 83 { 84 l2c_write_sec(val, base, L2X0_DEBUG_CTRL); 85 } 86 87 static void __l2c_op_way(void __iomem *reg) 88 { 89 writel_relaxed(l2x0_way_mask, reg); 90 l2c_wait_mask(reg, l2x0_way_mask); 91 } 92 93 static inline void l2c_unlock(void __iomem *base, unsigned num) 94 { 95 unsigned i; 96 97 for (i = 0; i < num; i++) { 98 writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE + 99 i * L2X0_LOCKDOWN_STRIDE); 100 writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE + 101 i * L2X0_LOCKDOWN_STRIDE); 102 } 103 } 104 105 /* 106 * Enable the L2 cache controller. This function must only be 107 * called when the cache controller is known to be disabled. 108 */ 109 static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock) 110 { 111 unsigned long flags; 112 113 l2c_write_sec(aux, base, L2X0_AUX_CTRL); 114 115 l2c_unlock(base, num_lock); 116 117 local_irq_save(flags); 118 __l2c_op_way(base + L2X0_INV_WAY); 119 writel_relaxed(0, base + sync_reg_offset); 120 l2c_wait_mask(base + sync_reg_offset, 1); 121 local_irq_restore(flags); 122 123 l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL); 124 } 125 126 static void l2c_disable(void) 127 { 128 void __iomem *base = l2x0_base; 129 130 outer_cache.flush_all(); 131 l2c_write_sec(0, base, L2X0_CTRL); 132 dsb(st); 133 } 134 135 #ifdef CONFIG_CACHE_PL310 136 static inline void cache_wait(void __iomem *reg, unsigned long mask) 137 { 138 /* cache operations by line are atomic on PL310 */ 139 } 140 #else 141 #define cache_wait l2c_wait_mask 142 #endif 143 144 static inline void cache_sync(void) 145 { 146 void __iomem *base = l2x0_base; 147 148 writel_relaxed(0, base + sync_reg_offset); 149 cache_wait(base + L2X0_CACHE_SYNC, 1); 150 } 151 152 #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915) 153 static inline void debug_writel(unsigned long val) 154 { 155 l2c_set_debug(l2x0_base, val); 156 } 157 #else 158 /* Optimised out for non-errata case */ 159 static inline void debug_writel(unsigned long val) 160 { 161 } 162 #endif 163 164 static void l2x0_cache_sync(void) 165 { 166 unsigned long flags; 167 168 raw_spin_lock_irqsave(&l2x0_lock, flags); 169 cache_sync(); 170 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 171 } 172 173 static void __l2x0_flush_all(void) 174 { 175 debug_writel(0x03); 176 __l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY); 177 cache_sync(); 178 debug_writel(0x00); 179 } 180 181 static void l2x0_flush_all(void) 182 { 183 unsigned long flags; 184 185 /* clean all ways */ 186 raw_spin_lock_irqsave(&l2x0_lock, flags); 187 __l2x0_flush_all(); 188 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 189 } 190 191 static void l2x0_disable(void) 192 { 193 unsigned long flags; 194 195 raw_spin_lock_irqsave(&l2x0_lock, flags); 196 __l2x0_flush_all(); 197 l2c_write_sec(0, l2x0_base, L2X0_CTRL); 198 dsb(st); 199 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 200 } 201 202 static void l2c_save(void __iomem *base) 203 { 204 l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 205 } 206 207 /* 208 * L2C-210 specific code. 209 * 210 * The L2C-2x0 PA, set/way and sync operations are atomic, but we must 211 * ensure that no background operation is running. The way operations 212 * are all background tasks. 213 * 214 * While a background operation is in progress, any new operation is 215 * ignored (unspecified whether this causes an error.) Thankfully, not 216 * used on SMP. 217 * 218 * Never has a different sync register other than L2X0_CACHE_SYNC, but 219 * we use sync_reg_offset here so we can share some of this with L2C-310. 220 */ 221 static void __l2c210_cache_sync(void __iomem *base) 222 { 223 writel_relaxed(0, base + sync_reg_offset); 224 } 225 226 static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start, 227 unsigned long end) 228 { 229 while (start < end) { 230 writel_relaxed(start, reg); 231 start += CACHE_LINE_SIZE; 232 } 233 } 234 235 static void l2c210_inv_range(unsigned long start, unsigned long end) 236 { 237 void __iomem *base = l2x0_base; 238 239 if (start & (CACHE_LINE_SIZE - 1)) { 240 start &= ~(CACHE_LINE_SIZE - 1); 241 writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA); 242 start += CACHE_LINE_SIZE; 243 } 244 245 if (end & (CACHE_LINE_SIZE - 1)) { 246 end &= ~(CACHE_LINE_SIZE - 1); 247 writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA); 248 } 249 250 __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end); 251 __l2c210_cache_sync(base); 252 } 253 254 static void l2c210_clean_range(unsigned long start, unsigned long end) 255 { 256 void __iomem *base = l2x0_base; 257 258 start &= ~(CACHE_LINE_SIZE - 1); 259 __l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end); 260 __l2c210_cache_sync(base); 261 } 262 263 static void l2c210_flush_range(unsigned long start, unsigned long end) 264 { 265 void __iomem *base = l2x0_base; 266 267 start &= ~(CACHE_LINE_SIZE - 1); 268 __l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end); 269 __l2c210_cache_sync(base); 270 } 271 272 static void l2c210_flush_all(void) 273 { 274 void __iomem *base = l2x0_base; 275 276 BUG_ON(!irqs_disabled()); 277 278 __l2c_op_way(base + L2X0_CLEAN_INV_WAY); 279 __l2c210_cache_sync(base); 280 } 281 282 static void l2c210_sync(void) 283 { 284 __l2c210_cache_sync(l2x0_base); 285 } 286 287 static void l2c210_resume(void) 288 { 289 void __iomem *base = l2x0_base; 290 291 if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) 292 l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1); 293 } 294 295 static const struct l2c_init_data l2c210_data __initconst = { 296 .type = "L2C-210", 297 .way_size_0 = SZ_8K, 298 .num_lock = 1, 299 .enable = l2c_enable, 300 .save = l2c_save, 301 .outer_cache = { 302 .inv_range = l2c210_inv_range, 303 .clean_range = l2c210_clean_range, 304 .flush_range = l2c210_flush_range, 305 .flush_all = l2c210_flush_all, 306 .disable = l2c_disable, 307 .sync = l2c210_sync, 308 .resume = l2c210_resume, 309 }, 310 }; 311 312 /* 313 * L2C-220 specific code. 314 * 315 * All operations are background operations: they have to be waited for. 316 * Conflicting requests generate a slave error (which will cause an 317 * imprecise abort.) Never uses sync_reg_offset, so we hard-code the 318 * sync register here. 319 * 320 * However, we can re-use the l2c210_resume call. 321 */ 322 static inline void __l2c220_cache_sync(void __iomem *base) 323 { 324 writel_relaxed(0, base + L2X0_CACHE_SYNC); 325 l2c_wait_mask(base + L2X0_CACHE_SYNC, 1); 326 } 327 328 static void l2c220_op_way(void __iomem *base, unsigned reg) 329 { 330 unsigned long flags; 331 332 raw_spin_lock_irqsave(&l2x0_lock, flags); 333 __l2c_op_way(base + reg); 334 __l2c220_cache_sync(base); 335 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 336 } 337 338 static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start, 339 unsigned long end, unsigned long flags) 340 { 341 raw_spinlock_t *lock = &l2x0_lock; 342 343 while (start < end) { 344 unsigned long blk_end = start + min(end - start, 4096UL); 345 346 while (start < blk_end) { 347 l2c_wait_mask(reg, 1); 348 writel_relaxed(start, reg); 349 start += CACHE_LINE_SIZE; 350 } 351 352 if (blk_end < end) { 353 raw_spin_unlock_irqrestore(lock, flags); 354 raw_spin_lock_irqsave(lock, flags); 355 } 356 } 357 358 return flags; 359 } 360 361 static void l2c220_inv_range(unsigned long start, unsigned long end) 362 { 363 void __iomem *base = l2x0_base; 364 unsigned long flags; 365 366 raw_spin_lock_irqsave(&l2x0_lock, flags); 367 if ((start | end) & (CACHE_LINE_SIZE - 1)) { 368 if (start & (CACHE_LINE_SIZE - 1)) { 369 start &= ~(CACHE_LINE_SIZE - 1); 370 writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA); 371 start += CACHE_LINE_SIZE; 372 } 373 374 if (end & (CACHE_LINE_SIZE - 1)) { 375 end &= ~(CACHE_LINE_SIZE - 1); 376 l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1); 377 writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA); 378 } 379 } 380 381 flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA, 382 start, end, flags); 383 l2c_wait_mask(base + L2X0_INV_LINE_PA, 1); 384 __l2c220_cache_sync(base); 385 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 386 } 387 388 static void l2c220_clean_range(unsigned long start, unsigned long end) 389 { 390 void __iomem *base = l2x0_base; 391 unsigned long flags; 392 393 start &= ~(CACHE_LINE_SIZE - 1); 394 if ((end - start) >= l2x0_size) { 395 l2c220_op_way(base, L2X0_CLEAN_WAY); 396 return; 397 } 398 399 raw_spin_lock_irqsave(&l2x0_lock, flags); 400 flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA, 401 start, end, flags); 402 l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1); 403 __l2c220_cache_sync(base); 404 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 405 } 406 407 static void l2c220_flush_range(unsigned long start, unsigned long end) 408 { 409 void __iomem *base = l2x0_base; 410 unsigned long flags; 411 412 start &= ~(CACHE_LINE_SIZE - 1); 413 if ((end - start) >= l2x0_size) { 414 l2c220_op_way(base, L2X0_CLEAN_INV_WAY); 415 return; 416 } 417 418 raw_spin_lock_irqsave(&l2x0_lock, flags); 419 flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, 420 start, end, flags); 421 l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1); 422 __l2c220_cache_sync(base); 423 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 424 } 425 426 static void l2c220_flush_all(void) 427 { 428 l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY); 429 } 430 431 static void l2c220_sync(void) 432 { 433 unsigned long flags; 434 435 raw_spin_lock_irqsave(&l2x0_lock, flags); 436 __l2c220_cache_sync(l2x0_base); 437 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 438 } 439 440 static void l2c220_enable(void __iomem *base, u32 aux, unsigned num_lock) 441 { 442 /* 443 * Always enable non-secure access to the lockdown registers - 444 * we write to them as part of the L2C enable sequence so they 445 * need to be accessible. 446 */ 447 aux |= L220_AUX_CTRL_NS_LOCKDOWN; 448 449 l2c_enable(base, aux, num_lock); 450 } 451 452 static const struct l2c_init_data l2c220_data = { 453 .type = "L2C-220", 454 .way_size_0 = SZ_8K, 455 .num_lock = 1, 456 .enable = l2c220_enable, 457 .save = l2c_save, 458 .outer_cache = { 459 .inv_range = l2c220_inv_range, 460 .clean_range = l2c220_clean_range, 461 .flush_range = l2c220_flush_range, 462 .flush_all = l2c220_flush_all, 463 .disable = l2c_disable, 464 .sync = l2c220_sync, 465 .resume = l2c210_resume, 466 }, 467 }; 468 469 /* 470 * L2C-310 specific code. 471 * 472 * Very similar to L2C-210, the PA, set/way and sync operations are atomic, 473 * and the way operations are all background tasks. However, issuing an 474 * operation while a background operation is in progress results in a 475 * SLVERR response. We can reuse: 476 * 477 * __l2c210_cache_sync (using sync_reg_offset) 478 * l2c210_sync 479 * l2c210_inv_range (if 588369 is not applicable) 480 * l2c210_clean_range 481 * l2c210_flush_range (if 588369 is not applicable) 482 * l2c210_flush_all (if 727915 is not applicable) 483 * 484 * Errata: 485 * 588369: PL310 R0P0->R1P0, fixed R2P0. 486 * Affects: all clean+invalidate operations 487 * clean and invalidate skips the invalidate step, so we need to issue 488 * separate operations. We also require the above debug workaround 489 * enclosing this code fragment on affected parts. On unaffected parts, 490 * we must not use this workaround without the debug register writes 491 * to avoid exposing a problem similar to 727915. 492 * 493 * 727915: PL310 R2P0->R3P0, fixed R3P1. 494 * Affects: clean+invalidate by way 495 * clean and invalidate by way runs in the background, and a store can 496 * hit the line between the clean operation and invalidate operation, 497 * resulting in the store being lost. 498 * 499 * 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2. 500 * Affects: 8x64-bit (double fill) line fetches 501 * double fill line fetches can fail to cause dirty data to be evicted 502 * from the cache before the new data overwrites the second line. 503 * 504 * 753970: PL310 R3P0, fixed R3P1. 505 * Affects: sync 506 * prevents merging writes after the sync operation, until another L2C 507 * operation is performed (or a number of other conditions.) 508 * 509 * 769419: PL310 R0P0->R3P1, fixed R3P2. 510 * Affects: store buffer 511 * store buffer is not automatically drained. 512 */ 513 static void l2c310_inv_range_erratum(unsigned long start, unsigned long end) 514 { 515 void __iomem *base = l2x0_base; 516 517 if ((start | end) & (CACHE_LINE_SIZE - 1)) { 518 unsigned long flags; 519 520 /* Erratum 588369 for both clean+invalidate operations */ 521 raw_spin_lock_irqsave(&l2x0_lock, flags); 522 l2c_set_debug(base, 0x03); 523 524 if (start & (CACHE_LINE_SIZE - 1)) { 525 start &= ~(CACHE_LINE_SIZE - 1); 526 writel_relaxed(start, base + L2X0_CLEAN_LINE_PA); 527 writel_relaxed(start, base + L2X0_INV_LINE_PA); 528 start += CACHE_LINE_SIZE; 529 } 530 531 if (end & (CACHE_LINE_SIZE - 1)) { 532 end &= ~(CACHE_LINE_SIZE - 1); 533 writel_relaxed(end, base + L2X0_CLEAN_LINE_PA); 534 writel_relaxed(end, base + L2X0_INV_LINE_PA); 535 } 536 537 l2c_set_debug(base, 0x00); 538 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 539 } 540 541 __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end); 542 __l2c210_cache_sync(base); 543 } 544 545 static void l2c310_flush_range_erratum(unsigned long start, unsigned long end) 546 { 547 raw_spinlock_t *lock = &l2x0_lock; 548 unsigned long flags; 549 void __iomem *base = l2x0_base; 550 551 raw_spin_lock_irqsave(lock, flags); 552 while (start < end) { 553 unsigned long blk_end = start + min(end - start, 4096UL); 554 555 l2c_set_debug(base, 0x03); 556 while (start < blk_end) { 557 writel_relaxed(start, base + L2X0_CLEAN_LINE_PA); 558 writel_relaxed(start, base + L2X0_INV_LINE_PA); 559 start += CACHE_LINE_SIZE; 560 } 561 l2c_set_debug(base, 0x00); 562 563 if (blk_end < end) { 564 raw_spin_unlock_irqrestore(lock, flags); 565 raw_spin_lock_irqsave(lock, flags); 566 } 567 } 568 raw_spin_unlock_irqrestore(lock, flags); 569 __l2c210_cache_sync(base); 570 } 571 572 static void l2c310_flush_all_erratum(void) 573 { 574 void __iomem *base = l2x0_base; 575 unsigned long flags; 576 577 raw_spin_lock_irqsave(&l2x0_lock, flags); 578 l2c_set_debug(base, 0x03); 579 __l2c_op_way(base + L2X0_CLEAN_INV_WAY); 580 l2c_set_debug(base, 0x00); 581 __l2c210_cache_sync(base); 582 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 583 } 584 585 static void __init l2c310_save(void __iomem *base) 586 { 587 unsigned revision; 588 589 l2c_save(base); 590 591 l2x0_saved_regs.tag_latency = readl_relaxed(base + 592 L310_TAG_LATENCY_CTRL); 593 l2x0_saved_regs.data_latency = readl_relaxed(base + 594 L310_DATA_LATENCY_CTRL); 595 l2x0_saved_regs.filter_end = readl_relaxed(base + 596 L310_ADDR_FILTER_END); 597 l2x0_saved_regs.filter_start = readl_relaxed(base + 598 L310_ADDR_FILTER_START); 599 600 revision = readl_relaxed(base + L2X0_CACHE_ID) & 601 L2X0_CACHE_ID_RTL_MASK; 602 603 /* From r2p0, there is Prefetch offset/control register */ 604 if (revision >= L310_CACHE_ID_RTL_R2P0) 605 l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base + 606 L310_PREFETCH_CTRL); 607 608 /* From r3p0, there is Power control register */ 609 if (revision >= L310_CACHE_ID_RTL_R3P0) 610 l2x0_saved_regs.pwr_ctrl = readl_relaxed(base + 611 L310_POWER_CTRL); 612 } 613 614 static void l2c310_resume(void) 615 { 616 void __iomem *base = l2x0_base; 617 618 if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) { 619 unsigned revision; 620 621 /* restore pl310 setup */ 622 writel_relaxed(l2x0_saved_regs.tag_latency, 623 base + L310_TAG_LATENCY_CTRL); 624 writel_relaxed(l2x0_saved_regs.data_latency, 625 base + L310_DATA_LATENCY_CTRL); 626 writel_relaxed(l2x0_saved_regs.filter_end, 627 base + L310_ADDR_FILTER_END); 628 writel_relaxed(l2x0_saved_regs.filter_start, 629 base + L310_ADDR_FILTER_START); 630 631 revision = readl_relaxed(base + L2X0_CACHE_ID) & 632 L2X0_CACHE_ID_RTL_MASK; 633 634 if (revision >= L310_CACHE_ID_RTL_R2P0) 635 l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base, 636 L310_PREFETCH_CTRL); 637 if (revision >= L310_CACHE_ID_RTL_R3P0) 638 l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base, 639 L310_POWER_CTRL); 640 641 l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8); 642 } 643 } 644 645 static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock) 646 { 647 unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_PART_MASK; 648 bool cortex_a9 = read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9; 649 650 if (rev >= L310_CACHE_ID_RTL_R2P0) { 651 if (cortex_a9) { 652 aux |= L310_AUX_CTRL_EARLY_BRESP; 653 pr_info("L2C-310 enabling early BRESP for Cortex-A9\n"); 654 } else if (aux & L310_AUX_CTRL_EARLY_BRESP) { 655 pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n"); 656 aux &= ~L310_AUX_CTRL_EARLY_BRESP; 657 } 658 } 659 660 /* r3p0 or later has power control register */ 661 if (rev >= L310_CACHE_ID_RTL_R3P0) { 662 u32 power_ctrl; 663 664 l2c_write_sec(L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN, 665 base, L310_POWER_CTRL); 666 power_ctrl = readl_relaxed(base + L310_POWER_CTRL); 667 pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n", 668 power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis", 669 power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis"); 670 } 671 672 /* 673 * Always enable non-secure access to the lockdown registers - 674 * we write to them as part of the L2C enable sequence so they 675 * need to be accessible. 676 */ 677 aux |= L310_AUX_CTRL_NS_LOCKDOWN; 678 679 l2c_enable(base, aux, num_lock); 680 } 681 682 static void __init l2c310_fixup(void __iomem *base, u32 cache_id, 683 struct outer_cache_fns *fns) 684 { 685 unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK; 686 const char *errata[8]; 687 unsigned n = 0; 688 689 if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) && 690 revision < L310_CACHE_ID_RTL_R2P0 && 691 /* For bcm compatibility */ 692 fns->inv_range == l2c210_inv_range) { 693 fns->inv_range = l2c310_inv_range_erratum; 694 fns->flush_range = l2c310_flush_range_erratum; 695 errata[n++] = "588369"; 696 } 697 698 if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) && 699 revision >= L310_CACHE_ID_RTL_R2P0 && 700 revision < L310_CACHE_ID_RTL_R3P1) { 701 fns->flush_all = l2c310_flush_all_erratum; 702 errata[n++] = "727915"; 703 } 704 705 if (revision >= L310_CACHE_ID_RTL_R3P0 && 706 revision < L310_CACHE_ID_RTL_R3P2) { 707 u32 val = readl_relaxed(base + L310_PREFETCH_CTRL); 708 /* I don't think bit23 is required here... but iMX6 does so */ 709 if (val & (BIT(30) | BIT(23))) { 710 val &= ~(BIT(30) | BIT(23)); 711 l2c_write_sec(val, base, L310_PREFETCH_CTRL); 712 errata[n++] = "752271"; 713 } 714 } 715 716 if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) && 717 revision == L310_CACHE_ID_RTL_R3P0) { 718 sync_reg_offset = L2X0_DUMMY_REG; 719 errata[n++] = "753970"; 720 } 721 722 if (IS_ENABLED(CONFIG_PL310_ERRATA_769419)) 723 errata[n++] = "769419"; 724 725 if (n) { 726 unsigned i; 727 728 pr_info("L2C-310 errat%s", n > 1 ? "a" : "um"); 729 for (i = 0; i < n; i++) 730 pr_cont(" %s", errata[i]); 731 pr_cont(" enabled\n"); 732 } 733 } 734 735 static const struct l2c_init_data l2c310_init_fns __initconst = { 736 .type = "L2C-310", 737 .way_size_0 = SZ_8K, 738 .num_lock = 8, 739 .enable = l2c310_enable, 740 .fixup = l2c310_fixup, 741 .save = l2c310_save, 742 .outer_cache = { 743 .inv_range = l2c210_inv_range, 744 .clean_range = l2c210_clean_range, 745 .flush_range = l2c210_flush_range, 746 .flush_all = l2c210_flush_all, 747 .disable = l2c_disable, 748 .sync = l2c210_sync, 749 .resume = l2c310_resume, 750 }, 751 }; 752 753 static void __init __l2c_init(const struct l2c_init_data *data, 754 u32 aux_val, u32 aux_mask, u32 cache_id) 755 { 756 struct outer_cache_fns fns; 757 unsigned way_size_bits, ways; 758 u32 aux; 759 760 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 761 762 aux &= aux_mask; 763 aux |= aux_val; 764 765 /* Determine the number of ways */ 766 switch (cache_id & L2X0_CACHE_ID_PART_MASK) { 767 case L2X0_CACHE_ID_PART_L310: 768 if ((aux_val | ~aux_mask) & (L2C_AUX_CTRL_WAY_SIZE_MASK | L310_AUX_CTRL_ASSOCIATIVITY_16)) 769 pr_warn("L2C: DT/platform tries to modify or specify cache size\n"); 770 if (aux & (1 << 16)) 771 ways = 16; 772 else 773 ways = 8; 774 break; 775 776 case L2X0_CACHE_ID_PART_L210: 777 case L2X0_CACHE_ID_PART_L220: 778 ways = (aux >> 13) & 0xf; 779 break; 780 781 case AURORA_CACHE_ID: 782 ways = (aux >> 13) & 0xf; 783 ways = 2 << ((ways + 1) >> 2); 784 break; 785 786 default: 787 /* Assume unknown chips have 8 ways */ 788 ways = 8; 789 break; 790 } 791 792 l2x0_way_mask = (1 << ways) - 1; 793 794 /* 795 * way_size_0 is the size that a way_size value of zero would be 796 * given the calculation: way_size = way_size_0 << way_size_bits. 797 * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k, 798 * then way_size_0 would be 8k. 799 * 800 * L2 cache size = number of ways * way size. 801 */ 802 way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >> 803 L2C_AUX_CTRL_WAY_SIZE_SHIFT; 804 l2x0_size = ways * (data->way_size_0 << way_size_bits); 805 806 fns = data->outer_cache; 807 fns.write_sec = outer_cache.write_sec; 808 if (data->fixup) 809 data->fixup(l2x0_base, cache_id, &fns); 810 811 /* 812 * Check if l2x0 controller is already enabled. If we are booting 813 * in non-secure mode accessing the below registers will fault. 814 */ 815 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) 816 data->enable(l2x0_base, aux, data->num_lock); 817 818 outer_cache = fns; 819 820 /* 821 * It is strange to save the register state before initialisation, 822 * but hey, this is what the DT implementations decided to do. 823 */ 824 if (data->save) 825 data->save(l2x0_base); 826 827 /* Re-read it in case some bits are reserved. */ 828 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 829 830 pr_info("%s cache controller enabled, %d ways, %d kB\n", 831 data->type, ways, l2x0_size >> 10); 832 pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n", 833 data->type, cache_id, aux); 834 } 835 836 void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) 837 { 838 const struct l2c_init_data *data; 839 u32 cache_id; 840 841 l2x0_base = base; 842 843 cache_id = readl_relaxed(base + L2X0_CACHE_ID); 844 845 switch (cache_id & L2X0_CACHE_ID_PART_MASK) { 846 default: 847 case L2X0_CACHE_ID_PART_L210: 848 data = &l2c210_data; 849 break; 850 851 case L2X0_CACHE_ID_PART_L220: 852 data = &l2c220_data; 853 break; 854 855 case L2X0_CACHE_ID_PART_L310: 856 data = &l2c310_init_fns; 857 break; 858 } 859 860 __l2c_init(data, aux_val, aux_mask, cache_id); 861 } 862 863 #ifdef CONFIG_OF 864 static int l2_wt_override; 865 866 /* Aurora don't have the cache ID register available, so we have to 867 * pass it though the device tree */ 868 static u32 cache_id_part_number_from_dt; 869 870 static void __init l2x0_of_parse(const struct device_node *np, 871 u32 *aux_val, u32 *aux_mask) 872 { 873 u32 data[2] = { 0, 0 }; 874 u32 tag = 0; 875 u32 dirty = 0; 876 u32 val = 0, mask = 0; 877 878 of_property_read_u32(np, "arm,tag-latency", &tag); 879 if (tag) { 880 mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK; 881 val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT; 882 } 883 884 of_property_read_u32_array(np, "arm,data-latency", 885 data, ARRAY_SIZE(data)); 886 if (data[0] && data[1]) { 887 mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK | 888 L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK; 889 val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) | 890 ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT); 891 } 892 893 of_property_read_u32(np, "arm,dirty-latency", &dirty); 894 if (dirty) { 895 mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK; 896 val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT; 897 } 898 899 *aux_val &= ~mask; 900 *aux_val |= val; 901 *aux_mask &= ~mask; 902 } 903 904 static const struct l2c_init_data of_l2c210_data __initconst = { 905 .type = "L2C-210", 906 .way_size_0 = SZ_8K, 907 .num_lock = 1, 908 .of_parse = l2x0_of_parse, 909 .enable = l2c_enable, 910 .save = l2c_save, 911 .outer_cache = { 912 .inv_range = l2c210_inv_range, 913 .clean_range = l2c210_clean_range, 914 .flush_range = l2c210_flush_range, 915 .flush_all = l2c210_flush_all, 916 .disable = l2c_disable, 917 .sync = l2c210_sync, 918 .resume = l2c210_resume, 919 }, 920 }; 921 922 static const struct l2c_init_data of_l2c220_data __initconst = { 923 .type = "L2C-220", 924 .way_size_0 = SZ_8K, 925 .num_lock = 1, 926 .of_parse = l2x0_of_parse, 927 .enable = l2c220_enable, 928 .save = l2c_save, 929 .outer_cache = { 930 .inv_range = l2c220_inv_range, 931 .clean_range = l2c220_clean_range, 932 .flush_range = l2c220_flush_range, 933 .flush_all = l2c220_flush_all, 934 .disable = l2c_disable, 935 .sync = l2c220_sync, 936 .resume = l2c210_resume, 937 }, 938 }; 939 940 static void __init l2c310_of_parse(const struct device_node *np, 941 u32 *aux_val, u32 *aux_mask) 942 { 943 u32 data[3] = { 0, 0, 0 }; 944 u32 tag[3] = { 0, 0, 0 }; 945 u32 filter[2] = { 0, 0 }; 946 947 of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag)); 948 if (tag[0] && tag[1] && tag[2]) 949 writel_relaxed( 950 L310_LATENCY_CTRL_RD(tag[0] - 1) | 951 L310_LATENCY_CTRL_WR(tag[1] - 1) | 952 L310_LATENCY_CTRL_SETUP(tag[2] - 1), 953 l2x0_base + L310_TAG_LATENCY_CTRL); 954 955 of_property_read_u32_array(np, "arm,data-latency", 956 data, ARRAY_SIZE(data)); 957 if (data[0] && data[1] && data[2]) 958 writel_relaxed( 959 L310_LATENCY_CTRL_RD(data[0] - 1) | 960 L310_LATENCY_CTRL_WR(data[1] - 1) | 961 L310_LATENCY_CTRL_SETUP(data[2] - 1), 962 l2x0_base + L310_DATA_LATENCY_CTRL); 963 964 of_property_read_u32_array(np, "arm,filter-ranges", 965 filter, ARRAY_SIZE(filter)); 966 if (filter[1]) { 967 writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M), 968 l2x0_base + L310_ADDR_FILTER_END); 969 writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L310_ADDR_FILTER_EN, 970 l2x0_base + L310_ADDR_FILTER_START); 971 } 972 } 973 974 static const struct l2c_init_data of_l2c310_data __initconst = { 975 .type = "L2C-310", 976 .way_size_0 = SZ_8K, 977 .num_lock = 8, 978 .of_parse = l2c310_of_parse, 979 .enable = l2c310_enable, 980 .fixup = l2c310_fixup, 981 .save = l2c310_save, 982 .outer_cache = { 983 .inv_range = l2c210_inv_range, 984 .clean_range = l2c210_clean_range, 985 .flush_range = l2c210_flush_range, 986 .flush_all = l2c210_flush_all, 987 .disable = l2c_disable, 988 .sync = l2c210_sync, 989 .resume = l2c310_resume, 990 }, 991 }; 992 993 /* 994 * Note that the end addresses passed to Linux primitives are 995 * noninclusive, while the hardware cache range operations use 996 * inclusive start and end addresses. 997 */ 998 static unsigned long calc_range_end(unsigned long start, unsigned long end) 999 { 1000 /* 1001 * Limit the number of cache lines processed at once, 1002 * since cache range operations stall the CPU pipeline 1003 * until completion. 1004 */ 1005 if (end > start + MAX_RANGE_SIZE) 1006 end = start + MAX_RANGE_SIZE; 1007 1008 /* 1009 * Cache range operations can't straddle a page boundary. 1010 */ 1011 if (end > PAGE_ALIGN(start+1)) 1012 end = PAGE_ALIGN(start+1); 1013 1014 return end; 1015 } 1016 1017 /* 1018 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT 1019 * and range operations only do a TLB lookup on the start address. 1020 */ 1021 static void aurora_pa_range(unsigned long start, unsigned long end, 1022 unsigned long offset) 1023 { 1024 unsigned long flags; 1025 1026 raw_spin_lock_irqsave(&l2x0_lock, flags); 1027 writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG); 1028 writel_relaxed(end, l2x0_base + offset); 1029 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 1030 1031 cache_sync(); 1032 } 1033 1034 static void aurora_inv_range(unsigned long start, unsigned long end) 1035 { 1036 /* 1037 * round start and end adresses up to cache line size 1038 */ 1039 start &= ~(CACHE_LINE_SIZE - 1); 1040 end = ALIGN(end, CACHE_LINE_SIZE); 1041 1042 /* 1043 * Invalidate all full cache lines between 'start' and 'end'. 1044 */ 1045 while (start < end) { 1046 unsigned long range_end = calc_range_end(start, end); 1047 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 1048 AURORA_INVAL_RANGE_REG); 1049 start = range_end; 1050 } 1051 } 1052 1053 static void aurora_clean_range(unsigned long start, unsigned long end) 1054 { 1055 /* 1056 * If L2 is forced to WT, the L2 will always be clean and we 1057 * don't need to do anything here. 1058 */ 1059 if (!l2_wt_override) { 1060 start &= ~(CACHE_LINE_SIZE - 1); 1061 end = ALIGN(end, CACHE_LINE_SIZE); 1062 while (start != end) { 1063 unsigned long range_end = calc_range_end(start, end); 1064 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 1065 AURORA_CLEAN_RANGE_REG); 1066 start = range_end; 1067 } 1068 } 1069 } 1070 1071 static void aurora_flush_range(unsigned long start, unsigned long end) 1072 { 1073 start &= ~(CACHE_LINE_SIZE - 1); 1074 end = ALIGN(end, CACHE_LINE_SIZE); 1075 while (start != end) { 1076 unsigned long range_end = calc_range_end(start, end); 1077 /* 1078 * If L2 is forced to WT, the L2 will always be clean and we 1079 * just need to invalidate. 1080 */ 1081 if (l2_wt_override) 1082 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 1083 AURORA_INVAL_RANGE_REG); 1084 else 1085 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 1086 AURORA_FLUSH_RANGE_REG); 1087 start = range_end; 1088 } 1089 } 1090 1091 static void aurora_save(void __iomem *base) 1092 { 1093 l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL); 1094 l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL); 1095 } 1096 1097 static void aurora_resume(void) 1098 { 1099 void __iomem *base = l2x0_base; 1100 1101 if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) { 1102 writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL); 1103 writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL); 1104 } 1105 } 1106 1107 /* 1108 * For Aurora cache in no outer mode, enable via the CP15 coprocessor 1109 * broadcasting of cache commands to L2. 1110 */ 1111 static void __init aurora_enable_no_outer(void __iomem *base, u32 aux, 1112 unsigned num_lock) 1113 { 1114 u32 u; 1115 1116 asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u)); 1117 u |= AURORA_CTRL_FW; /* Set the FW bit */ 1118 asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u)); 1119 1120 isb(); 1121 1122 l2c_enable(base, aux, num_lock); 1123 } 1124 1125 static void __init aurora_fixup(void __iomem *base, u32 cache_id, 1126 struct outer_cache_fns *fns) 1127 { 1128 sync_reg_offset = AURORA_SYNC_REG; 1129 } 1130 1131 static void __init aurora_of_parse(const struct device_node *np, 1132 u32 *aux_val, u32 *aux_mask) 1133 { 1134 u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU; 1135 u32 mask = AURORA_ACR_REPLACEMENT_MASK; 1136 1137 of_property_read_u32(np, "cache-id-part", 1138 &cache_id_part_number_from_dt); 1139 1140 /* Determine and save the write policy */ 1141 l2_wt_override = of_property_read_bool(np, "wt-override"); 1142 1143 if (l2_wt_override) { 1144 val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY; 1145 mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK; 1146 } 1147 1148 *aux_val &= ~mask; 1149 *aux_val |= val; 1150 *aux_mask &= ~mask; 1151 } 1152 1153 static const struct l2c_init_data of_aurora_with_outer_data __initconst = { 1154 .type = "Aurora", 1155 .way_size_0 = SZ_4K, 1156 .num_lock = 4, 1157 .of_parse = aurora_of_parse, 1158 .enable = l2c_enable, 1159 .fixup = aurora_fixup, 1160 .save = aurora_save, 1161 .outer_cache = { 1162 .inv_range = aurora_inv_range, 1163 .clean_range = aurora_clean_range, 1164 .flush_range = aurora_flush_range, 1165 .flush_all = l2x0_flush_all, 1166 .disable = l2x0_disable, 1167 .sync = l2x0_cache_sync, 1168 .resume = aurora_resume, 1169 }, 1170 }; 1171 1172 static const struct l2c_init_data of_aurora_no_outer_data __initconst = { 1173 .type = "Aurora", 1174 .way_size_0 = SZ_4K, 1175 .num_lock = 4, 1176 .of_parse = aurora_of_parse, 1177 .enable = aurora_enable_no_outer, 1178 .fixup = aurora_fixup, 1179 .save = aurora_save, 1180 .outer_cache = { 1181 .resume = aurora_resume, 1182 }, 1183 }; 1184 1185 /* 1186 * For certain Broadcom SoCs, depending on the address range, different offsets 1187 * need to be added to the address before passing it to L2 for 1188 * invalidation/clean/flush 1189 * 1190 * Section Address Range Offset EMI 1191 * 1 0x00000000 - 0x3FFFFFFF 0x80000000 VC 1192 * 2 0x40000000 - 0xBFFFFFFF 0x40000000 SYS 1193 * 3 0xC0000000 - 0xFFFFFFFF 0x80000000 VC 1194 * 1195 * When the start and end addresses have crossed two different sections, we 1196 * need to break the L2 operation into two, each within its own section. 1197 * For example, if we need to invalidate addresses starts at 0xBFFF0000 and 1198 * ends at 0xC0001000, we need do invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and 2) 1199 * 0xC0000000 - 0xC0001000 1200 * 1201 * Note 1: 1202 * By breaking a single L2 operation into two, we may potentially suffer some 1203 * performance hit, but keep in mind the cross section case is very rare 1204 * 1205 * Note 2: 1206 * We do not need to handle the case when the start address is in 1207 * Section 1 and the end address is in Section 3, since it is not a valid use 1208 * case 1209 * 1210 * Note 3: 1211 * Section 1 in practical terms can no longer be used on rev A2. Because of 1212 * that the code does not need to handle section 1 at all. 1213 * 1214 */ 1215 #define BCM_SYS_EMI_START_ADDR 0x40000000UL 1216 #define BCM_VC_EMI_SEC3_START_ADDR 0xC0000000UL 1217 1218 #define BCM_SYS_EMI_OFFSET 0x40000000UL 1219 #define BCM_VC_EMI_OFFSET 0x80000000UL 1220 1221 static inline int bcm_addr_is_sys_emi(unsigned long addr) 1222 { 1223 return (addr >= BCM_SYS_EMI_START_ADDR) && 1224 (addr < BCM_VC_EMI_SEC3_START_ADDR); 1225 } 1226 1227 static inline unsigned long bcm_l2_phys_addr(unsigned long addr) 1228 { 1229 if (bcm_addr_is_sys_emi(addr)) 1230 return addr + BCM_SYS_EMI_OFFSET; 1231 else 1232 return addr + BCM_VC_EMI_OFFSET; 1233 } 1234 1235 static void bcm_inv_range(unsigned long start, unsigned long end) 1236 { 1237 unsigned long new_start, new_end; 1238 1239 BUG_ON(start < BCM_SYS_EMI_START_ADDR); 1240 1241 if (unlikely(end <= start)) 1242 return; 1243 1244 new_start = bcm_l2_phys_addr(start); 1245 new_end = bcm_l2_phys_addr(end); 1246 1247 /* normal case, no cross section between start and end */ 1248 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { 1249 l2c210_inv_range(new_start, new_end); 1250 return; 1251 } 1252 1253 /* They cross sections, so it can only be a cross from section 1254 * 2 to section 3 1255 */ 1256 l2c210_inv_range(new_start, 1257 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); 1258 l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), 1259 new_end); 1260 } 1261 1262 static void bcm_clean_range(unsigned long start, unsigned long end) 1263 { 1264 unsigned long new_start, new_end; 1265 1266 BUG_ON(start < BCM_SYS_EMI_START_ADDR); 1267 1268 if (unlikely(end <= start)) 1269 return; 1270 1271 new_start = bcm_l2_phys_addr(start); 1272 new_end = bcm_l2_phys_addr(end); 1273 1274 /* normal case, no cross section between start and end */ 1275 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { 1276 l2c210_clean_range(new_start, new_end); 1277 return; 1278 } 1279 1280 /* They cross sections, so it can only be a cross from section 1281 * 2 to section 3 1282 */ 1283 l2c210_clean_range(new_start, 1284 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); 1285 l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), 1286 new_end); 1287 } 1288 1289 static void bcm_flush_range(unsigned long start, unsigned long end) 1290 { 1291 unsigned long new_start, new_end; 1292 1293 BUG_ON(start < BCM_SYS_EMI_START_ADDR); 1294 1295 if (unlikely(end <= start)) 1296 return; 1297 1298 if ((end - start) >= l2x0_size) { 1299 outer_cache.flush_all(); 1300 return; 1301 } 1302 1303 new_start = bcm_l2_phys_addr(start); 1304 new_end = bcm_l2_phys_addr(end); 1305 1306 /* normal case, no cross section between start and end */ 1307 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { 1308 l2c210_flush_range(new_start, new_end); 1309 return; 1310 } 1311 1312 /* They cross sections, so it can only be a cross from section 1313 * 2 to section 3 1314 */ 1315 l2c210_flush_range(new_start, 1316 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); 1317 l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), 1318 new_end); 1319 } 1320 1321 /* Broadcom L2C-310 start from ARMs R3P2 or later, and require no fixups */ 1322 static const struct l2c_init_data of_bcm_l2x0_data __initconst = { 1323 .type = "BCM-L2C-310", 1324 .way_size_0 = SZ_8K, 1325 .num_lock = 8, 1326 .of_parse = l2c310_of_parse, 1327 .enable = l2c310_enable, 1328 .save = l2c310_save, 1329 .outer_cache = { 1330 .inv_range = bcm_inv_range, 1331 .clean_range = bcm_clean_range, 1332 .flush_range = bcm_flush_range, 1333 .flush_all = l2c210_flush_all, 1334 .disable = l2c_disable, 1335 .sync = l2c210_sync, 1336 .resume = l2c310_resume, 1337 }, 1338 }; 1339 1340 static void __init tauros3_save(void __iomem *base) 1341 { 1342 l2c_save(base); 1343 1344 l2x0_saved_regs.aux2_ctrl = 1345 readl_relaxed(base + TAUROS3_AUX2_CTRL); 1346 l2x0_saved_regs.prefetch_ctrl = 1347 readl_relaxed(base + L310_PREFETCH_CTRL); 1348 } 1349 1350 static void tauros3_resume(void) 1351 { 1352 void __iomem *base = l2x0_base; 1353 1354 if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) { 1355 writel_relaxed(l2x0_saved_regs.aux2_ctrl, 1356 base + TAUROS3_AUX2_CTRL); 1357 writel_relaxed(l2x0_saved_regs.prefetch_ctrl, 1358 base + L310_PREFETCH_CTRL); 1359 1360 l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8); 1361 } 1362 } 1363 1364 static const struct l2c_init_data of_tauros3_data __initconst = { 1365 .type = "Tauros3", 1366 .way_size_0 = SZ_8K, 1367 .num_lock = 8, 1368 .enable = l2c_enable, 1369 .save = tauros3_save, 1370 /* Tauros3 broadcasts L1 cache operations to L2 */ 1371 .outer_cache = { 1372 .resume = tauros3_resume, 1373 }, 1374 }; 1375 1376 #define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns } 1377 static const struct of_device_id l2x0_ids[] __initconst = { 1378 L2C_ID("arm,l210-cache", of_l2c210_data), 1379 L2C_ID("arm,l220-cache", of_l2c220_data), 1380 L2C_ID("arm,pl310-cache", of_l2c310_data), 1381 L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data), 1382 L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data), 1383 L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data), 1384 L2C_ID("marvell,tauros3-cache", of_tauros3_data), 1385 /* Deprecated IDs */ 1386 L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data), 1387 {} 1388 }; 1389 1390 int __init l2x0_of_init(u32 aux_val, u32 aux_mask) 1391 { 1392 const struct l2c_init_data *data; 1393 struct device_node *np; 1394 struct resource res; 1395 u32 cache_id; 1396 1397 np = of_find_matching_node(NULL, l2x0_ids); 1398 if (!np) 1399 return -ENODEV; 1400 1401 if (of_address_to_resource(np, 0, &res)) 1402 return -ENODEV; 1403 1404 l2x0_base = ioremap(res.start, resource_size(&res)); 1405 if (!l2x0_base) 1406 return -ENOMEM; 1407 1408 l2x0_saved_regs.phy_base = res.start; 1409 1410 data = of_match_node(l2x0_ids, np)->data; 1411 1412 /* All L2 caches are unified, so this property should be specified */ 1413 if (!of_property_read_bool(np, "cache-unified")) 1414 pr_err("L2C: device tree omits to specify unified cache\n"); 1415 1416 /* L2 configuration can only be changed if the cache is disabled */ 1417 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) 1418 if (data->of_parse) 1419 data->of_parse(np, &aux_val, &aux_mask); 1420 1421 if (cache_id_part_number_from_dt) 1422 cache_id = cache_id_part_number_from_dt; 1423 else 1424 cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); 1425 1426 __l2c_init(data, aux_val, aux_mask, cache_id); 1427 1428 return 0; 1429 } 1430 #endif 1431