1 /* 2 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support 3 * 4 * Copyright (C) 2007 ARM Limited 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program; if not, write to the Free Software 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 */ 19 #include <linux/err.h> 20 #include <linux/init.h> 21 #include <linux/spinlock.h> 22 #include <linux/io.h> 23 #include <linux/of.h> 24 #include <linux/of_address.h> 25 26 #include <asm/cacheflush.h> 27 #include <asm/cputype.h> 28 #include <asm/hardware/cache-l2x0.h> 29 #include "cache-tauros3.h" 30 #include "cache-aurora-l2.h" 31 32 struct l2c_init_data { 33 const char *type; 34 unsigned way_size_0; 35 unsigned num_lock; 36 void (*of_parse)(const struct device_node *, u32 *, u32 *); 37 void (*enable)(void __iomem *, u32, unsigned); 38 void (*fixup)(void __iomem *, u32, struct outer_cache_fns *); 39 void (*save)(void __iomem *); 40 struct outer_cache_fns outer_cache; 41 }; 42 43 #define CACHE_LINE_SIZE 32 44 45 static void __iomem *l2x0_base; 46 static DEFINE_RAW_SPINLOCK(l2x0_lock); 47 static u32 l2x0_way_mask; /* Bitmask of active ways */ 48 static u32 l2x0_size; 49 static unsigned long sync_reg_offset = L2X0_CACHE_SYNC; 50 51 struct l2x0_regs l2x0_saved_regs; 52 53 /* 54 * Common code for all cache controllers. 55 */ 56 static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask) 57 { 58 /* wait for cache operation by line or way to complete */ 59 while (readl_relaxed(reg) & mask) 60 cpu_relax(); 61 } 62 63 /* 64 * By default, we write directly to secure registers. Platforms must 65 * override this if they are running non-secure. 66 */ 67 static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg) 68 { 69 if (val == readl_relaxed(base + reg)) 70 return; 71 if (outer_cache.write_sec) 72 outer_cache.write_sec(val, reg); 73 else 74 writel_relaxed(val, base + reg); 75 } 76 77 /* 78 * This should only be called when we have a requirement that the 79 * register be written due to a work-around, as platforms running 80 * in non-secure mode may not be able to access this register. 81 */ 82 static inline void l2c_set_debug(void __iomem *base, unsigned long val) 83 { 84 if (outer_cache.set_debug) 85 outer_cache.set_debug(val); 86 else 87 l2c_write_sec(val, base, L2X0_DEBUG_CTRL); 88 } 89 90 static void __l2c_op_way(void __iomem *reg) 91 { 92 writel_relaxed(l2x0_way_mask, reg); 93 l2c_wait_mask(reg, l2x0_way_mask); 94 } 95 96 static inline void l2c_unlock(void __iomem *base, unsigned num) 97 { 98 unsigned i; 99 100 for (i = 0; i < num; i++) { 101 writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE + 102 i * L2X0_LOCKDOWN_STRIDE); 103 writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE + 104 i * L2X0_LOCKDOWN_STRIDE); 105 } 106 } 107 108 /* 109 * Enable the L2 cache controller. This function must only be 110 * called when the cache controller is known to be disabled. 111 */ 112 static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock) 113 { 114 unsigned long flags; 115 116 l2c_write_sec(aux, base, L2X0_AUX_CTRL); 117 118 l2c_unlock(base, num_lock); 119 120 local_irq_save(flags); 121 __l2c_op_way(base + L2X0_INV_WAY); 122 writel_relaxed(0, base + sync_reg_offset); 123 l2c_wait_mask(base + sync_reg_offset, 1); 124 local_irq_restore(flags); 125 126 l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL); 127 } 128 129 static void l2c_disable(void) 130 { 131 void __iomem *base = l2x0_base; 132 133 outer_cache.flush_all(); 134 l2c_write_sec(0, base, L2X0_CTRL); 135 dsb(st); 136 } 137 138 #ifdef CONFIG_CACHE_PL310 139 static inline void cache_wait(void __iomem *reg, unsigned long mask) 140 { 141 /* cache operations by line are atomic on PL310 */ 142 } 143 #else 144 #define cache_wait l2c_wait_mask 145 #endif 146 147 static inline void cache_sync(void) 148 { 149 void __iomem *base = l2x0_base; 150 151 writel_relaxed(0, base + sync_reg_offset); 152 cache_wait(base + L2X0_CACHE_SYNC, 1); 153 } 154 155 #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915) 156 static inline void debug_writel(unsigned long val) 157 { 158 if (outer_cache.set_debug || outer_cache.write_sec) 159 l2c_set_debug(l2x0_base, val); 160 } 161 #else 162 /* Optimised out for non-errata case */ 163 static inline void debug_writel(unsigned long val) 164 { 165 } 166 #endif 167 168 static void l2x0_cache_sync(void) 169 { 170 unsigned long flags; 171 172 raw_spin_lock_irqsave(&l2x0_lock, flags); 173 cache_sync(); 174 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 175 } 176 177 static void __l2x0_flush_all(void) 178 { 179 debug_writel(0x03); 180 __l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY); 181 cache_sync(); 182 debug_writel(0x00); 183 } 184 185 static void l2x0_flush_all(void) 186 { 187 unsigned long flags; 188 189 /* clean all ways */ 190 raw_spin_lock_irqsave(&l2x0_lock, flags); 191 __l2x0_flush_all(); 192 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 193 } 194 195 static void l2x0_disable(void) 196 { 197 unsigned long flags; 198 199 raw_spin_lock_irqsave(&l2x0_lock, flags); 200 __l2x0_flush_all(); 201 l2c_write_sec(0, l2x0_base, L2X0_CTRL); 202 dsb(st); 203 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 204 } 205 206 static void l2c_save(void __iomem *base) 207 { 208 l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 209 } 210 211 /* 212 * L2C-210 specific code. 213 * 214 * The L2C-2x0 PA, set/way and sync operations are atomic, but we must 215 * ensure that no background operation is running. The way operations 216 * are all background tasks. 217 * 218 * While a background operation is in progress, any new operation is 219 * ignored (unspecified whether this causes an error.) Thankfully, not 220 * used on SMP. 221 * 222 * Never has a different sync register other than L2X0_CACHE_SYNC, but 223 * we use sync_reg_offset here so we can share some of this with L2C-310. 224 */ 225 static void __l2c210_cache_sync(void __iomem *base) 226 { 227 writel_relaxed(0, base + sync_reg_offset); 228 } 229 230 static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start, 231 unsigned long end) 232 { 233 while (start < end) { 234 writel_relaxed(start, reg); 235 start += CACHE_LINE_SIZE; 236 } 237 } 238 239 static void l2c210_inv_range(unsigned long start, unsigned long end) 240 { 241 void __iomem *base = l2x0_base; 242 243 if (start & (CACHE_LINE_SIZE - 1)) { 244 start &= ~(CACHE_LINE_SIZE - 1); 245 writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA); 246 start += CACHE_LINE_SIZE; 247 } 248 249 if (end & (CACHE_LINE_SIZE - 1)) { 250 end &= ~(CACHE_LINE_SIZE - 1); 251 writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA); 252 } 253 254 __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end); 255 __l2c210_cache_sync(base); 256 } 257 258 static void l2c210_clean_range(unsigned long start, unsigned long end) 259 { 260 void __iomem *base = l2x0_base; 261 262 start &= ~(CACHE_LINE_SIZE - 1); 263 __l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end); 264 __l2c210_cache_sync(base); 265 } 266 267 static void l2c210_flush_range(unsigned long start, unsigned long end) 268 { 269 void __iomem *base = l2x0_base; 270 271 start &= ~(CACHE_LINE_SIZE - 1); 272 __l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end); 273 __l2c210_cache_sync(base); 274 } 275 276 static void l2c210_flush_all(void) 277 { 278 void __iomem *base = l2x0_base; 279 280 BUG_ON(!irqs_disabled()); 281 282 __l2c_op_way(base + L2X0_CLEAN_INV_WAY); 283 __l2c210_cache_sync(base); 284 } 285 286 static void l2c210_sync(void) 287 { 288 __l2c210_cache_sync(l2x0_base); 289 } 290 291 static void l2c210_resume(void) 292 { 293 void __iomem *base = l2x0_base; 294 295 if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) 296 l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1); 297 } 298 299 static const struct l2c_init_data l2c210_data __initconst = { 300 .type = "L2C-210", 301 .way_size_0 = SZ_8K, 302 .num_lock = 1, 303 .enable = l2c_enable, 304 .save = l2c_save, 305 .outer_cache = { 306 .inv_range = l2c210_inv_range, 307 .clean_range = l2c210_clean_range, 308 .flush_range = l2c210_flush_range, 309 .flush_all = l2c210_flush_all, 310 .disable = l2c_disable, 311 .sync = l2c210_sync, 312 .resume = l2c210_resume, 313 }, 314 }; 315 316 /* 317 * L2C-220 specific code. 318 * 319 * All operations are background operations: they have to be waited for. 320 * Conflicting requests generate a slave error (which will cause an 321 * imprecise abort.) Never uses sync_reg_offset, so we hard-code the 322 * sync register here. 323 * 324 * However, we can re-use the l2c210_resume call. 325 */ 326 static inline void __l2c220_cache_sync(void __iomem *base) 327 { 328 writel_relaxed(0, base + L2X0_CACHE_SYNC); 329 l2c_wait_mask(base + L2X0_CACHE_SYNC, 1); 330 } 331 332 static void l2c220_op_way(void __iomem *base, unsigned reg) 333 { 334 unsigned long flags; 335 336 raw_spin_lock_irqsave(&l2x0_lock, flags); 337 __l2c_op_way(base + reg); 338 __l2c220_cache_sync(base); 339 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 340 } 341 342 static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start, 343 unsigned long end, unsigned long flags) 344 { 345 raw_spinlock_t *lock = &l2x0_lock; 346 347 while (start < end) { 348 unsigned long blk_end = start + min(end - start, 4096UL); 349 350 while (start < blk_end) { 351 l2c_wait_mask(reg, 1); 352 writel_relaxed(start, reg); 353 start += CACHE_LINE_SIZE; 354 } 355 356 if (blk_end < end) { 357 raw_spin_unlock_irqrestore(lock, flags); 358 raw_spin_lock_irqsave(lock, flags); 359 } 360 } 361 362 return flags; 363 } 364 365 static void l2c220_inv_range(unsigned long start, unsigned long end) 366 { 367 void __iomem *base = l2x0_base; 368 unsigned long flags; 369 370 raw_spin_lock_irqsave(&l2x0_lock, flags); 371 if ((start | end) & (CACHE_LINE_SIZE - 1)) { 372 if (start & (CACHE_LINE_SIZE - 1)) { 373 start &= ~(CACHE_LINE_SIZE - 1); 374 writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA); 375 start += CACHE_LINE_SIZE; 376 } 377 378 if (end & (CACHE_LINE_SIZE - 1)) { 379 end &= ~(CACHE_LINE_SIZE - 1); 380 l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1); 381 writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA); 382 } 383 } 384 385 flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA, 386 start, end, flags); 387 l2c_wait_mask(base + L2X0_INV_LINE_PA, 1); 388 __l2c220_cache_sync(base); 389 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 390 } 391 392 static void l2c220_clean_range(unsigned long start, unsigned long end) 393 { 394 void __iomem *base = l2x0_base; 395 unsigned long flags; 396 397 start &= ~(CACHE_LINE_SIZE - 1); 398 if ((end - start) >= l2x0_size) { 399 l2c220_op_way(base, L2X0_CLEAN_WAY); 400 return; 401 } 402 403 raw_spin_lock_irqsave(&l2x0_lock, flags); 404 flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA, 405 start, end, flags); 406 l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1); 407 __l2c220_cache_sync(base); 408 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 409 } 410 411 static void l2c220_flush_range(unsigned long start, unsigned long end) 412 { 413 void __iomem *base = l2x0_base; 414 unsigned long flags; 415 416 start &= ~(CACHE_LINE_SIZE - 1); 417 if ((end - start) >= l2x0_size) { 418 l2c220_op_way(base, L2X0_CLEAN_INV_WAY); 419 return; 420 } 421 422 raw_spin_lock_irqsave(&l2x0_lock, flags); 423 flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, 424 start, end, flags); 425 l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1); 426 __l2c220_cache_sync(base); 427 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 428 } 429 430 static void l2c220_flush_all(void) 431 { 432 l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY); 433 } 434 435 static void l2c220_sync(void) 436 { 437 unsigned long flags; 438 439 raw_spin_lock_irqsave(&l2x0_lock, flags); 440 __l2c220_cache_sync(l2x0_base); 441 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 442 } 443 444 static const struct l2c_init_data l2c220_data = { 445 .type = "L2C-220", 446 .way_size_0 = SZ_8K, 447 .num_lock = 1, 448 .enable = l2c_enable, 449 .save = l2c_save, 450 .outer_cache = { 451 .inv_range = l2c220_inv_range, 452 .clean_range = l2c220_clean_range, 453 .flush_range = l2c220_flush_range, 454 .flush_all = l2c220_flush_all, 455 .disable = l2c_disable, 456 .sync = l2c220_sync, 457 .resume = l2c210_resume, 458 }, 459 }; 460 461 /* 462 * L2C-310 specific code. 463 * 464 * Very similar to L2C-210, the PA, set/way and sync operations are atomic, 465 * and the way operations are all background tasks. However, issuing an 466 * operation while a background operation is in progress results in a 467 * SLVERR response. We can reuse: 468 * 469 * __l2c210_cache_sync (using sync_reg_offset) 470 * l2c210_sync 471 * l2c210_inv_range (if 588369 is not applicable) 472 * l2c210_clean_range 473 * l2c210_flush_range (if 588369 is not applicable) 474 * l2c210_flush_all (if 727915 is not applicable) 475 * 476 * Errata: 477 * 588369: PL310 R0P0->R1P0, fixed R2P0. 478 * Affects: all clean+invalidate operations 479 * clean and invalidate skips the invalidate step, so we need to issue 480 * separate operations. We also require the above debug workaround 481 * enclosing this code fragment on affected parts. On unaffected parts, 482 * we must not use this workaround without the debug register writes 483 * to avoid exposing a problem similar to 727915. 484 * 485 * 727915: PL310 R2P0->R3P0, fixed R3P1. 486 * Affects: clean+invalidate by way 487 * clean and invalidate by way runs in the background, and a store can 488 * hit the line between the clean operation and invalidate operation, 489 * resulting in the store being lost. 490 * 491 * 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2. 492 * Affects: 8x64-bit (double fill) line fetches 493 * double fill line fetches can fail to cause dirty data to be evicted 494 * from the cache before the new data overwrites the second line. 495 * 496 * 753970: PL310 R3P0, fixed R3P1. 497 * Affects: sync 498 * prevents merging writes after the sync operation, until another L2C 499 * operation is performed (or a number of other conditions.) 500 * 501 * 769419: PL310 R0P0->R3P1, fixed R3P2. 502 * Affects: store buffer 503 * store buffer is not automatically drained. 504 */ 505 static void l2c310_set_debug(unsigned long val) 506 { 507 writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL); 508 } 509 510 static void l2c310_inv_range_erratum(unsigned long start, unsigned long end) 511 { 512 void __iomem *base = l2x0_base; 513 514 if ((start | end) & (CACHE_LINE_SIZE - 1)) { 515 unsigned long flags; 516 517 /* Erratum 588369 for both clean+invalidate operations */ 518 raw_spin_lock_irqsave(&l2x0_lock, flags); 519 l2c_set_debug(base, 0x03); 520 521 if (start & (CACHE_LINE_SIZE - 1)) { 522 start &= ~(CACHE_LINE_SIZE - 1); 523 writel_relaxed(start, base + L2X0_CLEAN_LINE_PA); 524 writel_relaxed(start, base + L2X0_INV_LINE_PA); 525 start += CACHE_LINE_SIZE; 526 } 527 528 if (end & (CACHE_LINE_SIZE - 1)) { 529 end &= ~(CACHE_LINE_SIZE - 1); 530 writel_relaxed(end, base + L2X0_CLEAN_LINE_PA); 531 writel_relaxed(end, base + L2X0_INV_LINE_PA); 532 } 533 534 l2c_set_debug(base, 0x00); 535 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 536 } 537 538 __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end); 539 __l2c210_cache_sync(base); 540 } 541 542 static void l2c310_flush_range_erratum(unsigned long start, unsigned long end) 543 { 544 raw_spinlock_t *lock = &l2x0_lock; 545 unsigned long flags; 546 void __iomem *base = l2x0_base; 547 548 raw_spin_lock_irqsave(lock, flags); 549 while (start < end) { 550 unsigned long blk_end = start + min(end - start, 4096UL); 551 552 l2c_set_debug(base, 0x03); 553 while (start < blk_end) { 554 writel_relaxed(start, base + L2X0_CLEAN_LINE_PA); 555 writel_relaxed(start, base + L2X0_INV_LINE_PA); 556 start += CACHE_LINE_SIZE; 557 } 558 l2c_set_debug(base, 0x00); 559 560 if (blk_end < end) { 561 raw_spin_unlock_irqrestore(lock, flags); 562 raw_spin_lock_irqsave(lock, flags); 563 } 564 } 565 raw_spin_unlock_irqrestore(lock, flags); 566 __l2c210_cache_sync(base); 567 } 568 569 static void l2c310_flush_all_erratum(void) 570 { 571 void __iomem *base = l2x0_base; 572 unsigned long flags; 573 574 raw_spin_lock_irqsave(&l2x0_lock, flags); 575 l2c_set_debug(base, 0x03); 576 __l2c_op_way(base + L2X0_CLEAN_INV_WAY); 577 l2c_set_debug(base, 0x00); 578 __l2c210_cache_sync(base); 579 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 580 } 581 582 static void __init l2c310_save(void __iomem *base) 583 { 584 unsigned revision; 585 586 l2c_save(base); 587 588 l2x0_saved_regs.tag_latency = readl_relaxed(base + 589 L310_TAG_LATENCY_CTRL); 590 l2x0_saved_regs.data_latency = readl_relaxed(base + 591 L310_DATA_LATENCY_CTRL); 592 l2x0_saved_regs.filter_end = readl_relaxed(base + 593 L310_ADDR_FILTER_END); 594 l2x0_saved_regs.filter_start = readl_relaxed(base + 595 L310_ADDR_FILTER_START); 596 597 revision = readl_relaxed(base + L2X0_CACHE_ID) & 598 L2X0_CACHE_ID_RTL_MASK; 599 600 /* From r2p0, there is Prefetch offset/control register */ 601 if (revision >= L310_CACHE_ID_RTL_R2P0) 602 l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base + 603 L310_PREFETCH_CTRL); 604 605 /* From r3p0, there is Power control register */ 606 if (revision >= L310_CACHE_ID_RTL_R3P0) 607 l2x0_saved_regs.pwr_ctrl = readl_relaxed(base + 608 L310_POWER_CTRL); 609 } 610 611 static void l2c310_resume(void) 612 { 613 void __iomem *base = l2x0_base; 614 615 if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) { 616 unsigned revision; 617 618 /* restore pl310 setup */ 619 writel_relaxed(l2x0_saved_regs.tag_latency, 620 base + L310_TAG_LATENCY_CTRL); 621 writel_relaxed(l2x0_saved_regs.data_latency, 622 base + L310_DATA_LATENCY_CTRL); 623 writel_relaxed(l2x0_saved_regs.filter_end, 624 base + L310_ADDR_FILTER_END); 625 writel_relaxed(l2x0_saved_regs.filter_start, 626 base + L310_ADDR_FILTER_START); 627 628 revision = readl_relaxed(base + L2X0_CACHE_ID) & 629 L2X0_CACHE_ID_RTL_MASK; 630 631 if (revision >= L310_CACHE_ID_RTL_R2P0) 632 l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base, 633 L310_PREFETCH_CTRL); 634 if (revision >= L310_CACHE_ID_RTL_R3P0) 635 l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base, 636 L310_POWER_CTRL); 637 638 l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8); 639 } 640 } 641 642 static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock) 643 { 644 unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_PART_MASK; 645 bool cortex_a9 = read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9; 646 647 if (rev >= L310_CACHE_ID_RTL_R2P0) { 648 if (cortex_a9) { 649 aux |= L310_AUX_CTRL_EARLY_BRESP; 650 pr_info("L2C-310 enabling early BRESP for Cortex-A9\n"); 651 } else if (aux & L310_AUX_CTRL_EARLY_BRESP) { 652 pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n"); 653 aux &= ~L310_AUX_CTRL_EARLY_BRESP; 654 } 655 } 656 657 l2c_enable(base, aux, num_lock); 658 } 659 660 static void __init l2c310_fixup(void __iomem *base, u32 cache_id, 661 struct outer_cache_fns *fns) 662 { 663 unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK; 664 const char *errata[8]; 665 unsigned n = 0; 666 667 /* For compatibility */ 668 if (revision <= L310_CACHE_ID_RTL_R3P0) 669 fns->set_debug = l2c310_set_debug; 670 671 if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) && 672 revision < L310_CACHE_ID_RTL_R2P0 && 673 /* For bcm compatibility */ 674 fns->inv_range == l2c210_inv_range) { 675 fns->inv_range = l2c310_inv_range_erratum; 676 fns->flush_range = l2c310_flush_range_erratum; 677 errata[n++] = "588369"; 678 } 679 680 if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) && 681 revision >= L310_CACHE_ID_RTL_R2P0 && 682 revision < L310_CACHE_ID_RTL_R3P1) { 683 fns->flush_all = l2c310_flush_all_erratum; 684 errata[n++] = "727915"; 685 } 686 687 if (revision >= L310_CACHE_ID_RTL_R3P0 && 688 revision < L310_CACHE_ID_RTL_R3P2) { 689 u32 val = readl_relaxed(base + L310_PREFETCH_CTRL); 690 /* I don't think bit23 is required here... but iMX6 does so */ 691 if (val & (BIT(30) | BIT(23))) { 692 val &= ~(BIT(30) | BIT(23)); 693 l2c_write_sec(val, base, L310_PREFETCH_CTRL); 694 errata[n++] = "752271"; 695 } 696 } 697 698 if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) && 699 revision == L310_CACHE_ID_RTL_R3P0) { 700 sync_reg_offset = L2X0_DUMMY_REG; 701 errata[n++] = "753970"; 702 } 703 704 if (IS_ENABLED(CONFIG_PL310_ERRATA_769419)) 705 errata[n++] = "769419"; 706 707 if (n) { 708 unsigned i; 709 710 pr_info("L2C-310 errat%s", n > 1 ? "a" : "um"); 711 for (i = 0; i < n; i++) 712 pr_cont(" %s", errata[i]); 713 pr_cont(" enabled\n"); 714 } 715 } 716 717 static const struct l2c_init_data l2c310_init_fns __initconst = { 718 .type = "L2C-310", 719 .way_size_0 = SZ_8K, 720 .num_lock = 8, 721 .enable = l2c310_enable, 722 .fixup = l2c310_fixup, 723 .save = l2c310_save, 724 .outer_cache = { 725 .inv_range = l2c210_inv_range, 726 .clean_range = l2c210_clean_range, 727 .flush_range = l2c210_flush_range, 728 .flush_all = l2c210_flush_all, 729 .disable = l2c_disable, 730 .sync = l2c210_sync, 731 .set_debug = l2c310_set_debug, 732 .resume = l2c310_resume, 733 }, 734 }; 735 736 static void __init __l2c_init(const struct l2c_init_data *data, 737 u32 aux_val, u32 aux_mask, u32 cache_id) 738 { 739 struct outer_cache_fns fns; 740 unsigned way_size_bits, ways; 741 u32 aux; 742 743 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 744 745 aux &= aux_mask; 746 aux |= aux_val; 747 748 /* Determine the number of ways */ 749 switch (cache_id & L2X0_CACHE_ID_PART_MASK) { 750 case L2X0_CACHE_ID_PART_L310: 751 if (aux & (1 << 16)) 752 ways = 16; 753 else 754 ways = 8; 755 break; 756 757 case L2X0_CACHE_ID_PART_L210: 758 case L2X0_CACHE_ID_PART_L220: 759 ways = (aux >> 13) & 0xf; 760 break; 761 762 case AURORA_CACHE_ID: 763 ways = (aux >> 13) & 0xf; 764 ways = 2 << ((ways + 1) >> 2); 765 break; 766 767 default: 768 /* Assume unknown chips have 8 ways */ 769 ways = 8; 770 break; 771 } 772 773 l2x0_way_mask = (1 << ways) - 1; 774 775 /* 776 * way_size_0 is the size that a way_size value of zero would be 777 * given the calculation: way_size = way_size_0 << way_size_bits. 778 * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k, 779 * then way_size_0 would be 8k. 780 * 781 * L2 cache size = number of ways * way size. 782 */ 783 way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >> 784 L2C_AUX_CTRL_WAY_SIZE_SHIFT; 785 l2x0_size = ways * (data->way_size_0 << way_size_bits); 786 787 fns = data->outer_cache; 788 fns.write_sec = outer_cache.write_sec; 789 if (data->fixup) 790 data->fixup(l2x0_base, cache_id, &fns); 791 if (fns.write_sec) 792 fns.set_debug = NULL; 793 794 /* 795 * Check if l2x0 controller is already enabled. If we are booting 796 * in non-secure mode accessing the below registers will fault. 797 */ 798 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) 799 data->enable(l2x0_base, aux, data->num_lock); 800 801 outer_cache = fns; 802 803 /* 804 * It is strange to save the register state before initialisation, 805 * but hey, this is what the DT implementations decided to do. 806 */ 807 if (data->save) 808 data->save(l2x0_base); 809 810 /* Re-read it in case some bits are reserved. */ 811 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 812 813 pr_info("%s cache controller enabled, %d ways, %d kB\n", 814 data->type, ways, l2x0_size >> 10); 815 pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n", 816 data->type, cache_id, aux); 817 } 818 819 void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) 820 { 821 const struct l2c_init_data *data; 822 u32 cache_id; 823 824 l2x0_base = base; 825 826 cache_id = readl_relaxed(base + L2X0_CACHE_ID); 827 828 switch (cache_id & L2X0_CACHE_ID_PART_MASK) { 829 default: 830 case L2X0_CACHE_ID_PART_L210: 831 data = &l2c210_data; 832 break; 833 834 case L2X0_CACHE_ID_PART_L220: 835 data = &l2c220_data; 836 break; 837 838 case L2X0_CACHE_ID_PART_L310: 839 data = &l2c310_init_fns; 840 break; 841 } 842 843 __l2c_init(data, aux_val, aux_mask, cache_id); 844 } 845 846 #ifdef CONFIG_OF 847 static int l2_wt_override; 848 849 /* Aurora don't have the cache ID register available, so we have to 850 * pass it though the device tree */ 851 static u32 cache_id_part_number_from_dt; 852 853 static void __init l2x0_of_parse(const struct device_node *np, 854 u32 *aux_val, u32 *aux_mask) 855 { 856 u32 data[2] = { 0, 0 }; 857 u32 tag = 0; 858 u32 dirty = 0; 859 u32 val = 0, mask = 0; 860 861 of_property_read_u32(np, "arm,tag-latency", &tag); 862 if (tag) { 863 mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK; 864 val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT; 865 } 866 867 of_property_read_u32_array(np, "arm,data-latency", 868 data, ARRAY_SIZE(data)); 869 if (data[0] && data[1]) { 870 mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK | 871 L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK; 872 val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) | 873 ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT); 874 } 875 876 of_property_read_u32(np, "arm,dirty-latency", &dirty); 877 if (dirty) { 878 mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK; 879 val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT; 880 } 881 882 *aux_val &= ~mask; 883 *aux_val |= val; 884 *aux_mask &= ~mask; 885 } 886 887 static const struct l2c_init_data of_l2c210_data __initconst = { 888 .type = "L2C-210", 889 .way_size_0 = SZ_8K, 890 .num_lock = 1, 891 .of_parse = l2x0_of_parse, 892 .enable = l2c_enable, 893 .save = l2c_save, 894 .outer_cache = { 895 .inv_range = l2c210_inv_range, 896 .clean_range = l2c210_clean_range, 897 .flush_range = l2c210_flush_range, 898 .flush_all = l2c210_flush_all, 899 .disable = l2c_disable, 900 .sync = l2c210_sync, 901 .resume = l2c210_resume, 902 }, 903 }; 904 905 static const struct l2c_init_data of_l2c220_data __initconst = { 906 .type = "L2C-220", 907 .way_size_0 = SZ_8K, 908 .num_lock = 1, 909 .of_parse = l2x0_of_parse, 910 .enable = l2c_enable, 911 .save = l2c_save, 912 .outer_cache = { 913 .inv_range = l2c220_inv_range, 914 .clean_range = l2c220_clean_range, 915 .flush_range = l2c220_flush_range, 916 .flush_all = l2c220_flush_all, 917 .disable = l2c_disable, 918 .sync = l2c220_sync, 919 .resume = l2c210_resume, 920 }, 921 }; 922 923 static void __init l2c310_of_parse(const struct device_node *np, 924 u32 *aux_val, u32 *aux_mask) 925 { 926 u32 data[3] = { 0, 0, 0 }; 927 u32 tag[3] = { 0, 0, 0 }; 928 u32 filter[2] = { 0, 0 }; 929 930 of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag)); 931 if (tag[0] && tag[1] && tag[2]) 932 writel_relaxed( 933 L310_LATENCY_CTRL_RD(tag[0] - 1) | 934 L310_LATENCY_CTRL_WR(tag[1] - 1) | 935 L310_LATENCY_CTRL_SETUP(tag[2] - 1), 936 l2x0_base + L310_TAG_LATENCY_CTRL); 937 938 of_property_read_u32_array(np, "arm,data-latency", 939 data, ARRAY_SIZE(data)); 940 if (data[0] && data[1] && data[2]) 941 writel_relaxed( 942 L310_LATENCY_CTRL_RD(data[0] - 1) | 943 L310_LATENCY_CTRL_WR(data[1] - 1) | 944 L310_LATENCY_CTRL_SETUP(data[2] - 1), 945 l2x0_base + L310_DATA_LATENCY_CTRL); 946 947 of_property_read_u32_array(np, "arm,filter-ranges", 948 filter, ARRAY_SIZE(filter)); 949 if (filter[1]) { 950 writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M), 951 l2x0_base + L310_ADDR_FILTER_END); 952 writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L310_ADDR_FILTER_EN, 953 l2x0_base + L310_ADDR_FILTER_START); 954 } 955 } 956 957 static const struct l2c_init_data of_l2c310_data __initconst = { 958 .type = "L2C-310", 959 .way_size_0 = SZ_8K, 960 .num_lock = 8, 961 .of_parse = l2c310_of_parse, 962 .enable = l2c310_enable, 963 .fixup = l2c310_fixup, 964 .save = l2c310_save, 965 .outer_cache = { 966 .inv_range = l2c210_inv_range, 967 .clean_range = l2c210_clean_range, 968 .flush_range = l2c210_flush_range, 969 .flush_all = l2c210_flush_all, 970 .disable = l2c_disable, 971 .sync = l2c210_sync, 972 .set_debug = l2c310_set_debug, 973 .resume = l2c310_resume, 974 }, 975 }; 976 977 /* 978 * Note that the end addresses passed to Linux primitives are 979 * noninclusive, while the hardware cache range operations use 980 * inclusive start and end addresses. 981 */ 982 static unsigned long calc_range_end(unsigned long start, unsigned long end) 983 { 984 /* 985 * Limit the number of cache lines processed at once, 986 * since cache range operations stall the CPU pipeline 987 * until completion. 988 */ 989 if (end > start + MAX_RANGE_SIZE) 990 end = start + MAX_RANGE_SIZE; 991 992 /* 993 * Cache range operations can't straddle a page boundary. 994 */ 995 if (end > PAGE_ALIGN(start+1)) 996 end = PAGE_ALIGN(start+1); 997 998 return end; 999 } 1000 1001 /* 1002 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT 1003 * and range operations only do a TLB lookup on the start address. 1004 */ 1005 static void aurora_pa_range(unsigned long start, unsigned long end, 1006 unsigned long offset) 1007 { 1008 unsigned long flags; 1009 1010 raw_spin_lock_irqsave(&l2x0_lock, flags); 1011 writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG); 1012 writel_relaxed(end, l2x0_base + offset); 1013 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 1014 1015 cache_sync(); 1016 } 1017 1018 static void aurora_inv_range(unsigned long start, unsigned long end) 1019 { 1020 /* 1021 * round start and end adresses up to cache line size 1022 */ 1023 start &= ~(CACHE_LINE_SIZE - 1); 1024 end = ALIGN(end, CACHE_LINE_SIZE); 1025 1026 /* 1027 * Invalidate all full cache lines between 'start' and 'end'. 1028 */ 1029 while (start < end) { 1030 unsigned long range_end = calc_range_end(start, end); 1031 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 1032 AURORA_INVAL_RANGE_REG); 1033 start = range_end; 1034 } 1035 } 1036 1037 static void aurora_clean_range(unsigned long start, unsigned long end) 1038 { 1039 /* 1040 * If L2 is forced to WT, the L2 will always be clean and we 1041 * don't need to do anything here. 1042 */ 1043 if (!l2_wt_override) { 1044 start &= ~(CACHE_LINE_SIZE - 1); 1045 end = ALIGN(end, CACHE_LINE_SIZE); 1046 while (start != end) { 1047 unsigned long range_end = calc_range_end(start, end); 1048 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 1049 AURORA_CLEAN_RANGE_REG); 1050 start = range_end; 1051 } 1052 } 1053 } 1054 1055 static void aurora_flush_range(unsigned long start, unsigned long end) 1056 { 1057 start &= ~(CACHE_LINE_SIZE - 1); 1058 end = ALIGN(end, CACHE_LINE_SIZE); 1059 while (start != end) { 1060 unsigned long range_end = calc_range_end(start, end); 1061 /* 1062 * If L2 is forced to WT, the L2 will always be clean and we 1063 * just need to invalidate. 1064 */ 1065 if (l2_wt_override) 1066 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 1067 AURORA_INVAL_RANGE_REG); 1068 else 1069 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 1070 AURORA_FLUSH_RANGE_REG); 1071 start = range_end; 1072 } 1073 } 1074 1075 static void aurora_save(void __iomem *base) 1076 { 1077 l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL); 1078 l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL); 1079 } 1080 1081 static void aurora_resume(void) 1082 { 1083 void __iomem *base = l2x0_base; 1084 1085 if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) { 1086 writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL); 1087 writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL); 1088 } 1089 } 1090 1091 /* 1092 * For Aurora cache in no outer mode, enable via the CP15 coprocessor 1093 * broadcasting of cache commands to L2. 1094 */ 1095 static void __init aurora_enable_no_outer(void __iomem *base, u32 aux, 1096 unsigned num_lock) 1097 { 1098 u32 u; 1099 1100 asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u)); 1101 u |= AURORA_CTRL_FW; /* Set the FW bit */ 1102 asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u)); 1103 1104 isb(); 1105 1106 l2c_enable(base, aux, num_lock); 1107 } 1108 1109 static void __init aurora_fixup(void __iomem *base, u32 cache_id, 1110 struct outer_cache_fns *fns) 1111 { 1112 sync_reg_offset = AURORA_SYNC_REG; 1113 } 1114 1115 static void __init aurora_of_parse(const struct device_node *np, 1116 u32 *aux_val, u32 *aux_mask) 1117 { 1118 u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU; 1119 u32 mask = AURORA_ACR_REPLACEMENT_MASK; 1120 1121 of_property_read_u32(np, "cache-id-part", 1122 &cache_id_part_number_from_dt); 1123 1124 /* Determine and save the write policy */ 1125 l2_wt_override = of_property_read_bool(np, "wt-override"); 1126 1127 if (l2_wt_override) { 1128 val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY; 1129 mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK; 1130 } 1131 1132 *aux_val &= ~mask; 1133 *aux_val |= val; 1134 *aux_mask &= ~mask; 1135 } 1136 1137 static const struct l2c_init_data of_aurora_with_outer_data __initconst = { 1138 .type = "Aurora", 1139 .way_size_0 = SZ_4K, 1140 .num_lock = 4, 1141 .of_parse = aurora_of_parse, 1142 .enable = l2c_enable, 1143 .fixup = aurora_fixup, 1144 .save = aurora_save, 1145 .outer_cache = { 1146 .inv_range = aurora_inv_range, 1147 .clean_range = aurora_clean_range, 1148 .flush_range = aurora_flush_range, 1149 .flush_all = l2x0_flush_all, 1150 .disable = l2x0_disable, 1151 .sync = l2x0_cache_sync, 1152 .resume = aurora_resume, 1153 }, 1154 }; 1155 1156 static const struct l2c_init_data of_aurora_no_outer_data __initconst = { 1157 .type = "Aurora", 1158 .way_size_0 = SZ_4K, 1159 .num_lock = 4, 1160 .of_parse = aurora_of_parse, 1161 .enable = aurora_enable_no_outer, 1162 .fixup = aurora_fixup, 1163 .save = aurora_save, 1164 .outer_cache = { 1165 .resume = aurora_resume, 1166 }, 1167 }; 1168 1169 /* 1170 * For certain Broadcom SoCs, depending on the address range, different offsets 1171 * need to be added to the address before passing it to L2 for 1172 * invalidation/clean/flush 1173 * 1174 * Section Address Range Offset EMI 1175 * 1 0x00000000 - 0x3FFFFFFF 0x80000000 VC 1176 * 2 0x40000000 - 0xBFFFFFFF 0x40000000 SYS 1177 * 3 0xC0000000 - 0xFFFFFFFF 0x80000000 VC 1178 * 1179 * When the start and end addresses have crossed two different sections, we 1180 * need to break the L2 operation into two, each within its own section. 1181 * For example, if we need to invalidate addresses starts at 0xBFFF0000 and 1182 * ends at 0xC0001000, we need do invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and 2) 1183 * 0xC0000000 - 0xC0001000 1184 * 1185 * Note 1: 1186 * By breaking a single L2 operation into two, we may potentially suffer some 1187 * performance hit, but keep in mind the cross section case is very rare 1188 * 1189 * Note 2: 1190 * We do not need to handle the case when the start address is in 1191 * Section 1 and the end address is in Section 3, since it is not a valid use 1192 * case 1193 * 1194 * Note 3: 1195 * Section 1 in practical terms can no longer be used on rev A2. Because of 1196 * that the code does not need to handle section 1 at all. 1197 * 1198 */ 1199 #define BCM_SYS_EMI_START_ADDR 0x40000000UL 1200 #define BCM_VC_EMI_SEC3_START_ADDR 0xC0000000UL 1201 1202 #define BCM_SYS_EMI_OFFSET 0x40000000UL 1203 #define BCM_VC_EMI_OFFSET 0x80000000UL 1204 1205 static inline int bcm_addr_is_sys_emi(unsigned long addr) 1206 { 1207 return (addr >= BCM_SYS_EMI_START_ADDR) && 1208 (addr < BCM_VC_EMI_SEC3_START_ADDR); 1209 } 1210 1211 static inline unsigned long bcm_l2_phys_addr(unsigned long addr) 1212 { 1213 if (bcm_addr_is_sys_emi(addr)) 1214 return addr + BCM_SYS_EMI_OFFSET; 1215 else 1216 return addr + BCM_VC_EMI_OFFSET; 1217 } 1218 1219 static void bcm_inv_range(unsigned long start, unsigned long end) 1220 { 1221 unsigned long new_start, new_end; 1222 1223 BUG_ON(start < BCM_SYS_EMI_START_ADDR); 1224 1225 if (unlikely(end <= start)) 1226 return; 1227 1228 new_start = bcm_l2_phys_addr(start); 1229 new_end = bcm_l2_phys_addr(end); 1230 1231 /* normal case, no cross section between start and end */ 1232 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { 1233 l2c210_inv_range(new_start, new_end); 1234 return; 1235 } 1236 1237 /* They cross sections, so it can only be a cross from section 1238 * 2 to section 3 1239 */ 1240 l2c210_inv_range(new_start, 1241 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); 1242 l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), 1243 new_end); 1244 } 1245 1246 static void bcm_clean_range(unsigned long start, unsigned long end) 1247 { 1248 unsigned long new_start, new_end; 1249 1250 BUG_ON(start < BCM_SYS_EMI_START_ADDR); 1251 1252 if (unlikely(end <= start)) 1253 return; 1254 1255 new_start = bcm_l2_phys_addr(start); 1256 new_end = bcm_l2_phys_addr(end); 1257 1258 /* normal case, no cross section between start and end */ 1259 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { 1260 l2c210_clean_range(new_start, new_end); 1261 return; 1262 } 1263 1264 /* They cross sections, so it can only be a cross from section 1265 * 2 to section 3 1266 */ 1267 l2c210_clean_range(new_start, 1268 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); 1269 l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), 1270 new_end); 1271 } 1272 1273 static void bcm_flush_range(unsigned long start, unsigned long end) 1274 { 1275 unsigned long new_start, new_end; 1276 1277 BUG_ON(start < BCM_SYS_EMI_START_ADDR); 1278 1279 if (unlikely(end <= start)) 1280 return; 1281 1282 if ((end - start) >= l2x0_size) { 1283 outer_cache.flush_all(); 1284 return; 1285 } 1286 1287 new_start = bcm_l2_phys_addr(start); 1288 new_end = bcm_l2_phys_addr(end); 1289 1290 /* normal case, no cross section between start and end */ 1291 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { 1292 l2c210_flush_range(new_start, new_end); 1293 return; 1294 } 1295 1296 /* They cross sections, so it can only be a cross from section 1297 * 2 to section 3 1298 */ 1299 l2c210_flush_range(new_start, 1300 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); 1301 l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), 1302 new_end); 1303 } 1304 1305 /* Broadcom L2C-310 start from ARMs R3P2 or later, and require no fixups */ 1306 static const struct l2c_init_data of_bcm_l2x0_data __initconst = { 1307 .type = "BCM-L2C-310", 1308 .way_size_0 = SZ_8K, 1309 .num_lock = 8, 1310 .of_parse = l2c310_of_parse, 1311 .enable = l2c310_enable, 1312 .save = l2c310_save, 1313 .outer_cache = { 1314 .inv_range = bcm_inv_range, 1315 .clean_range = bcm_clean_range, 1316 .flush_range = bcm_flush_range, 1317 .flush_all = l2c210_flush_all, 1318 .disable = l2c_disable, 1319 .sync = l2c210_sync, 1320 .resume = l2c310_resume, 1321 }, 1322 }; 1323 1324 static void __init tauros3_save(void __iomem *base) 1325 { 1326 l2c_save(base); 1327 1328 l2x0_saved_regs.aux2_ctrl = 1329 readl_relaxed(base + TAUROS3_AUX2_CTRL); 1330 l2x0_saved_regs.prefetch_ctrl = 1331 readl_relaxed(base + L310_PREFETCH_CTRL); 1332 } 1333 1334 static void tauros3_resume(void) 1335 { 1336 void __iomem *base = l2x0_base; 1337 1338 if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) { 1339 writel_relaxed(l2x0_saved_regs.aux2_ctrl, 1340 base + TAUROS3_AUX2_CTRL); 1341 writel_relaxed(l2x0_saved_regs.prefetch_ctrl, 1342 base + L310_PREFETCH_CTRL); 1343 1344 l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8); 1345 } 1346 } 1347 1348 static const struct l2c_init_data of_tauros3_data __initconst = { 1349 .type = "Tauros3", 1350 .way_size_0 = SZ_8K, 1351 .num_lock = 8, 1352 .enable = l2c_enable, 1353 .save = tauros3_save, 1354 /* Tauros3 broadcasts L1 cache operations to L2 */ 1355 .outer_cache = { 1356 .resume = tauros3_resume, 1357 }, 1358 }; 1359 1360 #define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns } 1361 static const struct of_device_id l2x0_ids[] __initconst = { 1362 L2C_ID("arm,l210-cache", of_l2c210_data), 1363 L2C_ID("arm,l220-cache", of_l2c220_data), 1364 L2C_ID("arm,pl310-cache", of_l2c310_data), 1365 L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data), 1366 L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data), 1367 L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data), 1368 L2C_ID("marvell,tauros3-cache", of_tauros3_data), 1369 /* Deprecated IDs */ 1370 L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data), 1371 {} 1372 }; 1373 1374 int __init l2x0_of_init(u32 aux_val, u32 aux_mask) 1375 { 1376 const struct l2c_init_data *data; 1377 struct device_node *np; 1378 struct resource res; 1379 u32 cache_id; 1380 1381 np = of_find_matching_node(NULL, l2x0_ids); 1382 if (!np) 1383 return -ENODEV; 1384 1385 if (of_address_to_resource(np, 0, &res)) 1386 return -ENODEV; 1387 1388 l2x0_base = ioremap(res.start, resource_size(&res)); 1389 if (!l2x0_base) 1390 return -ENOMEM; 1391 1392 l2x0_saved_regs.phy_base = res.start; 1393 1394 data = of_match_node(l2x0_ids, np)->data; 1395 1396 /* All L2 caches are unified, so this property should be specified */ 1397 if (!of_property_read_bool(np, "cache-unified")) 1398 pr_err("L2C: device tree omits to specify unified cache\n"); 1399 1400 /* L2 configuration can only be changed if the cache is disabled */ 1401 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) 1402 if (data->of_parse) 1403 data->of_parse(np, &aux_val, &aux_mask); 1404 1405 if (cache_id_part_number_from_dt) 1406 cache_id = cache_id_part_number_from_dt; 1407 else 1408 cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); 1409 1410 __l2c_init(data, aux_val, aux_mask, cache_id); 1411 1412 return 0; 1413 } 1414 #endif 1415