/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"

struct l2c_init_data {
	const char *type;
	unsigned way_size_0;
	unsigned num_lock;
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*enable)(void __iomem *, u32, unsigned);
	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
	void (*save)(void __iomem *);
	struct outer_cache_fns outer_cache;
};

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

/*
 * Common code for all cache controllers.
 */
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

/*
 * By default, we write directly to secure registers.  Platforms must
 * override this if they are running non-secure.
 */
static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg)
{
	if (val == readl_relaxed(base + reg))
		return;
	if (outer_cache.write_sec)
		outer_cache.write_sec(val, reg);
	else
		writel_relaxed(val, base + reg);
}

/*
 * This should only be called when we have a requirement that the
 * register be written due to a work-around, as platforms running
 * in non-secure mode may not be able to access this register.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	if (outer_cache.set_debug)
		outer_cache.set_debug(val);
	else
		l2c_write_sec(val, base, L2X0_DEBUG_CTRL);
}

static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	l2c_wait_mask(reg, l2x0_way_mask);
}

static inline void l2c_unlock(void __iomem *base, unsigned num)
{
	unsigned i;

	for (i = 0; i < num; i++) {
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}
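
/*
 * Hedged illustration (register offsets recalled from the usual L2C
 * register map in cache-l2x0.h, not restated in this file): the D and
 * I lockdown registers are interleaved pairs, so iteration i above
 * clears 0x900 + 8 * i and 0x904 + 8 * i; an L2C-310 with num_lock = 8
 * therefore clears sixteen registers, 0x900..0x93c.
 */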

/*
 * Enable the L2 cache controller.  This function must only be
 * called when the cache controller is known to be disabled.
 */
static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned long flags;

	l2c_write_sec(aux, base, L2X0_AUX_CTRL);

	l2c_unlock(base, num_lock);

	local_irq_save(flags);
	__l2c_op_way(base + L2X0_INV_WAY);
	writel_relaxed(0, base + sync_reg_offset);
	l2c_wait_mask(base + sync_reg_offset, 1);
	local_irq_restore(flags);

	l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);
}

static void l2c_disable(void)
{
	void __iomem *base = l2x0_base;

	outer_cache.flush_all();
	l2c_write_sec(0, base, L2X0_CTRL);
	dsb(st);
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	l2c_wait_mask
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug || outer_cache.write_sec)
		l2c_set_debug(l2x0_base, val);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	l2c_write_sec(0, l2x0_base, L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c_save(void __iomem *base)
{
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}
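
/*
 * Hedged sketch (not part of this file): a platform booting non-secure
 * would install its secure-write hook before this driver initialises,
 * e.g. from machine init code:
 *
 *	static void example_l2c_write_sec(unsigned long val, unsigned reg)
 *	{
 *		example_smc(EXAMPLE_SMC_L2C_WRITE, reg, val); // hypothetical
 *	}
 *	...
 *	outer_cache.write_sec = example_l2c_write_sec;
 *
 * after which l2c_write_sec() routes AUX_CTRL, DEBUG_CTRL and other
 * secure-only writes through the secure monitor instead of touching
 * the (faulting) registers directly.
 */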

/*
 * L2C-210 specific code.
 *
 * The L2C-2x0 PA, set/way and sync operations are atomic, but we must
 * ensure that no background operation is running.  The way operations
 * are all background tasks.
 *
 * While a background operation is in progress, any new operation is
 * ignored (it is unspecified whether this causes an error.)  Thankfully,
 * these parts are not used on SMP.
 *
 * The sync register is always L2X0_CACHE_SYNC, but we use
 * sync_reg_offset here so we can share some of this with L2C-310.
 */
static void __l2c210_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + sync_reg_offset);
}

static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end)
{
	while (start < end) {
		writel_relaxed(start, reg);
		start += CACHE_LINE_SIZE;
	}
}

static void l2c210_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}
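
/*
 * Worked example for l2c210_inv_range() above: invalidating
 * [0x1010, 0x2030) must not discard dirty data outside the requested
 * range, so the partially covered lines at 0x1000 and 0x2020 are
 * clean+invalidated, and only the fully covered lines 0x1020..0x2000
 * are invalidated outright.
 */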

static void l2c210_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_all(void)
{
	void __iomem *base = l2x0_base;

	BUG_ON(!irqs_disabled());

	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	__l2c210_cache_sync(base);
}

static void l2c210_sync(void)
{
	__l2c210_cache_sync(l2x0_base);
}

static void l2c210_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1);
}

static const struct l2c_init_data l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c210_resume,
	},
};

/*
 * L2C-220 specific code.
 *
 * All operations are background operations: they have to be waited for.
 * Conflicting requests generate a slave error (which will cause an
 * imprecise abort.)  Never uses sync_reg_offset, so we hard-code the
 * sync register here.
 *
 * However, we can re-use the l2c210_resume call.
 */
static inline void __l2c220_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
	l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
}

static void l2c220_op_way(void __iomem *base, unsigned reg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + reg);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end, unsigned long flags)
{
	raw_spinlock_t *lock = &l2x0_lock;

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2c_wait_mask(reg, 1);
			writel_relaxed(start, reg);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}

	return flags;
}
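
/*
 * Worked numbers for l2c220_op_pa_range() above: each 4096-byte block
 * is 4096 / CACHE_LINE_SIZE = 128 line operations issued under a
 * single l2x0_lock hold; dropping and retaking the lock between blocks
 * bounds the IRQs-off latency on long ranges.
 */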

static void l2c220_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
			writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
		}
	}

	flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_all(void)
{
	l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
}

static void l2c220_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c220_cache_sync(l2x0_base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static const struct l2c_init_data l2c220_data __initconst = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c210_resume,
	},
};

/*
 * L2C-310 specific code.
 *
 * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
 * and the way operations are all background tasks.  However, issuing an
 * operation while a background operation is in progress results in a
 * SLVERR response.  We can reuse:
 *
 *	__l2c210_cache_sync (using sync_reg_offset)
 *	l2c210_sync
 *	l2c210_inv_range (if 588369 is not applicable)
 *	l2c210_clean_range
 *	l2c210_flush_range (if 588369 is not applicable)
 *	l2c210_flush_all (if 727915 is not applicable)
 *
 * Errata:
 * 588369: PL310 R0P0->R1P0, fixed R2P0.
 *	Affects: all clean+invalidate operations
 *	clean and invalidate skips the invalidate step, so we need to issue
 *	separate operations.  We also require the above debug workaround
 *	enclosing this code fragment on affected parts.  On unaffected parts,
 *	we must not use this workaround without the debug register writes
 *	to avoid exposing a problem similar to 727915.
 *
 * 727915: PL310 R2P0->R3P0, fixed R3P1.
 *	Affects: clean+invalidate by way
 *	clean and invalidate by way runs in the background, and a store can
 *	hit the line between the clean operation and invalidate operation,
 *	resulting in the store being lost.
 *
 * 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2.
 *	Affects: 8x64-bit (double fill) line fetches
 *	double fill line fetches can fail to cause dirty data to be evicted
 *	from the cache before the new data overwrites the second line.
 *
 * 753970: PL310 R3P0, fixed R3P1.
 *	Affects: sync
 *	prevents merging writes after the sync operation, until another L2C
 *	operation is performed (or a number of other conditions.)
 *
 * 769419: PL310 R0P0->R3P1, fixed R3P2.
 *	Affects: store buffer
 *	store buffer is not automatically drained.
 */
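
/*
 * Reading note for the revision checks below: the RTL release lives in
 * the low bits of L2X0_CACHE_ID (masked by L2X0_CACHE_ID_RTL_MASK), and
 * the L310_CACHE_ID_RTL_* constants sort in release order, so e.g. an
 * r3p2 part compares new enough to skip the 588369, 727915, 752271 and
 * 753970 handling entirely.
 */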

static void l2c310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}

static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		unsigned long flags;

		/* Erratum 588369 for both clean+invalidate operations */
		raw_spin_lock_irqsave(&l2x0_lock, flags);
		l2c_set_debug(base, 0x03);

		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(end, base + L2X0_INV_LINE_PA);
		}

		l2c_set_debug(base, 0x00);
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
{
	raw_spinlock_t *lock = &l2x0_lock;
	unsigned long flags;
	void __iomem *base = l2x0_base;

	raw_spin_lock_irqsave(lock, flags);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		l2c_set_debug(base, 0x03);
		while (start < blk_end) {
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}
		l2c_set_debug(base, 0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}
	raw_spin_unlock_irqrestore(lock, flags);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_all_erratum(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	l2c_set_debug(base, 0x03);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	l2c_set_debug(base, 0x00);
	__l2c210_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __init l2c310_save(void __iomem *base)
{
	unsigned revision;

	l2c_save(base);

	l2x0_saved_regs.tag_latency = readl_relaxed(base +
		L310_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(base +
		L310_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(base +
		L310_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(base +
		L310_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

	/* From r2p0 there is a prefetch offset/control register */
	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
			L310_PREFETCH_CTRL);

	/* From r3p0 there is a power control register */
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
			L310_POWER_CTRL);
}
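
/*
 * Hedged usage note: nothing extra is captured at suspend time; the
 * registers saved by l2c310_save() at init are simply re-applied below.
 * Platform resume code is expected to reach l2c310_resume() through the
 * generic outer_resume() wrapper once the L2 has lost state.
 */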

static void l2c310_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		unsigned revision;

		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			       base + L310_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			       base + L310_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			       base + L310_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			       base + L310_ADDR_FILTER_START);

		revision = readl_relaxed(base + L2X0_CACHE_ID) &
				L2X0_CACHE_ID_RTL_MASK;

		if (revision >= L310_CACHE_ID_RTL_R2P0)
			l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
				      L310_PREFETCH_CTRL);
		if (revision >= L310_CACHE_ID_RTL_R3P0)
			l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
				      L310_POWER_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
	}
}

static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
	const char *errata[8];
	unsigned n = 0;

	/* For compatibility */
	if (revision <= L310_CACHE_ID_RTL_R3P0)
		fns->set_debug = l2c310_set_debug;

	if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
	    revision < L310_CACHE_ID_RTL_R2P0 &&
	    /* For bcm compatibility */
	    fns->inv_range == l2c210_inv_range) {
		fns->inv_range = l2c310_inv_range_erratum;
		fns->flush_range = l2c310_flush_range_erratum;
		errata[n++] = "588369";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
	    revision >= L310_CACHE_ID_RTL_R2P0 &&
	    revision < L310_CACHE_ID_RTL_R3P1) {
		fns->flush_all = l2c310_flush_all_erratum;
		errata[n++] = "727915";
	}

	if (revision >= L310_CACHE_ID_RTL_R3P0 &&
	    revision < L310_CACHE_ID_RTL_R3P2) {
		u32 val = readl_relaxed(base + L310_PREFETCH_CTRL);
		/* I don't think bit23 is required here... but iMX6 does so */
		if (val & (BIT(30) | BIT(23))) {
			val &= ~(BIT(30) | BIT(23));
			l2c_write_sec(val, base, L310_PREFETCH_CTRL);
			errata[n++] = "752271";
		}
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
	    revision == L310_CACHE_ID_RTL_R3P0) {
		sync_reg_offset = L2X0_DUMMY_REG;
		errata[n++] = "753970";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
		errata[n++] = "769419";

	if (n) {
		unsigned i;

		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
		for (i = 0; i < n; i++)
			pr_cont(" %s", errata[i]);
		pr_cont(" enabled\n");
	}
}
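
/*
 * Example boot message from the above (illustrative: an r3p0 part with
 * CONFIG_PL310_ERRATA_727915 and CONFIG_PL310_ERRATA_753970 set and
 * double linefill disabled):
 *
 *	L2C-310 errata 727915 753970 enabled
 */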
"a" : "um"); 692 for (i = 0; i < n; i++) 693 pr_cont(" %s", errata[i]); 694 pr_cont(" enabled\n"); 695 } 696 } 697 698 static const struct l2c_init_data l2c310_init_fns __initconst = { 699 .type = "L2C-310", 700 .way_size_0 = SZ_8K, 701 .num_lock = 8, 702 .enable = l2c_enable, 703 .fixup = l2c310_fixup, 704 .save = l2c310_save, 705 .outer_cache = { 706 .inv_range = l2c210_inv_range, 707 .clean_range = l2c210_clean_range, 708 .flush_range = l2c210_flush_range, 709 .flush_all = l2c210_flush_all, 710 .disable = l2c_disable, 711 .sync = l2c210_sync, 712 .set_debug = l2c310_set_debug, 713 .resume = l2c310_resume, 714 }, 715 }; 716 717 static void __init __l2c_init(const struct l2c_init_data *data, 718 u32 aux_val, u32 aux_mask, u32 cache_id) 719 { 720 struct outer_cache_fns fns; 721 unsigned way_size_bits, ways; 722 u32 aux; 723 724 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 725 726 aux &= aux_mask; 727 aux |= aux_val; 728 729 /* Determine the number of ways */ 730 switch (cache_id & L2X0_CACHE_ID_PART_MASK) { 731 case L2X0_CACHE_ID_PART_L310: 732 if (aux & (1 << 16)) 733 ways = 16; 734 else 735 ways = 8; 736 break; 737 738 case L2X0_CACHE_ID_PART_L210: 739 case L2X0_CACHE_ID_PART_L220: 740 ways = (aux >> 13) & 0xf; 741 break; 742 743 case AURORA_CACHE_ID: 744 ways = (aux >> 13) & 0xf; 745 ways = 2 << ((ways + 1) >> 2); 746 break; 747 748 default: 749 /* Assume unknown chips have 8 ways */ 750 ways = 8; 751 break; 752 } 753 754 l2x0_way_mask = (1 << ways) - 1; 755 756 /* 757 * way_size_0 is the size that a way_size value of zero would be 758 * given the calculation: way_size = way_size_0 << way_size_bits. 759 * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k, 760 * then way_size_0 would be 8k. 761 * 762 * L2 cache size = number of ways * way size. 763 */ 764 way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >> 765 L2C_AUX_CTRL_WAY_SIZE_SHIFT; 766 l2x0_size = ways * (data->way_size_0 << way_size_bits); 767 768 fns = data->outer_cache; 769 fns.write_sec = outer_cache.write_sec; 770 if (data->fixup) 771 data->fixup(l2x0_base, cache_id, &fns); 772 if (fns.write_sec) 773 fns.set_debug = NULL; 774 775 /* 776 * Check if l2x0 controller is already enabled. If we are booting 777 * in non-secure mode accessing the below registers will fault. 778 */ 779 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) 780 data->enable(l2x0_base, aux, data->num_lock); 781 782 outer_cache = fns; 783 784 /* 785 * It is strange to save the register state before initialisation, 786 * but hey, this is what the DT implementations decided to do. 787 */ 788 if (data->save) 789 data->save(l2x0_base); 790 791 /* Re-read it in case some bits are reserved. 

	fns = data->outer_cache;
	fns.write_sec = outer_cache.write_sec;
	if (data->fixup)
		data->fixup(l2x0_base, cache_id, &fns);
	if (fns.write_sec)
		fns.set_debug = NULL;

	/*
	 * Check if the l2x0 controller is already enabled.  If we are
	 * booting in non-secure mode, accessing the below registers will
	 * fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		data->enable(l2x0_base, aux, data->num_lock);

	outer_cache = fns;

	/*
	 * It is strange to save the register state before initialisation,
	 * but hey, this is what the DT implementations decided to do.
	 */
	if (data->save)
		data->save(l2x0_base);

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	pr_info("%s cache controller enabled, %d ways, %d kB\n",
		data->type, ways, l2x0_size >> 10);
	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
		data->type, cache_id, aux);
}

void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	default:
	case L2X0_CACHE_ID_PART_L210:
		data = &l2c210_data;
		break;

	case L2X0_CACHE_ID_PART_L220:
		data = &l2c220_data;
		break;

	case L2X0_CACHE_ID_PART_L310:
		data = &l2c310_init_fns;
		break;
	}

	__l2c_init(data, aux_val, aux_mask, cache_id);
}

#ifdef CONFIG_OF
static int l2_wt_override;

/*
 * Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree.
 */
static u32 cache_id_part_number_from_dt;

static void __init l2x0_of_parse(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c210_resume,
	},
};

static const struct l2c_init_data of_l2c220_data __initconst = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c210_resume,
	},
};
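
/*
 * Illustrative device tree node for l2c310_of_parse() below (values
 * made up; property names per the standard "arm,pl310-cache" binding):
 *
 *	L2: cache-controller@fff12000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xfff12000 0x1000>;
 *		cache-unified;
 *		cache-level = <2>;
 *		arm,tag-latency = <2 2 1>;	// rd wr setup, cycles
 *		arm,data-latency = <3 3 1>;
 *		arm,filter-ranges = <0x80000000 0x40000000>; // base size
 *	};
 */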
"arm,data-latency", 920 data, ARRAY_SIZE(data)); 921 if (data[0] && data[1] && data[2]) 922 writel_relaxed( 923 L310_LATENCY_CTRL_RD(data[0] - 1) | 924 L310_LATENCY_CTRL_WR(data[1] - 1) | 925 L310_LATENCY_CTRL_SETUP(data[2] - 1), 926 l2x0_base + L310_DATA_LATENCY_CTRL); 927 928 of_property_read_u32_array(np, "arm,filter-ranges", 929 filter, ARRAY_SIZE(filter)); 930 if (filter[1]) { 931 writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M), 932 l2x0_base + L310_ADDR_FILTER_END); 933 writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L310_ADDR_FILTER_EN, 934 l2x0_base + L310_ADDR_FILTER_START); 935 } 936 } 937 938 static const struct l2c_init_data of_l2c310_data __initconst = { 939 .type = "L2C-310", 940 .way_size_0 = SZ_8K, 941 .num_lock = 8, 942 .of_parse = l2c310_of_parse, 943 .enable = l2c_enable, 944 .fixup = l2c310_fixup, 945 .save = l2c310_save, 946 .outer_cache = { 947 .inv_range = l2c210_inv_range, 948 .clean_range = l2c210_clean_range, 949 .flush_range = l2c210_flush_range, 950 .flush_all = l2c210_flush_all, 951 .disable = l2c_disable, 952 .sync = l2c210_sync, 953 .set_debug = l2c310_set_debug, 954 .resume = l2c310_resume, 955 }, 956 }; 957 958 /* 959 * Note that the end addresses passed to Linux primitives are 960 * noninclusive, while the hardware cache range operations use 961 * inclusive start and end addresses. 962 */ 963 static unsigned long calc_range_end(unsigned long start, unsigned long end) 964 { 965 /* 966 * Limit the number of cache lines processed at once, 967 * since cache range operations stall the CPU pipeline 968 * until completion. 969 */ 970 if (end > start + MAX_RANGE_SIZE) 971 end = start + MAX_RANGE_SIZE; 972 973 /* 974 * Cache range operations can't straddle a page boundary. 975 */ 976 if (end > PAGE_ALIGN(start+1)) 977 end = PAGE_ALIGN(start+1); 978 979 return end; 980 } 981 982 /* 983 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT 984 * and range operations only do a TLB lookup on the start address. 985 */ 986 static void aurora_pa_range(unsigned long start, unsigned long end, 987 unsigned long offset) 988 { 989 unsigned long flags; 990 991 raw_spin_lock_irqsave(&l2x0_lock, flags); 992 writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG); 993 writel_relaxed(end, l2x0_base + offset); 994 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 995 996 cache_sync(); 997 } 998 999 static void aurora_inv_range(unsigned long start, unsigned long end) 1000 { 1001 /* 1002 * round start and end adresses up to cache line size 1003 */ 1004 start &= ~(CACHE_LINE_SIZE - 1); 1005 end = ALIGN(end, CACHE_LINE_SIZE); 1006 1007 /* 1008 * Invalidate all full cache lines between 'start' and 'end'. 1009 */ 1010 while (start < end) { 1011 unsigned long range_end = calc_range_end(start, end); 1012 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 1013 AURORA_INVAL_RANGE_REG); 1014 start = range_end; 1015 } 1016 } 1017 1018 static void aurora_clean_range(unsigned long start, unsigned long end) 1019 { 1020 /* 1021 * If L2 is forced to WT, the L2 will always be clean and we 1022 * don't need to do anything here. 

/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			    unsigned long offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
	writel_relaxed(end, l2x0_base + offset);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	cache_sync();
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * Round the start address down and the end address up to
	 * cache line boundaries.
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
				AURORA_INVAL_RANGE_REG);
		start = range_end;
	}
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and
		 * we just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}

static void aurora_save(void __iomem *base)
{
	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}

static void aurora_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL);
		writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL);
	}
}

/*
 * For Aurora cache in no outer mode, enable via the CP15 coprocessor
 * broadcasting of cache commands to L2.
 */
static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
	unsigned num_lock)
{
	u32 u;

	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));

	isb();

	l2c_enable(base, aux, num_lock);
}

static void __init aurora_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	sync_reg_offset = AURORA_SYNC_REG;
}

static void __init aurora_of_parse(const struct device_node *np,
				   u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			     &cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = l2c_enable,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = aurora_resume,
	},
};

static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = aurora_enable_no_outer,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.resume = aurora_resume,
	},
};
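
/*
 * Selection note (see the l2x0_ids table below): DT compatible
 * "marvell,aurora-outer-cache" binds of_aurora_with_outer_data, while
 * "marvell,aurora-system-cache" binds this no-outer variant, which
 * needs only a resume hook because the FW bit set by
 * aurora_enable_no_outer() makes CP15 L1 maintenance reach L2 directly.
 */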

/*
 * For certain Broadcom SoCs, depending on the address range, different
 * offsets need to be added to the address before passing it to L2 for
 * invalidation/clean/flush.
 *
 * Section	Address Range			Offset		EMI
 *   1		0x00000000 - 0x3FFFFFFF		0x80000000	VC
 *   2		0x40000000 - 0xBFFFFFFF		0x40000000	SYS
 *   3		0xC0000000 - 0xFFFFFFFF		0x80000000	VC
 *
 * When the start and end addresses fall in two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, if we need to invalidate addresses starting at 0xBFFF0000
 * and ending at 0xC0001000, we need to invalidate 1) 0xBFFF0000 - 0xBFFFFFFF
 * and 2) 0xC0000000 - 0xC0001000.
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer
 * some performance hit, but keep in mind the cross-section case is very
 * rare.
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid
 * use case.
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2.  Because
 * of that, the code does not need to handle section 1 at all.
 */
#define BCM_SYS_EMI_START_ADDR		0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR	0xC0000000UL

#define BCM_SYS_EMI_OFFSET		0x40000000UL
#define BCM_VC_EMI_OFFSET		0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}

static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_inv_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_clean_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}
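
/*
 * A note on the crossing test used by the three range operations here:
 * since section 1 is unused (Note 3) and a section 1 to section 3 span
 * is invalid (Note 2), "end still in SYS EMI" or "start already beyond
 * SYS EMI" both imply that start and end share a section, leaving the
 * section 2 -> 3 crossing as the only split case.
 */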

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		outer_cache.flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_flush_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

/* Broadcom L2C-310s are based on ARM's r3p2 or later, and require no fixups */
static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.type = "BCM-L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c_enable,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};

static void __init tauros3_save(void __iomem *base)
{
	l2c_save(base);

	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L310_PREFETCH_CTRL);
}

static void tauros3_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
			       base + TAUROS3_AUX2_CTRL);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       base + L310_PREFETCH_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
	}
}

static const struct l2c_init_data of_tauros3_data __initconst = {
	.type = "Tauros3",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c_enable,
	.save = tauros3_save,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = tauros3_resume,
	},
};

#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2c210_data),
	L2C_ID("arm,l220-cache", of_l2c220_data),
	L2C_ID("arm,pl310-cache", of_l2c310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};
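
/*
 * Hedged call-site sketch (not part of this file): platforms typically
 * invoke the OF entry point below from machine init code once the DT
 * is available, e.g.
 *
 *	l2x0_of_init(0, ~0UL);
 *
 * forcing no AUX_CTRL bits (aux_val = 0) while preserving all existing
 * ones (aux_mask = ~0), so only DT-derived settings are applied.
 */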

int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* All L2 caches are unified, so this property should be specified */
	if (!of_property_read_bool(np, "cache-unified"))
		pr_err("L2C: device tree omits to specify unified cache\n");

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	__l2c_init(data, aux_val, aux_mask, cache_id);

	return 0;
}
#endif
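
/*
 * Hedged usage sketch (wrappers live in <asm/outer_cache.h>, not here):
 * once l2x0_init() or l2x0_of_init() has installed outer_cache, the
 * rest of the kernel reaches this driver through helpers such as
 *
 *	phys_addr_t pa = __pa(buf);
 *
 *	outer_clean_range(pa, pa + size);	// -> .clean_range
 *	outer_inv_range(pa, pa + size);		// -> .inv_range
 *	outer_flush_range(pa, pa + size);	// -> .flush_range
 *	outer_flush_all();			// -> .flush_all
 *
 * all of which take physical, not virtual, addresses.
 */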