/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"

struct l2c_init_data {
	const char *type;
	unsigned way_size_0;
	unsigned num_lock;
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*enable)(void __iomem *, u32, unsigned);
	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
	void (*save)(void __iomem *);
	struct outer_cache_fns outer_cache;
};

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

/*
 * Common code for all cache controllers.
 */
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

/*
 * By default, we write directly to secure registers.  Platforms must
 * override this if they are running non-secure.
 */
static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg)
{
	if (val == readl_relaxed(base + reg))
		return;
	if (outer_cache.write_sec)
		outer_cache.write_sec(val, reg);
	else
		writel_relaxed(val, base + reg);
}

/*
 * This should only be called when we have a requirement that the
 * register be written due to a work-around, as platforms running
 * in non-secure mode may not be able to access this register.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	if (outer_cache.set_debug)
		outer_cache.set_debug(val);
	else
		l2c_write_sec(val, base, L2X0_DEBUG_CTRL);
}

static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	l2c_wait_mask(reg, l2x0_way_mask);
}

static inline void l2c_unlock(void __iomem *base, unsigned num)
{
	unsigned i;

	for (i = 0; i < num; i++) {
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}
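
/*
 * Illustrative sketch only (not part of the original file): a platform
 * booting non-secure installs its own write_sec hook before the cache
 * is initialised, typically bouncing the register write through secure
 * firmware.  The helper and SMC function ID below are hypothetical:
 *
 *	static void example_write_sec(unsigned long val, unsigned reg)
 *	{
 *		example_firmware_smc(EXAMPLE_SMC_L2_WRITE, reg, val);
 *	}
 *
 *	outer_cache.write_sec = example_write_sec;
 *	l2x0_of_init(0, ~0);
 */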
110 */ 111 static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock) 112 { 113 unsigned long flags; 114 115 l2c_write_sec(aux, base, L2X0_AUX_CTRL); 116 117 l2c_unlock(base, num_lock); 118 119 local_irq_save(flags); 120 __l2c_op_way(base + L2X0_INV_WAY); 121 writel_relaxed(0, base + sync_reg_offset); 122 l2c_wait_mask(base + sync_reg_offset, 1); 123 local_irq_restore(flags); 124 125 l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL); 126 } 127 128 static void l2c_disable(void) 129 { 130 void __iomem *base = l2x0_base; 131 132 outer_cache.flush_all(); 133 l2c_write_sec(0, base, L2X0_CTRL); 134 dsb(st); 135 } 136 137 #ifdef CONFIG_CACHE_PL310 138 static inline void cache_wait(void __iomem *reg, unsigned long mask) 139 { 140 /* cache operations by line are atomic on PL310 */ 141 } 142 #else 143 #define cache_wait l2c_wait_mask 144 #endif 145 146 static inline void cache_sync(void) 147 { 148 void __iomem *base = l2x0_base; 149 150 writel_relaxed(0, base + sync_reg_offset); 151 cache_wait(base + L2X0_CACHE_SYNC, 1); 152 } 153 154 #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915) 155 static inline void debug_writel(unsigned long val) 156 { 157 if (outer_cache.set_debug || outer_cache.write_sec) 158 l2c_set_debug(l2x0_base, val); 159 } 160 #else 161 /* Optimised out for non-errata case */ 162 static inline void debug_writel(unsigned long val) 163 { 164 } 165 #endif 166 167 static void l2x0_cache_sync(void) 168 { 169 unsigned long flags; 170 171 raw_spin_lock_irqsave(&l2x0_lock, flags); 172 cache_sync(); 173 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 174 } 175 176 static void __l2x0_flush_all(void) 177 { 178 debug_writel(0x03); 179 __l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY); 180 cache_sync(); 181 debug_writel(0x00); 182 } 183 184 static void l2x0_flush_all(void) 185 { 186 unsigned long flags; 187 188 /* clean all ways */ 189 raw_spin_lock_irqsave(&l2x0_lock, flags); 190 __l2x0_flush_all(); 191 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 192 } 193 194 static void l2x0_disable(void) 195 { 196 unsigned long flags; 197 198 raw_spin_lock_irqsave(&l2x0_lock, flags); 199 __l2x0_flush_all(); 200 l2c_write_sec(0, l2x0_base, L2X0_CTRL); 201 dsb(st); 202 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 203 } 204 205 /* 206 * L2C-210 specific code. 207 * 208 * The L2C-2x0 PA, set/way and sync operations are atomic, but we must 209 * ensure that no background operation is running. The way operations 210 * are all background tasks. 211 * 212 * While a background operation is in progress, any new operation is 213 * ignored (unspecified whether this causes an error.) Thankfully, not 214 * used on SMP. 215 * 216 * Never has a different sync register other than L2X0_CACHE_SYNC, but 217 * we use sync_reg_offset here so we can share some of this with L2C-310. 
218 */ 219 static void __l2c210_cache_sync(void __iomem *base) 220 { 221 writel_relaxed(0, base + sync_reg_offset); 222 } 223 224 static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start, 225 unsigned long end) 226 { 227 while (start < end) { 228 writel_relaxed(start, reg); 229 start += CACHE_LINE_SIZE; 230 } 231 } 232 233 static void l2c210_inv_range(unsigned long start, unsigned long end) 234 { 235 void __iomem *base = l2x0_base; 236 237 if (start & (CACHE_LINE_SIZE - 1)) { 238 start &= ~(CACHE_LINE_SIZE - 1); 239 writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA); 240 start += CACHE_LINE_SIZE; 241 } 242 243 if (end & (CACHE_LINE_SIZE - 1)) { 244 end &= ~(CACHE_LINE_SIZE - 1); 245 writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA); 246 } 247 248 __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end); 249 __l2c210_cache_sync(base); 250 } 251 252 static void l2c210_clean_range(unsigned long start, unsigned long end) 253 { 254 void __iomem *base = l2x0_base; 255 256 start &= ~(CACHE_LINE_SIZE - 1); 257 __l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end); 258 __l2c210_cache_sync(base); 259 } 260 261 static void l2c210_flush_range(unsigned long start, unsigned long end) 262 { 263 void __iomem *base = l2x0_base; 264 265 start &= ~(CACHE_LINE_SIZE - 1); 266 __l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end); 267 __l2c210_cache_sync(base); 268 } 269 270 static void l2c210_flush_all(void) 271 { 272 void __iomem *base = l2x0_base; 273 274 BUG_ON(!irqs_disabled()); 275 276 __l2c_op_way(base + L2X0_CLEAN_INV_WAY); 277 __l2c210_cache_sync(base); 278 } 279 280 static void l2c210_sync(void) 281 { 282 __l2c210_cache_sync(l2x0_base); 283 } 284 285 static void l2c210_resume(void) 286 { 287 void __iomem *base = l2x0_base; 288 289 if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) 290 l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1); 291 } 292 293 static const struct l2c_init_data l2c210_data __initconst = { 294 .type = "L2C-210", 295 .way_size_0 = SZ_8K, 296 .num_lock = 1, 297 .enable = l2c_enable, 298 .outer_cache = { 299 .inv_range = l2c210_inv_range, 300 .clean_range = l2c210_clean_range, 301 .flush_range = l2c210_flush_range, 302 .flush_all = l2c210_flush_all, 303 .disable = l2c_disable, 304 .sync = l2c210_sync, 305 .resume = l2c210_resume, 306 }, 307 }; 308 309 /* 310 * L2C-220 specific code. 311 * 312 * All operations are background operations: they have to be waited for. 313 * Conflicting requests generate a slave error (which will cause an 314 * imprecise abort.) Never uses sync_reg_offset, so we hard-code the 315 * sync register here. 316 * 317 * However, we can re-use the l2c210_resume call. 
318 */ 319 static inline void __l2c220_cache_sync(void __iomem *base) 320 { 321 writel_relaxed(0, base + L2X0_CACHE_SYNC); 322 l2c_wait_mask(base + L2X0_CACHE_SYNC, 1); 323 } 324 325 static void l2c220_op_way(void __iomem *base, unsigned reg) 326 { 327 unsigned long flags; 328 329 raw_spin_lock_irqsave(&l2x0_lock, flags); 330 __l2c_op_way(base + reg); 331 __l2c220_cache_sync(base); 332 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 333 } 334 335 static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start, 336 unsigned long end, unsigned long flags) 337 { 338 raw_spinlock_t *lock = &l2x0_lock; 339 340 while (start < end) { 341 unsigned long blk_end = start + min(end - start, 4096UL); 342 343 while (start < blk_end) { 344 l2c_wait_mask(reg, 1); 345 writel_relaxed(start, reg); 346 start += CACHE_LINE_SIZE; 347 } 348 349 if (blk_end < end) { 350 raw_spin_unlock_irqrestore(lock, flags); 351 raw_spin_lock_irqsave(lock, flags); 352 } 353 } 354 355 return flags; 356 } 357 358 static void l2c220_inv_range(unsigned long start, unsigned long end) 359 { 360 void __iomem *base = l2x0_base; 361 unsigned long flags; 362 363 raw_spin_lock_irqsave(&l2x0_lock, flags); 364 if ((start | end) & (CACHE_LINE_SIZE - 1)) { 365 if (start & (CACHE_LINE_SIZE - 1)) { 366 start &= ~(CACHE_LINE_SIZE - 1); 367 writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA); 368 start += CACHE_LINE_SIZE; 369 } 370 371 if (end & (CACHE_LINE_SIZE - 1)) { 372 end &= ~(CACHE_LINE_SIZE - 1); 373 l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1); 374 writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA); 375 } 376 } 377 378 flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA, 379 start, end, flags); 380 l2c_wait_mask(base + L2X0_INV_LINE_PA, 1); 381 __l2c220_cache_sync(base); 382 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 383 } 384 385 static void l2c220_clean_range(unsigned long start, unsigned long end) 386 { 387 void __iomem *base = l2x0_base; 388 unsigned long flags; 389 390 start &= ~(CACHE_LINE_SIZE - 1); 391 if ((end - start) >= l2x0_size) { 392 l2c220_op_way(base, L2X0_CLEAN_WAY); 393 return; 394 } 395 396 raw_spin_lock_irqsave(&l2x0_lock, flags); 397 flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA, 398 start, end, flags); 399 l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1); 400 __l2c220_cache_sync(base); 401 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 402 } 403 404 static void l2c220_flush_range(unsigned long start, unsigned long end) 405 { 406 void __iomem *base = l2x0_base; 407 unsigned long flags; 408 409 start &= ~(CACHE_LINE_SIZE - 1); 410 if ((end - start) >= l2x0_size) { 411 l2c220_op_way(base, L2X0_CLEAN_INV_WAY); 412 return; 413 } 414 415 raw_spin_lock_irqsave(&l2x0_lock, flags); 416 flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, 417 start, end, flags); 418 l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1); 419 __l2c220_cache_sync(base); 420 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 421 } 422 423 static void l2c220_flush_all(void) 424 { 425 l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY); 426 } 427 428 static void l2c220_sync(void) 429 { 430 unsigned long flags; 431 432 raw_spin_lock_irqsave(&l2x0_lock, flags); 433 __l2c220_cache_sync(l2x0_base); 434 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 435 } 436 437 static const struct l2c_init_data l2c220_data = { 438 .type = "L2C-220", 439 .way_size_0 = SZ_8K, 440 .num_lock = 1, 441 .enable = l2c_enable, 442 .outer_cache = { 443 .inv_range = l2c220_inv_range, 444 .clean_range = l2c220_clean_range, 445 .flush_range = l2c220_flush_range, 446 

static const struct l2c_init_data l2c220_data __initconst = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c_enable,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c210_resume,
	},
};

/*
 * L2C-310 specific code.
 *
 * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
 * and the way operations are all background tasks.  However, issuing an
 * operation while a background operation is in progress results in a
 * SLVERR response.  We can reuse:
 *
 *	__l2c210_cache_sync (using sync_reg_offset)
 *	l2c210_sync
 *	l2c210_inv_range (if 588369 is not applicable)
 *	l2c210_clean_range
 *	l2c210_flush_range (if 588369 is not applicable)
 *	l2c210_flush_all (if 727915 is not applicable)
 *
 * Errata:
 * 588369: PL310 R0P0->R1P0, fixed R2P0.
 *	Affects: all clean+invalidate operations
 *	clean and invalidate skips the invalidate step, so we need to issue
 *	separate operations.  We also require the above debug workaround
 *	enclosing this code fragment on affected parts.  On unaffected parts,
 *	we must not use this workaround without the debug register writes
 *	to avoid exposing a problem similar to 727915.
 *
 * 727915: PL310 R2P0->R3P0, fixed R3P1.
 *	Affects: clean+invalidate by way
 *	clean and invalidate by way runs in the background, and a store can
 *	hit the line between the clean operation and invalidate operation,
 *	resulting in the store being lost.
 *
 * 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2.
 *	Affects: 8x64-bit (double fill) line fetches
 *	double fill line fetches can fail to cause dirty data to be evicted
 *	from the cache before the new data overwrites the second line.
 *
 * 753970: PL310 R3P0, fixed R3P1.
 *	Affects: sync
 *	prevents merging writes after the sync operation, until another L2C
 *	operation is performed (or a number of other conditions.)
 *
 * 769419: PL310 R0P0->R3P1, fixed R3P2.
 *	Affects: store buffer
 *	store buffer is not automatically drained.
 */
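
/*
 * For context (an assumption based on the PL310 TRM, not taken from this
 * file): the 0x03 written to the debug register around the 588369 and
 * 727915 workarounds below sets the DCL and DWB bits, disabling cache
 * linefills and forcing write-through while the split clean-then-
 * invalidate sequence runs, so a linefill or merged store cannot
 * re-dirty the line in between.
 */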
496 */ 497 static void l2c310_set_debug(unsigned long val) 498 { 499 writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL); 500 } 501 502 static void l2c310_inv_range_erratum(unsigned long start, unsigned long end) 503 { 504 void __iomem *base = l2x0_base; 505 506 if ((start | end) & (CACHE_LINE_SIZE - 1)) { 507 unsigned long flags; 508 509 /* Erratum 588369 for both clean+invalidate operations */ 510 raw_spin_lock_irqsave(&l2x0_lock, flags); 511 l2c_set_debug(base, 0x03); 512 513 if (start & (CACHE_LINE_SIZE - 1)) { 514 start &= ~(CACHE_LINE_SIZE - 1); 515 writel_relaxed(start, base + L2X0_CLEAN_LINE_PA); 516 writel_relaxed(start, base + L2X0_INV_LINE_PA); 517 start += CACHE_LINE_SIZE; 518 } 519 520 if (end & (CACHE_LINE_SIZE - 1)) { 521 end &= ~(CACHE_LINE_SIZE - 1); 522 writel_relaxed(end, base + L2X0_CLEAN_LINE_PA); 523 writel_relaxed(end, base + L2X0_INV_LINE_PA); 524 } 525 526 l2c_set_debug(base, 0x00); 527 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 528 } 529 530 __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end); 531 __l2c210_cache_sync(base); 532 } 533 534 static void l2c310_flush_range_erratum(unsigned long start, unsigned long end) 535 { 536 raw_spinlock_t *lock = &l2x0_lock; 537 unsigned long flags; 538 void __iomem *base = l2x0_base; 539 540 raw_spin_lock_irqsave(lock, flags); 541 while (start < end) { 542 unsigned long blk_end = start + min(end - start, 4096UL); 543 544 l2c_set_debug(base, 0x03); 545 while (start < blk_end) { 546 writel_relaxed(start, base + L2X0_CLEAN_LINE_PA); 547 writel_relaxed(start, base + L2X0_INV_LINE_PA); 548 start += CACHE_LINE_SIZE; 549 } 550 l2c_set_debug(base, 0x00); 551 552 if (blk_end < end) { 553 raw_spin_unlock_irqrestore(lock, flags); 554 raw_spin_lock_irqsave(lock, flags); 555 } 556 } 557 raw_spin_unlock_irqrestore(lock, flags); 558 __l2c210_cache_sync(base); 559 } 560 561 static void l2c310_flush_all_erratum(void) 562 { 563 void __iomem *base = l2x0_base; 564 unsigned long flags; 565 566 raw_spin_lock_irqsave(&l2x0_lock, flags); 567 l2c_set_debug(base, 0x03); 568 __l2c_op_way(base + L2X0_CLEAN_INV_WAY); 569 l2c_set_debug(base, 0x00); 570 __l2c210_cache_sync(base); 571 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 572 } 573 574 static void __init l2c310_save(void __iomem *base) 575 { 576 unsigned revision; 577 578 l2x0_saved_regs.tag_latency = readl_relaxed(base + 579 L310_TAG_LATENCY_CTRL); 580 l2x0_saved_regs.data_latency = readl_relaxed(base + 581 L310_DATA_LATENCY_CTRL); 582 l2x0_saved_regs.filter_end = readl_relaxed(base + 583 L310_ADDR_FILTER_END); 584 l2x0_saved_regs.filter_start = readl_relaxed(base + 585 L310_ADDR_FILTER_START); 586 587 revision = readl_relaxed(base + L2X0_CACHE_ID) & 588 L2X0_CACHE_ID_RTL_MASK; 589 590 /* From r2p0, there is Prefetch offset/control register */ 591 if (revision >= L310_CACHE_ID_RTL_R2P0) 592 l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base + 593 L310_PREFETCH_CTRL); 594 595 /* From r3p0, there is Power control register */ 596 if (revision >= L310_CACHE_ID_RTL_R3P0) 597 l2x0_saved_regs.pwr_ctrl = readl_relaxed(base + 598 L310_POWER_CTRL); 599 } 600 601 static void l2c310_resume(void) 602 { 603 void __iomem *base = l2x0_base; 604 605 if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) { 606 unsigned revision; 607 608 /* restore pl310 setup */ 609 writel_relaxed(l2x0_saved_regs.tag_latency, 610 base + L310_TAG_LATENCY_CTRL); 611 writel_relaxed(l2x0_saved_regs.data_latency, 612 base + L310_DATA_LATENCY_CTRL); 613 writel_relaxed(l2x0_saved_regs.filter_end, 614 base + L310_ADDR_FILTER_END); 615 
		writel_relaxed(l2x0_saved_regs.filter_start,
			       base + L310_ADDR_FILTER_START);

		revision = readl_relaxed(base + L2X0_CACHE_ID) &
				L2X0_CACHE_ID_RTL_MASK;

		if (revision >= L310_CACHE_ID_RTL_R2P0)
			l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
				      L310_PREFETCH_CTRL);
		if (revision >= L310_CACHE_ID_RTL_R3P0)
			l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
				      L310_POWER_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
	}
}

static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
	const char *errata[8];
	unsigned n = 0;

	/* For compatibility */
	if (revision <= L310_CACHE_ID_RTL_R3P0)
		fns->set_debug = l2c310_set_debug;

	if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
	    revision < L310_CACHE_ID_RTL_R2P0 &&
	    /* For bcm compatibility */
	    fns->inv_range == l2c210_inv_range) {
		fns->inv_range = l2c310_inv_range_erratum;
		fns->flush_range = l2c310_flush_range_erratum;
		errata[n++] = "588369";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
	    revision >= L310_CACHE_ID_RTL_R2P0 &&
	    revision < L310_CACHE_ID_RTL_R3P1) {
		fns->flush_all = l2c310_flush_all_erratum;
		errata[n++] = "727915";
	}

	if (revision >= L310_CACHE_ID_RTL_R3P0 &&
	    revision < L310_CACHE_ID_RTL_R3P2) {
		u32 val = readl_relaxed(base + L310_PREFETCH_CTRL);
		/* I don't think bit23 is required here... but iMX6 does so */
		if (val & (BIT(30) | BIT(23))) {
			val &= ~(BIT(30) | BIT(23));
			l2c_write_sec(val, base, L310_PREFETCH_CTRL);
			errata[n++] = "752271";
		}
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
	    revision == L310_CACHE_ID_RTL_R3P0) {
		sync_reg_offset = L2X0_DUMMY_REG;
		errata[n++] = "753970";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
		errata[n++] = "769419";

	if (n) {
		unsigned i;

		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
		for (i = 0; i < n; i++)
			pr_cont(" %s", errata[i]);
		pr_cont(" enabled\n");
	}
}

static const struct l2c_init_data l2c310_init_fns __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.set_debug = l2c310_set_debug,
		.resume = l2c310_resume,
	},
};
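
/*
 * Illustrative decode of the revision matching in l2c310_fixup(): a part
 * whose CACHE_ID reads e.g. 0x410000c5 has part field 0xc0 (PL310) and
 * RTL field 0x05 (r3p0), so it would pick up the 727915 flush_all
 * handler, the 752271 prefetch fix-up and, with
 * CONFIG_PL310_ERRATA_753970, the dummy sync register (hypothetical ID
 * value, assuming the usual r3p0 RTL encoding).
 */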
718 */ 719 if (data->save) 720 data->save(l2x0_base); 721 722 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 723 724 aux &= aux_mask; 725 aux |= aux_val; 726 727 /* Determine the number of ways */ 728 switch (cache_id & L2X0_CACHE_ID_PART_MASK) { 729 case L2X0_CACHE_ID_PART_L310: 730 if (aux & (1 << 16)) 731 ways = 16; 732 else 733 ways = 8; 734 break; 735 736 case L2X0_CACHE_ID_PART_L210: 737 case L2X0_CACHE_ID_PART_L220: 738 ways = (aux >> 13) & 0xf; 739 break; 740 741 case AURORA_CACHE_ID: 742 ways = (aux >> 13) & 0xf; 743 ways = 2 << ((ways + 1) >> 2); 744 break; 745 746 default: 747 /* Assume unknown chips have 8 ways */ 748 ways = 8; 749 break; 750 } 751 752 l2x0_way_mask = (1 << ways) - 1; 753 754 /* 755 * way_size_0 is the size that a way_size value of zero would be 756 * given the calculation: way_size = way_size_0 << way_size_bits. 757 * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k, 758 * then way_size_0 would be 8k. 759 * 760 * L2 cache size = number of ways * way size. 761 */ 762 way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >> 763 L2C_AUX_CTRL_WAY_SIZE_SHIFT; 764 l2x0_size = ways * (data->way_size_0 << way_size_bits); 765 766 fns = data->outer_cache; 767 fns.write_sec = outer_cache.write_sec; 768 if (data->fixup) 769 data->fixup(l2x0_base, cache_id, &fns); 770 if (fns.write_sec) 771 fns.set_debug = NULL; 772 773 /* 774 * Check if l2x0 controller is already enabled. If we are booting 775 * in non-secure mode accessing the below registers will fault. 776 */ 777 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) 778 data->enable(l2x0_base, aux, data->num_lock); 779 780 /* Re-read it in case some bits are reserved. */ 781 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 782 783 /* Save the value for resuming. 

void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	default:
	case L2X0_CACHE_ID_PART_L210:
		data = &l2c210_data;
		break;

	case L2X0_CACHE_ID_PART_L220:
		data = &l2c220_data;
		break;

	case L2X0_CACHE_ID_PART_L310:
		data = &l2c310_init_fns;
		break;
	}

	__l2c_init(data, aux_val, aux_mask, cache_id);
}
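
/*
 * Typical non-DT usage from a machine file (sketch; the base address and
 * masks are illustrative, not from a real board):
 *
 *	l2x0_init(IO_ADDRESS(EXAMPLE_L2CC_PHYS), 0x30400000, 0x8200c3fe);
 *
 * aux_mask selects which hardware Auxiliary Control bits to keep, and
 * aux_val then sets the bits the platform requires.
 */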

#ifdef CONFIG_OF
static int l2_wt_override;

/*
 * Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree.
 */
static u32 cache_id_part_number_from_dt;

static void __init l2x0_of_parse(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c210_resume,
	},
};

static const struct l2c_init_data of_l2c220_data __initconst = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c210_resume,
	},
};

static void __init l2c310_of_parse(const struct device_node *np,
	u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			L310_LATENCY_CTRL_RD(tag[0] - 1) |
			L310_LATENCY_CTRL_WR(tag[1] - 1) |
			L310_LATENCY_CTRL_SETUP(tag[2] - 1),
			l2x0_base + L310_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			L310_LATENCY_CTRL_RD(data[0] - 1) |
			L310_LATENCY_CTRL_WR(data[1] - 1) |
			L310_LATENCY_CTRL_SETUP(data[2] - 1),
			l2x0_base + L310_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L310_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L310_ADDR_FILTER_EN,
			       l2x0_base + L310_ADDR_FILTER_START);
	}
}

static const struct l2c_init_data of_l2c310_data __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.set_debug = l2c310_set_debug,
		.resume = l2c310_resume,
	},
};

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}
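
/*
 * Worked example for calc_range_end() (illustrative, assuming 4K pages
 * and MAX_RANGE_SIZE of at least a page): for a range starting at
 * 0x80000f80, the end is capped at the page boundary 0x80001000, so one
 * aurora_pa_range() call covers 0x80000f80-0x80000fff and the next
 * iteration starts a fresh TLB lookup at 0x80001000.
 */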
1015 */ 1016 if (!l2_wt_override) { 1017 start &= ~(CACHE_LINE_SIZE - 1); 1018 end = ALIGN(end, CACHE_LINE_SIZE); 1019 while (start != end) { 1020 unsigned long range_end = calc_range_end(start, end); 1021 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 1022 AURORA_CLEAN_RANGE_REG); 1023 start = range_end; 1024 } 1025 } 1026 } 1027 1028 static void aurora_flush_range(unsigned long start, unsigned long end) 1029 { 1030 start &= ~(CACHE_LINE_SIZE - 1); 1031 end = ALIGN(end, CACHE_LINE_SIZE); 1032 while (start != end) { 1033 unsigned long range_end = calc_range_end(start, end); 1034 /* 1035 * If L2 is forced to WT, the L2 will always be clean and we 1036 * just need to invalidate. 1037 */ 1038 if (l2_wt_override) 1039 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 1040 AURORA_INVAL_RANGE_REG); 1041 else 1042 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 1043 AURORA_FLUSH_RANGE_REG); 1044 start = range_end; 1045 } 1046 } 1047 1048 static void aurora_save(void __iomem *base) 1049 { 1050 l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL); 1051 l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL); 1052 } 1053 1054 static void aurora_resume(void) 1055 { 1056 void __iomem *base = l2x0_base; 1057 1058 if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) { 1059 writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL); 1060 writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL); 1061 } 1062 } 1063 1064 /* 1065 * For Aurora cache in no outer mode, enable via the CP15 coprocessor 1066 * broadcasting of cache commands to L2. 1067 */ 1068 static void __init aurora_enable_no_outer(void __iomem *base, u32 aux, 1069 unsigned num_lock) 1070 { 1071 u32 u; 1072 1073 asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u)); 1074 u |= AURORA_CTRL_FW; /* Set the FW bit */ 1075 asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u)); 1076 1077 isb(); 1078 1079 l2c_enable(base, aux, num_lock); 1080 } 1081 1082 static void __init aurora_fixup(void __iomem *base, u32 cache_id, 1083 struct outer_cache_fns *fns) 1084 { 1085 sync_reg_offset = AURORA_SYNC_REG; 1086 } 1087 1088 static void __init aurora_of_parse(const struct device_node *np, 1089 u32 *aux_val, u32 *aux_mask) 1090 { 1091 u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU; 1092 u32 mask = AURORA_ACR_REPLACEMENT_MASK; 1093 1094 of_property_read_u32(np, "cache-id-part", 1095 &cache_id_part_number_from_dt); 1096 1097 /* Determine and save the write policy */ 1098 l2_wt_override = of_property_read_bool(np, "wt-override"); 1099 1100 if (l2_wt_override) { 1101 val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY; 1102 mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK; 1103 } 1104 1105 *aux_val &= ~mask; 1106 *aux_val |= val; 1107 *aux_mask &= ~mask; 1108 } 1109 1110 static const struct l2c_init_data of_aurora_with_outer_data __initconst = { 1111 .type = "Aurora", 1112 .way_size_0 = SZ_4K, 1113 .num_lock = 4, 1114 .of_parse = aurora_of_parse, 1115 .enable = l2c_enable, 1116 .fixup = aurora_fixup, 1117 .save = aurora_save, 1118 .outer_cache = { 1119 .inv_range = aurora_inv_range, 1120 .clean_range = aurora_clean_range, 1121 .flush_range = aurora_flush_range, 1122 .flush_all = l2x0_flush_all, 1123 .disable = l2x0_disable, 1124 .sync = l2x0_cache_sync, 1125 .resume = aurora_resume, 1126 }, 1127 }; 1128 1129 static const struct l2c_init_data of_aurora_no_outer_data __initconst = { 1130 .type = "Aurora", 1131 .way_size_0 = SZ_4K, 1132 .num_lock = 4, 1133 .of_parse = aurora_of_parse, 1134 .enable = aurora_enable_no_outer, 1135 .fixup = aurora_fixup, 1136 .save = 

/*
 * For certain Broadcom SoCs, depending on the address range, different
 * offsets need to be added to the address before passing it to L2 for
 * invalidation/clean/flush.
 *
 * Section   Address Range              Offset        EMI
 *   1       0x00000000 - 0x3FFFFFFF    0x80000000    VC
 *   2       0x40000000 - 0xBFFFFFFF    0x40000000    SYS
 *   3       0xC0000000 - 0xFFFFFFFF    0x80000000    VC
 *
 * When the start and end addresses fall in two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, to invalidate a range starting at 0xBFFF0000 and ending
 * at 0xC0001000, we invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and
 * 2) 0xC0000000 - 0xC0001000.
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer
 * some performance hit, but keep in mind the cross-section case is very
 * rare.
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a
 * valid use case.
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2.  Because
 * of that, the code does not need to handle section 1 at all.
 */
#define BCM_SYS_EMI_START_ADDR		0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR	0xC0000000UL

#define BCM_SYS_EMI_OFFSET		0x40000000UL
#define BCM_VC_EMI_OFFSET		0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}
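
/*
 * Worked example of the remapping above: 0x40001000 lies in the SYS EMI
 * section, so bcm_l2_phys_addr() hands the L2 0x40001000 + 0x40000000 =
 * 0x80001000, while 0xC0001000 falls in section 3 and gets the VC offset
 * instead (the 32-bit addition wraps: 0xC0001000 + 0x80000000 ->
 * 0x40001000).
 */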

static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_inv_range(new_start, new_end);
		return;
	}

	/*
	 * They cross sections, so it can only be a cross from section
	 * 2 to section 3.
	 */
	l2c210_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_clean_range(new_start, new_end);
		return;
	}

	/*
	 * They cross sections, so it can only be a cross from section
	 * 2 to section 3.
	 */
	l2c210_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		outer_cache.flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_flush_range(new_start, new_end);
		return;
	}

	/*
	 * They cross sections, so it can only be a cross from section
	 * 2 to section 3.
	 */
	l2c210_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

/* Broadcom L2C-310 is ARM's r3p2 or later, and requires no fixups */
static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.type = "BCM-L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c_enable,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};

static void __init tauros3_save(void __iomem *base)
{
	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L310_PREFETCH_CTRL);
}

static void tauros3_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
			       base + TAUROS3_AUX2_CTRL);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       base + L310_PREFETCH_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
	}
}

static const struct l2c_init_data of_tauros3_data __initconst = {
	.type = "Tauros3",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c_enable,
	.save = tauros3_save,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = tauros3_resume,
	},
};

#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2c210_data),
	L2C_ID("arm,l220-cache", of_l2c220_data),
	L2C_ID("arm,pl310-cache", of_l2c310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};
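
/*
 * Example device tree node matched by the table above (illustrative
 * values, following the l2cc binding; only the properties parsed in
 * this file matter here):
 *
 *	L2: cache-controller@fff12000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xfff12000 0x1000>;
 *		cache-unified;
 *		cache-level = <2>;
 *		arm,tag-latency = <1 1 1>;
 *		arm,data-latency = <2 2 2>;
 *	};
 */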

int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* All L2 caches are unified, so this property should be specified */
	if (!of_property_read_bool(np, "cache-unified"))
		pr_err("L2C: device tree omits to specify unified cache\n");

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	__l2c_init(data, aux_val, aux_mask, cache_id);

	return 0;
}
#endif