1 /* 2 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support 3 * 4 * Copyright (C) 2007 ARM Limited 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program; if not, write to the Free Software 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 */ 19 #include <linux/err.h> 20 #include <linux/init.h> 21 #include <linux/spinlock.h> 22 #include <linux/io.h> 23 #include <linux/of.h> 24 #include <linux/of_address.h> 25 26 #include <asm/cacheflush.h> 27 #include <asm/cputype.h> 28 #include <asm/hardware/cache-l2x0.h> 29 #include "cache-tauros3.h" 30 #include "cache-aurora-l2.h" 31 32 struct l2c_init_data { 33 const char *type; 34 unsigned way_size_0; 35 unsigned num_lock; 36 void (*of_parse)(const struct device_node *, u32 *, u32 *); 37 void (*enable)(void __iomem *, u32, unsigned); 38 void (*fixup)(void __iomem *, u32, struct outer_cache_fns *); 39 void (*save)(void __iomem *); 40 struct outer_cache_fns outer_cache; 41 }; 42 43 #define CACHE_LINE_SIZE 32 44 45 static void __iomem *l2x0_base; 46 static DEFINE_RAW_SPINLOCK(l2x0_lock); 47 static u32 l2x0_way_mask; /* Bitmask of active ways */ 48 static u32 l2x0_size; 49 static unsigned long sync_reg_offset = L2X0_CACHE_SYNC; 50 51 struct l2x0_regs l2x0_saved_regs; 52 53 /* 54 * Common code for all cache controllers. 55 */ 56 static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask) 57 { 58 /* wait for cache operation by line or way to complete */ 59 while (readl_relaxed(reg) & mask) 60 cpu_relax(); 61 } 62 63 /* 64 * By default, we write directly to secure registers. Platforms must 65 * override this if they are running non-secure. 66 */ 67 static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg) 68 { 69 if (val == readl_relaxed(base + reg)) 70 return; 71 if (outer_cache.write_sec) 72 outer_cache.write_sec(val, reg); 73 else 74 writel_relaxed(val, base + reg); 75 } 76 77 /* 78 * This should only be called when we have a requirement that the 79 * register be written due to a work-around, as platforms running 80 * in non-secure mode may not be able to access this register. 81 */ 82 static inline void l2c_set_debug(void __iomem *base, unsigned long val) 83 { 84 if (outer_cache.set_debug) 85 outer_cache.set_debug(val); 86 else 87 l2c_write_sec(val, base, L2X0_DEBUG_CTRL); 88 } 89 90 static void __l2c_op_way(void __iomem *reg) 91 { 92 writel_relaxed(l2x0_way_mask, reg); 93 l2c_wait_mask(reg, l2x0_way_mask); 94 } 95 96 static inline void l2c_unlock(void __iomem *base, unsigned num) 97 { 98 unsigned i; 99 100 for (i = 0; i < num; i++) { 101 writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE + 102 i * L2X0_LOCKDOWN_STRIDE); 103 writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE + 104 i * L2X0_LOCKDOWN_STRIDE); 105 } 106 } 107 108 /* 109 * Enable the L2 cache controller. This function must only be 110 * called when the cache controller is known to be disabled. 111 */ 112 static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock) 113 { 114 unsigned long flags; 115 116 l2c_write_sec(aux, base, L2X0_AUX_CTRL); 117 118 l2c_unlock(base, num_lock); 119 120 local_irq_save(flags); 121 __l2c_op_way(base + L2X0_INV_WAY); 122 writel_relaxed(0, base + sync_reg_offset); 123 l2c_wait_mask(base + sync_reg_offset, 1); 124 local_irq_restore(flags); 125 126 l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL); 127 } 128 129 static void l2c_disable(void) 130 { 131 void __iomem *base = l2x0_base; 132 133 outer_cache.flush_all(); 134 l2c_write_sec(0, base, L2X0_CTRL); 135 dsb(st); 136 } 137 138 #ifdef CONFIG_CACHE_PL310 139 static inline void cache_wait(void __iomem *reg, unsigned long mask) 140 { 141 /* cache operations by line are atomic on PL310 */ 142 } 143 #else 144 #define cache_wait l2c_wait_mask 145 #endif 146 147 static inline void cache_sync(void) 148 { 149 void __iomem *base = l2x0_base; 150 151 writel_relaxed(0, base + sync_reg_offset); 152 cache_wait(base + L2X0_CACHE_SYNC, 1); 153 } 154 155 #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915) 156 static inline void debug_writel(unsigned long val) 157 { 158 if (outer_cache.set_debug || outer_cache.write_sec) 159 l2c_set_debug(l2x0_base, val); 160 } 161 #else 162 /* Optimised out for non-errata case */ 163 static inline void debug_writel(unsigned long val) 164 { 165 } 166 #endif 167 168 static void l2x0_cache_sync(void) 169 { 170 unsigned long flags; 171 172 raw_spin_lock_irqsave(&l2x0_lock, flags); 173 cache_sync(); 174 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 175 } 176 177 static void __l2x0_flush_all(void) 178 { 179 debug_writel(0x03); 180 __l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY); 181 cache_sync(); 182 debug_writel(0x00); 183 } 184 185 static void l2x0_flush_all(void) 186 { 187 unsigned long flags; 188 189 /* clean all ways */ 190 raw_spin_lock_irqsave(&l2x0_lock, flags); 191 __l2x0_flush_all(); 192 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 193 } 194 195 static void l2x0_disable(void) 196 { 197 unsigned long flags; 198 199 raw_spin_lock_irqsave(&l2x0_lock, flags); 200 __l2x0_flush_all(); 201 l2c_write_sec(0, l2x0_base, L2X0_CTRL); 202 dsb(st); 203 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 204 } 205 206 static void l2c_save(void __iomem *base) 207 { 208 l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 209 } 210 211 /* 212 * L2C-210 specific code. 213 * 214 * The L2C-2x0 PA, set/way and sync operations are atomic, but we must 215 * ensure that no background operation is running. The way operations 216 * are all background tasks. 217 * 218 * While a background operation is in progress, any new operation is 219 * ignored (unspecified whether this causes an error.) Thankfully, not 220 * used on SMP. 221 * 222 * Never has a different sync register other than L2X0_CACHE_SYNC, but 223 * we use sync_reg_offset here so we can share some of this with L2C-310. 224 */ 225 static void __l2c210_cache_sync(void __iomem *base) 226 { 227 writel_relaxed(0, base + sync_reg_offset); 228 } 229 230 static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start, 231 unsigned long end) 232 { 233 while (start < end) { 234 writel_relaxed(start, reg); 235 start += CACHE_LINE_SIZE; 236 } 237 } 238 239 static void l2c210_inv_range(unsigned long start, unsigned long end) 240 { 241 void __iomem *base = l2x0_base; 242 243 if (start & (CACHE_LINE_SIZE - 1)) { 244 start &= ~(CACHE_LINE_SIZE - 1); 245 writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA); 246 start += CACHE_LINE_SIZE; 247 } 248 249 if (end & (CACHE_LINE_SIZE - 1)) { 250 end &= ~(CACHE_LINE_SIZE - 1); 251 writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA); 252 } 253 254 __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end); 255 __l2c210_cache_sync(base); 256 } 257 258 static void l2c210_clean_range(unsigned long start, unsigned long end) 259 { 260 void __iomem *base = l2x0_base; 261 262 start &= ~(CACHE_LINE_SIZE - 1); 263 __l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end); 264 __l2c210_cache_sync(base); 265 } 266 267 static void l2c210_flush_range(unsigned long start, unsigned long end) 268 { 269 void __iomem *base = l2x0_base; 270 271 start &= ~(CACHE_LINE_SIZE - 1); 272 __l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end); 273 __l2c210_cache_sync(base); 274 } 275 276 static void l2c210_flush_all(void) 277 { 278 void __iomem *base = l2x0_base; 279 280 BUG_ON(!irqs_disabled()); 281 282 __l2c_op_way(base + L2X0_CLEAN_INV_WAY); 283 __l2c210_cache_sync(base); 284 } 285 286 static void l2c210_sync(void) 287 { 288 __l2c210_cache_sync(l2x0_base); 289 } 290 291 static void l2c210_resume(void) 292 { 293 void __iomem *base = l2x0_base; 294 295 if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) 296 l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1); 297 } 298 299 static const struct l2c_init_data l2c210_data __initconst = { 300 .type = "L2C-210", 301 .way_size_0 = SZ_8K, 302 .num_lock = 1, 303 .enable = l2c_enable, 304 .save = l2c_save, 305 .outer_cache = { 306 .inv_range = l2c210_inv_range, 307 .clean_range = l2c210_clean_range, 308 .flush_range = l2c210_flush_range, 309 .flush_all = l2c210_flush_all, 310 .disable = l2c_disable, 311 .sync = l2c210_sync, 312 .resume = l2c210_resume, 313 }, 314 }; 315 316 /* 317 * L2C-220 specific code. 318 * 319 * All operations are background operations: they have to be waited for. 320 * Conflicting requests generate a slave error (which will cause an 321 * imprecise abort.) Never uses sync_reg_offset, so we hard-code the 322 * sync register here. 323 * 324 * However, we can re-use the l2c210_resume call. 325 */ 326 static inline void __l2c220_cache_sync(void __iomem *base) 327 { 328 writel_relaxed(0, base + L2X0_CACHE_SYNC); 329 l2c_wait_mask(base + L2X0_CACHE_SYNC, 1); 330 } 331 332 static void l2c220_op_way(void __iomem *base, unsigned reg) 333 { 334 unsigned long flags; 335 336 raw_spin_lock_irqsave(&l2x0_lock, flags); 337 __l2c_op_way(base + reg); 338 __l2c220_cache_sync(base); 339 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 340 } 341 342 static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start, 343 unsigned long end, unsigned long flags) 344 { 345 raw_spinlock_t *lock = &l2x0_lock; 346 347 while (start < end) { 348 unsigned long blk_end = start + min(end - start, 4096UL); 349 350 while (start < blk_end) { 351 l2c_wait_mask(reg, 1); 352 writel_relaxed(start, reg); 353 start += CACHE_LINE_SIZE; 354 } 355 356 if (blk_end < end) { 357 raw_spin_unlock_irqrestore(lock, flags); 358 raw_spin_lock_irqsave(lock, flags); 359 } 360 } 361 362 return flags; 363 } 364 365 static void l2c220_inv_range(unsigned long start, unsigned long end) 366 { 367 void __iomem *base = l2x0_base; 368 unsigned long flags; 369 370 raw_spin_lock_irqsave(&l2x0_lock, flags); 371 if ((start | end) & (CACHE_LINE_SIZE - 1)) { 372 if (start & (CACHE_LINE_SIZE - 1)) { 373 start &= ~(CACHE_LINE_SIZE - 1); 374 writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA); 375 start += CACHE_LINE_SIZE; 376 } 377 378 if (end & (CACHE_LINE_SIZE - 1)) { 379 end &= ~(CACHE_LINE_SIZE - 1); 380 l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1); 381 writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA); 382 } 383 } 384 385 flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA, 386 start, end, flags); 387 l2c_wait_mask(base + L2X0_INV_LINE_PA, 1); 388 __l2c220_cache_sync(base); 389 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 390 } 391 392 static void l2c220_clean_range(unsigned long start, unsigned long end) 393 { 394 void __iomem *base = l2x0_base; 395 unsigned long flags; 396 397 start &= ~(CACHE_LINE_SIZE - 1); 398 if ((end - start) >= l2x0_size) { 399 l2c220_op_way(base, L2X0_CLEAN_WAY); 400 return; 401 } 402 403 raw_spin_lock_irqsave(&l2x0_lock, flags); 404 flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA, 405 start, end, flags); 406 l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1); 407 __l2c220_cache_sync(base); 408 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 409 } 410 411 static void l2c220_flush_range(unsigned long start, unsigned long end) 412 { 413 void __iomem *base = l2x0_base; 414 unsigned long flags; 415 416 start &= ~(CACHE_LINE_SIZE - 1); 417 if ((end - start) >= l2x0_size) { 418 l2c220_op_way(base, L2X0_CLEAN_INV_WAY); 419 return; 420 } 421 422 raw_spin_lock_irqsave(&l2x0_lock, flags); 423 flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, 424 start, end, flags); 425 l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1); 426 __l2c220_cache_sync(base); 427 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 428 } 429 430 static void l2c220_flush_all(void) 431 { 432 l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY); 433 } 434 435 static void l2c220_sync(void) 436 { 437 unsigned long flags; 438 439 raw_spin_lock_irqsave(&l2x0_lock, flags); 440 __l2c220_cache_sync(l2x0_base); 441 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 442 } 443 444 static void l2c220_enable(void __iomem *base, u32 aux, unsigned num_lock) 445 { 446 /* 447 * Always enable non-secure access to the lockdown registers - 448 * we write to them as part of the L2C enable sequence so they 449 * need to be accessible. 450 */ 451 aux |= L220_AUX_CTRL_NS_LOCKDOWN; 452 453 l2c_enable(base, aux, num_lock); 454 } 455 456 static const struct l2c_init_data l2c220_data = { 457 .type = "L2C-220", 458 .way_size_0 = SZ_8K, 459 .num_lock = 1, 460 .enable = l2c220_enable, 461 .save = l2c_save, 462 .outer_cache = { 463 .inv_range = l2c220_inv_range, 464 .clean_range = l2c220_clean_range, 465 .flush_range = l2c220_flush_range, 466 .flush_all = l2c220_flush_all, 467 .disable = l2c_disable, 468 .sync = l2c220_sync, 469 .resume = l2c210_resume, 470 }, 471 }; 472 473 /* 474 * L2C-310 specific code. 475 * 476 * Very similar to L2C-210, the PA, set/way and sync operations are atomic, 477 * and the way operations are all background tasks. However, issuing an 478 * operation while a background operation is in progress results in a 479 * SLVERR response. We can reuse: 480 * 481 * __l2c210_cache_sync (using sync_reg_offset) 482 * l2c210_sync 483 * l2c210_inv_range (if 588369 is not applicable) 484 * l2c210_clean_range 485 * l2c210_flush_range (if 588369 is not applicable) 486 * l2c210_flush_all (if 727915 is not applicable) 487 * 488 * Errata: 489 * 588369: PL310 R0P0->R1P0, fixed R2P0. 490 * Affects: all clean+invalidate operations 491 * clean and invalidate skips the invalidate step, so we need to issue 492 * separate operations. We also require the above debug workaround 493 * enclosing this code fragment on affected parts. On unaffected parts, 494 * we must not use this workaround without the debug register writes 495 * to avoid exposing a problem similar to 727915. 496 * 497 * 727915: PL310 R2P0->R3P0, fixed R3P1. 498 * Affects: clean+invalidate by way 499 * clean and invalidate by way runs in the background, and a store can 500 * hit the line between the clean operation and invalidate operation, 501 * resulting in the store being lost. 502 * 503 * 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2. 504 * Affects: 8x64-bit (double fill) line fetches 505 * double fill line fetches can fail to cause dirty data to be evicted 506 * from the cache before the new data overwrites the second line. 507 * 508 * 753970: PL310 R3P0, fixed R3P1. 509 * Affects: sync 510 * prevents merging writes after the sync operation, until another L2C 511 * operation is performed (or a number of other conditions.) 512 * 513 * 769419: PL310 R0P0->R3P1, fixed R3P2. 514 * Affects: store buffer 515 * store buffer is not automatically drained. 516 */ 517 static void l2c310_set_debug(unsigned long val) 518 { 519 writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL); 520 } 521 522 static void l2c310_inv_range_erratum(unsigned long start, unsigned long end) 523 { 524 void __iomem *base = l2x0_base; 525 526 if ((start | end) & (CACHE_LINE_SIZE - 1)) { 527 unsigned long flags; 528 529 /* Erratum 588369 for both clean+invalidate operations */ 530 raw_spin_lock_irqsave(&l2x0_lock, flags); 531 l2c_set_debug(base, 0x03); 532 533 if (start & (CACHE_LINE_SIZE - 1)) { 534 start &= ~(CACHE_LINE_SIZE - 1); 535 writel_relaxed(start, base + L2X0_CLEAN_LINE_PA); 536 writel_relaxed(start, base + L2X0_INV_LINE_PA); 537 start += CACHE_LINE_SIZE; 538 } 539 540 if (end & (CACHE_LINE_SIZE - 1)) { 541 end &= ~(CACHE_LINE_SIZE - 1); 542 writel_relaxed(end, base + L2X0_CLEAN_LINE_PA); 543 writel_relaxed(end, base + L2X0_INV_LINE_PA); 544 } 545 546 l2c_set_debug(base, 0x00); 547 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 548 } 549 550 __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end); 551 __l2c210_cache_sync(base); 552 } 553 554 static void l2c310_flush_range_erratum(unsigned long start, unsigned long end) 555 { 556 raw_spinlock_t *lock = &l2x0_lock; 557 unsigned long flags; 558 void __iomem *base = l2x0_base; 559 560 raw_spin_lock_irqsave(lock, flags); 561 while (start < end) { 562 unsigned long blk_end = start + min(end - start, 4096UL); 563 564 l2c_set_debug(base, 0x03); 565 while (start < blk_end) { 566 writel_relaxed(start, base + L2X0_CLEAN_LINE_PA); 567 writel_relaxed(start, base + L2X0_INV_LINE_PA); 568 start += CACHE_LINE_SIZE; 569 } 570 l2c_set_debug(base, 0x00); 571 572 if (blk_end < end) { 573 raw_spin_unlock_irqrestore(lock, flags); 574 raw_spin_lock_irqsave(lock, flags); 575 } 576 } 577 raw_spin_unlock_irqrestore(lock, flags); 578 __l2c210_cache_sync(base); 579 } 580 581 static void l2c310_flush_all_erratum(void) 582 { 583 void __iomem *base = l2x0_base; 584 unsigned long flags; 585 586 raw_spin_lock_irqsave(&l2x0_lock, flags); 587 l2c_set_debug(base, 0x03); 588 __l2c_op_way(base + L2X0_CLEAN_INV_WAY); 589 l2c_set_debug(base, 0x00); 590 __l2c210_cache_sync(base); 591 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 592 } 593 594 static void __init l2c310_save(void __iomem *base) 595 { 596 unsigned revision; 597 598 l2c_save(base); 599 600 l2x0_saved_regs.tag_latency = readl_relaxed(base + 601 L310_TAG_LATENCY_CTRL); 602 l2x0_saved_regs.data_latency = readl_relaxed(base + 603 L310_DATA_LATENCY_CTRL); 604 l2x0_saved_regs.filter_end = readl_relaxed(base + 605 L310_ADDR_FILTER_END); 606 l2x0_saved_regs.filter_start = readl_relaxed(base + 607 L310_ADDR_FILTER_START); 608 609 revision = readl_relaxed(base + L2X0_CACHE_ID) & 610 L2X0_CACHE_ID_RTL_MASK; 611 612 /* From r2p0, there is Prefetch offset/control register */ 613 if (revision >= L310_CACHE_ID_RTL_R2P0) 614 l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base + 615 L310_PREFETCH_CTRL); 616 617 /* From r3p0, there is Power control register */ 618 if (revision >= L310_CACHE_ID_RTL_R3P0) 619 l2x0_saved_regs.pwr_ctrl = readl_relaxed(base + 620 L310_POWER_CTRL); 621 } 622 623 static void l2c310_resume(void) 624 { 625 void __iomem *base = l2x0_base; 626 627 if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) { 628 unsigned revision; 629 630 /* restore pl310 setup */ 631 writel_relaxed(l2x0_saved_regs.tag_latency, 632 base + L310_TAG_LATENCY_CTRL); 633 writel_relaxed(l2x0_saved_regs.data_latency, 634 base + L310_DATA_LATENCY_CTRL); 635 writel_relaxed(l2x0_saved_regs.filter_end, 636 base + L310_ADDR_FILTER_END); 637 writel_relaxed(l2x0_saved_regs.filter_start, 638 base + L310_ADDR_FILTER_START); 639 640 revision = readl_relaxed(base + L2X0_CACHE_ID) & 641 L2X0_CACHE_ID_RTL_MASK; 642 643 if (revision >= L310_CACHE_ID_RTL_R2P0) 644 l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base, 645 L310_PREFETCH_CTRL); 646 if (revision >= L310_CACHE_ID_RTL_R3P0) 647 l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base, 648 L310_POWER_CTRL); 649 650 l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8); 651 } 652 } 653 654 static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock) 655 { 656 unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_PART_MASK; 657 bool cortex_a9 = read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9; 658 659 if (rev >= L310_CACHE_ID_RTL_R2P0) { 660 if (cortex_a9) { 661 aux |= L310_AUX_CTRL_EARLY_BRESP; 662 pr_info("L2C-310 enabling early BRESP for Cortex-A9\n"); 663 } else if (aux & L310_AUX_CTRL_EARLY_BRESP) { 664 pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n"); 665 aux &= ~L310_AUX_CTRL_EARLY_BRESP; 666 } 667 } 668 669 /* r3p0 or later has power control register */ 670 if (rev >= L310_CACHE_ID_RTL_R3P0) { 671 u32 power_ctrl; 672 673 l2c_write_sec(L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN, 674 base, L310_POWER_CTRL); 675 power_ctrl = readl_relaxed(base + L310_POWER_CTRL); 676 pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n", 677 power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis", 678 power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis"); 679 } 680 681 /* 682 * Always enable non-secure access to the lockdown registers - 683 * we write to them as part of the L2C enable sequence so they 684 * need to be accessible. 685 */ 686 aux |= L310_AUX_CTRL_NS_LOCKDOWN; 687 688 l2c_enable(base, aux, num_lock); 689 } 690 691 static void __init l2c310_fixup(void __iomem *base, u32 cache_id, 692 struct outer_cache_fns *fns) 693 { 694 unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK; 695 const char *errata[8]; 696 unsigned n = 0; 697 698 /* For compatibility */ 699 if (revision <= L310_CACHE_ID_RTL_R3P0) 700 fns->set_debug = l2c310_set_debug; 701 702 if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) && 703 revision < L310_CACHE_ID_RTL_R2P0 && 704 /* For bcm compatibility */ 705 fns->inv_range == l2c210_inv_range) { 706 fns->inv_range = l2c310_inv_range_erratum; 707 fns->flush_range = l2c310_flush_range_erratum; 708 errata[n++] = "588369"; 709 } 710 711 if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) && 712 revision >= L310_CACHE_ID_RTL_R2P0 && 713 revision < L310_CACHE_ID_RTL_R3P1) { 714 fns->flush_all = l2c310_flush_all_erratum; 715 errata[n++] = "727915"; 716 } 717 718 if (revision >= L310_CACHE_ID_RTL_R3P0 && 719 revision < L310_CACHE_ID_RTL_R3P2) { 720 u32 val = readl_relaxed(base + L310_PREFETCH_CTRL); 721 /* I don't think bit23 is required here... but iMX6 does so */ 722 if (val & (BIT(30) | BIT(23))) { 723 val &= ~(BIT(30) | BIT(23)); 724 l2c_write_sec(val, base, L310_PREFETCH_CTRL); 725 errata[n++] = "752271"; 726 } 727 } 728 729 if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) && 730 revision == L310_CACHE_ID_RTL_R3P0) { 731 sync_reg_offset = L2X0_DUMMY_REG; 732 errata[n++] = "753970"; 733 } 734 735 if (IS_ENABLED(CONFIG_PL310_ERRATA_769419)) 736 errata[n++] = "769419"; 737 738 if (n) { 739 unsigned i; 740 741 pr_info("L2C-310 errat%s", n > 1 ? "a" : "um"); 742 for (i = 0; i < n; i++) 743 pr_cont(" %s", errata[i]); 744 pr_cont(" enabled\n"); 745 } 746 } 747 748 static const struct l2c_init_data l2c310_init_fns __initconst = { 749 .type = "L2C-310", 750 .way_size_0 = SZ_8K, 751 .num_lock = 8, 752 .enable = l2c310_enable, 753 .fixup = l2c310_fixup, 754 .save = l2c310_save, 755 .outer_cache = { 756 .inv_range = l2c210_inv_range, 757 .clean_range = l2c210_clean_range, 758 .flush_range = l2c210_flush_range, 759 .flush_all = l2c210_flush_all, 760 .disable = l2c_disable, 761 .sync = l2c210_sync, 762 .set_debug = l2c310_set_debug, 763 .resume = l2c310_resume, 764 }, 765 }; 766 767 static void __init __l2c_init(const struct l2c_init_data *data, 768 u32 aux_val, u32 aux_mask, u32 cache_id) 769 { 770 struct outer_cache_fns fns; 771 unsigned way_size_bits, ways; 772 u32 aux; 773 774 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 775 776 aux &= aux_mask; 777 aux |= aux_val; 778 779 /* Determine the number of ways */ 780 switch (cache_id & L2X0_CACHE_ID_PART_MASK) { 781 case L2X0_CACHE_ID_PART_L310: 782 if (aux & (1 << 16)) 783 ways = 16; 784 else 785 ways = 8; 786 break; 787 788 case L2X0_CACHE_ID_PART_L210: 789 case L2X0_CACHE_ID_PART_L220: 790 ways = (aux >> 13) & 0xf; 791 break; 792 793 case AURORA_CACHE_ID: 794 ways = (aux >> 13) & 0xf; 795 ways = 2 << ((ways + 1) >> 2); 796 break; 797 798 default: 799 /* Assume unknown chips have 8 ways */ 800 ways = 8; 801 break; 802 } 803 804 l2x0_way_mask = (1 << ways) - 1; 805 806 /* 807 * way_size_0 is the size that a way_size value of zero would be 808 * given the calculation: way_size = way_size_0 << way_size_bits. 809 * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k, 810 * then way_size_0 would be 8k. 811 * 812 * L2 cache size = number of ways * way size. 813 */ 814 way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >> 815 L2C_AUX_CTRL_WAY_SIZE_SHIFT; 816 l2x0_size = ways * (data->way_size_0 << way_size_bits); 817 818 fns = data->outer_cache; 819 fns.write_sec = outer_cache.write_sec; 820 if (data->fixup) 821 data->fixup(l2x0_base, cache_id, &fns); 822 if (fns.write_sec) 823 fns.set_debug = NULL; 824 825 /* 826 * Check if l2x0 controller is already enabled. If we are booting 827 * in non-secure mode accessing the below registers will fault. 828 */ 829 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) 830 data->enable(l2x0_base, aux, data->num_lock); 831 832 outer_cache = fns; 833 834 /* 835 * It is strange to save the register state before initialisation, 836 * but hey, this is what the DT implementations decided to do. 837 */ 838 if (data->save) 839 data->save(l2x0_base); 840 841 /* Re-read it in case some bits are reserved. */ 842 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 843 844 pr_info("%s cache controller enabled, %d ways, %d kB\n", 845 data->type, ways, l2x0_size >> 10); 846 pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n", 847 data->type, cache_id, aux); 848 } 849 850 void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) 851 { 852 const struct l2c_init_data *data; 853 u32 cache_id; 854 855 l2x0_base = base; 856 857 cache_id = readl_relaxed(base + L2X0_CACHE_ID); 858 859 switch (cache_id & L2X0_CACHE_ID_PART_MASK) { 860 default: 861 case L2X0_CACHE_ID_PART_L210: 862 data = &l2c210_data; 863 break; 864 865 case L2X0_CACHE_ID_PART_L220: 866 data = &l2c220_data; 867 break; 868 869 case L2X0_CACHE_ID_PART_L310: 870 data = &l2c310_init_fns; 871 break; 872 } 873 874 __l2c_init(data, aux_val, aux_mask, cache_id); 875 } 876 877 #ifdef CONFIG_OF 878 static int l2_wt_override; 879 880 /* Aurora don't have the cache ID register available, so we have to 881 * pass it though the device tree */ 882 static u32 cache_id_part_number_from_dt; 883 884 static void __init l2x0_of_parse(const struct device_node *np, 885 u32 *aux_val, u32 *aux_mask) 886 { 887 u32 data[2] = { 0, 0 }; 888 u32 tag = 0; 889 u32 dirty = 0; 890 u32 val = 0, mask = 0; 891 892 of_property_read_u32(np, "arm,tag-latency", &tag); 893 if (tag) { 894 mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK; 895 val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT; 896 } 897 898 of_property_read_u32_array(np, "arm,data-latency", 899 data, ARRAY_SIZE(data)); 900 if (data[0] && data[1]) { 901 mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK | 902 L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK; 903 val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) | 904 ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT); 905 } 906 907 of_property_read_u32(np, "arm,dirty-latency", &dirty); 908 if (dirty) { 909 mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK; 910 val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT; 911 } 912 913 *aux_val &= ~mask; 914 *aux_val |= val; 915 *aux_mask &= ~mask; 916 } 917 918 static const struct l2c_init_data of_l2c210_data __initconst = { 919 .type = "L2C-210", 920 .way_size_0 = SZ_8K, 921 .num_lock = 1, 922 .of_parse = l2x0_of_parse, 923 .enable = l2c_enable, 924 .save = l2c_save, 925 .outer_cache = { 926 .inv_range = l2c210_inv_range, 927 .clean_range = l2c210_clean_range, 928 .flush_range = l2c210_flush_range, 929 .flush_all = l2c210_flush_all, 930 .disable = l2c_disable, 931 .sync = l2c210_sync, 932 .resume = l2c210_resume, 933 }, 934 }; 935 936 static const struct l2c_init_data of_l2c220_data __initconst = { 937 .type = "L2C-220", 938 .way_size_0 = SZ_8K, 939 .num_lock = 1, 940 .of_parse = l2x0_of_parse, 941 .enable = l2c220_enable, 942 .save = l2c_save, 943 .outer_cache = { 944 .inv_range = l2c220_inv_range, 945 .clean_range = l2c220_clean_range, 946 .flush_range = l2c220_flush_range, 947 .flush_all = l2c220_flush_all, 948 .disable = l2c_disable, 949 .sync = l2c220_sync, 950 .resume = l2c210_resume, 951 }, 952 }; 953 954 static void __init l2c310_of_parse(const struct device_node *np, 955 u32 *aux_val, u32 *aux_mask) 956 { 957 u32 data[3] = { 0, 0, 0 }; 958 u32 tag[3] = { 0, 0, 0 }; 959 u32 filter[2] = { 0, 0 }; 960 961 of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag)); 962 if (tag[0] && tag[1] && tag[2]) 963 writel_relaxed( 964 L310_LATENCY_CTRL_RD(tag[0] - 1) | 965 L310_LATENCY_CTRL_WR(tag[1] - 1) | 966 L310_LATENCY_CTRL_SETUP(tag[2] - 1), 967 l2x0_base + L310_TAG_LATENCY_CTRL); 968 969 of_property_read_u32_array(np, "arm,data-latency", 970 data, ARRAY_SIZE(data)); 971 if (data[0] && data[1] && data[2]) 972 writel_relaxed( 973 L310_LATENCY_CTRL_RD(data[0] - 1) | 974 L310_LATENCY_CTRL_WR(data[1] - 1) | 975 L310_LATENCY_CTRL_SETUP(data[2] - 1), 976 l2x0_base + L310_DATA_LATENCY_CTRL); 977 978 of_property_read_u32_array(np, "arm,filter-ranges", 979 filter, ARRAY_SIZE(filter)); 980 if (filter[1]) { 981 writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M), 982 l2x0_base + L310_ADDR_FILTER_END); 983 writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L310_ADDR_FILTER_EN, 984 l2x0_base + L310_ADDR_FILTER_START); 985 } 986 } 987 988 static const struct l2c_init_data of_l2c310_data __initconst = { 989 .type = "L2C-310", 990 .way_size_0 = SZ_8K, 991 .num_lock = 8, 992 .of_parse = l2c310_of_parse, 993 .enable = l2c310_enable, 994 .fixup = l2c310_fixup, 995 .save = l2c310_save, 996 .outer_cache = { 997 .inv_range = l2c210_inv_range, 998 .clean_range = l2c210_clean_range, 999 .flush_range = l2c210_flush_range, 1000 .flush_all = l2c210_flush_all, 1001 .disable = l2c_disable, 1002 .sync = l2c210_sync, 1003 .set_debug = l2c310_set_debug, 1004 .resume = l2c310_resume, 1005 }, 1006 }; 1007 1008 /* 1009 * Note that the end addresses passed to Linux primitives are 1010 * noninclusive, while the hardware cache range operations use 1011 * inclusive start and end addresses. 1012 */ 1013 static unsigned long calc_range_end(unsigned long start, unsigned long end) 1014 { 1015 /* 1016 * Limit the number of cache lines processed at once, 1017 * since cache range operations stall the CPU pipeline 1018 * until completion. 1019 */ 1020 if (end > start + MAX_RANGE_SIZE) 1021 end = start + MAX_RANGE_SIZE; 1022 1023 /* 1024 * Cache range operations can't straddle a page boundary. 1025 */ 1026 if (end > PAGE_ALIGN(start+1)) 1027 end = PAGE_ALIGN(start+1); 1028 1029 return end; 1030 } 1031 1032 /* 1033 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT 1034 * and range operations only do a TLB lookup on the start address. 1035 */ 1036 static void aurora_pa_range(unsigned long start, unsigned long end, 1037 unsigned long offset) 1038 { 1039 unsigned long flags; 1040 1041 raw_spin_lock_irqsave(&l2x0_lock, flags); 1042 writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG); 1043 writel_relaxed(end, l2x0_base + offset); 1044 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 1045 1046 cache_sync(); 1047 } 1048 1049 static void aurora_inv_range(unsigned long start, unsigned long end) 1050 { 1051 /* 1052 * round start and end adresses up to cache line size 1053 */ 1054 start &= ~(CACHE_LINE_SIZE - 1); 1055 end = ALIGN(end, CACHE_LINE_SIZE); 1056 1057 /* 1058 * Invalidate all full cache lines between 'start' and 'end'. 1059 */ 1060 while (start < end) { 1061 unsigned long range_end = calc_range_end(start, end); 1062 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 1063 AURORA_INVAL_RANGE_REG); 1064 start = range_end; 1065 } 1066 } 1067 1068 static void aurora_clean_range(unsigned long start, unsigned long end) 1069 { 1070 /* 1071 * If L2 is forced to WT, the L2 will always be clean and we 1072 * don't need to do anything here. 1073 */ 1074 if (!l2_wt_override) { 1075 start &= ~(CACHE_LINE_SIZE - 1); 1076 end = ALIGN(end, CACHE_LINE_SIZE); 1077 while (start != end) { 1078 unsigned long range_end = calc_range_end(start, end); 1079 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 1080 AURORA_CLEAN_RANGE_REG); 1081 start = range_end; 1082 } 1083 } 1084 } 1085 1086 static void aurora_flush_range(unsigned long start, unsigned long end) 1087 { 1088 start &= ~(CACHE_LINE_SIZE - 1); 1089 end = ALIGN(end, CACHE_LINE_SIZE); 1090 while (start != end) { 1091 unsigned long range_end = calc_range_end(start, end); 1092 /* 1093 * If L2 is forced to WT, the L2 will always be clean and we 1094 * just need to invalidate. 1095 */ 1096 if (l2_wt_override) 1097 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 1098 AURORA_INVAL_RANGE_REG); 1099 else 1100 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 1101 AURORA_FLUSH_RANGE_REG); 1102 start = range_end; 1103 } 1104 } 1105 1106 static void aurora_save(void __iomem *base) 1107 { 1108 l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL); 1109 l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL); 1110 } 1111 1112 static void aurora_resume(void) 1113 { 1114 void __iomem *base = l2x0_base; 1115 1116 if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) { 1117 writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL); 1118 writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL); 1119 } 1120 } 1121 1122 /* 1123 * For Aurora cache in no outer mode, enable via the CP15 coprocessor 1124 * broadcasting of cache commands to L2. 1125 */ 1126 static void __init aurora_enable_no_outer(void __iomem *base, u32 aux, 1127 unsigned num_lock) 1128 { 1129 u32 u; 1130 1131 asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u)); 1132 u |= AURORA_CTRL_FW; /* Set the FW bit */ 1133 asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u)); 1134 1135 isb(); 1136 1137 l2c_enable(base, aux, num_lock); 1138 } 1139 1140 static void __init aurora_fixup(void __iomem *base, u32 cache_id, 1141 struct outer_cache_fns *fns) 1142 { 1143 sync_reg_offset = AURORA_SYNC_REG; 1144 } 1145 1146 static void __init aurora_of_parse(const struct device_node *np, 1147 u32 *aux_val, u32 *aux_mask) 1148 { 1149 u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU; 1150 u32 mask = AURORA_ACR_REPLACEMENT_MASK; 1151 1152 of_property_read_u32(np, "cache-id-part", 1153 &cache_id_part_number_from_dt); 1154 1155 /* Determine and save the write policy */ 1156 l2_wt_override = of_property_read_bool(np, "wt-override"); 1157 1158 if (l2_wt_override) { 1159 val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY; 1160 mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK; 1161 } 1162 1163 *aux_val &= ~mask; 1164 *aux_val |= val; 1165 *aux_mask &= ~mask; 1166 } 1167 1168 static const struct l2c_init_data of_aurora_with_outer_data __initconst = { 1169 .type = "Aurora", 1170 .way_size_0 = SZ_4K, 1171 .num_lock = 4, 1172 .of_parse = aurora_of_parse, 1173 .enable = l2c_enable, 1174 .fixup = aurora_fixup, 1175 .save = aurora_save, 1176 .outer_cache = { 1177 .inv_range = aurora_inv_range, 1178 .clean_range = aurora_clean_range, 1179 .flush_range = aurora_flush_range, 1180 .flush_all = l2x0_flush_all, 1181 .disable = l2x0_disable, 1182 .sync = l2x0_cache_sync, 1183 .resume = aurora_resume, 1184 }, 1185 }; 1186 1187 static const struct l2c_init_data of_aurora_no_outer_data __initconst = { 1188 .type = "Aurora", 1189 .way_size_0 = SZ_4K, 1190 .num_lock = 4, 1191 .of_parse = aurora_of_parse, 1192 .enable = aurora_enable_no_outer, 1193 .fixup = aurora_fixup, 1194 .save = aurora_save, 1195 .outer_cache = { 1196 .resume = aurora_resume, 1197 }, 1198 }; 1199 1200 /* 1201 * For certain Broadcom SoCs, depending on the address range, different offsets 1202 * need to be added to the address before passing it to L2 for 1203 * invalidation/clean/flush 1204 * 1205 * Section Address Range Offset EMI 1206 * 1 0x00000000 - 0x3FFFFFFF 0x80000000 VC 1207 * 2 0x40000000 - 0xBFFFFFFF 0x40000000 SYS 1208 * 3 0xC0000000 - 0xFFFFFFFF 0x80000000 VC 1209 * 1210 * When the start and end addresses have crossed two different sections, we 1211 * need to break the L2 operation into two, each within its own section. 1212 * For example, if we need to invalidate addresses starts at 0xBFFF0000 and 1213 * ends at 0xC0001000, we need do invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and 2) 1214 * 0xC0000000 - 0xC0001000 1215 * 1216 * Note 1: 1217 * By breaking a single L2 operation into two, we may potentially suffer some 1218 * performance hit, but keep in mind the cross section case is very rare 1219 * 1220 * Note 2: 1221 * We do not need to handle the case when the start address is in 1222 * Section 1 and the end address is in Section 3, since it is not a valid use 1223 * case 1224 * 1225 * Note 3: 1226 * Section 1 in practical terms can no longer be used on rev A2. Because of 1227 * that the code does not need to handle section 1 at all. 1228 * 1229 */ 1230 #define BCM_SYS_EMI_START_ADDR 0x40000000UL 1231 #define BCM_VC_EMI_SEC3_START_ADDR 0xC0000000UL 1232 1233 #define BCM_SYS_EMI_OFFSET 0x40000000UL 1234 #define BCM_VC_EMI_OFFSET 0x80000000UL 1235 1236 static inline int bcm_addr_is_sys_emi(unsigned long addr) 1237 { 1238 return (addr >= BCM_SYS_EMI_START_ADDR) && 1239 (addr < BCM_VC_EMI_SEC3_START_ADDR); 1240 } 1241 1242 static inline unsigned long bcm_l2_phys_addr(unsigned long addr) 1243 { 1244 if (bcm_addr_is_sys_emi(addr)) 1245 return addr + BCM_SYS_EMI_OFFSET; 1246 else 1247 return addr + BCM_VC_EMI_OFFSET; 1248 } 1249 1250 static void bcm_inv_range(unsigned long start, unsigned long end) 1251 { 1252 unsigned long new_start, new_end; 1253 1254 BUG_ON(start < BCM_SYS_EMI_START_ADDR); 1255 1256 if (unlikely(end <= start)) 1257 return; 1258 1259 new_start = bcm_l2_phys_addr(start); 1260 new_end = bcm_l2_phys_addr(end); 1261 1262 /* normal case, no cross section between start and end */ 1263 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { 1264 l2c210_inv_range(new_start, new_end); 1265 return; 1266 } 1267 1268 /* They cross sections, so it can only be a cross from section 1269 * 2 to section 3 1270 */ 1271 l2c210_inv_range(new_start, 1272 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); 1273 l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), 1274 new_end); 1275 } 1276 1277 static void bcm_clean_range(unsigned long start, unsigned long end) 1278 { 1279 unsigned long new_start, new_end; 1280 1281 BUG_ON(start < BCM_SYS_EMI_START_ADDR); 1282 1283 if (unlikely(end <= start)) 1284 return; 1285 1286 new_start = bcm_l2_phys_addr(start); 1287 new_end = bcm_l2_phys_addr(end); 1288 1289 /* normal case, no cross section between start and end */ 1290 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { 1291 l2c210_clean_range(new_start, new_end); 1292 return; 1293 } 1294 1295 /* They cross sections, so it can only be a cross from section 1296 * 2 to section 3 1297 */ 1298 l2c210_clean_range(new_start, 1299 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); 1300 l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), 1301 new_end); 1302 } 1303 1304 static void bcm_flush_range(unsigned long start, unsigned long end) 1305 { 1306 unsigned long new_start, new_end; 1307 1308 BUG_ON(start < BCM_SYS_EMI_START_ADDR); 1309 1310 if (unlikely(end <= start)) 1311 return; 1312 1313 if ((end - start) >= l2x0_size) { 1314 outer_cache.flush_all(); 1315 return; 1316 } 1317 1318 new_start = bcm_l2_phys_addr(start); 1319 new_end = bcm_l2_phys_addr(end); 1320 1321 /* normal case, no cross section between start and end */ 1322 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { 1323 l2c210_flush_range(new_start, new_end); 1324 return; 1325 } 1326 1327 /* They cross sections, so it can only be a cross from section 1328 * 2 to section 3 1329 */ 1330 l2c210_flush_range(new_start, 1331 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); 1332 l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), 1333 new_end); 1334 } 1335 1336 /* Broadcom L2C-310 start from ARMs R3P2 or later, and require no fixups */ 1337 static const struct l2c_init_data of_bcm_l2x0_data __initconst = { 1338 .type = "BCM-L2C-310", 1339 .way_size_0 = SZ_8K, 1340 .num_lock = 8, 1341 .of_parse = l2c310_of_parse, 1342 .enable = l2c310_enable, 1343 .save = l2c310_save, 1344 .outer_cache = { 1345 .inv_range = bcm_inv_range, 1346 .clean_range = bcm_clean_range, 1347 .flush_range = bcm_flush_range, 1348 .flush_all = l2c210_flush_all, 1349 .disable = l2c_disable, 1350 .sync = l2c210_sync, 1351 .resume = l2c310_resume, 1352 }, 1353 }; 1354 1355 static void __init tauros3_save(void __iomem *base) 1356 { 1357 l2c_save(base); 1358 1359 l2x0_saved_regs.aux2_ctrl = 1360 readl_relaxed(base + TAUROS3_AUX2_CTRL); 1361 l2x0_saved_regs.prefetch_ctrl = 1362 readl_relaxed(base + L310_PREFETCH_CTRL); 1363 } 1364 1365 static void tauros3_resume(void) 1366 { 1367 void __iomem *base = l2x0_base; 1368 1369 if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) { 1370 writel_relaxed(l2x0_saved_regs.aux2_ctrl, 1371 base + TAUROS3_AUX2_CTRL); 1372 writel_relaxed(l2x0_saved_regs.prefetch_ctrl, 1373 base + L310_PREFETCH_CTRL); 1374 1375 l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8); 1376 } 1377 } 1378 1379 static const struct l2c_init_data of_tauros3_data __initconst = { 1380 .type = "Tauros3", 1381 .way_size_0 = SZ_8K, 1382 .num_lock = 8, 1383 .enable = l2c_enable, 1384 .save = tauros3_save, 1385 /* Tauros3 broadcasts L1 cache operations to L2 */ 1386 .outer_cache = { 1387 .resume = tauros3_resume, 1388 }, 1389 }; 1390 1391 #define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns } 1392 static const struct of_device_id l2x0_ids[] __initconst = { 1393 L2C_ID("arm,l210-cache", of_l2c210_data), 1394 L2C_ID("arm,l220-cache", of_l2c220_data), 1395 L2C_ID("arm,pl310-cache", of_l2c310_data), 1396 L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data), 1397 L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data), 1398 L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data), 1399 L2C_ID("marvell,tauros3-cache", of_tauros3_data), 1400 /* Deprecated IDs */ 1401 L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data), 1402 {} 1403 }; 1404 1405 int __init l2x0_of_init(u32 aux_val, u32 aux_mask) 1406 { 1407 const struct l2c_init_data *data; 1408 struct device_node *np; 1409 struct resource res; 1410 u32 cache_id; 1411 1412 np = of_find_matching_node(NULL, l2x0_ids); 1413 if (!np) 1414 return -ENODEV; 1415 1416 if (of_address_to_resource(np, 0, &res)) 1417 return -ENODEV; 1418 1419 l2x0_base = ioremap(res.start, resource_size(&res)); 1420 if (!l2x0_base) 1421 return -ENOMEM; 1422 1423 l2x0_saved_regs.phy_base = res.start; 1424 1425 data = of_match_node(l2x0_ids, np)->data; 1426 1427 /* All L2 caches are unified, so this property should be specified */ 1428 if (!of_property_read_bool(np, "cache-unified")) 1429 pr_err("L2C: device tree omits to specify unified cache\n"); 1430 1431 /* L2 configuration can only be changed if the cache is disabled */ 1432 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) 1433 if (data->of_parse) 1434 data->of_parse(np, &aux_val, &aux_mask); 1435 1436 if (cache_id_part_number_from_dt) 1437 cache_id = cache_id_part_number_from_dt; 1438 else 1439 cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); 1440 1441 __l2c_init(data, aux_val, aux_mask, cache_id); 1442 1443 return 0; 1444 } 1445 #endif 1446