/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/log2.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"

struct l2c_init_data {
	const char *type;
	unsigned way_size_0;
	unsigned num_lock;
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*enable)(void __iomem *, u32, unsigned);
	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
	void (*save)(void __iomem *);
	struct outer_cache_fns outer_cache;
};

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

/*
 * Common code for all cache controllers.
 */
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

/*
 * By default, we write directly to secure registers.  Platforms must
 * override this if they are running non-secure.
 */
static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg)
{
	if (val == readl_relaxed(base + reg))
		return;
	if (outer_cache.write_sec)
		outer_cache.write_sec(val, reg);
	else
		writel_relaxed(val, base + reg);
}

/*
 * This should only be called when we have a requirement that the
 * register be written due to a work-around, as platforms running
 * in non-secure mode may not be able to access this register.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	l2c_write_sec(val, base, L2X0_DEBUG_CTRL);
}

static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	l2c_wait_mask(reg, l2x0_way_mask);
}
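
/*
 * Illustrative note: a way operation is started by writing the way
 * bitmask to the relevant register - e.g. l2x0_way_mask is 0xff for
 * an 8-way cache - and the register reads back non-zero for as long
 * as any of the selected ways is still being processed, which is why
 * l2c_wait_mask() polls until the mask clears.
 */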

static inline void l2c_unlock(void __iomem *base, unsigned num)
{
	unsigned i;

	for (i = 0; i < num; i++) {
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

/*
 * Enable the L2 cache controller.  This function must only be
 * called when the cache controller is known to be disabled.
 */
static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned long flags;

	l2c_write_sec(aux, base, L2X0_AUX_CTRL);

	l2c_unlock(base, num_lock);

	local_irq_save(flags);
	__l2c_op_way(base + L2X0_INV_WAY);
	writel_relaxed(0, base + sync_reg_offset);
	l2c_wait_mask(base + sync_reg_offset, 1);
	local_irq_restore(flags);

	l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);
}

static void l2c_disable(void)
{
	void __iomem *base = l2x0_base;

	outer_cache.flush_all();
	l2c_write_sec(0, base, L2X0_CTRL);
	dsb(st);
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	l2c_wait_mask
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	l2c_set_debug(l2x0_base, val);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	l2c_write_sec(0, l2x0_base, L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c_save(void __iomem *base)
{
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}
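
/*
 * For context (not used directly in this file): the outer_cache_fns
 * installed by __l2c_init() below are reached through the outer_*
 * helpers in <asm/outer_cache.h>, e.g. the ARM DMA mapping code does
 *
 *	outer_inv_range(paddr, paddr + size);
 *
 * which dispatches through the global 'outer_cache' struct to one of
 * the *_inv_range() implementations in this file.
 */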

/*
 * L2C-210 specific code.
 *
 * The L2C-2x0 PA, set/way and sync operations are atomic, but we must
 * ensure that no background operation is running.  The way operations
 * are all background tasks.
 *
 * While a background operation is in progress, any new operation is
 * ignored (unspecified whether this causes an error.)  Thankfully, not
 * used on SMP.
 *
 * Never has a different sync register other than L2X0_CACHE_SYNC, but
 * we use sync_reg_offset here so we can share some of this with L2C-310.
 */
static void __l2c210_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + sync_reg_offset);
}

static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end)
{
	while (start < end) {
		writel_relaxed(start, reg);
		start += CACHE_LINE_SIZE;
	}
}

static void l2c210_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_all(void)
{
	void __iomem *base = l2x0_base;

	BUG_ON(!irqs_disabled());

	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	__l2c210_cache_sync(base);
}

static void l2c210_sync(void)
{
	__l2c210_cache_sync(l2x0_base);
}

static void l2c210_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1);
}

static const struct l2c_init_data l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c210_resume,
	},
};

/*
 * L2C-220 specific code.
 *
 * All operations are background operations: they have to be waited for.
 * Conflicting requests generate a slave error (which will cause an
 * imprecise abort.)  Never uses sync_reg_offset, so we hard-code the
 * sync register here.
 *
 * However, we can re-use the l2c210_resume call.
 */
static inline void __l2c220_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
	l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
}

static void l2c220_op_way(void __iomem *base, unsigned reg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + reg);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end, unsigned long flags)
{
	raw_spinlock_t *lock = &l2x0_lock;

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2c_wait_mask(reg, 1);
			writel_relaxed(start, reg);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}

	return flags;
}
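
/*
 * A note on the flags convention above: the caller enters with
 * l2x0_lock held and interrupts disabled.  Since every L2C-220 line
 * operation has to be waited for, a large range could keep IRQs off
 * for a long time, so the lock is dropped and immediately re-taken
 * between 4K blocks purely to bound interrupt latency.  The possibly
 * refreshed flags value is returned so that the caller's final
 * raw_spin_unlock_irqrestore() stays balanced.
 */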

static void l2c220_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
			writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
		}
	}

	flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_all(void)
{
	l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
}

static void l2c220_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c220_cache_sync(l2x0_base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	/*
	 * Always enable non-secure access to the lockdown registers -
	 * we write to them as part of the L2C enable sequence so they
	 * need to be accessible.
	 */
	aux |= L220_AUX_CTRL_NS_LOCKDOWN;

	l2c_enable(base, aux, num_lock);
}

static const struct l2c_init_data l2c220_data __initconst = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c220_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c210_resume,
	},
};

/*
 * L2C-310 specific code.
 *
 * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
 * and the way operations are all background tasks.  However, issuing an
 * operation while a background operation is in progress results in a
 * SLVERR response.  We can reuse:
 *
 *	__l2c210_cache_sync (using sync_reg_offset)
 *	l2c210_sync
 *	l2c210_inv_range (if 588369 is not applicable)
 *	l2c210_clean_range
 *	l2c210_flush_range (if 588369 is not applicable)
 *	l2c210_flush_all (if 727915 is not applicable)
 *
 * Errata:
 * 588369: PL310 R0P0->R1P0, fixed R2P0.
 *	Affects: all clean+invalidate operations
 *	clean and invalidate skips the invalidate step, so we need to issue
 *	separate operations.  We also require the above debug workaround
 *	enclosing this code fragment on affected parts.  On unaffected parts,
 *	we must not use this workaround without the debug register writes
 *	to avoid exposing a problem similar to 727915.
 *
 * 727915: PL310 R2P0->R3P0, fixed R3P1.
 *	Affects: clean+invalidate by way
 *	clean and invalidate by way runs in the background, and a store can
 *	hit the line between the clean operation and invalidate operation,
 *	resulting in the store being lost.
 *
 * 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2.
 *	Affects: 8x64-bit (double fill) line fetches
 *	double fill line fetches can fail to cause dirty data to be evicted
 *	from the cache before the new data overwrites the second line.
 *
 * 753970: PL310 R3P0, fixed R3P1.
 *	Affects: sync
 *	prevents merging writes after the sync operation, until another L2C
 *	operation is performed (or a number of other conditions.)
 *
 * 769419: PL310 R0P0->R3P1, fixed R3P2.
 *	Affects: store buffer
 *	store buffer is not automatically drained.
 */

static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		unsigned long flags;

		/* Erratum 588369 for both clean+invalidate operations */
		raw_spin_lock_irqsave(&l2x0_lock, flags);
		l2c_set_debug(base, 0x03);

		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(end, base + L2X0_INV_LINE_PA);
		}

		l2c_set_debug(base, 0x00);
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
{
	raw_spinlock_t *lock = &l2x0_lock;
	unsigned long flags;
	void __iomem *base = l2x0_base;

	raw_spin_lock_irqsave(lock, flags);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		l2c_set_debug(base, 0x03);
		while (start < blk_end) {
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}
		l2c_set_debug(base, 0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}
	raw_spin_unlock_irqrestore(lock, flags);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_all_erratum(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	l2c_set_debug(base, 0x03);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	l2c_set_debug(base, 0x00);
	__l2c210_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __init l2c310_save(void __iomem *base)
{
	unsigned revision;

	l2c_save(base);

	l2x0_saved_regs.tag_latency = readl_relaxed(base +
		L310_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(base +
		L310_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(base +
		L310_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(base +
		L310_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

	/* From r2p0, there is Prefetch offset/control register */
	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
			L310_PREFETCH_CTRL);

	/* From r3p0, there is Power control register */
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
			L310_POWER_CTRL);
}

static void l2c310_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		unsigned revision;

		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			       base + L310_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			       base + L310_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			       base + L310_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			       base + L310_ADDR_FILTER_START);

		revision = readl_relaxed(base + L2X0_CACHE_ID) &
				L2X0_CACHE_ID_RTL_MASK;

		if (revision >= L310_CACHE_ID_RTL_R2P0)
			l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
				      L310_PREFETCH_CTRL);
		if (revision >= L310_CACHE_ID_RTL_R3P0)
			l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
				      L310_POWER_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);

		/* Re-enable full-line-of-zeros for Cortex-A9 */
		if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
			set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
	}
}

static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act,
	void *data)
{
	switch (act & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
		break;
	case CPU_DYING:
		set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
		break;
	}
	return NOTIFY_OK;
}
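
/*
 * An aside on the BIT(3) | BIT(2) | BIT(1) writes above and below:
 * in the Cortex-A9 auxiliary control register these are (per the
 * Cortex-A9 TRM) bit 3 = write full line of zeros mode, bit 2 = L1
 * D-side prefetch, bit 1 = L2 prefetch hint.  Full line of zeros may
 * only be enabled in the CPU once the corresponding L2C-310 aux
 * control bit is set, which is why l2c310_enable() sets it after
 * l2c_enable() and l2c310_disable() clears it before l2c_disable().
 */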

static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK;
	bool cortex_a9 = read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;

	if (rev >= L310_CACHE_ID_RTL_R2P0) {
		if (cortex_a9) {
			aux |= L310_AUX_CTRL_EARLY_BRESP;
			pr_info("L2C-310 enabling early BRESP for Cortex-A9\n");
		} else if (aux & L310_AUX_CTRL_EARLY_BRESP) {
			pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n");
			aux &= ~L310_AUX_CTRL_EARLY_BRESP;
		}
	}

	if (cortex_a9) {
		u32 aux_cur = readl_relaxed(base + L2X0_AUX_CTRL);
		u32 acr = get_auxcr();

		pr_debug("Cortex-A9 ACR=0x%08x\n", acr);

		if (acr & BIT(3) && !(aux_cur & L310_AUX_CTRL_FULL_LINE_ZERO))
			pr_err("L2C-310: full line of zeros enabled in Cortex-A9 but not L2C-310 - invalid\n");

		if (aux & L310_AUX_CTRL_FULL_LINE_ZERO && !(acr & BIT(3)))
			pr_err("L2C-310: enabling full line of zeros but not enabled in Cortex-A9\n");

		if (!(aux & L310_AUX_CTRL_FULL_LINE_ZERO) && !outer_cache.write_sec) {
			aux |= L310_AUX_CTRL_FULL_LINE_ZERO;
			pr_info("L2C-310 full line of zeros enabled for Cortex-A9\n");
		}
	} else if (aux & (L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP)) {
		pr_err("L2C-310: disabling Cortex-A9 specific feature bits\n");
		aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP);
	}

	if (aux & (L310_AUX_CTRL_DATA_PREFETCH | L310_AUX_CTRL_INSTR_PREFETCH)) {
		u32 prefetch = readl_relaxed(base + L310_PREFETCH_CTRL);

		pr_info("L2C-310 %s%s prefetch enabled, offset %u lines\n",
			aux & L310_AUX_CTRL_INSTR_PREFETCH ? "I" : "",
			aux & L310_AUX_CTRL_DATA_PREFETCH ? "D" : "",
			1 + (prefetch & L310_PREFETCH_CTRL_OFFSET_MASK));
	}

	/* r3p0 or later has power control register */
	if (rev >= L310_CACHE_ID_RTL_R3P0) {
		u32 power_ctrl;

		l2c_write_sec(L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN,
			      base, L310_POWER_CTRL);
		power_ctrl = readl_relaxed(base + L310_POWER_CTRL);
		pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n",
			power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis",
			power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
	}

	/*
	 * Always enable non-secure access to the lockdown registers -
	 * we write to them as part of the L2C enable sequence so they
	 * need to be accessible.
	 */
	aux |= L310_AUX_CTRL_NS_LOCKDOWN;

	l2c_enable(base, aux, num_lock);

	if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) {
		set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
		cpu_notifier(l2c310_cpu_enable_flz, 0);
	}
}

static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
	const char *errata[8];
	unsigned n = 0;

	if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
	    revision < L310_CACHE_ID_RTL_R2P0 &&
	    /* For bcm compatibility */
	    fns->inv_range == l2c210_inv_range) {
		fns->inv_range = l2c310_inv_range_erratum;
		fns->flush_range = l2c310_flush_range_erratum;
		errata[n++] = "588369";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
	    revision >= L310_CACHE_ID_RTL_R2P0 &&
	    revision < L310_CACHE_ID_RTL_R3P1) {
		fns->flush_all = l2c310_flush_all_erratum;
		errata[n++] = "727915";
	}

	if (revision >= L310_CACHE_ID_RTL_R3P0 &&
	    revision < L310_CACHE_ID_RTL_R3P2) {
		u32 val = readl_relaxed(base + L310_PREFETCH_CTRL);
		/* I don't think bit23 is required here... but iMX6 does so */
		if (val & (BIT(30) | BIT(23))) {
			val &= ~(BIT(30) | BIT(23));
			l2c_write_sec(val, base, L310_PREFETCH_CTRL);
			errata[n++] = "752271";
		}
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
	    revision == L310_CACHE_ID_RTL_R3P0) {
		sync_reg_offset = L2X0_DUMMY_REG;
		errata[n++] = "753970";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
		errata[n++] = "769419";

	if (n) {
		unsigned i;

		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
		for (i = 0; i < n; i++)
			pr_cont(" %s", errata[i]);
		pr_cont(" enabled\n");
	}
}

static void l2c310_disable(void)
{
	/*
	 * If full-line-of-zeros is enabled, we must first disable it in the
	 * Cortex-A9 auxiliary control register before disabling the L2 cache.
	 */
	if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
		set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));

	l2c_disable();
}

static const struct l2c_init_data l2c310_init_fns __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};

static void __init __l2c_init(const struct l2c_init_data *data,
	u32 aux_val, u32 aux_mask, u32 cache_id)
{
	struct outer_cache_fns fns;
	unsigned way_size_bits, ways;
	u32 aux, old_aux;

	/*
	 * Sanity check the aux values.  aux_mask is the bits we preserve
	 * from reading the hardware register, and aux_val is the bits we
	 * set.
	 */
	if (aux_val & aux_mask)
		pr_alert("L2C: platform provided aux values permit register corruption.\n");

	old_aux = aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
	aux &= aux_mask;
	aux |= aux_val;

	if (old_aux != aux)
		pr_warn("L2C: DT/platform modifies aux control register: 0x%08x -> 0x%08x\n",
			old_aux, aux);

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if ((aux_val | ~aux_mask) & (L2C_AUX_CTRL_WAY_SIZE_MASK | L310_AUX_CTRL_ASSOCIATIVITY_16))
			pr_warn("L2C: DT/platform tries to modify or specify cache size\n");
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		break;

	case L2X0_CACHE_ID_PART_L210:
	case L2X0_CACHE_ID_PART_L220:
		ways = (aux >> 13) & 0xf;
		break;

	case AURORA_CACHE_ID:
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
		break;

	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * way_size_0 is the size that a way_size value of zero would be
	 * given the calculation: way_size = way_size_0 << way_size_bits.
	 * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k,
	 * then way_size_0 would be 8k.
	 *
	 * L2 cache size = number of ways * way size.
	 */
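	/*
	 * Worked example (illustrative): with way_size_0 = SZ_8K and a
	 * way size field of 3, each way is 8K << 3 = 64K, so a 16-way
	 * L2C-310 would come out as l2x0_size = 16 * 64K = 1MB.
	 */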
	way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >>
			L2C_AUX_CTRL_WAY_SIZE_SHIFT;
	l2x0_size = ways * (data->way_size_0 << way_size_bits);

	fns = data->outer_cache;
	fns.write_sec = outer_cache.write_sec;
	if (data->fixup)
		data->fixup(l2x0_base, cache_id, &fns);

	/*
	 * Check if l2x0 controller is already enabled.  If we are booting
	 * in non-secure mode accessing the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		data->enable(l2x0_base, aux, data->num_lock);

	outer_cache = fns;

	/*
	 * It is strange to save the register state before initialisation,
	 * but hey, this is what the DT implementations decided to do.
	 */
	if (data->save)
		data->save(l2x0_base);

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	pr_info("%s cache controller enabled, %d ways, %d kB\n",
		data->type, ways, l2x0_size >> 10);
	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
		data->type, cache_id, aux);
}

void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	default:
	case L2X0_CACHE_ID_PART_L210:
		data = &l2c210_data;
		break;

	case L2X0_CACHE_ID_PART_L220:
		data = &l2c220_data;
		break;

	case L2X0_CACHE_ID_PART_L310:
		data = &l2c310_init_fns;
		break;
	}

	__l2c_init(data, aux_val, aux_mask, cache_id);
}

#ifdef CONFIG_OF
static int l2_wt_override;

/*
 * Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree.
 */
static u32 cache_id_part_number_from_dt;

/**
 * l2x0_cache_size_of_parse() - read cache size parameters from DT
 * @np: the device tree node for the l2 cache
 * @aux_val: pointer to machine-supplied auxiliary register value, to
 * be augmented by the call (bits to be set to 1)
 * @aux_mask: pointer to machine-supplied auxiliary register mask, to
 * be augmented by the call (bits to be set to 0)
 * @associativity: variable to return the calculated associativity in
 * @max_way_size: the maximum size in bytes for the cache ways
 */
static void __init l2x0_cache_size_of_parse(const struct device_node *np,
					    u32 *aux_val, u32 *aux_mask,
					    u32 *associativity,
					    u32 max_way_size)
{
	u32 mask = 0, val = 0;
	u32 cache_size = 0, sets = 0;
	u32 way_size_bits = 1;
	u32 way_size = 0;
	u32 block_size = 0;
	u32 line_size = 0;

	of_property_read_u32(np, "cache-size", &cache_size);
	of_property_read_u32(np, "cache-sets", &sets);
	of_property_read_u32(np, "cache-block-size", &block_size);
	of_property_read_u32(np, "cache-line-size", &line_size);

	if (!cache_size || !sets)
		return;

	/* All these l2 caches have the same line = block size actually */
	if (!line_size) {
		if (block_size) {
			/* If linesize is not given, it is equal to blocksize */
			line_size = block_size;
		} else {
			/* Fall back to known size */
			pr_warn("L2C OF: no cache block/line size given: falling back to default size %d bytes\n",
				CACHE_LINE_SIZE);
			line_size = CACHE_LINE_SIZE;
		}
	}

	if (line_size != CACHE_LINE_SIZE)
		pr_warn("L2C OF: DT supplied line size %d bytes does not match hardware line size of %d bytes\n",
			line_size, CACHE_LINE_SIZE);

	/*
	 * Since:
	 * set size = cache size / sets
	 * ways = cache size / (sets * line size)
	 * way size = cache size / (cache size / (sets * line size))
	 * way size = sets * line size
	 * associativity = ways = cache size / way size
	 */
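	/*
	 * E.g. (illustrative numbers): a 512KB, 8-way cache with
	 * 32-byte lines has 2048 sets, so way_size = 2048 * 32 = 64KB
	 * and the associativity comes out as 512KB / 64KB = 8.
	 */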
	way_size = sets * line_size;
	*associativity = cache_size / way_size;

	if (way_size > max_way_size) {
		pr_err("L2C OF: way size %dKB is too large\n", way_size >> 10);
		return;
	}

	pr_info("L2C OF: override cache size: %d bytes (%dKB)\n",
		cache_size, cache_size >> 10);
	pr_info("L2C OF: override line size: %d bytes\n", line_size);
	pr_info("L2C OF: override way size: %d bytes (%dKB)\n",
		way_size, way_size >> 10);
	pr_info("L2C OF: override associativity: %d\n", *associativity);

	/*
	 * Calculates the bits 19:17 to set for way size:
	 * 512KB -> 6, 256KB -> 5, ... 16KB -> 1
	 */
	way_size_bits = ilog2(way_size >> 10) - 3;
	if (way_size_bits < 1 || way_size_bits > 6) {
		pr_err("L2C OF: cache way size illegal: %dKB is not mapped\n",
		       way_size >> 10);
		return;
	}

	mask |= L2C_AUX_CTRL_WAY_SIZE_MASK;
	val |= (way_size_bits << L2C_AUX_CTRL_WAY_SIZE_SHIFT);

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static void __init l2x0_of_parse(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;
	u32 assoc;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
	if (assoc > 8) {
		pr_err("l2x0 of: cache settings yield too high associativity\n");
		pr_err("l2x0 of: %d calculated, max 8\n", assoc);
	} else {
		mask |= L2X0_AUX_CTRL_ASSOC_MASK;
		val |= (assoc << L2X0_AUX_CTRL_ASSOC_SHIFT);
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c210_resume,
	},
};

static const struct l2c_init_data of_l2c220_data __initconst = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c220_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c210_resume,
	},
};
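
/*
 * For reference, a hypothetical PL310 node carrying the properties
 * parsed below (property names are from the pl310 binding; the values
 * are made up):
 *
 *	L2: cache-controller@fff12000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xfff12000 0x1000>;
 *		arm,tag-latency = <2 2 2>;
 *		arm,data-latency = <3 3 3>;
 *		arm,filter-ranges = <0x80000000 0x8000000>;
 *		cache-unified;
 *	};
 */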

static void __init l2c310_of_parse(const struct device_node *np,
	u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };
	u32 assoc;

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			L310_LATENCY_CTRL_RD(tag[0] - 1) |
			L310_LATENCY_CTRL_WR(tag[1] - 1) |
			L310_LATENCY_CTRL_SETUP(tag[2] - 1),
			l2x0_base + L310_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			L310_LATENCY_CTRL_RD(data[0] - 1) |
			L310_LATENCY_CTRL_WR(data[1] - 1) |
			L310_LATENCY_CTRL_SETUP(data[2] - 1),
			l2x0_base + L310_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L310_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L310_ADDR_FILTER_EN,
			       l2x0_base + L310_ADDR_FILTER_START);
	}

	l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
	switch (assoc) {
	case 16:
		*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
		*aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16;
		*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
		break;
	case 8:
		*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
		*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
		break;
	default:
		pr_err("PL310 OF: cache settings yield illegal associativity\n");
		pr_err("PL310 OF: %d calculated, only 8 and 16 legal\n", assoc);
		break;
	}
}

static const struct l2c_init_data of_l2c310_data __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};

/*
 * This is a variant of the of_l2c310_data with .sync set to
 * NULL.  Outer sync operations are not needed when the system is I/O
 * coherent, and potentially harmful in certain situations (PCIe/PL310
 * deadlock on Armada 375/38x due to hardware I/O coherency).  The
 * other operations are kept because they are infrequent (therefore do
 * not cause the deadlock in practice) and needed for secondary CPU
 * boot and other power management activities.
 */
static const struct l2c_init_data of_l2c310_coherent_data __initconst = {
	.type = "L2C-310 Coherent",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.resume = l2c310_resume,
	},
};

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start + 1))
		end = PAGE_ALIGN(start + 1);

	return end;
}
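
/*
 * Illustrative (assuming 4K pages, and that MAX_RANGE_SIZE is not the
 * limit here): calc_range_end(0x1f80, 0x3000) caps the end to
 * PAGE_ALIGN(0x1f81) = 0x2000, so the caller issues one operation for
 * [0x1f80, 0x2000) and then loops again from 0x2000.
 */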

/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			    unsigned long offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
	writel_relaxed(end, l2x0_base + offset);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	cache_sync();
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * Round start down and end up to cache line boundaries.
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
				AURORA_INVAL_RANGE_REG);
		start = range_end;
	}
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}

static void aurora_save(void __iomem *base)
{
	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}

static void aurora_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL);
		writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL);
	}
}

/*
 * For Aurora cache in no outer mode, enable via the CP15 coprocessor
 * broadcasting of cache commands to L2.
 */
static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
	unsigned num_lock)
{
	u32 u;

	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));

	isb();

	l2c_enable(base, aux, num_lock);
}

static void __init aurora_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	sync_reg_offset = AURORA_SYNC_REG;
}

static void __init aurora_of_parse(const struct device_node *np,
				   u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			     &cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = l2c_enable,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = aurora_resume,
	},
};

static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = aurora_enable_no_outer,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.resume = aurora_resume,
	},
};

/*
 * For certain Broadcom SoCs, depending on the address range, different
 * offsets need to be added to the address before passing it to L2 for
 * invalidation/clean/flush:
 *
 *	Section	Address Range		Offset		EMI
 *	1	0x00000000 - 0x3FFFFFFF	0x80000000	VC
 *	2	0x40000000 - 0xBFFFFFFF	0x40000000	SYS
 *	3	0xC0000000 - 0xFFFFFFFF	0x80000000	VC
 *
 * When the start and end addresses fall in two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, to invalidate addresses starting at 0xBFFF0000 and
 * ending at 0xC0001000, we invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and
 * 2) 0xC0000000 - 0xC0001000.
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer
 * some performance hit, but keep in mind the cross-section case is very
 * rare.
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a
 * valid use case.
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2.  Because
 * of that the code does not need to handle section 1 at all.
 */
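
/*
 * Quick illustration (made-up addresses within the table above): a
 * section 2 address such as 0x48000000 is SYS EMI, so
 * bcm_l2_phys_addr() returns 0x48000000 + 0x40000000 = 0x88000000;
 * section 1 and section 3 addresses get 0x80000000 added instead.
 * The result is what is handed to the l2c210 range operations.
 */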
1439 * 1440 */ 1441 #define BCM_SYS_EMI_START_ADDR 0x40000000UL 1442 #define BCM_VC_EMI_SEC3_START_ADDR 0xC0000000UL 1443 1444 #define BCM_SYS_EMI_OFFSET 0x40000000UL 1445 #define BCM_VC_EMI_OFFSET 0x80000000UL 1446 1447 static inline int bcm_addr_is_sys_emi(unsigned long addr) 1448 { 1449 return (addr >= BCM_SYS_EMI_START_ADDR) && 1450 (addr < BCM_VC_EMI_SEC3_START_ADDR); 1451 } 1452 1453 static inline unsigned long bcm_l2_phys_addr(unsigned long addr) 1454 { 1455 if (bcm_addr_is_sys_emi(addr)) 1456 return addr + BCM_SYS_EMI_OFFSET; 1457 else 1458 return addr + BCM_VC_EMI_OFFSET; 1459 } 1460 1461 static void bcm_inv_range(unsigned long start, unsigned long end) 1462 { 1463 unsigned long new_start, new_end; 1464 1465 BUG_ON(start < BCM_SYS_EMI_START_ADDR); 1466 1467 if (unlikely(end <= start)) 1468 return; 1469 1470 new_start = bcm_l2_phys_addr(start); 1471 new_end = bcm_l2_phys_addr(end); 1472 1473 /* normal case, no cross section between start and end */ 1474 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { 1475 l2c210_inv_range(new_start, new_end); 1476 return; 1477 } 1478 1479 /* They cross sections, so it can only be a cross from section 1480 * 2 to section 3 1481 */ 1482 l2c210_inv_range(new_start, 1483 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); 1484 l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), 1485 new_end); 1486 } 1487 1488 static void bcm_clean_range(unsigned long start, unsigned long end) 1489 { 1490 unsigned long new_start, new_end; 1491 1492 BUG_ON(start < BCM_SYS_EMI_START_ADDR); 1493 1494 if (unlikely(end <= start)) 1495 return; 1496 1497 new_start = bcm_l2_phys_addr(start); 1498 new_end = bcm_l2_phys_addr(end); 1499 1500 /* normal case, no cross section between start and end */ 1501 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { 1502 l2c210_clean_range(new_start, new_end); 1503 return; 1504 } 1505 1506 /* They cross sections, so it can only be a cross from section 1507 * 2 to section 3 1508 */ 1509 l2c210_clean_range(new_start, 1510 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); 1511 l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), 1512 new_end); 1513 } 1514 1515 static void bcm_flush_range(unsigned long start, unsigned long end) 1516 { 1517 unsigned long new_start, new_end; 1518 1519 BUG_ON(start < BCM_SYS_EMI_START_ADDR); 1520 1521 if (unlikely(end <= start)) 1522 return; 1523 1524 if ((end - start) >= l2x0_size) { 1525 outer_cache.flush_all(); 1526 return; 1527 } 1528 1529 new_start = bcm_l2_phys_addr(start); 1530 new_end = bcm_l2_phys_addr(end); 1531 1532 /* normal case, no cross section between start and end */ 1533 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { 1534 l2c210_flush_range(new_start, new_end); 1535 return; 1536 } 1537 1538 /* They cross sections, so it can only be a cross from section 1539 * 2 to section 3 1540 */ 1541 l2c210_flush_range(new_start, 1542 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); 1543 l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), 1544 new_end); 1545 } 1546 1547 /* Broadcom L2C-310 start from ARMs R3P2 or later, and require no fixups */ 1548 static const struct l2c_init_data of_bcm_l2x0_data __initconst = { 1549 .type = "BCM-L2C-310", 1550 .way_size_0 = SZ_8K, 1551 .num_lock = 8, 1552 .of_parse = l2c310_of_parse, 1553 .enable = l2c310_enable, 1554 .save = l2c310_save, 1555 .outer_cache = { 1556 .inv_range = bcm_inv_range, 1557 .clean_range = bcm_clean_range, 1558 .flush_range = bcm_flush_range, 1559 

/* Broadcom L2C-310s are ARM r3p2 or later, and require no fixups */
static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.type = "BCM-L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};

static void __init tauros3_save(void __iomem *base)
{
	l2c_save(base);

	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L310_PREFETCH_CTRL);
}

static void tauros3_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
			       base + TAUROS3_AUX2_CTRL);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       base + L310_PREFETCH_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
	}
}

static const struct l2c_init_data of_tauros3_data __initconst = {
	.type = "Tauros3",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c_enable,
	.save = tauros3_save,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = tauros3_resume,
	},
};

#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2c210_data),
	L2C_ID("arm,l220-cache", of_l2c220_data),
	L2C_ID("arm,pl310-cache", of_l2c310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};
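
/*
 * Typical use (illustrative): a machine's init code calls
 *
 *	l2x0_of_init(0, ~0);
 *
 * to accept the hardware AUX_CTRL value unchanged; non-zero aux_val
 * bits are OR'ed in, and aux_mask bits that are zero clear the
 * corresponding hardware bits, subject to the sanity checks below
 * and in __l2c_init().
 */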

int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id, old_aux;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	if (of_device_is_compatible(np, "arm,pl310-cache") &&
	    of_property_read_bool(np, "arm,io-coherent"))
		data = &of_l2c310_coherent_data;

	old_aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
	if (old_aux != ((old_aux & aux_mask) | aux_val)) {
		pr_warn("L2C: platform modifies aux control register: 0x%08x -> 0x%08x\n",
			old_aux, (old_aux & aux_mask) | aux_val);
	} else if (aux_mask != ~0U && aux_val != 0) {
		pr_alert("L2C: platform provided aux values match the hardware, so have no effect.  Please remove them.\n");
	}

	/* All L2 caches are unified, so this property should be specified */
	if (!of_property_read_bool(np, "cache-unified"))
		pr_err("L2C: device tree omits to specify unified cache\n");

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	__l2c_init(data, aux_val, aux_mask, cache_id);

	return 0;
}
#endif