1 /* 2 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support 3 * 4 * Copyright (C) 2007 ARM Limited 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program; if not, write to the Free Software 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 */ 19 #include <linux/cpu.h> 20 #include <linux/err.h> 21 #include <linux/init.h> 22 #include <linux/smp.h> 23 #include <linux/spinlock.h> 24 #include <linux/log2.h> 25 #include <linux/io.h> 26 #include <linux/of.h> 27 #include <linux/of_address.h> 28 29 #include <asm/cacheflush.h> 30 #include <asm/cp15.h> 31 #include <asm/cputype.h> 32 #include <asm/hardware/cache-l2x0.h> 33 #include "cache-tauros3.h" 34 #include "cache-aurora-l2.h" 35 36 struct l2c_init_data { 37 const char *type; 38 unsigned way_size_0; 39 unsigned num_lock; 40 void (*of_parse)(const struct device_node *, u32 *, u32 *); 41 void (*enable)(void __iomem *, u32, unsigned); 42 void (*fixup)(void __iomem *, u32, struct outer_cache_fns *); 43 void (*save)(void __iomem *); 44 void (*configure)(void __iomem *); 45 struct outer_cache_fns outer_cache; 46 }; 47 48 #define CACHE_LINE_SIZE 32 49 50 static void __iomem *l2x0_base; 51 static const struct l2c_init_data *l2x0_data; 52 static DEFINE_RAW_SPINLOCK(l2x0_lock); 53 static u32 l2x0_way_mask; /* Bitmask of active ways */ 54 static u32 l2x0_size; 55 static unsigned long sync_reg_offset = L2X0_CACHE_SYNC; 56 57 struct l2x0_regs l2x0_saved_regs; 58 59 /* 60 * Common code for all cache controllers. 61 */ 62 static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask) 63 { 64 /* wait for cache operation by line or way to complete */ 65 while (readl_relaxed(reg) & mask) 66 cpu_relax(); 67 } 68 69 /* 70 * By default, we write directly to secure registers. Platforms must 71 * override this if they are running non-secure. 72 */ 73 static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg) 74 { 75 if (val == readl_relaxed(base + reg)) 76 return; 77 if (outer_cache.write_sec) 78 outer_cache.write_sec(val, reg); 79 else 80 writel_relaxed(val, base + reg); 81 } 82 83 /* 84 * This should only be called when we have a requirement that the 85 * register be written due to a work-around, as platforms running 86 * in non-secure mode may not be able to access this register. 87 */ 88 static inline void l2c_set_debug(void __iomem *base, unsigned long val) 89 { 90 l2c_write_sec(val, base, L2X0_DEBUG_CTRL); 91 } 92 93 static void __l2c_op_way(void __iomem *reg) 94 { 95 writel_relaxed(l2x0_way_mask, reg); 96 l2c_wait_mask(reg, l2x0_way_mask); 97 } 98 99 static inline void l2c_unlock(void __iomem *base, unsigned num) 100 { 101 unsigned i; 102 103 for (i = 0; i < num; i++) { 104 writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE + 105 i * L2X0_LOCKDOWN_STRIDE); 106 writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE + 107 i * L2X0_LOCKDOWN_STRIDE); 108 } 109 } 110 111 static void l2c_configure(void __iomem *base) 112 { 113 if (l2x0_data->configure) 114 l2x0_data->configure(base); 115 116 l2c_write_sec(l2x0_saved_regs.aux_ctrl, base, L2X0_AUX_CTRL); 117 } 118 119 /* 120 * Enable the L2 cache controller. This function must only be 121 * called when the cache controller is known to be disabled. 122 */ 123 static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock) 124 { 125 unsigned long flags; 126 127 /* Do not touch the controller if already enabled. */ 128 if (readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN) 129 return; 130 131 l2x0_saved_regs.aux_ctrl = aux; 132 l2c_configure(base); 133 134 l2c_unlock(base, num_lock); 135 136 local_irq_save(flags); 137 __l2c_op_way(base + L2X0_INV_WAY); 138 writel_relaxed(0, base + sync_reg_offset); 139 l2c_wait_mask(base + sync_reg_offset, 1); 140 local_irq_restore(flags); 141 142 l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL); 143 } 144 145 static void l2c_disable(void) 146 { 147 void __iomem *base = l2x0_base; 148 149 outer_cache.flush_all(); 150 l2c_write_sec(0, base, L2X0_CTRL); 151 dsb(st); 152 } 153 154 #ifdef CONFIG_CACHE_PL310 155 static inline void cache_wait(void __iomem *reg, unsigned long mask) 156 { 157 /* cache operations by line are atomic on PL310 */ 158 } 159 #else 160 #define cache_wait l2c_wait_mask 161 #endif 162 163 static inline void cache_sync(void) 164 { 165 void __iomem *base = l2x0_base; 166 167 writel_relaxed(0, base + sync_reg_offset); 168 cache_wait(base + L2X0_CACHE_SYNC, 1); 169 } 170 171 #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915) 172 static inline void debug_writel(unsigned long val) 173 { 174 l2c_set_debug(l2x0_base, val); 175 } 176 #else 177 /* Optimised out for non-errata case */ 178 static inline void debug_writel(unsigned long val) 179 { 180 } 181 #endif 182 183 static void l2x0_cache_sync(void) 184 { 185 unsigned long flags; 186 187 raw_spin_lock_irqsave(&l2x0_lock, flags); 188 cache_sync(); 189 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 190 } 191 192 static void __l2x0_flush_all(void) 193 { 194 debug_writel(0x03); 195 __l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY); 196 cache_sync(); 197 debug_writel(0x00); 198 } 199 200 static void l2x0_flush_all(void) 201 { 202 unsigned long flags; 203 204 /* clean all ways */ 205 raw_spin_lock_irqsave(&l2x0_lock, flags); 206 __l2x0_flush_all(); 207 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 208 } 209 210 static void l2x0_disable(void) 211 { 212 unsigned long flags; 213 214 raw_spin_lock_irqsave(&l2x0_lock, flags); 215 __l2x0_flush_all(); 216 l2c_write_sec(0, l2x0_base, L2X0_CTRL); 217 dsb(st); 218 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 219 } 220 221 static void l2c_save(void __iomem *base) 222 { 223 l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 224 } 225 226 static void l2c_resume(void) 227 { 228 l2c_enable(l2x0_base, l2x0_saved_regs.aux_ctrl, l2x0_data->num_lock); 229 } 230 231 /* 232 * L2C-210 specific code. 233 * 234 * The L2C-2x0 PA, set/way and sync operations are atomic, but we must 235 * ensure that no background operation is running. The way operations 236 * are all background tasks. 237 * 238 * While a background operation is in progress, any new operation is 239 * ignored (unspecified whether this causes an error.) Thankfully, not 240 * used on SMP. 241 * 242 * Never has a different sync register other than L2X0_CACHE_SYNC, but 243 * we use sync_reg_offset here so we can share some of this with L2C-310. 244 */ 245 static void __l2c210_cache_sync(void __iomem *base) 246 { 247 writel_relaxed(0, base + sync_reg_offset); 248 } 249 250 static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start, 251 unsigned long end) 252 { 253 while (start < end) { 254 writel_relaxed(start, reg); 255 start += CACHE_LINE_SIZE; 256 } 257 } 258 259 static void l2c210_inv_range(unsigned long start, unsigned long end) 260 { 261 void __iomem *base = l2x0_base; 262 263 if (start & (CACHE_LINE_SIZE - 1)) { 264 start &= ~(CACHE_LINE_SIZE - 1); 265 writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA); 266 start += CACHE_LINE_SIZE; 267 } 268 269 if (end & (CACHE_LINE_SIZE - 1)) { 270 end &= ~(CACHE_LINE_SIZE - 1); 271 writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA); 272 } 273 274 __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end); 275 __l2c210_cache_sync(base); 276 } 277 278 static void l2c210_clean_range(unsigned long start, unsigned long end) 279 { 280 void __iomem *base = l2x0_base; 281 282 start &= ~(CACHE_LINE_SIZE - 1); 283 __l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end); 284 __l2c210_cache_sync(base); 285 } 286 287 static void l2c210_flush_range(unsigned long start, unsigned long end) 288 { 289 void __iomem *base = l2x0_base; 290 291 start &= ~(CACHE_LINE_SIZE - 1); 292 __l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end); 293 __l2c210_cache_sync(base); 294 } 295 296 static void l2c210_flush_all(void) 297 { 298 void __iomem *base = l2x0_base; 299 300 BUG_ON(!irqs_disabled()); 301 302 __l2c_op_way(base + L2X0_CLEAN_INV_WAY); 303 __l2c210_cache_sync(base); 304 } 305 306 static void l2c210_sync(void) 307 { 308 __l2c210_cache_sync(l2x0_base); 309 } 310 311 static const struct l2c_init_data l2c210_data __initconst = { 312 .type = "L2C-210", 313 .way_size_0 = SZ_8K, 314 .num_lock = 1, 315 .enable = l2c_enable, 316 .save = l2c_save, 317 .outer_cache = { 318 .inv_range = l2c210_inv_range, 319 .clean_range = l2c210_clean_range, 320 .flush_range = l2c210_flush_range, 321 .flush_all = l2c210_flush_all, 322 .disable = l2c_disable, 323 .sync = l2c210_sync, 324 .resume = l2c_resume, 325 }, 326 }; 327 328 /* 329 * L2C-220 specific code. 330 * 331 * All operations are background operations: they have to be waited for. 332 * Conflicting requests generate a slave error (which will cause an 333 * imprecise abort.) Never uses sync_reg_offset, so we hard-code the 334 * sync register here. 335 * 336 * However, we can re-use the l2c210_resume call. 337 */ 338 static inline void __l2c220_cache_sync(void __iomem *base) 339 { 340 writel_relaxed(0, base + L2X0_CACHE_SYNC); 341 l2c_wait_mask(base + L2X0_CACHE_SYNC, 1); 342 } 343 344 static void l2c220_op_way(void __iomem *base, unsigned reg) 345 { 346 unsigned long flags; 347 348 raw_spin_lock_irqsave(&l2x0_lock, flags); 349 __l2c_op_way(base + reg); 350 __l2c220_cache_sync(base); 351 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 352 } 353 354 static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start, 355 unsigned long end, unsigned long flags) 356 { 357 raw_spinlock_t *lock = &l2x0_lock; 358 359 while (start < end) { 360 unsigned long blk_end = start + min(end - start, 4096UL); 361 362 while (start < blk_end) { 363 l2c_wait_mask(reg, 1); 364 writel_relaxed(start, reg); 365 start += CACHE_LINE_SIZE; 366 } 367 368 if (blk_end < end) { 369 raw_spin_unlock_irqrestore(lock, flags); 370 raw_spin_lock_irqsave(lock, flags); 371 } 372 } 373 374 return flags; 375 } 376 377 static void l2c220_inv_range(unsigned long start, unsigned long end) 378 { 379 void __iomem *base = l2x0_base; 380 unsigned long flags; 381 382 raw_spin_lock_irqsave(&l2x0_lock, flags); 383 if ((start | end) & (CACHE_LINE_SIZE - 1)) { 384 if (start & (CACHE_LINE_SIZE - 1)) { 385 start &= ~(CACHE_LINE_SIZE - 1); 386 writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA); 387 start += CACHE_LINE_SIZE; 388 } 389 390 if (end & (CACHE_LINE_SIZE - 1)) { 391 end &= ~(CACHE_LINE_SIZE - 1); 392 l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1); 393 writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA); 394 } 395 } 396 397 flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA, 398 start, end, flags); 399 l2c_wait_mask(base + L2X0_INV_LINE_PA, 1); 400 __l2c220_cache_sync(base); 401 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 402 } 403 404 static void l2c220_clean_range(unsigned long start, unsigned long end) 405 { 406 void __iomem *base = l2x0_base; 407 unsigned long flags; 408 409 start &= ~(CACHE_LINE_SIZE - 1); 410 if ((end - start) >= l2x0_size) { 411 l2c220_op_way(base, L2X0_CLEAN_WAY); 412 return; 413 } 414 415 raw_spin_lock_irqsave(&l2x0_lock, flags); 416 flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA, 417 start, end, flags); 418 l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1); 419 __l2c220_cache_sync(base); 420 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 421 } 422 423 static void l2c220_flush_range(unsigned long start, unsigned long end) 424 { 425 void __iomem *base = l2x0_base; 426 unsigned long flags; 427 428 start &= ~(CACHE_LINE_SIZE - 1); 429 if ((end - start) >= l2x0_size) { 430 l2c220_op_way(base, L2X0_CLEAN_INV_WAY); 431 return; 432 } 433 434 raw_spin_lock_irqsave(&l2x0_lock, flags); 435 flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, 436 start, end, flags); 437 l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1); 438 __l2c220_cache_sync(base); 439 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 440 } 441 442 static void l2c220_flush_all(void) 443 { 444 l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY); 445 } 446 447 static void l2c220_sync(void) 448 { 449 unsigned long flags; 450 451 raw_spin_lock_irqsave(&l2x0_lock, flags); 452 __l2c220_cache_sync(l2x0_base); 453 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 454 } 455 456 static void l2c220_enable(void __iomem *base, u32 aux, unsigned num_lock) 457 { 458 /* 459 * Always enable non-secure access to the lockdown registers - 460 * we write to them as part of the L2C enable sequence so they 461 * need to be accessible. 462 */ 463 aux |= L220_AUX_CTRL_NS_LOCKDOWN; 464 465 l2c_enable(base, aux, num_lock); 466 } 467 468 static const struct l2c_init_data l2c220_data = { 469 .type = "L2C-220", 470 .way_size_0 = SZ_8K, 471 .num_lock = 1, 472 .enable = l2c220_enable, 473 .save = l2c_save, 474 .outer_cache = { 475 .inv_range = l2c220_inv_range, 476 .clean_range = l2c220_clean_range, 477 .flush_range = l2c220_flush_range, 478 .flush_all = l2c220_flush_all, 479 .disable = l2c_disable, 480 .sync = l2c220_sync, 481 .resume = l2c_resume, 482 }, 483 }; 484 485 /* 486 * L2C-310 specific code. 487 * 488 * Very similar to L2C-210, the PA, set/way and sync operations are atomic, 489 * and the way operations are all background tasks. However, issuing an 490 * operation while a background operation is in progress results in a 491 * SLVERR response. We can reuse: 492 * 493 * __l2c210_cache_sync (using sync_reg_offset) 494 * l2c210_sync 495 * l2c210_inv_range (if 588369 is not applicable) 496 * l2c210_clean_range 497 * l2c210_flush_range (if 588369 is not applicable) 498 * l2c210_flush_all (if 727915 is not applicable) 499 * 500 * Errata: 501 * 588369: PL310 R0P0->R1P0, fixed R2P0. 502 * Affects: all clean+invalidate operations 503 * clean and invalidate skips the invalidate step, so we need to issue 504 * separate operations. We also require the above debug workaround 505 * enclosing this code fragment on affected parts. On unaffected parts, 506 * we must not use this workaround without the debug register writes 507 * to avoid exposing a problem similar to 727915. 508 * 509 * 727915: PL310 R2P0->R3P0, fixed R3P1. 510 * Affects: clean+invalidate by way 511 * clean and invalidate by way runs in the background, and a store can 512 * hit the line between the clean operation and invalidate operation, 513 * resulting in the store being lost. 514 * 515 * 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2. 516 * Affects: 8x64-bit (double fill) line fetches 517 * double fill line fetches can fail to cause dirty data to be evicted 518 * from the cache before the new data overwrites the second line. 519 * 520 * 753970: PL310 R3P0, fixed R3P1. 521 * Affects: sync 522 * prevents merging writes after the sync operation, until another L2C 523 * operation is performed (or a number of other conditions.) 524 * 525 * 769419: PL310 R0P0->R3P1, fixed R3P2. 526 * Affects: store buffer 527 * store buffer is not automatically drained. 528 */ 529 static void l2c310_inv_range_erratum(unsigned long start, unsigned long end) 530 { 531 void __iomem *base = l2x0_base; 532 533 if ((start | end) & (CACHE_LINE_SIZE - 1)) { 534 unsigned long flags; 535 536 /* Erratum 588369 for both clean+invalidate operations */ 537 raw_spin_lock_irqsave(&l2x0_lock, flags); 538 l2c_set_debug(base, 0x03); 539 540 if (start & (CACHE_LINE_SIZE - 1)) { 541 start &= ~(CACHE_LINE_SIZE - 1); 542 writel_relaxed(start, base + L2X0_CLEAN_LINE_PA); 543 writel_relaxed(start, base + L2X0_INV_LINE_PA); 544 start += CACHE_LINE_SIZE; 545 } 546 547 if (end & (CACHE_LINE_SIZE - 1)) { 548 end &= ~(CACHE_LINE_SIZE - 1); 549 writel_relaxed(end, base + L2X0_CLEAN_LINE_PA); 550 writel_relaxed(end, base + L2X0_INV_LINE_PA); 551 } 552 553 l2c_set_debug(base, 0x00); 554 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 555 } 556 557 __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end); 558 __l2c210_cache_sync(base); 559 } 560 561 static void l2c310_flush_range_erratum(unsigned long start, unsigned long end) 562 { 563 raw_spinlock_t *lock = &l2x0_lock; 564 unsigned long flags; 565 void __iomem *base = l2x0_base; 566 567 raw_spin_lock_irqsave(lock, flags); 568 while (start < end) { 569 unsigned long blk_end = start + min(end - start, 4096UL); 570 571 l2c_set_debug(base, 0x03); 572 while (start < blk_end) { 573 writel_relaxed(start, base + L2X0_CLEAN_LINE_PA); 574 writel_relaxed(start, base + L2X0_INV_LINE_PA); 575 start += CACHE_LINE_SIZE; 576 } 577 l2c_set_debug(base, 0x00); 578 579 if (blk_end < end) { 580 raw_spin_unlock_irqrestore(lock, flags); 581 raw_spin_lock_irqsave(lock, flags); 582 } 583 } 584 raw_spin_unlock_irqrestore(lock, flags); 585 __l2c210_cache_sync(base); 586 } 587 588 static void l2c310_flush_all_erratum(void) 589 { 590 void __iomem *base = l2x0_base; 591 unsigned long flags; 592 593 raw_spin_lock_irqsave(&l2x0_lock, flags); 594 l2c_set_debug(base, 0x03); 595 __l2c_op_way(base + L2X0_CLEAN_INV_WAY); 596 l2c_set_debug(base, 0x00); 597 __l2c210_cache_sync(base); 598 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 599 } 600 601 static void __init l2c310_save(void __iomem *base) 602 { 603 unsigned revision; 604 605 l2c_save(base); 606 607 l2x0_saved_regs.tag_latency = readl_relaxed(base + 608 L310_TAG_LATENCY_CTRL); 609 l2x0_saved_regs.data_latency = readl_relaxed(base + 610 L310_DATA_LATENCY_CTRL); 611 l2x0_saved_regs.filter_end = readl_relaxed(base + 612 L310_ADDR_FILTER_END); 613 l2x0_saved_regs.filter_start = readl_relaxed(base + 614 L310_ADDR_FILTER_START); 615 616 revision = readl_relaxed(base + L2X0_CACHE_ID) & 617 L2X0_CACHE_ID_RTL_MASK; 618 619 /* From r2p0, there is Prefetch offset/control register */ 620 if (revision >= L310_CACHE_ID_RTL_R2P0) 621 l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base + 622 L310_PREFETCH_CTRL); 623 624 /* From r3p0, there is Power control register */ 625 if (revision >= L310_CACHE_ID_RTL_R3P0) 626 l2x0_saved_regs.pwr_ctrl = readl_relaxed(base + 627 L310_POWER_CTRL); 628 } 629 630 static void l2c310_configure(void __iomem *base) 631 { 632 unsigned revision; 633 634 /* restore pl310 setup */ 635 l2c_write_sec(l2x0_saved_regs.tag_latency, base, 636 L310_TAG_LATENCY_CTRL); 637 l2c_write_sec(l2x0_saved_regs.data_latency, base, 638 L310_DATA_LATENCY_CTRL); 639 l2c_write_sec(l2x0_saved_regs.filter_end, base, 640 L310_ADDR_FILTER_END); 641 l2c_write_sec(l2x0_saved_regs.filter_start, base, 642 L310_ADDR_FILTER_START); 643 644 revision = readl_relaxed(base + L2X0_CACHE_ID) & 645 L2X0_CACHE_ID_RTL_MASK; 646 647 if (revision >= L310_CACHE_ID_RTL_R2P0) 648 l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base, 649 L310_PREFETCH_CTRL); 650 if (revision >= L310_CACHE_ID_RTL_R3P0) 651 l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base, 652 L310_POWER_CTRL); 653 } 654 655 static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, void *data) 656 { 657 switch (act & ~CPU_TASKS_FROZEN) { 658 case CPU_STARTING: 659 set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1)); 660 break; 661 case CPU_DYING: 662 set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1))); 663 break; 664 } 665 return NOTIFY_OK; 666 } 667 668 static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock) 669 { 670 unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK; 671 bool cortex_a9 = read_cpuid_part() == ARM_CPU_PART_CORTEX_A9; 672 673 if (rev >= L310_CACHE_ID_RTL_R2P0) { 674 if (cortex_a9) { 675 aux |= L310_AUX_CTRL_EARLY_BRESP; 676 pr_info("L2C-310 enabling early BRESP for Cortex-A9\n"); 677 } else if (aux & L310_AUX_CTRL_EARLY_BRESP) { 678 pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n"); 679 aux &= ~L310_AUX_CTRL_EARLY_BRESP; 680 } 681 } 682 683 if (cortex_a9) { 684 u32 aux_cur = readl_relaxed(base + L2X0_AUX_CTRL); 685 u32 acr = get_auxcr(); 686 687 pr_debug("Cortex-A9 ACR=0x%08x\n", acr); 688 689 if (acr & BIT(3) && !(aux_cur & L310_AUX_CTRL_FULL_LINE_ZERO)) 690 pr_err("L2C-310: full line of zeros enabled in Cortex-A9 but not L2C-310 - invalid\n"); 691 692 if (aux & L310_AUX_CTRL_FULL_LINE_ZERO && !(acr & BIT(3))) 693 pr_err("L2C-310: enabling full line of zeros but not enabled in Cortex-A9\n"); 694 695 if (!(aux & L310_AUX_CTRL_FULL_LINE_ZERO) && !outer_cache.write_sec) { 696 aux |= L310_AUX_CTRL_FULL_LINE_ZERO; 697 pr_info("L2C-310 full line of zeros enabled for Cortex-A9\n"); 698 } 699 } else if (aux & (L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP)) { 700 pr_err("L2C-310: disabling Cortex-A9 specific feature bits\n"); 701 aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP); 702 } 703 704 /* r3p0 or later has power control register */ 705 if (rev >= L310_CACHE_ID_RTL_R3P0) 706 l2x0_saved_regs.pwr_ctrl = L310_DYNAMIC_CLK_GATING_EN | 707 L310_STNDBY_MODE_EN; 708 709 /* 710 * Always enable non-secure access to the lockdown registers - 711 * we write to them as part of the L2C enable sequence so they 712 * need to be accessible. 713 */ 714 aux |= L310_AUX_CTRL_NS_LOCKDOWN; 715 716 l2c_enable(base, aux, num_lock); 717 718 /* Read back resulting AUX_CTRL value as it could have been altered. */ 719 aux = readl_relaxed(base + L2X0_AUX_CTRL); 720 721 if (aux & (L310_AUX_CTRL_DATA_PREFETCH | L310_AUX_CTRL_INSTR_PREFETCH)) { 722 u32 prefetch = readl_relaxed(base + L310_PREFETCH_CTRL); 723 724 pr_info("L2C-310 %s%s prefetch enabled, offset %u lines\n", 725 aux & L310_AUX_CTRL_INSTR_PREFETCH ? "I" : "", 726 aux & L310_AUX_CTRL_DATA_PREFETCH ? "D" : "", 727 1 + (prefetch & L310_PREFETCH_CTRL_OFFSET_MASK)); 728 } 729 730 /* r3p0 or later has power control register */ 731 if (rev >= L310_CACHE_ID_RTL_R3P0) { 732 u32 power_ctrl; 733 734 power_ctrl = readl_relaxed(base + L310_POWER_CTRL); 735 pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n", 736 power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis", 737 power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis"); 738 } 739 740 if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) { 741 set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1)); 742 cpu_notifier(l2c310_cpu_enable_flz, 0); 743 } 744 } 745 746 static void __init l2c310_fixup(void __iomem *base, u32 cache_id, 747 struct outer_cache_fns *fns) 748 { 749 unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK; 750 const char *errata[8]; 751 unsigned n = 0; 752 753 if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) && 754 revision < L310_CACHE_ID_RTL_R2P0 && 755 /* For bcm compatibility */ 756 fns->inv_range == l2c210_inv_range) { 757 fns->inv_range = l2c310_inv_range_erratum; 758 fns->flush_range = l2c310_flush_range_erratum; 759 errata[n++] = "588369"; 760 } 761 762 if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) && 763 revision >= L310_CACHE_ID_RTL_R2P0 && 764 revision < L310_CACHE_ID_RTL_R3P1) { 765 fns->flush_all = l2c310_flush_all_erratum; 766 errata[n++] = "727915"; 767 } 768 769 if (revision >= L310_CACHE_ID_RTL_R3P0 && 770 revision < L310_CACHE_ID_RTL_R3P2) { 771 u32 val = l2x0_saved_regs.prefetch_ctrl; 772 /* I don't think bit23 is required here... but iMX6 does so */ 773 if (val & (BIT(30) | BIT(23))) { 774 val &= ~(BIT(30) | BIT(23)); 775 l2x0_saved_regs.prefetch_ctrl = val; 776 errata[n++] = "752271"; 777 } 778 } 779 780 if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) && 781 revision == L310_CACHE_ID_RTL_R3P0) { 782 sync_reg_offset = L2X0_DUMMY_REG; 783 errata[n++] = "753970"; 784 } 785 786 if (IS_ENABLED(CONFIG_PL310_ERRATA_769419)) 787 errata[n++] = "769419"; 788 789 if (n) { 790 unsigned i; 791 792 pr_info("L2C-310 errat%s", n > 1 ? "a" : "um"); 793 for (i = 0; i < n; i++) 794 pr_cont(" %s", errata[i]); 795 pr_cont(" enabled\n"); 796 } 797 } 798 799 static void l2c310_disable(void) 800 { 801 /* 802 * If full-line-of-zeros is enabled, we must first disable it in the 803 * Cortex-A9 auxiliary control register before disabling the L2 cache. 804 */ 805 if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO) 806 set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1))); 807 808 l2c_disable(); 809 } 810 811 static void l2c310_resume(void) 812 { 813 l2c_resume(); 814 815 /* Re-enable full-line-of-zeros for Cortex-A9 */ 816 if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO) 817 set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1)); 818 } 819 820 static const struct l2c_init_data l2c310_init_fns __initconst = { 821 .type = "L2C-310", 822 .way_size_0 = SZ_8K, 823 .num_lock = 8, 824 .enable = l2c310_enable, 825 .fixup = l2c310_fixup, 826 .save = l2c310_save, 827 .configure = l2c310_configure, 828 .outer_cache = { 829 .inv_range = l2c210_inv_range, 830 .clean_range = l2c210_clean_range, 831 .flush_range = l2c210_flush_range, 832 .flush_all = l2c210_flush_all, 833 .disable = l2c310_disable, 834 .sync = l2c210_sync, 835 .resume = l2c310_resume, 836 }, 837 }; 838 839 static int __init __l2c_init(const struct l2c_init_data *data, 840 u32 aux_val, u32 aux_mask, u32 cache_id) 841 { 842 struct outer_cache_fns fns; 843 unsigned way_size_bits, ways; 844 u32 aux, old_aux; 845 846 /* 847 * Save the pointer globally so that callbacks which do not receive 848 * context from callers can access the structure. 849 */ 850 l2x0_data = kmemdup(data, sizeof(*data), GFP_KERNEL); 851 if (!l2x0_data) 852 return -ENOMEM; 853 854 /* 855 * Sanity check the aux values. aux_mask is the bits we preserve 856 * from reading the hardware register, and aux_val is the bits we 857 * set. 858 */ 859 if (aux_val & aux_mask) 860 pr_alert("L2C: platform provided aux values permit register corruption.\n"); 861 862 old_aux = aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 863 aux &= aux_mask; 864 aux |= aux_val; 865 866 if (old_aux != aux) 867 pr_warn("L2C: DT/platform modifies aux control register: 0x%08x -> 0x%08x\n", 868 old_aux, aux); 869 870 /* Determine the number of ways */ 871 switch (cache_id & L2X0_CACHE_ID_PART_MASK) { 872 case L2X0_CACHE_ID_PART_L310: 873 if ((aux_val | ~aux_mask) & (L2C_AUX_CTRL_WAY_SIZE_MASK | L310_AUX_CTRL_ASSOCIATIVITY_16)) 874 pr_warn("L2C: DT/platform tries to modify or specify cache size\n"); 875 if (aux & (1 << 16)) 876 ways = 16; 877 else 878 ways = 8; 879 break; 880 881 case L2X0_CACHE_ID_PART_L210: 882 case L2X0_CACHE_ID_PART_L220: 883 ways = (aux >> 13) & 0xf; 884 break; 885 886 case AURORA_CACHE_ID: 887 ways = (aux >> 13) & 0xf; 888 ways = 2 << ((ways + 1) >> 2); 889 break; 890 891 default: 892 /* Assume unknown chips have 8 ways */ 893 ways = 8; 894 break; 895 } 896 897 l2x0_way_mask = (1 << ways) - 1; 898 899 /* 900 * way_size_0 is the size that a way_size value of zero would be 901 * given the calculation: way_size = way_size_0 << way_size_bits. 902 * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k, 903 * then way_size_0 would be 8k. 904 * 905 * L2 cache size = number of ways * way size. 906 */ 907 way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >> 908 L2C_AUX_CTRL_WAY_SIZE_SHIFT; 909 l2x0_size = ways * (data->way_size_0 << way_size_bits); 910 911 fns = data->outer_cache; 912 fns.write_sec = outer_cache.write_sec; 913 if (data->fixup) 914 data->fixup(l2x0_base, cache_id, &fns); 915 916 /* 917 * Check if l2x0 controller is already enabled. If we are booting 918 * in non-secure mode accessing the below registers will fault. 919 */ 920 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) 921 data->enable(l2x0_base, aux, data->num_lock); 922 923 outer_cache = fns; 924 925 /* 926 * It is strange to save the register state before initialisation, 927 * but hey, this is what the DT implementations decided to do. 928 */ 929 if (data->save) 930 data->save(l2x0_base); 931 932 /* Re-read it in case some bits are reserved. */ 933 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 934 935 pr_info("%s cache controller enabled, %d ways, %d kB\n", 936 data->type, ways, l2x0_size >> 10); 937 pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n", 938 data->type, cache_id, aux); 939 940 return 0; 941 } 942 943 void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) 944 { 945 const struct l2c_init_data *data; 946 u32 cache_id; 947 948 l2x0_base = base; 949 950 cache_id = readl_relaxed(base + L2X0_CACHE_ID); 951 952 switch (cache_id & L2X0_CACHE_ID_PART_MASK) { 953 default: 954 case L2X0_CACHE_ID_PART_L210: 955 data = &l2c210_data; 956 break; 957 958 case L2X0_CACHE_ID_PART_L220: 959 data = &l2c220_data; 960 break; 961 962 case L2X0_CACHE_ID_PART_L310: 963 data = &l2c310_init_fns; 964 break; 965 } 966 967 /* Read back current (default) hardware configuration */ 968 if (data->save) 969 data->save(l2x0_base); 970 971 __l2c_init(data, aux_val, aux_mask, cache_id); 972 } 973 974 #ifdef CONFIG_OF 975 static int l2_wt_override; 976 977 /* Aurora don't have the cache ID register available, so we have to 978 * pass it though the device tree */ 979 static u32 cache_id_part_number_from_dt; 980 981 /** 982 * l2x0_cache_size_of_parse() - read cache size parameters from DT 983 * @np: the device tree node for the l2 cache 984 * @aux_val: pointer to machine-supplied auxilary register value, to 985 * be augmented by the call (bits to be set to 1) 986 * @aux_mask: pointer to machine-supplied auxilary register mask, to 987 * be augmented by the call (bits to be set to 0) 988 * @associativity: variable to return the calculated associativity in 989 * @max_way_size: the maximum size in bytes for the cache ways 990 */ 991 static int __init l2x0_cache_size_of_parse(const struct device_node *np, 992 u32 *aux_val, u32 *aux_mask, 993 u32 *associativity, 994 u32 max_way_size) 995 { 996 u32 mask = 0, val = 0; 997 u32 cache_size = 0, sets = 0; 998 u32 way_size_bits = 1; 999 u32 way_size = 0; 1000 u32 block_size = 0; 1001 u32 line_size = 0; 1002 1003 of_property_read_u32(np, "cache-size", &cache_size); 1004 of_property_read_u32(np, "cache-sets", &sets); 1005 of_property_read_u32(np, "cache-block-size", &block_size); 1006 of_property_read_u32(np, "cache-line-size", &line_size); 1007 1008 if (!cache_size || !sets) 1009 return -ENODEV; 1010 1011 /* All these l2 caches have the same line = block size actually */ 1012 if (!line_size) { 1013 if (block_size) { 1014 /* If linesize if not given, it is equal to blocksize */ 1015 line_size = block_size; 1016 } else { 1017 /* Fall back to known size */ 1018 pr_warn("L2C OF: no cache block/line size given: " 1019 "falling back to default size %d bytes\n", 1020 CACHE_LINE_SIZE); 1021 line_size = CACHE_LINE_SIZE; 1022 } 1023 } 1024 1025 if (line_size != CACHE_LINE_SIZE) 1026 pr_warn("L2C OF: DT supplied line size %d bytes does " 1027 "not match hardware line size of %d bytes\n", 1028 line_size, 1029 CACHE_LINE_SIZE); 1030 1031 /* 1032 * Since: 1033 * set size = cache size / sets 1034 * ways = cache size / (sets * line size) 1035 * way size = cache size / (cache size / (sets * line size)) 1036 * way size = sets * line size 1037 * associativity = ways = cache size / way size 1038 */ 1039 way_size = sets * line_size; 1040 *associativity = cache_size / way_size; 1041 1042 if (way_size > max_way_size) { 1043 pr_err("L2C OF: set size %dKB is too large\n", way_size); 1044 return -EINVAL; 1045 } 1046 1047 pr_info("L2C OF: override cache size: %d bytes (%dKB)\n", 1048 cache_size, cache_size >> 10); 1049 pr_info("L2C OF: override line size: %d bytes\n", line_size); 1050 pr_info("L2C OF: override way size: %d bytes (%dKB)\n", 1051 way_size, way_size >> 10); 1052 pr_info("L2C OF: override associativity: %d\n", *associativity); 1053 1054 /* 1055 * Calculates the bits 17:19 to set for way size: 1056 * 512KB -> 6, 256KB -> 5, ... 16KB -> 1 1057 */ 1058 way_size_bits = ilog2(way_size >> 10) - 3; 1059 if (way_size_bits < 1 || way_size_bits > 6) { 1060 pr_err("L2C OF: cache way size illegal: %dKB is not mapped\n", 1061 way_size); 1062 return -EINVAL; 1063 } 1064 1065 mask |= L2C_AUX_CTRL_WAY_SIZE_MASK; 1066 val |= (way_size_bits << L2C_AUX_CTRL_WAY_SIZE_SHIFT); 1067 1068 *aux_val &= ~mask; 1069 *aux_val |= val; 1070 *aux_mask &= ~mask; 1071 1072 return 0; 1073 } 1074 1075 static void __init l2x0_of_parse(const struct device_node *np, 1076 u32 *aux_val, u32 *aux_mask) 1077 { 1078 u32 data[2] = { 0, 0 }; 1079 u32 tag = 0; 1080 u32 dirty = 0; 1081 u32 val = 0, mask = 0; 1082 u32 assoc; 1083 int ret; 1084 1085 of_property_read_u32(np, "arm,tag-latency", &tag); 1086 if (tag) { 1087 mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK; 1088 val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT; 1089 } 1090 1091 of_property_read_u32_array(np, "arm,data-latency", 1092 data, ARRAY_SIZE(data)); 1093 if (data[0] && data[1]) { 1094 mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK | 1095 L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK; 1096 val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) | 1097 ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT); 1098 } 1099 1100 of_property_read_u32(np, "arm,dirty-latency", &dirty); 1101 if (dirty) { 1102 mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK; 1103 val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT; 1104 } 1105 1106 ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K); 1107 if (ret) 1108 return; 1109 1110 if (assoc > 8) { 1111 pr_err("l2x0 of: cache setting yield too high associativity\n"); 1112 pr_err("l2x0 of: %d calculated, max 8\n", assoc); 1113 } else { 1114 mask |= L2X0_AUX_CTRL_ASSOC_MASK; 1115 val |= (assoc << L2X0_AUX_CTRL_ASSOC_SHIFT); 1116 } 1117 1118 *aux_val &= ~mask; 1119 *aux_val |= val; 1120 *aux_mask &= ~mask; 1121 } 1122 1123 static const struct l2c_init_data of_l2c210_data __initconst = { 1124 .type = "L2C-210", 1125 .way_size_0 = SZ_8K, 1126 .num_lock = 1, 1127 .of_parse = l2x0_of_parse, 1128 .enable = l2c_enable, 1129 .save = l2c_save, 1130 .outer_cache = { 1131 .inv_range = l2c210_inv_range, 1132 .clean_range = l2c210_clean_range, 1133 .flush_range = l2c210_flush_range, 1134 .flush_all = l2c210_flush_all, 1135 .disable = l2c_disable, 1136 .sync = l2c210_sync, 1137 .resume = l2c_resume, 1138 }, 1139 }; 1140 1141 static const struct l2c_init_data of_l2c220_data __initconst = { 1142 .type = "L2C-220", 1143 .way_size_0 = SZ_8K, 1144 .num_lock = 1, 1145 .of_parse = l2x0_of_parse, 1146 .enable = l2c220_enable, 1147 .save = l2c_save, 1148 .outer_cache = { 1149 .inv_range = l2c220_inv_range, 1150 .clean_range = l2c220_clean_range, 1151 .flush_range = l2c220_flush_range, 1152 .flush_all = l2c220_flush_all, 1153 .disable = l2c_disable, 1154 .sync = l2c220_sync, 1155 .resume = l2c_resume, 1156 }, 1157 }; 1158 1159 static void __init l2c310_of_parse(const struct device_node *np, 1160 u32 *aux_val, u32 *aux_mask) 1161 { 1162 u32 data[3] = { 0, 0, 0 }; 1163 u32 tag[3] = { 0, 0, 0 }; 1164 u32 filter[2] = { 0, 0 }; 1165 u32 assoc; 1166 int ret; 1167 1168 of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag)); 1169 if (tag[0] && tag[1] && tag[2]) 1170 l2x0_saved_regs.tag_latency = 1171 L310_LATENCY_CTRL_RD(tag[0] - 1) | 1172 L310_LATENCY_CTRL_WR(tag[1] - 1) | 1173 L310_LATENCY_CTRL_SETUP(tag[2] - 1); 1174 1175 of_property_read_u32_array(np, "arm,data-latency", 1176 data, ARRAY_SIZE(data)); 1177 if (data[0] && data[1] && data[2]) 1178 l2x0_saved_regs.data_latency = 1179 L310_LATENCY_CTRL_RD(data[0] - 1) | 1180 L310_LATENCY_CTRL_WR(data[1] - 1) | 1181 L310_LATENCY_CTRL_SETUP(data[2] - 1); 1182 1183 of_property_read_u32_array(np, "arm,filter-ranges", 1184 filter, ARRAY_SIZE(filter)); 1185 if (filter[1]) { 1186 l2x0_saved_regs.filter_end = 1187 ALIGN(filter[0] + filter[1], SZ_1M); 1188 l2x0_saved_regs.filter_start = (filter[0] & ~(SZ_1M - 1)) 1189 | L310_ADDR_FILTER_EN; 1190 } 1191 1192 ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K); 1193 if (ret) 1194 return; 1195 1196 switch (assoc) { 1197 case 16: 1198 *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; 1199 *aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16; 1200 *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; 1201 break; 1202 case 8: 1203 *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; 1204 *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; 1205 break; 1206 default: 1207 pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n", 1208 assoc); 1209 break; 1210 } 1211 } 1212 1213 static const struct l2c_init_data of_l2c310_data __initconst = { 1214 .type = "L2C-310", 1215 .way_size_0 = SZ_8K, 1216 .num_lock = 8, 1217 .of_parse = l2c310_of_parse, 1218 .enable = l2c310_enable, 1219 .fixup = l2c310_fixup, 1220 .save = l2c310_save, 1221 .configure = l2c310_configure, 1222 .outer_cache = { 1223 .inv_range = l2c210_inv_range, 1224 .clean_range = l2c210_clean_range, 1225 .flush_range = l2c210_flush_range, 1226 .flush_all = l2c210_flush_all, 1227 .disable = l2c310_disable, 1228 .sync = l2c210_sync, 1229 .resume = l2c310_resume, 1230 }, 1231 }; 1232 1233 /* 1234 * This is a variant of the of_l2c310_data with .sync set to 1235 * NULL. Outer sync operations are not needed when the system is I/O 1236 * coherent, and potentially harmful in certain situations (PCIe/PL310 1237 * deadlock on Armada 375/38x due to hardware I/O coherency). The 1238 * other operations are kept because they are infrequent (therefore do 1239 * not cause the deadlock in practice) and needed for secondary CPU 1240 * boot and other power management activities. 1241 */ 1242 static const struct l2c_init_data of_l2c310_coherent_data __initconst = { 1243 .type = "L2C-310 Coherent", 1244 .way_size_0 = SZ_8K, 1245 .num_lock = 8, 1246 .of_parse = l2c310_of_parse, 1247 .enable = l2c310_enable, 1248 .fixup = l2c310_fixup, 1249 .save = l2c310_save, 1250 .configure = l2c310_configure, 1251 .outer_cache = { 1252 .inv_range = l2c210_inv_range, 1253 .clean_range = l2c210_clean_range, 1254 .flush_range = l2c210_flush_range, 1255 .flush_all = l2c210_flush_all, 1256 .disable = l2c310_disable, 1257 .resume = l2c310_resume, 1258 }, 1259 }; 1260 1261 /* 1262 * Note that the end addresses passed to Linux primitives are 1263 * noninclusive, while the hardware cache range operations use 1264 * inclusive start and end addresses. 1265 */ 1266 static unsigned long calc_range_end(unsigned long start, unsigned long end) 1267 { 1268 /* 1269 * Limit the number of cache lines processed at once, 1270 * since cache range operations stall the CPU pipeline 1271 * until completion. 1272 */ 1273 if (end > start + MAX_RANGE_SIZE) 1274 end = start + MAX_RANGE_SIZE; 1275 1276 /* 1277 * Cache range operations can't straddle a page boundary. 1278 */ 1279 if (end > PAGE_ALIGN(start+1)) 1280 end = PAGE_ALIGN(start+1); 1281 1282 return end; 1283 } 1284 1285 /* 1286 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT 1287 * and range operations only do a TLB lookup on the start address. 1288 */ 1289 static void aurora_pa_range(unsigned long start, unsigned long end, 1290 unsigned long offset) 1291 { 1292 unsigned long flags; 1293 1294 raw_spin_lock_irqsave(&l2x0_lock, flags); 1295 writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG); 1296 writel_relaxed(end, l2x0_base + offset); 1297 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 1298 1299 cache_sync(); 1300 } 1301 1302 static void aurora_inv_range(unsigned long start, unsigned long end) 1303 { 1304 /* 1305 * round start and end adresses up to cache line size 1306 */ 1307 start &= ~(CACHE_LINE_SIZE - 1); 1308 end = ALIGN(end, CACHE_LINE_SIZE); 1309 1310 /* 1311 * Invalidate all full cache lines between 'start' and 'end'. 1312 */ 1313 while (start < end) { 1314 unsigned long range_end = calc_range_end(start, end); 1315 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 1316 AURORA_INVAL_RANGE_REG); 1317 start = range_end; 1318 } 1319 } 1320 1321 static void aurora_clean_range(unsigned long start, unsigned long end) 1322 { 1323 /* 1324 * If L2 is forced to WT, the L2 will always be clean and we 1325 * don't need to do anything here. 1326 */ 1327 if (!l2_wt_override) { 1328 start &= ~(CACHE_LINE_SIZE - 1); 1329 end = ALIGN(end, CACHE_LINE_SIZE); 1330 while (start != end) { 1331 unsigned long range_end = calc_range_end(start, end); 1332 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 1333 AURORA_CLEAN_RANGE_REG); 1334 start = range_end; 1335 } 1336 } 1337 } 1338 1339 static void aurora_flush_range(unsigned long start, unsigned long end) 1340 { 1341 start &= ~(CACHE_LINE_SIZE - 1); 1342 end = ALIGN(end, CACHE_LINE_SIZE); 1343 while (start != end) { 1344 unsigned long range_end = calc_range_end(start, end); 1345 /* 1346 * If L2 is forced to WT, the L2 will always be clean and we 1347 * just need to invalidate. 1348 */ 1349 if (l2_wt_override) 1350 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 1351 AURORA_INVAL_RANGE_REG); 1352 else 1353 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 1354 AURORA_FLUSH_RANGE_REG); 1355 start = range_end; 1356 } 1357 } 1358 1359 static void aurora_save(void __iomem *base) 1360 { 1361 l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL); 1362 l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL); 1363 } 1364 1365 /* 1366 * For Aurora cache in no outer mode, enable via the CP15 coprocessor 1367 * broadcasting of cache commands to L2. 1368 */ 1369 static void __init aurora_enable_no_outer(void __iomem *base, u32 aux, 1370 unsigned num_lock) 1371 { 1372 u32 u; 1373 1374 asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u)); 1375 u |= AURORA_CTRL_FW; /* Set the FW bit */ 1376 asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u)); 1377 1378 isb(); 1379 1380 l2c_enable(base, aux, num_lock); 1381 } 1382 1383 static void __init aurora_fixup(void __iomem *base, u32 cache_id, 1384 struct outer_cache_fns *fns) 1385 { 1386 sync_reg_offset = AURORA_SYNC_REG; 1387 } 1388 1389 static void __init aurora_of_parse(const struct device_node *np, 1390 u32 *aux_val, u32 *aux_mask) 1391 { 1392 u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU; 1393 u32 mask = AURORA_ACR_REPLACEMENT_MASK; 1394 1395 of_property_read_u32(np, "cache-id-part", 1396 &cache_id_part_number_from_dt); 1397 1398 /* Determine and save the write policy */ 1399 l2_wt_override = of_property_read_bool(np, "wt-override"); 1400 1401 if (l2_wt_override) { 1402 val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY; 1403 mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK; 1404 } 1405 1406 *aux_val &= ~mask; 1407 *aux_val |= val; 1408 *aux_mask &= ~mask; 1409 } 1410 1411 static const struct l2c_init_data of_aurora_with_outer_data __initconst = { 1412 .type = "Aurora", 1413 .way_size_0 = SZ_4K, 1414 .num_lock = 4, 1415 .of_parse = aurora_of_parse, 1416 .enable = l2c_enable, 1417 .fixup = aurora_fixup, 1418 .save = aurora_save, 1419 .outer_cache = { 1420 .inv_range = aurora_inv_range, 1421 .clean_range = aurora_clean_range, 1422 .flush_range = aurora_flush_range, 1423 .flush_all = l2x0_flush_all, 1424 .disable = l2x0_disable, 1425 .sync = l2x0_cache_sync, 1426 .resume = l2c_resume, 1427 }, 1428 }; 1429 1430 static const struct l2c_init_data of_aurora_no_outer_data __initconst = { 1431 .type = "Aurora", 1432 .way_size_0 = SZ_4K, 1433 .num_lock = 4, 1434 .of_parse = aurora_of_parse, 1435 .enable = aurora_enable_no_outer, 1436 .fixup = aurora_fixup, 1437 .save = aurora_save, 1438 .outer_cache = { 1439 .resume = l2c_resume, 1440 }, 1441 }; 1442 1443 /* 1444 * For certain Broadcom SoCs, depending on the address range, different offsets 1445 * need to be added to the address before passing it to L2 for 1446 * invalidation/clean/flush 1447 * 1448 * Section Address Range Offset EMI 1449 * 1 0x00000000 - 0x3FFFFFFF 0x80000000 VC 1450 * 2 0x40000000 - 0xBFFFFFFF 0x40000000 SYS 1451 * 3 0xC0000000 - 0xFFFFFFFF 0x80000000 VC 1452 * 1453 * When the start and end addresses have crossed two different sections, we 1454 * need to break the L2 operation into two, each within its own section. 1455 * For example, if we need to invalidate addresses starts at 0xBFFF0000 and 1456 * ends at 0xC0001000, we need do invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and 2) 1457 * 0xC0000000 - 0xC0001000 1458 * 1459 * Note 1: 1460 * By breaking a single L2 operation into two, we may potentially suffer some 1461 * performance hit, but keep in mind the cross section case is very rare 1462 * 1463 * Note 2: 1464 * We do not need to handle the case when the start address is in 1465 * Section 1 and the end address is in Section 3, since it is not a valid use 1466 * case 1467 * 1468 * Note 3: 1469 * Section 1 in practical terms can no longer be used on rev A2. Because of 1470 * that the code does not need to handle section 1 at all. 1471 * 1472 */ 1473 #define BCM_SYS_EMI_START_ADDR 0x40000000UL 1474 #define BCM_VC_EMI_SEC3_START_ADDR 0xC0000000UL 1475 1476 #define BCM_SYS_EMI_OFFSET 0x40000000UL 1477 #define BCM_VC_EMI_OFFSET 0x80000000UL 1478 1479 static inline int bcm_addr_is_sys_emi(unsigned long addr) 1480 { 1481 return (addr >= BCM_SYS_EMI_START_ADDR) && 1482 (addr < BCM_VC_EMI_SEC3_START_ADDR); 1483 } 1484 1485 static inline unsigned long bcm_l2_phys_addr(unsigned long addr) 1486 { 1487 if (bcm_addr_is_sys_emi(addr)) 1488 return addr + BCM_SYS_EMI_OFFSET; 1489 else 1490 return addr + BCM_VC_EMI_OFFSET; 1491 } 1492 1493 static void bcm_inv_range(unsigned long start, unsigned long end) 1494 { 1495 unsigned long new_start, new_end; 1496 1497 BUG_ON(start < BCM_SYS_EMI_START_ADDR); 1498 1499 if (unlikely(end <= start)) 1500 return; 1501 1502 new_start = bcm_l2_phys_addr(start); 1503 new_end = bcm_l2_phys_addr(end); 1504 1505 /* normal case, no cross section between start and end */ 1506 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { 1507 l2c210_inv_range(new_start, new_end); 1508 return; 1509 } 1510 1511 /* They cross sections, so it can only be a cross from section 1512 * 2 to section 3 1513 */ 1514 l2c210_inv_range(new_start, 1515 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); 1516 l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), 1517 new_end); 1518 } 1519 1520 static void bcm_clean_range(unsigned long start, unsigned long end) 1521 { 1522 unsigned long new_start, new_end; 1523 1524 BUG_ON(start < BCM_SYS_EMI_START_ADDR); 1525 1526 if (unlikely(end <= start)) 1527 return; 1528 1529 new_start = bcm_l2_phys_addr(start); 1530 new_end = bcm_l2_phys_addr(end); 1531 1532 /* normal case, no cross section between start and end */ 1533 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { 1534 l2c210_clean_range(new_start, new_end); 1535 return; 1536 } 1537 1538 /* They cross sections, so it can only be a cross from section 1539 * 2 to section 3 1540 */ 1541 l2c210_clean_range(new_start, 1542 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); 1543 l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), 1544 new_end); 1545 } 1546 1547 static void bcm_flush_range(unsigned long start, unsigned long end) 1548 { 1549 unsigned long new_start, new_end; 1550 1551 BUG_ON(start < BCM_SYS_EMI_START_ADDR); 1552 1553 if (unlikely(end <= start)) 1554 return; 1555 1556 if ((end - start) >= l2x0_size) { 1557 outer_cache.flush_all(); 1558 return; 1559 } 1560 1561 new_start = bcm_l2_phys_addr(start); 1562 new_end = bcm_l2_phys_addr(end); 1563 1564 /* normal case, no cross section between start and end */ 1565 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { 1566 l2c210_flush_range(new_start, new_end); 1567 return; 1568 } 1569 1570 /* They cross sections, so it can only be a cross from section 1571 * 2 to section 3 1572 */ 1573 l2c210_flush_range(new_start, 1574 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); 1575 l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), 1576 new_end); 1577 } 1578 1579 /* Broadcom L2C-310 start from ARMs R3P2 or later, and require no fixups */ 1580 static const struct l2c_init_data of_bcm_l2x0_data __initconst = { 1581 .type = "BCM-L2C-310", 1582 .way_size_0 = SZ_8K, 1583 .num_lock = 8, 1584 .of_parse = l2c310_of_parse, 1585 .enable = l2c310_enable, 1586 .save = l2c310_save, 1587 .configure = l2c310_configure, 1588 .outer_cache = { 1589 .inv_range = bcm_inv_range, 1590 .clean_range = bcm_clean_range, 1591 .flush_range = bcm_flush_range, 1592 .flush_all = l2c210_flush_all, 1593 .disable = l2c310_disable, 1594 .sync = l2c210_sync, 1595 .resume = l2c310_resume, 1596 }, 1597 }; 1598 1599 static void __init tauros3_save(void __iomem *base) 1600 { 1601 l2c_save(base); 1602 1603 l2x0_saved_regs.aux2_ctrl = 1604 readl_relaxed(base + TAUROS3_AUX2_CTRL); 1605 l2x0_saved_regs.prefetch_ctrl = 1606 readl_relaxed(base + L310_PREFETCH_CTRL); 1607 } 1608 1609 static void tauros3_configure(void __iomem *base) 1610 { 1611 writel_relaxed(l2x0_saved_regs.aux2_ctrl, 1612 base + TAUROS3_AUX2_CTRL); 1613 writel_relaxed(l2x0_saved_regs.prefetch_ctrl, 1614 base + L310_PREFETCH_CTRL); 1615 } 1616 1617 static const struct l2c_init_data of_tauros3_data __initconst = { 1618 .type = "Tauros3", 1619 .way_size_0 = SZ_8K, 1620 .num_lock = 8, 1621 .enable = l2c_enable, 1622 .save = tauros3_save, 1623 .configure = tauros3_configure, 1624 /* Tauros3 broadcasts L1 cache operations to L2 */ 1625 .outer_cache = { 1626 .resume = l2c_resume, 1627 }, 1628 }; 1629 1630 #define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns } 1631 static const struct of_device_id l2x0_ids[] __initconst = { 1632 L2C_ID("arm,l210-cache", of_l2c210_data), 1633 L2C_ID("arm,l220-cache", of_l2c220_data), 1634 L2C_ID("arm,pl310-cache", of_l2c310_data), 1635 L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data), 1636 L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data), 1637 L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data), 1638 L2C_ID("marvell,tauros3-cache", of_tauros3_data), 1639 /* Deprecated IDs */ 1640 L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data), 1641 {} 1642 }; 1643 1644 int __init l2x0_of_init(u32 aux_val, u32 aux_mask) 1645 { 1646 const struct l2c_init_data *data; 1647 struct device_node *np; 1648 struct resource res; 1649 u32 cache_id, old_aux; 1650 1651 np = of_find_matching_node(NULL, l2x0_ids); 1652 if (!np) 1653 return -ENODEV; 1654 1655 if (of_address_to_resource(np, 0, &res)) 1656 return -ENODEV; 1657 1658 l2x0_base = ioremap(res.start, resource_size(&res)); 1659 if (!l2x0_base) 1660 return -ENOMEM; 1661 1662 l2x0_saved_regs.phy_base = res.start; 1663 1664 data = of_match_node(l2x0_ids, np)->data; 1665 1666 if (of_device_is_compatible(np, "arm,pl310-cache") && 1667 of_property_read_bool(np, "arm,io-coherent")) 1668 data = &of_l2c310_coherent_data; 1669 1670 old_aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 1671 if (old_aux != ((old_aux & aux_mask) | aux_val)) { 1672 pr_warn("L2C: platform modifies aux control register: 0x%08x -> 0x%08x\n", 1673 old_aux, (old_aux & aux_mask) | aux_val); 1674 } else if (aux_mask != ~0U && aux_val != 0) { 1675 pr_alert("L2C: platform provided aux values match the hardware, so have no effect. Please remove them.\n"); 1676 } 1677 1678 /* All L2 caches are unified, so this property should be specified */ 1679 if (!of_property_read_bool(np, "cache-unified")) 1680 pr_err("L2C: device tree omits to specify unified cache\n"); 1681 1682 /* Read back current (default) hardware configuration */ 1683 if (data->save) 1684 data->save(l2x0_base); 1685 1686 /* L2 configuration can only be changed if the cache is disabled */ 1687 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) 1688 if (data->of_parse) 1689 data->of_parse(np, &aux_val, &aux_mask); 1690 1691 if (cache_id_part_number_from_dt) 1692 cache_id = cache_id_part_number_from_dt; 1693 else 1694 cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); 1695 1696 return __l2c_init(data, aux_val, aux_mask, cache_id); 1697 } 1698 #endif 1699