/*
 * arch/arm/mm/cache-l2x0.c - L210/L220/L310 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/log2.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"

struct l2c_init_data {
	const char *type;
	unsigned way_size_0;
	unsigned num_lock;
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*enable)(void __iomem *, unsigned);
	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
	void (*save)(void __iomem *);
	void (*configure)(void __iomem *);
	void (*unlock)(void __iomem *, unsigned);
	struct outer_cache_fns outer_cache;
};

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static const struct l2c_init_data *l2x0_data;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

/*
 * Common code for all cache controllers.
 */
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

/*
 * By default, we write directly to secure registers.  Platforms must
 * override this if they are running non-secure.
 */
static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg)
{
	if (val == readl_relaxed(base + reg))
		return;
	if (outer_cache.write_sec)
		outer_cache.write_sec(val, reg);
	else
		writel_relaxed(val, base + reg);
}

/*
 * This should only be called when we have a requirement that the
 * register be written due to a work-around, as platforms running
 * in non-secure mode may not be able to access this register.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	l2c_write_sec(val, base, L2X0_DEBUG_CTRL);
}

static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	l2c_wait_mask(reg, l2x0_way_mask);
}

static inline void l2c_unlock(void __iomem *base, unsigned num)
{
	unsigned i;

	for (i = 0; i < num; i++) {
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

static void l2c_configure(void __iomem *base)
{
	l2c_write_sec(l2x0_saved_regs.aux_ctrl, base, L2X0_AUX_CTRL);
}

/*
 * Enable the L2 cache controller.  This function must only be
 * called when the cache controller is known to be disabled.
 */
static void l2c_enable(void __iomem *base, unsigned num_lock)
{
	unsigned long flags;

	if (outer_cache.configure)
		outer_cache.configure(&l2x0_saved_regs);
	else
		l2x0_data->configure(base);

	l2x0_data->unlock(base, num_lock);

	local_irq_save(flags);
	__l2c_op_way(base + L2X0_INV_WAY);
	writel_relaxed(0, base + sync_reg_offset);
	l2c_wait_mask(base + sync_reg_offset, 1);
	local_irq_restore(flags);

	l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);
}

static void l2c_disable(void)
{
	void __iomem *base = l2x0_base;

	outer_cache.flush_all();
	l2c_write_sec(0, base, L2X0_CTRL);
	dsb(st);
}

static void l2c_save(void __iomem *base)
{
	l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
}

static void l2c_resume(void)
{
	void __iomem *base = l2x0_base;

	/* Do not touch the controller if already enabled. */
	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
		l2c_enable(base, l2x0_data->num_lock);
}

/*
 * L2C-210 specific code.
 *
 * The L2C-2x0 PA, set/way and sync operations are atomic, but we must
 * ensure that no background operation is running.  The way operations
 * are all background tasks.
 *
 * While a background operation is in progress, any new operation is
 * ignored (unspecified whether this causes an error.)  Thankfully, not
 * used on SMP.
 *
 * Never has a different sync register other than L2X0_CACHE_SYNC, but
 * we use sync_reg_offset here so we can share some of this with L2C-310.
 */
static void __l2c210_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + sync_reg_offset);
}

static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
				 unsigned long end)
{
	while (start < end) {
		writel_relaxed(start, reg);
		start += CACHE_LINE_SIZE;
	}
}

static void l2c210_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_all(void)
{
	void __iomem *base = l2x0_base;

	BUG_ON(!irqs_disabled());

	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	__l2c210_cache_sync(base);
}

static void l2c210_sync(void)
{
	__l2c210_cache_sync(l2x0_base);
}

static const struct l2c_init_data l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c_enable,
	.save = l2c_save,
	.configure = l2c_configure,
	.unlock = l2c_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c_resume,
	},
};

/*
 * L2C-220 specific code.
 *
 * All operations are background operations: they have to be waited for.
 * Conflicting requests generate a slave error (which will cause an
 * imprecise abort.)  Never uses sync_reg_offset, so we hard-code the
 * sync register here.
 *
 * However, we can re-use the l2c210_resume call.
 */
static inline void __l2c220_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
	l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
}

static void l2c220_op_way(void __iomem *base, unsigned reg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + reg);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
					unsigned long end, unsigned long flags)
{
	raw_spinlock_t *lock = &l2x0_lock;

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2c_wait_mask(reg, 1);
			writel_relaxed(start, reg);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}

	return flags;
}

static void l2c220_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
			writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
		}
	}

	flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_all(void)
{
	l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
}

static void l2c220_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c220_cache_sync(l2x0_base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_enable(void __iomem *base, unsigned num_lock)
{
	/*
	 * Always enable non-secure access to the lockdown registers -
	 * we write to them as part of the L2C enable sequence so they
	 * need to be accessible.
	 */
	l2x0_saved_regs.aux_ctrl |= L220_AUX_CTRL_NS_LOCKDOWN;

	l2c_enable(base, num_lock);
}

static void l2c220_unlock(void __iomem *base, unsigned num_lock)
{
	if (readl_relaxed(base + L2X0_AUX_CTRL) & L220_AUX_CTRL_NS_LOCKDOWN)
		l2c_unlock(base, num_lock);
}

static const struct l2c_init_data l2c220_data = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c220_enable,
	.save = l2c_save,
	.configure = l2c_configure,
	.unlock = l2c220_unlock,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c_resume,
	},
};

/*
 * L2C-310 specific code.
 *
 * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
 * and the way operations are all background tasks.  However, issuing an
 * operation while a background operation is in progress results in a
 * SLVERR response.  We can reuse:
 *
 *	__l2c210_cache_sync (using sync_reg_offset)
 *	l2c210_sync
 *	l2c210_inv_range (if 588369 is not applicable)
 *	l2c210_clean_range
 *	l2c210_flush_range (if 588369 is not applicable)
 *	l2c210_flush_all (if 727915 is not applicable)
 *
 * Errata:
 * 588369: PL310 R0P0->R1P0, fixed R2P0.
 *	Affects: all clean+invalidate operations
 *	clean and invalidate skips the invalidate step, so we need to issue
 *	separate operations.  We also require the above debug workaround
 *	enclosing this code fragment on affected parts.  On unaffected parts,
 *	we must not use this workaround without the debug register writes
 *	to avoid exposing a problem similar to 727915.
 *
 * 727915: PL310 R2P0->R3P0, fixed R3P1.
 *	Affects: clean+invalidate by way
 *	clean and invalidate by way runs in the background, and a store can
 *	hit the line between the clean operation and invalidate operation,
 *	resulting in the store being lost.
 *
 * 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2.
 *	Affects: 8x64-bit (double fill) line fetches
 *	double fill line fetches can fail to cause dirty data to be evicted
 *	from the cache before the new data overwrites the second line.
 *
 * 753970: PL310 R3P0, fixed R3P1.
 *	Affects: sync
 *	prevents merging writes after the sync operation, until another L2C
 *	operation is performed (or a number of other conditions.)
 *
 * 769419: PL310 R0P0->R3P1, fixed R3P2.
 *	Affects: store buffer
 *	store buffer is not automatically drained.
 */
static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		unsigned long flags;

		/* Erratum 588369 for both clean+invalidate operations */
		raw_spin_lock_irqsave(&l2x0_lock, flags);
		l2c_set_debug(base, 0x03);

		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(end, base + L2X0_INV_LINE_PA);
		}

		l2c_set_debug(base, 0x00);
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
{
	raw_spinlock_t *lock = &l2x0_lock;
	unsigned long flags;
	void __iomem *base = l2x0_base;

	raw_spin_lock_irqsave(lock, flags);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		l2c_set_debug(base, 0x03);
		while (start < blk_end) {
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}
		l2c_set_debug(base, 0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}
	raw_spin_unlock_irqrestore(lock, flags);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_all_erratum(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	l2c_set_debug(base, 0x03);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	l2c_set_debug(base, 0x00);
	__l2c210_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __init l2c310_save(void __iomem *base)
{
	unsigned revision;

	l2c_save(base);

	l2x0_saved_regs.tag_latency = readl_relaxed(base +
		L310_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(base +
		L310_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(base +
		L310_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(base +
		L310_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

	/* From r2p0, there is Prefetch offset/control register */
	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
			L310_PREFETCH_CTRL);

	/* From r3p0, there is Power control register */
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
			L310_POWER_CTRL);
}

static void l2c310_configure(void __iomem *base)
{
	unsigned revision;

	l2c_configure(base);

	/* restore pl310 setup */
	l2c_write_sec(l2x0_saved_regs.tag_latency, base,
		      L310_TAG_LATENCY_CTRL);
	l2c_write_sec(l2x0_saved_regs.data_latency, base,
		      L310_DATA_LATENCY_CTRL);
	l2c_write_sec(l2x0_saved_regs.filter_end, base,
		      L310_ADDR_FILTER_END);
	l2c_write_sec(l2x0_saved_regs.filter_start, base,
		      L310_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
			      L310_PREFETCH_CTRL);
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
			      L310_POWER_CTRL);
}

static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, void *data)
{
	switch (act & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
		break;
	case CPU_DYING:
		set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
		break;
	}
	return NOTIFY_OK;
}

static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
{
	unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK;
	bool cortex_a9 = read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
	u32 aux = l2x0_saved_regs.aux_ctrl;

	if (rev >= L310_CACHE_ID_RTL_R2P0) {
		if (cortex_a9) {
			aux |= L310_AUX_CTRL_EARLY_BRESP;
			pr_info("L2C-310 enabling early BRESP for Cortex-A9\n");
		} else if (aux & L310_AUX_CTRL_EARLY_BRESP) {
			pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n");
			aux &= ~L310_AUX_CTRL_EARLY_BRESP;
		}
	}

	if (cortex_a9) {
		u32 aux_cur = readl_relaxed(base + L2X0_AUX_CTRL);
		u32 acr = get_auxcr();

		pr_debug("Cortex-A9 ACR=0x%08x\n", acr);

		if (acr & BIT(3) && !(aux_cur & L310_AUX_CTRL_FULL_LINE_ZERO))
			pr_err("L2C-310: full line of zeros enabled in Cortex-A9 but not L2C-310 - invalid\n");

		if (aux & L310_AUX_CTRL_FULL_LINE_ZERO && !(acr & BIT(3)))
			pr_err("L2C-310: enabling full line of zeros but not enabled in Cortex-A9\n");

		if (!(aux & L310_AUX_CTRL_FULL_LINE_ZERO) && !outer_cache.write_sec) {
			aux |= L310_AUX_CTRL_FULL_LINE_ZERO;
			pr_info("L2C-310 full line of zeros enabled for Cortex-A9\n");
		}
	} else if (aux & (L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP)) {
		pr_err("L2C-310: disabling Cortex-A9 specific feature bits\n");
		aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP);
	}

	/*
	 * Always enable non-secure access to the lockdown registers -
	 * we write to them as part of the L2C enable sequence so they
	 * need to be accessible.
	 */
	l2x0_saved_regs.aux_ctrl = aux | L310_AUX_CTRL_NS_LOCKDOWN;

	l2c_enable(base, num_lock);

	/* Read back resulting AUX_CTRL value as it could have been altered. */
	aux = readl_relaxed(base + L2X0_AUX_CTRL);

	if (aux & (L310_AUX_CTRL_DATA_PREFETCH | L310_AUX_CTRL_INSTR_PREFETCH)) {
		u32 prefetch = readl_relaxed(base + L310_PREFETCH_CTRL);

		pr_info("L2C-310 %s%s prefetch enabled, offset %u lines\n",
			aux & L310_AUX_CTRL_INSTR_PREFETCH ? "I" : "",
			aux & L310_AUX_CTRL_DATA_PREFETCH ? "D" : "",
			1 + (prefetch & L310_PREFETCH_CTRL_OFFSET_MASK));
	}

	/* r3p0 or later has power control register */
	if (rev >= L310_CACHE_ID_RTL_R3P0) {
		u32 power_ctrl;

		power_ctrl = readl_relaxed(base + L310_POWER_CTRL);
		pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n",
			power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis",
			power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
	}

	if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) {
		set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
		cpu_notifier(l2c310_cpu_enable_flz, 0);
	}
}

static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
	const char *errata[8];
	unsigned n = 0;

	if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
	    revision < L310_CACHE_ID_RTL_R2P0 &&
	    /* For bcm compatibility */
	    fns->inv_range == l2c210_inv_range) {
		fns->inv_range = l2c310_inv_range_erratum;
		fns->flush_range = l2c310_flush_range_erratum;
		errata[n++] = "588369";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
	    revision >= L310_CACHE_ID_RTL_R2P0 &&
	    revision < L310_CACHE_ID_RTL_R3P1) {
		fns->flush_all = l2c310_flush_all_erratum;
		errata[n++] = "727915";
	}

	if (revision >= L310_CACHE_ID_RTL_R3P0 &&
	    revision < L310_CACHE_ID_RTL_R3P2) {
		u32 val = l2x0_saved_regs.prefetch_ctrl;
		/* I don't think bit23 is required here... but iMX6 does so */
		if (val & (BIT(30) | BIT(23))) {
			val &= ~(BIT(30) | BIT(23));
			l2x0_saved_regs.prefetch_ctrl = val;
			errata[n++] = "752271";
		}
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
	    revision == L310_CACHE_ID_RTL_R3P0) {
		sync_reg_offset = L2X0_DUMMY_REG;
		errata[n++] = "753970";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
		errata[n++] = "769419";

	if (n) {
		unsigned i;

		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
		for (i = 0; i < n; i++)
			pr_cont(" %s", errata[i]);
		pr_cont(" enabled\n");
	}
}

static void l2c310_disable(void)
{
	/*
	 * If full-line-of-zeros is enabled, we must first disable it in the
	 * Cortex-A9 auxiliary control register before disabling the L2 cache.
	 */
	if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
		set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));

	l2c_disable();
}

static void l2c310_resume(void)
{
	l2c_resume();

	/* Re-enable full-line-of-zeros for Cortex-A9 */
	if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
		set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
}

static void l2c310_unlock(void __iomem *base, unsigned num_lock)
{
	if (readl_relaxed(base + L2X0_AUX_CTRL) & L310_AUX_CTRL_NS_LOCKDOWN)
		l2c_unlock(base, num_lock);
}

static const struct l2c_init_data l2c310_init_fns __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.unlock = l2c310_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};

static int __init __l2c_init(const struct l2c_init_data *data,
			     u32 aux_val, u32 aux_mask, u32 cache_id, bool nosync)
{
	struct outer_cache_fns fns;
	unsigned way_size_bits, ways;
	u32 aux, old_aux;

	/*
	 * Save the pointer globally so that callbacks which do not receive
	 * context from callers can access the structure.
	 */
	l2x0_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
	if (!l2x0_data)
		return -ENOMEM;

	/*
	 * Sanity check the aux values.  aux_mask is the bits we preserve
	 * from reading the hardware register, and aux_val is the bits we
	 * set.
	 */
	if (aux_val & aux_mask)
		pr_alert("L2C: platform provided aux values permit register corruption.\n");

	old_aux = aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
	aux &= aux_mask;
	aux |= aux_val;

	if (old_aux != aux)
		pr_warn("L2C: DT/platform modifies aux control register: 0x%08x -> 0x%08x\n",
			old_aux, aux);

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if ((aux_val | ~aux_mask) & (L2C_AUX_CTRL_WAY_SIZE_MASK | L310_AUX_CTRL_ASSOCIATIVITY_16))
			pr_warn("L2C: DT/platform tries to modify or specify cache size\n");
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		break;

	case L2X0_CACHE_ID_PART_L210:
	case L2X0_CACHE_ID_PART_L220:
		ways = (aux >> 13) & 0xf;
		break;

	case AURORA_CACHE_ID:
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
		break;

	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * way_size_0 is the size that a way_size value of zero would be
	 * given the calculation: way_size = way_size_0 << way_size_bits.
	 * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k,
	 * then way_size_0 would be 8k.
	 *
	 * L2 cache size = number of ways * way size.
	 */
	way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >>
			L2C_AUX_CTRL_WAY_SIZE_SHIFT;
	l2x0_size = ways * (data->way_size_0 << way_size_bits);

	fns = data->outer_cache;
	fns.write_sec = outer_cache.write_sec;
	fns.configure = outer_cache.configure;
	if (data->fixup)
		data->fixup(l2x0_base, cache_id, &fns);
	if (nosync) {
		pr_info("L2C: disabling outer sync\n");
		fns.sync = NULL;
	}

	/*
	 * Check if l2x0 controller is already enabled.  If we are booting
	 * in non-secure mode accessing the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		l2x0_saved_regs.aux_ctrl = aux;

		data->enable(l2x0_base, data->num_lock);
	}

	outer_cache = fns;

	/*
	 * It is strange to save the register state before initialisation,
	 * but hey, this is what the DT implementations decided to do.
	 */
	if (data->save)
		data->save(l2x0_base);

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	pr_info("%s cache controller enabled, %d ways, %d kB\n",
		data->type, ways, l2x0_size >> 10);
	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
		data->type, cache_id, aux);

	return 0;
}

void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	default:
	case L2X0_CACHE_ID_PART_L210:
		data = &l2c210_data;
		break;

	case L2X0_CACHE_ID_PART_L220:
		data = &l2c220_data;
		break;

	case L2X0_CACHE_ID_PART_L310:
		data = &l2c310_init_fns;
		break;
	}

	/* Read back current (default) hardware configuration */
	if (data->save)
		data->save(l2x0_base);

	__l2c_init(data, aux_val, aux_mask, cache_id, false);
}

#ifdef CONFIG_OF
static int l2_wt_override;

/*
 * Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree.
 */
static u32 cache_id_part_number_from_dt;

/**
 * l2x0_cache_size_of_parse() - read cache size parameters from DT
 * @np: the device tree node for the l2 cache
 * @aux_val: pointer to machine-supplied auxiliary register value, to
 * be augmented by the call (bits to be set to 1)
 * @aux_mask: pointer to machine-supplied auxiliary register mask, to
 * be augmented by the call (bits to be set to 0)
 * @associativity: variable to return the calculated associativity in
 * @max_way_size: the maximum size in bytes for the cache ways
 */
static int __init l2x0_cache_size_of_parse(const struct device_node *np,
					   u32 *aux_val, u32 *aux_mask,
					   u32 *associativity,
					   u32 max_way_size)
{
	u32 mask = 0, val = 0;
	u32 cache_size = 0, sets = 0;
	u32 way_size_bits = 1;
	u32 way_size = 0;
	u32 block_size = 0;
	u32 line_size = 0;

	of_property_read_u32(np, "cache-size", &cache_size);
	of_property_read_u32(np, "cache-sets", &sets);
	of_property_read_u32(np, "cache-block-size", &block_size);
	of_property_read_u32(np, "cache-line-size", &line_size);

	if (!cache_size || !sets)
		return -ENODEV;

	/* All these l2 caches have the same line = block size actually */
	if (!line_size) {
		if (block_size) {
			/* If linesize is not given, it is equal to blocksize */
			line_size = block_size;
		} else {
			/* Fall back to known size */
			pr_warn("L2C OF: no cache block/line size given: "
				"falling back to default size %d bytes\n",
				CACHE_LINE_SIZE);
			line_size = CACHE_LINE_SIZE;
		}
	}

	if (line_size != CACHE_LINE_SIZE)
		pr_warn("L2C OF: DT supplied line size %d bytes does "
			"not match hardware line size of %d bytes\n",
			line_size,
			CACHE_LINE_SIZE);

	/*
	 * Since:
	 * set size = cache size / sets
	 * ways = cache size / (sets * line size)
	 * way size = cache size / (cache size / (sets * line size))
	 * way size = sets * line size
	 * associativity = ways = cache size / way size
	 */
	way_size = sets * line_size;
	*associativity = cache_size / way_size;

	if (way_size > max_way_size) {
		pr_err("L2C OF: set size %dKB is too large\n", way_size);
		return -EINVAL;
	}

	pr_info("L2C OF: override cache size: %d bytes (%dKB)\n",
		cache_size, cache_size >> 10);
	pr_info("L2C OF: override line size: %d bytes\n", line_size);
	pr_info("L2C OF: override way size: %d bytes (%dKB)\n",
		way_size, way_size >> 10);
	pr_info("L2C OF: override associativity: %d\n", *associativity);

	/*
	 * Calculates the bits 17:19 to set for way size:
	 * 512KB -> 6, 256KB -> 5, ... 16KB -> 1
	 */
	way_size_bits = ilog2(way_size >> 10) - 3;
	if (way_size_bits < 1 || way_size_bits > 6) {
		pr_err("L2C OF: cache way size illegal: %dKB is not mapped\n",
		       way_size);
		return -EINVAL;
	}

	mask |= L2C_AUX_CTRL_WAY_SIZE_MASK;
	val |= (way_size_bits << L2C_AUX_CTRL_WAY_SIZE_SHIFT);

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;

	return 0;
}

static void __init l2x0_of_parse(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;
	u32 assoc;
	int ret;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	if (of_property_read_bool(np, "arm,parity-enable")) {
		mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
		val |= L2C_AUX_CTRL_PARITY_ENABLE;
	} else if (of_property_read_bool(np, "arm,parity-disable")) {
		mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
	}

	if (of_property_read_bool(np, "arm,shared-override")) {
		mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
		val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
	}

	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
	if (ret)
		return;

	if (assoc > 8) {
		pr_err("l2x0 of: cache setting yield too high associativity\n");
		pr_err("l2x0 of: %d calculated, max 8\n", assoc);
	} else {
		mask |= L2X0_AUX_CTRL_ASSOC_MASK;
		val |= (assoc << L2X0_AUX_CTRL_ASSOC_SHIFT);
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.save = l2c_save,
	.configure = l2c_configure,
	.unlock = l2c_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c_resume,
	},
};

static const struct l2c_init_data of_l2c220_data __initconst = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c220_enable,
	.save = l2c_save,
	.configure = l2c_configure,
	.unlock = l2c220_unlock,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c_resume,
	},
};

static void __init l2c310_of_parse(const struct device_node *np,
				   u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };
	u32 assoc;
	u32 prefetch;
	u32 power;
	u32 val;
	int ret;

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		l2x0_saved_regs.tag_latency =
			L310_LATENCY_CTRL_RD(tag[0] - 1) |
			L310_LATENCY_CTRL_WR(tag[1] - 1) |
			L310_LATENCY_CTRL_SETUP(tag[2] - 1);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		l2x0_saved_regs.data_latency =
			L310_LATENCY_CTRL_RD(data[0] - 1) |
			L310_LATENCY_CTRL_WR(data[1] - 1) |
			L310_LATENCY_CTRL_SETUP(data[2] - 1);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		l2x0_saved_regs.filter_end =
			ALIGN(filter[0] + filter[1], SZ_1M);
		l2x0_saved_regs.filter_start = (filter[0] & ~(SZ_1M - 1))
			| L310_ADDR_FILTER_EN;
	}

	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
	if (!ret) {
		switch (assoc) {
		case 16:
			*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
			*aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16;
			*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
			break;
		case 8:
			*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
			*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
			break;
		default:
			pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
			       assoc);
			break;
		}
	}

	if (of_property_read_bool(np, "arm,shared-override")) {
		*aux_val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
		*aux_mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
	}

	if (of_property_read_bool(np, "arm,parity-enable")) {
		*aux_val |= L2C_AUX_CTRL_PARITY_ENABLE;
		*aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
	} else if (of_property_read_bool(np, "arm,parity-disable")) {
		*aux_val &= ~L2C_AUX_CTRL_PARITY_ENABLE;
		*aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
	}

	prefetch = l2x0_saved_regs.prefetch_ctrl;

	ret = of_property_read_u32(np, "arm,double-linefill", &val);
	if (ret == 0) {
		if (val)
			prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL;
		else
			prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,double-linefill property value is missing\n");
	}

	ret = of_property_read_u32(np, "arm,double-linefill-incr", &val);
	if (ret == 0) {
		if (val)
			prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
		else
			prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,double-linefill-incr property value is missing\n");
	}

	ret = of_property_read_u32(np, "arm,double-linefill-wrap", &val);
	if (ret == 0) {
		if (!val)
			prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
		else
			prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,double-linefill-wrap property value is missing\n");
	}

	ret = of_property_read_u32(np, "arm,prefetch-drop", &val);
	if (ret == 0) {
		if (val)
			prefetch |= L310_PREFETCH_CTRL_PREFETCH_DROP;
		else
			prefetch &= ~L310_PREFETCH_CTRL_PREFETCH_DROP;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,prefetch-drop property value is missing\n");
	}

	ret = of_property_read_u32(np, "arm,prefetch-offset", &val);
	if (ret == 0) {
		prefetch &= ~L310_PREFETCH_CTRL_OFFSET_MASK;
		prefetch |= val & L310_PREFETCH_CTRL_OFFSET_MASK;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,prefetch-offset property value is missing\n");
	}

	ret = of_property_read_u32(np, "prefetch-data", &val);
	if (ret == 0) {
		if (val)
			prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH;
		else
			prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF prefetch-data property value is missing\n");
	}

	ret = of_property_read_u32(np, "prefetch-instr", &val);
	if (ret == 0) {
		if (val)
			prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
		else
			prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF prefetch-instr property value is missing\n");
	}

	l2x0_saved_regs.prefetch_ctrl = prefetch;

	power = l2x0_saved_regs.pwr_ctrl |
		L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN;

	ret = of_property_read_u32(np, "arm,dynamic-clock-gating", &val);
	if (!ret) {
		if (!val)
			power &= ~L310_DYNAMIC_CLK_GATING_EN;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF dynamic-clock-gating property value is missing or invalid\n");
	}
	ret = of_property_read_u32(np, "arm,standby-mode", &val);
	if (!ret) {
		if (!val)
			power &= ~L310_STNDBY_MODE_EN;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF standby-mode property value is missing or invalid\n");
	}

	l2x0_saved_regs.pwr_ctrl = power;
}

static const struct l2c_init_data of_l2c310_data __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.unlock = l2c310_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};

/*
 * This is a variant of the of_l2c310_data with .sync set to
 * NULL. Outer sync operations are not needed when the system is I/O
 * coherent, and potentially harmful in certain situations (PCIe/PL310
 * deadlock on Armada 375/38x due to hardware I/O coherency). The
 * other operations are kept because they are infrequent (therefore do
 * not cause the deadlock in practice) and needed for secondary CPU
 * boot and other power management activities.
 */
static const struct l2c_init_data of_l2c310_coherent_data __initconst = {
	.type = "L2C-310 Coherent",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.unlock = l2c310_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.resume = l2c310_resume,
	},
};

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long aurora_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}

static void aurora_pa_range(unsigned long start, unsigned long end,
			    unsigned long offset)
{
	void __iomem *base = l2x0_base;
	unsigned long range_end;
	unsigned long flags;

	/*
	 * round start and end addresses up to cache line size
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * perform operation on all full cache lines between 'start' and 'end'
	 */
	while (start < end) {
		range_end = aurora_range_end(start, end);

		raw_spin_lock_irqsave(&l2x0_lock, flags);
		writel_relaxed(start, base + AURORA_RANGE_BASE_ADDR_REG);
		writel_relaxed(range_end - CACHE_LINE_SIZE, base + offset);
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);

		writel_relaxed(0, base + AURORA_SYNC_REG);
		start = range_end;
	}
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
	aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override)
		aurora_pa_range(start, end, AURORA_CLEAN_RANGE_REG);
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	if (l2_wt_override)
		aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
	else
		aurora_pa_range(start, end, AURORA_FLUSH_RANGE_REG);
}

static void aurora_flush_all(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	writel_relaxed(0, base + AURORA_SYNC_REG);
}

static void aurora_cache_sync(void)
{
	writel_relaxed(0, l2x0_base + AURORA_SYNC_REG);
}

static void aurora_disable(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	writel_relaxed(0, base + AURORA_SYNC_REG);
	l2c_write_sec(0, base, L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void aurora_save(void __iomem *base)
{
	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}

/*
 * For Aurora cache in no outer mode, enable via the CP15 coprocessor
 * broadcasting of cache commands to L2.
 */
static void __init aurora_enable_no_outer(void __iomem *base,
					  unsigned num_lock)
{
	u32 u;

	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));

	isb();

	l2c_enable(base, num_lock);
}

static void __init aurora_fixup(void __iomem *base, u32 cache_id,
				struct outer_cache_fns *fns)
{
	sync_reg_offset = AURORA_SYNC_REG;
}

static void __init aurora_of_parse(const struct device_node *np,
				   u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			     &cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = l2c_enable,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.configure = l2c_configure,
	.unlock = l2c_unlock,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = aurora_flush_all,
		.disable = aurora_disable,
		.sync = aurora_cache_sync,
		.resume = l2c_resume,
	},
};

static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = aurora_enable_no_outer,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.configure = l2c_configure,
	.unlock = l2c_unlock,
	.outer_cache = {
		.resume = l2c_resume,
	},
};

/*
 * For certain Broadcom SoCs, depending on the address range, different offsets
 * need to be added to the address before passing it to L2 for
 * invalidation/clean/flush
 *
 * Section Address Range              Offset        EMI
 *   1     0x00000000 - 0x3FFFFFFF    0x80000000    VC
 *   2     0x40000000 - 0xBFFFFFFF    0x40000000    SYS
 *   3     0xC0000000 - 0xFFFFFFFF    0x80000000    VC
 *
 * When the start and end addresses have crossed two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, if we need to invalidate addresses starts at 0xBFFF0000 and
 * ends at 0xC0001000, we need do invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and 2)
 * 0xC0000000 - 0xC0001000
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer some
 * performance hit, but keep in mind the cross section case is very rare
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid use
 * case
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2. Because of
 * that the code does not need to handle section 1 at all.
 *
 */
#define BCM_SYS_EMI_START_ADDR		0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR	0xC0000000UL

#define BCM_SYS_EMI_OFFSET		0x40000000UL
#define BCM_VC_EMI_OFFSET		0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}

static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_inv_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_clean_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		outer_cache.flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_flush_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

/* Broadcom L2C-310 starts from ARM's R3P2 or later, and requires no fixups */
static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.type = "BCM-L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.unlock = l2c310_unlock,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};

static void __init tauros3_save(void __iomem *base)
{
	l2c_save(base);

	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L310_PREFETCH_CTRL);
}

static void tauros3_configure(void __iomem *base)
{
	l2c_configure(base);
	writel_relaxed(l2x0_saved_regs.aux2_ctrl,
		       base + TAUROS3_AUX2_CTRL);
	writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
		       base + L310_PREFETCH_CTRL);
}

static const struct l2c_init_data of_tauros3_data __initconst = {
	.type = "Tauros3",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c_enable,
	.save = tauros3_save,
	.configure = tauros3_configure,
	.unlock = l2c_unlock,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = l2c_resume,
	},
};

#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2c210_data),
	L2C_ID("arm,l220-cache", of_l2c220_data),
	L2C_ID("arm,pl310-cache", of_l2c310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};

int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id, old_aux;
	u32 cache_level = 2;
	bool nosync = false;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	if (of_device_is_compatible(np, "arm,pl310-cache") &&
	    of_property_read_bool(np, "arm,io-coherent"))
		data = &of_l2c310_coherent_data;

	old_aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
	if (old_aux != ((old_aux & aux_mask) | aux_val)) {
		pr_warn("L2C: platform modifies aux control register: 0x%08x -> 0x%08x\n",
			old_aux, (old_aux & aux_mask) | aux_val);
	} else if (aux_mask != ~0U && aux_val != 0) {
		pr_alert("L2C: platform provided aux values match the hardware, so have no effect.  Please remove them.\n");
	}

	/* All L2 caches are unified, so this property should be specified */
	if (!of_property_read_bool(np, "cache-unified"))
		pr_err("L2C: device tree omits to specify unified cache\n");

	if (of_property_read_u32(np, "cache-level", &cache_level))
		pr_err("L2C: device tree omits to specify cache-level\n");

	if (cache_level != 2)
		pr_err("L2C: device tree specifies invalid cache level\n");

	nosync = of_property_read_bool(np, "arm,outer-sync-disable");

	/* Read back current (default) hardware configuration */
	if (data->save)
		data->save(l2x0_base);

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	return __l2c_init(data, aux_val, aux_mask, cache_id, nosync);
}
#endif