/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/log2.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"

struct l2c_init_data {
	const char *type;
	unsigned way_size_0;
	unsigned num_lock;
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*enable)(void __iomem *, u32, unsigned);
	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
	void (*save)(void __iomem *);
	void (*configure)(void __iomem *);
	struct outer_cache_fns outer_cache;
};

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static const struct l2c_init_data *l2x0_data;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

/*
 * Common code for all cache controllers.
 */
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

/*
 * By default, we write directly to secure registers.  Platforms must
 * override this if they are running non-secure.
 */
static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg)
{
	if (val == readl_relaxed(base + reg))
		return;
	if (outer_cache.write_sec)
		outer_cache.write_sec(val, reg);
	else
		writel_relaxed(val, base + reg);
}
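/*
 * Illustrative sketch only: a platform booting non-secure would
 * typically provide a write_sec hook that traps to secure firmware
 * instead of writing the register directly.  All names below are
 * hypothetical and not part of this file:
 *
 *	static void example_write_sec(unsigned long val, unsigned reg)
 *	{
 *		// hypothetical firmware call taking the register offset
 *		example_firmware_l2c_write(reg, val);
 *	}
 *	...
 *	outer_cache.write_sec = example_write_sec;
 */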
/*
 * This should only be called when we have a requirement that the
 * register be written due to a work-around, as platforms running
 * in non-secure mode may not be able to access this register.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	l2c_write_sec(val, base, L2X0_DEBUG_CTRL);
}

static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	l2c_wait_mask(reg, l2x0_way_mask);
}

static inline void l2c_unlock(void __iomem *base, unsigned num)
{
	unsigned i;

	for (i = 0; i < num; i++) {
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

static void l2c_configure(void __iomem *base)
{
	if (outer_cache.configure) {
		outer_cache.configure(&l2x0_saved_regs);
		return;
	}

	if (l2x0_data->configure)
		l2x0_data->configure(base);

	l2c_write_sec(l2x0_saved_regs.aux_ctrl, base, L2X0_AUX_CTRL);
}

/*
 * Enable the L2 cache controller.  This function must only be
 * called when the cache controller is known to be disabled.
 */
static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned long flags;

	/* Do not touch the controller if already enabled. */
	if (readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)
		return;

	l2x0_saved_regs.aux_ctrl = aux;
	l2c_configure(base);

	l2c_unlock(base, num_lock);

	local_irq_save(flags);
	__l2c_op_way(base + L2X0_INV_WAY);
	writel_relaxed(0, base + sync_reg_offset);
	l2c_wait_mask(base + sync_reg_offset, 1);
	local_irq_restore(flags);

	l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);
}

static void l2c_disable(void)
{
	void __iomem *base = l2x0_base;

	outer_cache.flush_all();
	l2c_write_sec(0, base, L2X0_CTRL);
	dsb(st);
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	l2c_wait_mask
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	l2c_set_debug(l2x0_base, val);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	l2c_write_sec(0, l2x0_base, L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c_save(void __iomem *base)
{
	l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
}

static void l2c_resume(void)
{
	l2c_enable(l2x0_base, l2x0_saved_regs.aux_ctrl, l2x0_data->num_lock);
}
/*
 * L2C-210 specific code.
 *
 * The L2C-2x0 PA, set/way and sync operations are atomic, but we must
 * ensure that no background operation is running.  The way operations
 * are all background tasks.
 *
 * While a background operation is in progress, any new operation is
 * ignored (unspecified whether this causes an error.)  Thankfully, not
 * used on SMP.
 *
 * Never has a sync register other than L2X0_CACHE_SYNC, but we use
 * sync_reg_offset here so we can share some of this with L2C-310.
 */
static void __l2c210_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + sync_reg_offset);
}

static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end)
{
	while (start < end) {
		writel_relaxed(start, reg);
		start += CACHE_LINE_SIZE;
	}
}

static void l2c210_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_all(void)
{
	void __iomem *base = l2x0_base;

	BUG_ON(!irqs_disabled());

	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	__l2c210_cache_sync(base);
}

static void l2c210_sync(void)
{
	__l2c210_cache_sync(l2x0_base);
}

static const struct l2c_init_data l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c_resume,
	},
};
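/*
 * Worked example for the alignment handling in l2c210_inv_range()
 * above, with illustrative addresses: a request for [0x1010, 0x1050)
 * with 32-byte lines has a misaligned start and end.  The partial
 * lines at 0x1000 and 0x1040 are clean+invalidated, so that unrelated
 * dirty data sharing those lines is not discarded, and the remaining
 * whole line at 0x1020 is simply invalidated.
 */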
/*
 * L2C-220 specific code.
 *
 * All operations are background operations: they have to be waited for.
 * Conflicting requests generate a slave error (which will cause an
 * imprecise abort.)  Never uses sync_reg_offset, so we hard-code the
 * sync register here.
 *
 * However, we can re-use the l2c_resume call.
 */
static inline void __l2c220_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
	l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
}

static void l2c220_op_way(void __iomem *base, unsigned reg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + reg);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end, unsigned long flags)
{
	raw_spinlock_t *lock = &l2x0_lock;

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2c_wait_mask(reg, 1);
			writel_relaxed(start, reg);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}

	return flags;
}
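/*
 * A note on the 4096-byte blocking in l2c220_op_pa_range() above: each
 * block issues at most 4096 / CACHE_LINE_SIZE = 128 line operations
 * before the caller's lock is briefly released and re-taken, so a
 * large range (e.g. a 1 MiB flush, 256 blocks) does not keep the lock
 * held, and interrupts disabled, for the whole operation.
 */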
static void l2c220_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
			writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
		}
	}

	flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_all(void)
{
	l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
}

static void l2c220_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c220_cache_sync(l2x0_base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	/*
	 * Always enable non-secure access to the lockdown registers -
	 * we write to them as part of the L2C enable sequence so they
	 * need to be accessible.
	 */
	aux |= L220_AUX_CTRL_NS_LOCKDOWN;

	l2c_enable(base, aux, num_lock);
}

static const struct l2c_init_data l2c220_data __initconst = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c220_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c_resume,
	},
};

/*
 * L2C-310 specific code.
 *
 * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
 * and the way operations are all background tasks.  However, issuing an
 * operation while a background operation is in progress results in a
 * SLVERR response.  We can reuse:
 *
 *	__l2c210_cache_sync (using sync_reg_offset)
 *	l2c210_sync
 *	l2c210_inv_range (if 588369 is not applicable)
 *	l2c210_clean_range
 *	l2c210_flush_range (if 588369 is not applicable)
 *	l2c210_flush_all (if 727915 is not applicable)
 *
 * Errata:
 * 588369: PL310 R0P0->R1P0, fixed R2P0.
 *	Affects: all clean+invalidate operations
 *	clean and invalidate skips the invalidate step, so we need to issue
 *	separate operations.  We also require the above debug workaround
 *	enclosing this code fragment on affected parts.  On unaffected parts,
 *	we must not use this workaround without the debug register writes
 *	to avoid exposing a problem similar to 727915.
 *
 * 727915: PL310 R2P0->R3P0, fixed R3P1.
 *	Affects: clean+invalidate by way
 *	clean and invalidate by way runs in the background, and a store can
 *	hit the line between the clean operation and invalidate operation,
 *	resulting in the store being lost.
 *
 * 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2.
 *	Affects: 8x64-bit (double fill) line fetches
 *	double fill line fetches can fail to cause dirty data to be evicted
 *	from the cache before the new data overwrites the second line.
 *
 * 753970: PL310 R3P0, fixed R3P1.
 *	Affects: sync
 *	prevents merging writes after the sync operation, until another L2C
 *	operation is performed (or a number of other conditions.)
 *
 * 769419: PL310 R0P0->R3P1, fixed R3P2.
 *	Affects: store buffer
 *	store buffer is not automatically drained.
 */
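/*
 * The affected-revision ranges listed above map directly onto the RTL
 * release field of the cache ID register as used by l2c310_fixup()
 * below, e.g. for 588369 ("R0P0->R1P0, fixed R2P0"):
 *
 *	revision < L310_CACHE_ID_RTL_R2P0
 *
 * and for 727915 ("R2P0->R3P0, fixed R3P1"):
 *
 *	revision >= L310_CACHE_ID_RTL_R2P0 &&
 *	revision < L310_CACHE_ID_RTL_R3P1
 */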
static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		unsigned long flags;

		/* Erratum 588369 for both clean+invalidate operations */
		raw_spin_lock_irqsave(&l2x0_lock, flags);
		l2c_set_debug(base, 0x03);

		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(end, base + L2X0_INV_LINE_PA);
		}

		l2c_set_debug(base, 0x00);
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
{
	raw_spinlock_t *lock = &l2x0_lock;
	unsigned long flags;
	void __iomem *base = l2x0_base;

	raw_spin_lock_irqsave(lock, flags);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		l2c_set_debug(base, 0x03);
		while (start < blk_end) {
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}
		l2c_set_debug(base, 0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}
	raw_spin_unlock_irqrestore(lock, flags);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_all_erratum(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	l2c_set_debug(base, 0x03);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	l2c_set_debug(base, 0x00);
	__l2c210_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __init l2c310_save(void __iomem *base)
{
	unsigned revision;

	l2c_save(base);

	l2x0_saved_regs.tag_latency = readl_relaxed(base +
		L310_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(base +
		L310_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(base +
		L310_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(base +
		L310_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

	/* From r2p0, there is Prefetch offset/control register */
	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
			L310_PREFETCH_CTRL);

	/* From r3p0, there is Power control register */
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
			L310_POWER_CTRL);
}

static void l2c310_configure(void __iomem *base)
{
	unsigned revision;

	/* restore pl310 setup */
	l2c_write_sec(l2x0_saved_regs.tag_latency, base,
		      L310_TAG_LATENCY_CTRL);
	l2c_write_sec(l2x0_saved_regs.data_latency, base,
		      L310_DATA_LATENCY_CTRL);
	l2c_write_sec(l2x0_saved_regs.filter_end, base,
		      L310_ADDR_FILTER_END);
	l2c_write_sec(l2x0_saved_regs.filter_start, base,
		      L310_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
			      L310_PREFETCH_CTRL);
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
			      L310_POWER_CTRL);
}
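/*
 * A note on the BIT(3) | BIT(2) | BIT(1) writes used for the
 * full-line-of-zeros handling below: on Cortex-A9 these ACTLR bits are
 * understood (per the Cortex-A9 TRM) to be the write-full-line-of-zeros
 * enable (bit 3) and the L1/L2 prefetch hint enables (bits 2 and 1);
 * they are set and cleared together alongside the L2C-310 FLZ feature.
 */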
static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, void *data)
{
	switch (act & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
		break;
	case CPU_DYING:
		set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
		break;
	}
	return NOTIFY_OK;
}

static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK;
	bool cortex_a9 = read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;

	if (rev >= L310_CACHE_ID_RTL_R2P0) {
		if (cortex_a9) {
			aux |= L310_AUX_CTRL_EARLY_BRESP;
			pr_info("L2C-310 enabling early BRESP for Cortex-A9\n");
		} else if (aux & L310_AUX_CTRL_EARLY_BRESP) {
			pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n");
			aux &= ~L310_AUX_CTRL_EARLY_BRESP;
		}
	}

	if (cortex_a9) {
		u32 aux_cur = readl_relaxed(base + L2X0_AUX_CTRL);
		u32 acr = get_auxcr();

		pr_debug("Cortex-A9 ACR=0x%08x\n", acr);

		if (acr & BIT(3) && !(aux_cur & L310_AUX_CTRL_FULL_LINE_ZERO))
			pr_err("L2C-310: full line of zeros enabled in Cortex-A9 but not L2C-310 - invalid\n");

		if (aux & L310_AUX_CTRL_FULL_LINE_ZERO && !(acr & BIT(3)))
			pr_err("L2C-310: enabling full line of zeros but not enabled in Cortex-A9\n");

		if (!(aux & L310_AUX_CTRL_FULL_LINE_ZERO) && !outer_cache.write_sec) {
			aux |= L310_AUX_CTRL_FULL_LINE_ZERO;
			pr_info("L2C-310 full line of zeros enabled for Cortex-A9\n");
		}
	} else if (aux & (L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP)) {
		pr_err("L2C-310: disabling Cortex-A9 specific feature bits\n");
		aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP);
	}

	/* r3p0 or later has power control register */
	if (rev >= L310_CACHE_ID_RTL_R3P0)
		l2x0_saved_regs.pwr_ctrl = L310_DYNAMIC_CLK_GATING_EN |
						L310_STNDBY_MODE_EN;

	/*
	 * Always enable non-secure access to the lockdown registers -
	 * we write to them as part of the L2C enable sequence so they
	 * need to be accessible.
	 */
	aux |= L310_AUX_CTRL_NS_LOCKDOWN;

	l2c_enable(base, aux, num_lock);

	/* Read back resulting AUX_CTRL value as it could have been altered. */
	aux = readl_relaxed(base + L2X0_AUX_CTRL);

	if (aux & (L310_AUX_CTRL_DATA_PREFETCH | L310_AUX_CTRL_INSTR_PREFETCH)) {
		u32 prefetch = readl_relaxed(base + L310_PREFETCH_CTRL);

		pr_info("L2C-310 %s%s prefetch enabled, offset %u lines\n",
			aux & L310_AUX_CTRL_INSTR_PREFETCH ? "I" : "",
			aux & L310_AUX_CTRL_DATA_PREFETCH ? "D" : "",
			1 + (prefetch & L310_PREFETCH_CTRL_OFFSET_MASK));
	}

	/* r3p0 or later has power control register */
	if (rev >= L310_CACHE_ID_RTL_R3P0) {
		u32 power_ctrl;

		power_ctrl = readl_relaxed(base + L310_POWER_CTRL);
		pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n",
			power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis",
			power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
	}

	if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) {
		set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
		cpu_notifier(l2c310_cpu_enable_flz, 0);
	}
}

static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
	const char *errata[8];
	unsigned n = 0;

	if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
	    revision < L310_CACHE_ID_RTL_R2P0 &&
	    /* For bcm compatibility */
	    fns->inv_range == l2c210_inv_range) {
		fns->inv_range = l2c310_inv_range_erratum;
		fns->flush_range = l2c310_flush_range_erratum;
		errata[n++] = "588369";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
	    revision >= L310_CACHE_ID_RTL_R2P0 &&
	    revision < L310_CACHE_ID_RTL_R3P1) {
		fns->flush_all = l2c310_flush_all_erratum;
		errata[n++] = "727915";
	}

	if (revision >= L310_CACHE_ID_RTL_R3P0 &&
	    revision < L310_CACHE_ID_RTL_R3P2) {
		u32 val = l2x0_saved_regs.prefetch_ctrl;
		/* I don't think bit23 is required here... but iMX6 does so */
		if (val & (BIT(30) | BIT(23))) {
			val &= ~(BIT(30) | BIT(23));
			l2x0_saved_regs.prefetch_ctrl = val;
			errata[n++] = "752271";
		}
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
	    revision == L310_CACHE_ID_RTL_R3P0) {
		sync_reg_offset = L2X0_DUMMY_REG;
		errata[n++] = "753970";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
		errata[n++] = "769419";

	if (n) {
		unsigned i;

		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
		for (i = 0; i < n; i++)
			pr_cont(" %s", errata[i]);
		pr_cont(" enabled\n");
	}
}

static void l2c310_disable(void)
{
	/*
	 * If full-line-of-zeros is enabled, we must first disable it in the
	 * Cortex-A9 auxiliary control register before disabling the L2 cache.
	 */
	if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
		set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));

	l2c_disable();
}

static void l2c310_resume(void)
{
	l2c_resume();

	/* Re-enable full-line-of-zeros for Cortex-A9 */
	if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
		set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
}

static const struct l2c_init_data l2c310_init_fns __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};
static int __init __l2c_init(const struct l2c_init_data *data,
			     u32 aux_val, u32 aux_mask, u32 cache_id)
{
	struct outer_cache_fns fns;
	unsigned way_size_bits, ways;
	u32 aux, old_aux;

	/*
	 * Save the pointer globally so that callbacks which do not receive
	 * context from callers can access the structure.
	 */
	l2x0_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
	if (!l2x0_data)
		return -ENOMEM;

	/*
	 * Sanity check the aux values.  aux_mask is the bits we preserve
	 * from reading the hardware register, and aux_val is the bits we
	 * set.
	 */
	if (aux_val & aux_mask)
		pr_alert("L2C: platform provided aux values permit register corruption.\n");

	old_aux = aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
	aux &= aux_mask;
	aux |= aux_val;

	if (old_aux != aux)
		pr_warn("L2C: DT/platform modifies aux control register: 0x%08x -> 0x%08x\n",
			old_aux, aux);

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if ((aux_val | ~aux_mask) & (L2C_AUX_CTRL_WAY_SIZE_MASK | L310_AUX_CTRL_ASSOCIATIVITY_16))
			pr_warn("L2C: DT/platform tries to modify or specify cache size\n");
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		break;

	case L2X0_CACHE_ID_PART_L210:
	case L2X0_CACHE_ID_PART_L220:
		ways = (aux >> 13) & 0xf;
		break;

	case AURORA_CACHE_ID:
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
		break;

	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * way_size_0 is the size that a way_size value of zero would be
	 * given the calculation: way_size = way_size_0 << way_size_bits.
	 * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k,
	 * then way_size_0 would be 8k.
	 *
	 * L2 cache size = number of ways * way size.
	 */
	way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >>
			L2C_AUX_CTRL_WAY_SIZE_SHIFT;
	l2x0_size = ways * (data->way_size_0 << way_size_bits);

	fns = data->outer_cache;
	fns.write_sec = outer_cache.write_sec;
	fns.configure = outer_cache.configure;
	if (data->fixup)
		data->fixup(l2x0_base, cache_id, &fns);

	/*
	 * Check if l2x0 controller is already enabled.  If we are booting
	 * in non-secure mode accessing the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		data->enable(l2x0_base, aux, data->num_lock);

	outer_cache = fns;

	/*
	 * It is strange to save the register state before initialisation,
	 * but hey, this is what the DT implementations decided to do.
	 */
	if (data->save)
		data->save(l2x0_base);

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	pr_info("%s cache controller enabled, %d ways, %d kB\n",
		data->type, ways, l2x0_size >> 10);
	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
		data->type, cache_id, aux);

	return 0;
}
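/*
 * Two worked examples for __l2c_init() above (illustrative numbers
 * only).  Aux handling: the effective value is
 *
 *	aux = (hw_aux & aux_mask) | aux_val;
 *
 * so aux_mask selects which hardware bits are preserved and aux_val
 * forces bits on; a platform passing aux_val = 0, aux_mask = ~0 keeps
 * the hardware value unchanged, and bits set in both are flagged since
 * aux_val bits are expected to be clear in aux_mask.  Sizing: an
 * L2C-310 (way_size_0 = SZ_8K) with a way-size field of 3 has
 * 8K << 3 = 64K ways; with 16 ways that gives l2x0_size = 1 MiB.
 */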
void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	default:
	case L2X0_CACHE_ID_PART_L210:
		data = &l2c210_data;
		break;

	case L2X0_CACHE_ID_PART_L220:
		data = &l2c220_data;
		break;

	case L2X0_CACHE_ID_PART_L310:
		data = &l2c310_init_fns;
		break;
	}

	/* Read back current (default) hardware configuration */
	if (data->save)
		data->save(l2x0_base);

	__l2c_init(data, aux_val, aux_mask, cache_id);
}

#ifdef CONFIG_OF
static int l2_wt_override;

/*
 * Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree.
 */
static u32 cache_id_part_number_from_dt;
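/*
 * The parsers below consume device tree nodes of roughly this shape
 * (the values here are illustrative only, not from any real platform):
 *
 *	L2: cache-controller@fff12000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xfff12000 0x1000>;
 *		cache-unified;
 *		cache-level = <2>;
 *		cache-size = <0x80000>;
 *		cache-sets = <2048>;
 *		cache-line-size = <32>;
 *		arm,tag-latency = <1 1 1>;
 *		arm,data-latency = <2 2 2>;
 *	};
 */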
/**
 * l2x0_cache_size_of_parse() - read cache size parameters from DT
 * @np: the device tree node for the l2 cache
 * @aux_val: pointer to machine-supplied auxiliary register value, to
 * be augmented by the call (bits to be set to 1)
 * @aux_mask: pointer to machine-supplied auxiliary register mask, to
 * be augmented by the call (bits to be set to 0)
 * @associativity: variable to return the calculated associativity in
 * @max_way_size: the maximum size in bytes for the cache ways
 */
static int __init l2x0_cache_size_of_parse(const struct device_node *np,
					   u32 *aux_val, u32 *aux_mask,
					   u32 *associativity,
					   u32 max_way_size)
{
	u32 mask = 0, val = 0;
	u32 cache_size = 0, sets = 0;
	u32 way_size_bits = 1;
	u32 way_size = 0;
	u32 block_size = 0;
	u32 line_size = 0;

	of_property_read_u32(np, "cache-size", &cache_size);
	of_property_read_u32(np, "cache-sets", &sets);
	of_property_read_u32(np, "cache-block-size", &block_size);
	of_property_read_u32(np, "cache-line-size", &line_size);

	if (!cache_size || !sets)
		return -ENODEV;

	/* All these l2 caches have the same line = block size actually */
	if (!line_size) {
		if (block_size) {
			/* If line size is not given, it is equal to block size */
			line_size = block_size;
		} else {
			/* Fall back to known size */
			pr_warn("L2C OF: no cache block/line size given: "
				"falling back to default size %d bytes\n",
				CACHE_LINE_SIZE);
			line_size = CACHE_LINE_SIZE;
		}
	}

	if (line_size != CACHE_LINE_SIZE)
		pr_warn("L2C OF: DT supplied line size %d bytes does "
			"not match hardware line size of %d bytes\n",
			line_size,
			CACHE_LINE_SIZE);

	/*
	 * Since:
	 * set size = cache size / sets
	 * ways = cache size / (sets * line size)
	 * way size = cache size / (cache size / (sets * line size))
	 * way size = sets * line size
	 * associativity = ways = cache size / way size
	 */
	way_size = sets * line_size;
	*associativity = cache_size / way_size;

	if (way_size > max_way_size) {
		pr_err("L2C OF: way size %d bytes is too large\n", way_size);
		return -EINVAL;
	}

	pr_info("L2C OF: override cache size: %d bytes (%dKB)\n",
		cache_size, cache_size >> 10);
	pr_info("L2C OF: override line size: %d bytes\n", line_size);
	pr_info("L2C OF: override way size: %d bytes (%dKB)\n",
		way_size, way_size >> 10);
	pr_info("L2C OF: override associativity: %d\n", *associativity);

	/*
	 * Calculates the bits 17:19 to set for way size:
	 * 512KB -> 6, 256KB -> 5, ... 16KB -> 1
	 */
	way_size_bits = ilog2(way_size >> 10) - 3;
	if (way_size_bits < 1 || way_size_bits > 6) {
		pr_err("L2C OF: cache way size illegal: %d bytes is not mapped\n",
		       way_size);
		return -EINVAL;
	}

	mask |= L2C_AUX_CTRL_WAY_SIZE_MASK;
	val |= (way_size_bits << L2C_AUX_CTRL_WAY_SIZE_SHIFT);

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;

	return 0;
}
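/*
 * Worked example of the derivation above: cache-size = 0x80000
 * (512 KiB), cache-sets = 2048 and a 32-byte line give way_size =
 * 2048 * 32 = 64 KiB, an associativity of 512 KiB / 64 KiB = 8, and
 * way_size_bits = ilog2(64) - 3 = 3.
 */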
static void __init l2x0_of_parse(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;
	u32 assoc;
	int ret;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
	if (ret)
		return;

	if (assoc > 8) {
		pr_err("l2x0 of: cache settings yield too high associativity\n");
		pr_err("l2x0 of: %d calculated, max 8\n", assoc);
	} else {
		mask |= L2X0_AUX_CTRL_ASSOC_MASK;
		val |= (assoc << L2X0_AUX_CTRL_ASSOC_SHIFT);
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c_resume,
	},
};

static const struct l2c_init_data of_l2c220_data __initconst = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c220_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c_resume,
	},
};

static void __init l2c310_of_parse(const struct device_node *np,
				   u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };
	u32 assoc;
	u32 prefetch;
	u32 val;
	int ret;

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		l2x0_saved_regs.tag_latency =
			L310_LATENCY_CTRL_RD(tag[0] - 1) |
			L310_LATENCY_CTRL_WR(tag[1] - 1) |
			L310_LATENCY_CTRL_SETUP(tag[2] - 1);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		l2x0_saved_regs.data_latency =
			L310_LATENCY_CTRL_RD(data[0] - 1) |
			L310_LATENCY_CTRL_WR(data[1] - 1) |
			L310_LATENCY_CTRL_SETUP(data[2] - 1);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		l2x0_saved_regs.filter_end =
			ALIGN(filter[0] + filter[1], SZ_1M);
		l2x0_saved_regs.filter_start = (filter[0] & ~(SZ_1M - 1))
			| L310_ADDR_FILTER_EN;
	}

	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
	if (ret)
		return;

	switch (assoc) {
	case 16:
		*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
		*aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16;
		*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
		break;
	case 8:
		*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
		*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
		break;
	default:
		pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
		       assoc);
		break;
	}

	prefetch = l2x0_saved_regs.prefetch_ctrl;

	ret = of_property_read_u32(np, "arm,double-linefill", &val);
	if (ret == 0) {
		if (val)
			prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL;
		else
			prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,double-linefill property value is missing\n");
	}

	ret = of_property_read_u32(np, "arm,double-linefill-incr", &val);
	if (ret == 0) {
		if (val)
			prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
		else
			prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,double-linefill-incr property value is missing\n");
	}

	ret = of_property_read_u32(np, "arm,double-linefill-wrap", &val);
	if (ret == 0) {
		if (!val)
			prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
		else
			prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,double-linefill-wrap property value is missing\n");
	}

	ret = of_property_read_u32(np, "arm,prefetch-drop", &val);
	if (ret == 0) {
		if (val)
			prefetch |= L310_PREFETCH_CTRL_PREFETCH_DROP;
		else
			prefetch &= ~L310_PREFETCH_CTRL_PREFETCH_DROP;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,prefetch-drop property value is missing\n");
	}

	ret = of_property_read_u32(np, "arm,prefetch-offset", &val);
	if (ret == 0) {
		prefetch &= ~L310_PREFETCH_CTRL_OFFSET_MASK;
		prefetch |= val & L310_PREFETCH_CTRL_OFFSET_MASK;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,prefetch-offset property value is missing\n");
	}

	l2x0_saved_regs.prefetch_ctrl = prefetch;
}
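/*
 * Example of the filter-range handling above: arm,filter-ranges =
 * <0x40000000 0x40000000> (base, size) yields filter_start =
 * 0x40000000 | L310_ADDR_FILTER_EN and filter_end = 0x80000000, both
 * at 1 MiB granularity.
 */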
static const struct l2c_init_data of_l2c310_data __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};

/*
 * This is a variant of the of_l2c310_data with .sync set to
 * NULL.  Outer sync operations are not needed when the system is I/O
 * coherent, and potentially harmful in certain situations (PCIe/PL310
 * deadlock on Armada 375/38x due to hardware I/O coherency).  The
 * other operations are kept because they are infrequent (therefore do
 * not cause the deadlock in practice) and needed for secondary CPU
 * boot and other power management activities.
 */
static const struct l2c_init_data of_l2c310_coherent_data __initconst = {
	.type = "L2C-310 Coherent",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.resume = l2c310_resume,
	},
};

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}
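/*
 * Worked example for calc_range_end() above, assuming a MAX_RANGE_SIZE
 * of 1024 bytes and 4 KiB pages: a request starting at 0x10f80 is first
 * capped to 0x11380 and then clipped to the page boundary at 0x11000,
 * so callers iterate in page-safe chunks of at most MAX_RANGE_SIZE.
 */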
/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			    unsigned long offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
	writel_relaxed(end, l2x0_base + offset);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	cache_sync();
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * Align start and end addresses to cache line boundaries
	 * (start rounded down, end rounded up).
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
				AURORA_INVAL_RANGE_REG);
		start = range_end;
	}
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}

static void aurora_save(void __iomem *base)
{
	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}

/*
 * For Aurora cache in no outer mode, enable via the CP15 coprocessor
 * broadcasting of cache commands to L2.
 */
static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
					  unsigned num_lock)
{
	u32 u;

	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));

	isb();

	l2c_enable(base, aux, num_lock);
}

static void __init aurora_fixup(void __iomem *base, u32 cache_id,
				struct outer_cache_fns *fns)
{
	sync_reg_offset = AURORA_SYNC_REG;
}

static void __init aurora_of_parse(const struct device_node *np,
				   u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			     &cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = l2c_enable,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = l2c_resume,
	},
};

static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = aurora_enable_no_outer,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.resume = l2c_resume,
	},
};
/*
 * For certain Broadcom SoCs, depending on the address range, different
 * offsets need to be added to the address before passing it to L2 for
 * invalidation/clean/flush
 *
 * Section	Address Range			Offset		EMI
 *   1		0x00000000 - 0x3FFFFFFF		0x80000000	VC
 *   2		0x40000000 - 0xBFFFFFFF		0x40000000	SYS
 *   3		0xC0000000 - 0xFFFFFFFF		0x80000000	VC
 *
 * When the start and end addresses have crossed two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, if we need to invalidate addresses starting at 0xBFFF0000
 * and ending at 0xC0001000, we need to invalidate 1) 0xBFFF0000 - 0xBFFFFFFF
 * and 2) 0xC0000000 - 0xC0001000
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer some
 * performance hit, but keep in mind the cross section case is very rare
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid use
 * case
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2.  Because of
 * that the code does not need to handle section 1 at all.
 */
#define BCM_SYS_EMI_START_ADDR		0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR	0xC0000000UL

#define BCM_SYS_EMI_OFFSET		0x40000000UL
#define BCM_VC_EMI_OFFSET		0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}
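/*
 * Example of the remapping above: a SYS EMI address such as 0x80000000
 * becomes 0x80000000 + 0x40000000 = 0xC0000000, while a section 3
 * VC EMI address such as 0xC0000000 becomes 0xC0000000 + 0x80000000 =
 * 0x40000000 (wrapping modulo 2^32), matching the offset table in the
 * comment above.
 */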
static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_inv_range(new_start, new_end);
		return;
	}

	/*
	 * They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_clean_range(new_start, new_end);
		return;
	}

	/*
	 * They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		outer_cache.flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_flush_range(new_start, new_end);
		return;
	}

	/*
	 * They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

/* Broadcom L2C-310 is based on ARM's R3P2 or later and requires no fixups */
static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.type = "BCM-L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};

static void __init tauros3_save(void __iomem *base)
{
	l2c_save(base);

	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L310_PREFETCH_CTRL);
}

static void tauros3_configure(void __iomem *base)
{
	writel_relaxed(l2x0_saved_regs.aux2_ctrl,
		       base + TAUROS3_AUX2_CTRL);
	writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
		       base + L310_PREFETCH_CTRL);
}

static const struct l2c_init_data of_tauros3_data __initconst = {
	.type = "Tauros3",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c_enable,
	.save = tauros3_save,
	.configure = tauros3_configure,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = l2c_resume,
	},
};

#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2c210_data),
	L2C_ID("arm,l220-cache", of_l2c220_data),
	L2C_ID("arm,pl310-cache", of_l2c310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};
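/*
 * Typical use (illustrative): a platform's machine init code calls
 *
 *	l2x0_of_init(0, ~0);
 *
 * to accept the DT/hardware-provided configuration unchanged; non-zero
 * aux_val/aux_mask arguments override AUX_CTRL bits as described in
 * __l2c_init().
 */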
int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id, old_aux;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	if (of_device_is_compatible(np, "arm,pl310-cache") &&
	    of_property_read_bool(np, "arm,io-coherent"))
		data = &of_l2c310_coherent_data;

	old_aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
	if (old_aux != ((old_aux & aux_mask) | aux_val)) {
		pr_warn("L2C: platform modifies aux control register: 0x%08x -> 0x%08x\n",
			old_aux, (old_aux & aux_mask) | aux_val);
	} else if (aux_mask != ~0U && aux_val != 0) {
		pr_alert("L2C: platform provided aux values match the hardware, so have no effect.  Please remove them.\n");
	}

	/* All L2 caches are unified, so this property should be specified */
	if (!of_property_read_bool(np, "cache-unified"))
		pr_err("L2C: device tree omits to specify unified cache\n");

	/* Read back current (default) hardware configuration */
	if (data->save)
		data->save(l2x0_base);

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	return __l2c_init(data, aux_val, aux_mask, cache_id);
}
#endif