1 /* 2 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support 3 * 4 * Copyright (C) 2007 ARM Limited 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program; if not, write to the Free Software 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 */ 19 #include <linux/err.h> 20 #include <linux/init.h> 21 #include <linux/spinlock.h> 22 #include <linux/io.h> 23 #include <linux/of.h> 24 #include <linux/of_address.h> 25 26 #include <asm/cacheflush.h> 27 #include <asm/hardware/cache-l2x0.h> 28 #include "cache-tauros3.h" 29 #include "cache-aurora-l2.h" 30 31 struct l2c_init_data { 32 unsigned num_lock; 33 void (*of_parse)(const struct device_node *, u32 *, u32 *); 34 void (*enable)(void __iomem *, u32, unsigned); 35 void (*fixup)(void __iomem *, u32, struct outer_cache_fns *); 36 void (*save)(void __iomem *); 37 struct outer_cache_fns outer_cache; 38 }; 39 40 #define CACHE_LINE_SIZE 32 41 42 static void __iomem *l2x0_base; 43 static DEFINE_RAW_SPINLOCK(l2x0_lock); 44 static u32 l2x0_way_mask; /* Bitmask of active ways */ 45 static u32 l2x0_size; 46 static unsigned long sync_reg_offset = L2X0_CACHE_SYNC; 47 48 struct l2x0_regs l2x0_saved_regs; 49 50 /* 51 * Common code for all cache controllers. 52 */ 53 static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask) 54 { 55 /* wait for cache operation by line or way to complete */ 56 while (readl_relaxed(reg) & mask) 57 cpu_relax(); 58 } 59 60 /* 61 * This should only be called when we have a requirement that the 62 * register be written due to a work-around, as platforms running 63 * in non-secure mode may not be able to access this register. 64 */ 65 static inline void l2c_set_debug(void __iomem *base, unsigned long val) 66 { 67 outer_cache.set_debug(val); 68 } 69 70 static void __l2c_op_way(void __iomem *reg) 71 { 72 writel_relaxed(l2x0_way_mask, reg); 73 l2c_wait_mask(reg, l2x0_way_mask); 74 } 75 76 static inline void l2c_unlock(void __iomem *base, unsigned num) 77 { 78 unsigned i; 79 80 for (i = 0; i < num; i++) { 81 writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE + 82 i * L2X0_LOCKDOWN_STRIDE); 83 writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE + 84 i * L2X0_LOCKDOWN_STRIDE); 85 } 86 } 87 88 /* 89 * Enable the L2 cache controller. This function must only be 90 * called when the cache controller is known to be disabled. 91 */ 92 static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock) 93 { 94 unsigned long flags; 95 96 /* Only write the aux register if it needs changing */ 97 if (readl_relaxed(base + L2X0_AUX_CTRL) != aux) 98 writel_relaxed(aux, base + L2X0_AUX_CTRL); 99 100 l2c_unlock(base, num_lock); 101 102 local_irq_save(flags); 103 __l2c_op_way(base + L2X0_INV_WAY); 104 writel_relaxed(0, base + sync_reg_offset); 105 l2c_wait_mask(base + sync_reg_offset, 1); 106 local_irq_restore(flags); 107 108 writel_relaxed(L2X0_CTRL_EN, base + L2X0_CTRL); 109 } 110 111 static void l2c_disable(void) 112 { 113 void __iomem *base = l2x0_base; 114 115 outer_cache.flush_all(); 116 writel_relaxed(0, base + L2X0_CTRL); 117 dsb(st); 118 } 119 120 #ifdef CONFIG_CACHE_PL310 121 static inline void cache_wait(void __iomem *reg, unsigned long mask) 122 { 123 /* cache operations by line are atomic on PL310 */ 124 } 125 #else 126 #define cache_wait l2c_wait_mask 127 #endif 128 129 static inline void cache_sync(void) 130 { 131 void __iomem *base = l2x0_base; 132 133 writel_relaxed(0, base + sync_reg_offset); 134 cache_wait(base + L2X0_CACHE_SYNC, 1); 135 } 136 137 static inline void l2x0_clean_line(unsigned long addr) 138 { 139 void __iomem *base = l2x0_base; 140 cache_wait(base + L2X0_CLEAN_LINE_PA, 1); 141 writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA); 142 } 143 144 static inline void l2x0_inv_line(unsigned long addr) 145 { 146 void __iomem *base = l2x0_base; 147 cache_wait(base + L2X0_INV_LINE_PA, 1); 148 writel_relaxed(addr, base + L2X0_INV_LINE_PA); 149 } 150 151 #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915) 152 static inline void debug_writel(unsigned long val) 153 { 154 if (outer_cache.set_debug) 155 l2c_set_debug(l2x0_base, val); 156 } 157 #else 158 /* Optimised out for non-errata case */ 159 static inline void debug_writel(unsigned long val) 160 { 161 } 162 #endif 163 164 #ifdef CONFIG_PL310_ERRATA_588369 165 static inline void l2x0_flush_line(unsigned long addr) 166 { 167 void __iomem *base = l2x0_base; 168 169 /* Clean by PA followed by Invalidate by PA */ 170 cache_wait(base + L2X0_CLEAN_LINE_PA, 1); 171 writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA); 172 cache_wait(base + L2X0_INV_LINE_PA, 1); 173 writel_relaxed(addr, base + L2X0_INV_LINE_PA); 174 } 175 #else 176 177 static inline void l2x0_flush_line(unsigned long addr) 178 { 179 void __iomem *base = l2x0_base; 180 cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); 181 writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA); 182 } 183 #endif 184 185 static void l2x0_cache_sync(void) 186 { 187 unsigned long flags; 188 189 raw_spin_lock_irqsave(&l2x0_lock, flags); 190 cache_sync(); 191 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 192 } 193 194 static void __l2x0_flush_all(void) 195 { 196 debug_writel(0x03); 197 __l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY); 198 cache_sync(); 199 debug_writel(0x00); 200 } 201 202 static void l2x0_flush_all(void) 203 { 204 unsigned long flags; 205 206 /* clean all ways */ 207 raw_spin_lock_irqsave(&l2x0_lock, flags); 208 __l2x0_flush_all(); 209 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 210 } 211 212 static void l2x0_clean_all(void) 213 { 214 unsigned long flags; 215 216 /* clean all ways */ 217 raw_spin_lock_irqsave(&l2x0_lock, flags); 218 __l2c_op_way(l2x0_base + L2X0_CLEAN_WAY); 219 cache_sync(); 220 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 221 } 222 223 static void l2x0_inv_all(void) 224 { 225 unsigned long flags; 226 227 /* invalidate all ways */ 228 raw_spin_lock_irqsave(&l2x0_lock, flags); 229 /* Invalidating when L2 is enabled is a nono */ 230 BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN); 231 __l2c_op_way(l2x0_base + L2X0_INV_WAY); 232 cache_sync(); 233 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 234 } 235 236 static void l2x0_inv_range(unsigned long start, unsigned long end) 237 { 238 void __iomem *base = l2x0_base; 239 unsigned long flags; 240 241 raw_spin_lock_irqsave(&l2x0_lock, flags); 242 if (start & (CACHE_LINE_SIZE - 1)) { 243 start &= ~(CACHE_LINE_SIZE - 1); 244 debug_writel(0x03); 245 l2x0_flush_line(start); 246 debug_writel(0x00); 247 start += CACHE_LINE_SIZE; 248 } 249 250 if (end & (CACHE_LINE_SIZE - 1)) { 251 end &= ~(CACHE_LINE_SIZE - 1); 252 debug_writel(0x03); 253 l2x0_flush_line(end); 254 debug_writel(0x00); 255 } 256 257 while (start < end) { 258 unsigned long blk_end = start + min(end - start, 4096UL); 259 260 while (start < blk_end) { 261 l2x0_inv_line(start); 262 start += CACHE_LINE_SIZE; 263 } 264 265 if (blk_end < end) { 266 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 267 raw_spin_lock_irqsave(&l2x0_lock, flags); 268 } 269 } 270 cache_wait(base + L2X0_INV_LINE_PA, 1); 271 cache_sync(); 272 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 273 } 274 275 static void l2x0_clean_range(unsigned long start, unsigned long end) 276 { 277 void __iomem *base = l2x0_base; 278 unsigned long flags; 279 280 if ((end - start) >= l2x0_size) { 281 l2x0_clean_all(); 282 return; 283 } 284 285 raw_spin_lock_irqsave(&l2x0_lock, flags); 286 start &= ~(CACHE_LINE_SIZE - 1); 287 while (start < end) { 288 unsigned long blk_end = start + min(end - start, 4096UL); 289 290 while (start < blk_end) { 291 l2x0_clean_line(start); 292 start += CACHE_LINE_SIZE; 293 } 294 295 if (blk_end < end) { 296 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 297 raw_spin_lock_irqsave(&l2x0_lock, flags); 298 } 299 } 300 cache_wait(base + L2X0_CLEAN_LINE_PA, 1); 301 cache_sync(); 302 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 303 } 304 305 static void l2x0_flush_range(unsigned long start, unsigned long end) 306 { 307 void __iomem *base = l2x0_base; 308 unsigned long flags; 309 310 if ((end - start) >= l2x0_size) { 311 l2x0_flush_all(); 312 return; 313 } 314 315 raw_spin_lock_irqsave(&l2x0_lock, flags); 316 start &= ~(CACHE_LINE_SIZE - 1); 317 while (start < end) { 318 unsigned long blk_end = start + min(end - start, 4096UL); 319 320 debug_writel(0x03); 321 while (start < blk_end) { 322 l2x0_flush_line(start); 323 start += CACHE_LINE_SIZE; 324 } 325 debug_writel(0x00); 326 327 if (blk_end < end) { 328 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 329 raw_spin_lock_irqsave(&l2x0_lock, flags); 330 } 331 } 332 cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); 333 cache_sync(); 334 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 335 } 336 337 static void l2x0_disable(void) 338 { 339 unsigned long flags; 340 341 raw_spin_lock_irqsave(&l2x0_lock, flags); 342 __l2x0_flush_all(); 343 writel_relaxed(0, l2x0_base + L2X0_CTRL); 344 dsb(st); 345 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 346 } 347 348 static void l2x0_enable(void __iomem *base, u32 aux, unsigned num_lock) 349 { 350 unsigned id; 351 352 id = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_PART_MASK; 353 if (id == L2X0_CACHE_ID_PART_L310) 354 num_lock = 8; 355 else 356 num_lock = 1; 357 358 /* l2x0 controller is disabled */ 359 writel_relaxed(aux, base + L2X0_AUX_CTRL); 360 361 /* Make sure that I&D is not locked down when starting */ 362 l2c_unlock(base, num_lock); 363 364 l2x0_inv_all(); 365 366 /* enable L2X0 */ 367 writel_relaxed(L2X0_CTRL_EN, base + L2X0_CTRL); 368 } 369 370 static void l2x0_resume(void) 371 { 372 void __iomem *base = l2x0_base; 373 374 if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) 375 l2x0_enable(base, l2x0_saved_regs.aux_ctrl, 0); 376 } 377 378 static const struct l2c_init_data l2x0_init_fns __initconst = { 379 .enable = l2x0_enable, 380 .outer_cache = { 381 .inv_range = l2x0_inv_range, 382 .clean_range = l2x0_clean_range, 383 .flush_range = l2x0_flush_range, 384 .flush_all = l2x0_flush_all, 385 .disable = l2x0_disable, 386 .sync = l2x0_cache_sync, 387 .resume = l2x0_resume, 388 }, 389 }; 390 391 /* 392 * L2C-210 specific code. 393 * 394 * The L2C-2x0 PA, set/way and sync operations are atomic, but we must 395 * ensure that no background operation is running. The way operations 396 * are all background tasks. 397 * 398 * While a background operation is in progress, any new operation is 399 * ignored (unspecified whether this causes an error.) Thankfully, not 400 * used on SMP. 401 * 402 * Never has a different sync register other than L2X0_CACHE_SYNC, but 403 * we use sync_reg_offset here so we can share some of this with L2C-310. 404 */ 405 static void __l2c210_cache_sync(void __iomem *base) 406 { 407 writel_relaxed(0, base + sync_reg_offset); 408 } 409 410 static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start, 411 unsigned long end) 412 { 413 while (start < end) { 414 writel_relaxed(start, reg); 415 start += CACHE_LINE_SIZE; 416 } 417 } 418 419 static void l2c210_inv_range(unsigned long start, unsigned long end) 420 { 421 void __iomem *base = l2x0_base; 422 423 if (start & (CACHE_LINE_SIZE - 1)) { 424 start &= ~(CACHE_LINE_SIZE - 1); 425 writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA); 426 start += CACHE_LINE_SIZE; 427 } 428 429 if (end & (CACHE_LINE_SIZE - 1)) { 430 end &= ~(CACHE_LINE_SIZE - 1); 431 writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA); 432 } 433 434 __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end); 435 __l2c210_cache_sync(base); 436 } 437 438 static void l2c210_clean_range(unsigned long start, unsigned long end) 439 { 440 void __iomem *base = l2x0_base; 441 442 start &= ~(CACHE_LINE_SIZE - 1); 443 __l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end); 444 __l2c210_cache_sync(base); 445 } 446 447 static void l2c210_flush_range(unsigned long start, unsigned long end) 448 { 449 void __iomem *base = l2x0_base; 450 451 start &= ~(CACHE_LINE_SIZE - 1); 452 __l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end); 453 __l2c210_cache_sync(base); 454 } 455 456 static void l2c210_flush_all(void) 457 { 458 void __iomem *base = l2x0_base; 459 460 BUG_ON(!irqs_disabled()); 461 462 __l2c_op_way(base + L2X0_CLEAN_INV_WAY); 463 __l2c210_cache_sync(base); 464 } 465 466 static void l2c210_sync(void) 467 { 468 __l2c210_cache_sync(l2x0_base); 469 } 470 471 static void l2c210_resume(void) 472 { 473 void __iomem *base = l2x0_base; 474 475 if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) 476 l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1); 477 } 478 479 static const struct l2c_init_data l2c210_data __initconst = { 480 .num_lock = 1, 481 .enable = l2c_enable, 482 .outer_cache = { 483 .inv_range = l2c210_inv_range, 484 .clean_range = l2c210_clean_range, 485 .flush_range = l2c210_flush_range, 486 .flush_all = l2c210_flush_all, 487 .disable = l2c_disable, 488 .sync = l2c210_sync, 489 .resume = l2c210_resume, 490 }, 491 }; 492 493 /* 494 * L2C-310 specific code. 495 * 496 * Very similar to L2C-210, the PA, set/way and sync operations are atomic, 497 * and the way operations are all background tasks. However, issuing an 498 * operation while a background operation is in progress results in a 499 * SLVERR response. We can reuse: 500 * 501 * __l2c210_cache_sync (using sync_reg_offset) 502 * l2c210_sync 503 * l2c210_inv_range (if 588369 is not applicable) 504 * l2c210_clean_range 505 * l2c210_flush_range (if 588369 is not applicable) 506 * l2c210_flush_all (if 727915 is not applicable) 507 * 508 * Errata: 509 * 588369: PL310 R0P0->R1P0, fixed R2P0. 510 * Affects: all clean+invalidate operations 511 * clean and invalidate skips the invalidate step, so we need to issue 512 * separate operations. We also require the above debug workaround 513 * enclosing this code fragment on affected parts. On unaffected parts, 514 * we must not use this workaround without the debug register writes 515 * to avoid exposing a problem similar to 727915. 516 * 517 * 727915: PL310 R2P0->R3P0, fixed R3P1. 518 * Affects: clean+invalidate by way 519 * clean and invalidate by way runs in the background, and a store can 520 * hit the line between the clean operation and invalidate operation, 521 * resulting in the store being lost. 522 * 523 * 753970: PL310 R3P0, fixed R3P1. 524 * Affects: sync 525 * prevents merging writes after the sync operation, until another L2C 526 * operation is performed (or a number of other conditions.) 527 * 528 * 769419: PL310 R0P0->R3P1, fixed R3P2. 529 * Affects: store buffer 530 * store buffer is not automatically drained. 531 */ 532 static void l2c310_set_debug(unsigned long val) 533 { 534 writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL); 535 } 536 537 static void l2c310_inv_range_erratum(unsigned long start, unsigned long end) 538 { 539 void __iomem *base = l2x0_base; 540 541 if ((start | end) & (CACHE_LINE_SIZE - 1)) { 542 unsigned long flags; 543 544 /* Erratum 588369 for both clean+invalidate operations */ 545 raw_spin_lock_irqsave(&l2x0_lock, flags); 546 l2c_set_debug(base, 0x03); 547 548 if (start & (CACHE_LINE_SIZE - 1)) { 549 start &= ~(CACHE_LINE_SIZE - 1); 550 writel_relaxed(start, base + L2X0_CLEAN_LINE_PA); 551 writel_relaxed(start, base + L2X0_INV_LINE_PA); 552 start += CACHE_LINE_SIZE; 553 } 554 555 if (end & (CACHE_LINE_SIZE - 1)) { 556 end &= ~(CACHE_LINE_SIZE - 1); 557 writel_relaxed(end, base + L2X0_CLEAN_LINE_PA); 558 writel_relaxed(end, base + L2X0_INV_LINE_PA); 559 } 560 561 l2c_set_debug(base, 0x00); 562 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 563 } 564 565 __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end); 566 __l2c210_cache_sync(base); 567 } 568 569 static void l2c310_flush_range_erratum(unsigned long start, unsigned long end) 570 { 571 raw_spinlock_t *lock = &l2x0_lock; 572 unsigned long flags; 573 void __iomem *base = l2x0_base; 574 575 raw_spin_lock_irqsave(lock, flags); 576 while (start < end) { 577 unsigned long blk_end = start + min(end - start, 4096UL); 578 579 l2c_set_debug(base, 0x03); 580 while (start < blk_end) { 581 writel_relaxed(start, base + L2X0_CLEAN_LINE_PA); 582 writel_relaxed(start, base + L2X0_INV_LINE_PA); 583 start += CACHE_LINE_SIZE; 584 } 585 l2c_set_debug(base, 0x00); 586 587 if (blk_end < end) { 588 raw_spin_unlock_irqrestore(lock, flags); 589 raw_spin_lock_irqsave(lock, flags); 590 } 591 } 592 raw_spin_unlock_irqrestore(lock, flags); 593 __l2c210_cache_sync(base); 594 } 595 596 static void l2c310_flush_all_erratum(void) 597 { 598 void __iomem *base = l2x0_base; 599 unsigned long flags; 600 601 raw_spin_lock_irqsave(&l2x0_lock, flags); 602 l2c_set_debug(base, 0x03); 603 __l2c_op_way(base + L2X0_CLEAN_INV_WAY); 604 l2c_set_debug(base, 0x00); 605 __l2c210_cache_sync(base); 606 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 607 } 608 609 static void __init l2c310_save(void __iomem *base) 610 { 611 unsigned revision; 612 613 l2x0_saved_regs.tag_latency = readl_relaxed(base + 614 L2X0_TAG_LATENCY_CTRL); 615 l2x0_saved_regs.data_latency = readl_relaxed(base + 616 L2X0_DATA_LATENCY_CTRL); 617 l2x0_saved_regs.filter_end = readl_relaxed(base + 618 L2X0_ADDR_FILTER_END); 619 l2x0_saved_regs.filter_start = readl_relaxed(base + 620 L2X0_ADDR_FILTER_START); 621 622 revision = readl_relaxed(base + L2X0_CACHE_ID) & 623 L2X0_CACHE_ID_RTL_MASK; 624 625 /* From r2p0, there is Prefetch offset/control register */ 626 if (revision >= L310_CACHE_ID_RTL_R2P0) 627 l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base + 628 L2X0_PREFETCH_CTRL); 629 630 /* From r3p0, there is Power control register */ 631 if (revision >= L310_CACHE_ID_RTL_R3P0) 632 l2x0_saved_regs.pwr_ctrl = readl_relaxed(base + 633 L2X0_POWER_CTRL); 634 } 635 636 static void l2c310_resume(void) 637 { 638 void __iomem *base = l2x0_base; 639 640 if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) { 641 unsigned revision; 642 643 /* restore pl310 setup */ 644 writel_relaxed(l2x0_saved_regs.tag_latency, 645 base + L2X0_TAG_LATENCY_CTRL); 646 writel_relaxed(l2x0_saved_regs.data_latency, 647 base + L2X0_DATA_LATENCY_CTRL); 648 writel_relaxed(l2x0_saved_regs.filter_end, 649 base + L2X0_ADDR_FILTER_END); 650 writel_relaxed(l2x0_saved_regs.filter_start, 651 base + L2X0_ADDR_FILTER_START); 652 653 revision = readl_relaxed(base + L2X0_CACHE_ID) & 654 L2X0_CACHE_ID_RTL_MASK; 655 656 if (revision >= L310_CACHE_ID_RTL_R2P0) 657 writel_relaxed(l2x0_saved_regs.prefetch_ctrl, 658 base + L2X0_PREFETCH_CTRL); 659 if (revision >= L310_CACHE_ID_RTL_R3P0) 660 writel_relaxed(l2x0_saved_regs.pwr_ctrl, 661 base + L2X0_POWER_CTRL); 662 663 l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8); 664 } 665 } 666 667 static void __init l2c310_fixup(void __iomem *base, u32 cache_id, 668 struct outer_cache_fns *fns) 669 { 670 unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK; 671 const char *errata[4]; 672 unsigned n = 0; 673 674 /* For compatibility */ 675 if (revision <= L310_CACHE_ID_RTL_R3P0) 676 fns->set_debug = l2c310_set_debug; 677 678 if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) && 679 revision < L310_CACHE_ID_RTL_R2P0 && 680 /* For bcm compatibility */ 681 fns->inv_range == l2c210_inv_range) { 682 fns->inv_range = l2c310_inv_range_erratum; 683 fns->flush_range = l2c310_flush_range_erratum; 684 errata[n++] = "588369"; 685 } 686 687 if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) && 688 revision >= L310_CACHE_ID_RTL_R2P0 && 689 revision < L310_CACHE_ID_RTL_R3P1) { 690 fns->flush_all = l2c310_flush_all_erratum; 691 errata[n++] = "727915"; 692 } 693 694 if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) && 695 revision == L310_CACHE_ID_RTL_R3P0) { 696 sync_reg_offset = L2X0_DUMMY_REG; 697 errata[n++] = "753970"; 698 } 699 700 if (IS_ENABLED(CONFIG_PL310_ERRATA_769419)) 701 errata[n++] = "769419"; 702 703 if (n) { 704 unsigned i; 705 706 pr_info("L2C-310 errat%s", n > 1 ? "a" : "um"); 707 for (i = 0; i < n; i++) 708 pr_cont(" %s", errata[i]); 709 pr_cont(" enabled\n"); 710 } 711 } 712 713 static const struct l2c_init_data l2c310_init_fns __initconst = { 714 .num_lock = 8, 715 .enable = l2c_enable, 716 .fixup = l2c310_fixup, 717 .save = l2c310_save, 718 .outer_cache = { 719 .inv_range = l2c210_inv_range, 720 .clean_range = l2c210_clean_range, 721 .flush_range = l2c210_flush_range, 722 .flush_all = l2c210_flush_all, 723 .disable = l2c_disable, 724 .sync = l2c210_sync, 725 .set_debug = l2c310_set_debug, 726 .resume = l2c310_resume, 727 }, 728 }; 729 730 static void __init __l2c_init(const struct l2c_init_data *data, 731 u32 aux_val, u32 aux_mask, u32 cache_id) 732 { 733 struct outer_cache_fns fns; 734 u32 aux; 735 u32 way_size = 0; 736 int ways; 737 int way_size_shift = L2X0_WAY_SIZE_SHIFT; 738 const char *type; 739 740 /* 741 * It is strange to save the register state before initialisation, 742 * but hey, this is what the DT implementations decided to do. 743 */ 744 if (data->save) 745 data->save(l2x0_base); 746 747 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 748 749 aux &= aux_mask; 750 aux |= aux_val; 751 752 /* Determine the number of ways */ 753 switch (cache_id & L2X0_CACHE_ID_PART_MASK) { 754 case L2X0_CACHE_ID_PART_L310: 755 if (aux & (1 << 16)) 756 ways = 16; 757 else 758 ways = 8; 759 type = "L310"; 760 break; 761 762 case L2X0_CACHE_ID_PART_L210: 763 ways = (aux >> 13) & 0xf; 764 type = "L210"; 765 break; 766 767 case AURORA_CACHE_ID: 768 ways = (aux >> 13) & 0xf; 769 ways = 2 << ((ways + 1) >> 2); 770 way_size_shift = AURORA_WAY_SIZE_SHIFT; 771 type = "Aurora"; 772 break; 773 774 default: 775 /* Assume unknown chips have 8 ways */ 776 ways = 8; 777 type = "L2x0 series"; 778 break; 779 } 780 781 l2x0_way_mask = (1 << ways) - 1; 782 783 /* 784 * L2 cache Size = Way size * Number of ways 785 */ 786 way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17; 787 way_size = 1 << (way_size + way_size_shift); 788 789 l2x0_size = ways * way_size * SZ_1K; 790 791 fns = data->outer_cache; 792 if (data->fixup) 793 data->fixup(l2x0_base, cache_id, &fns); 794 795 /* 796 * Check if l2x0 controller is already enabled. If we are booting 797 * in non-secure mode accessing the below registers will fault. 798 */ 799 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) 800 data->enable(l2x0_base, aux, data->num_lock); 801 802 /* Re-read it in case some bits are reserved. */ 803 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 804 805 /* Save the value for resuming. */ 806 l2x0_saved_regs.aux_ctrl = aux; 807 808 outer_cache = fns; 809 810 pr_info("%s cache controller enabled, %d ways, %d kB\n", 811 type, ways, l2x0_size >> 10); 812 pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n", 813 type, cache_id, aux); 814 } 815 816 void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) 817 { 818 const struct l2c_init_data *data; 819 u32 cache_id; 820 821 l2x0_base = base; 822 823 cache_id = readl_relaxed(base + L2X0_CACHE_ID); 824 825 switch (cache_id & L2X0_CACHE_ID_PART_MASK) { 826 default: 827 data = &l2x0_init_fns; 828 break; 829 830 case L2X0_CACHE_ID_PART_L210: 831 data = &l2c210_data; 832 break; 833 834 case L2X0_CACHE_ID_PART_L310: 835 data = &l2c310_init_fns; 836 break; 837 } 838 839 __l2c_init(data, aux_val, aux_mask, cache_id); 840 } 841 842 #ifdef CONFIG_OF 843 static int l2_wt_override; 844 845 /* Aurora don't have the cache ID register available, so we have to 846 * pass it though the device tree */ 847 static u32 cache_id_part_number_from_dt; 848 849 static void __init l2x0_of_parse(const struct device_node *np, 850 u32 *aux_val, u32 *aux_mask) 851 { 852 u32 data[2] = { 0, 0 }; 853 u32 tag = 0; 854 u32 dirty = 0; 855 u32 val = 0, mask = 0; 856 857 of_property_read_u32(np, "arm,tag-latency", &tag); 858 if (tag) { 859 mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK; 860 val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT; 861 } 862 863 of_property_read_u32_array(np, "arm,data-latency", 864 data, ARRAY_SIZE(data)); 865 if (data[0] && data[1]) { 866 mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK | 867 L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK; 868 val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) | 869 ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT); 870 } 871 872 of_property_read_u32(np, "arm,dirty-latency", &dirty); 873 if (dirty) { 874 mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK; 875 val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT; 876 } 877 878 *aux_val &= ~mask; 879 *aux_val |= val; 880 *aux_mask &= ~mask; 881 } 882 883 static const struct l2c_init_data of_l2c210_data __initconst = { 884 .num_lock = 1, 885 .of_parse = l2x0_of_parse, 886 .enable = l2c_enable, 887 .outer_cache = { 888 .inv_range = l2c210_inv_range, 889 .clean_range = l2c210_clean_range, 890 .flush_range = l2c210_flush_range, 891 .flush_all = l2c210_flush_all, 892 .disable = l2c_disable, 893 .sync = l2c210_sync, 894 .resume = l2c210_resume, 895 }, 896 }; 897 898 static const struct l2c_init_data of_l2x0_data __initconst = { 899 .of_parse = l2x0_of_parse, 900 .enable = l2x0_enable, 901 .outer_cache = { 902 .inv_range = l2x0_inv_range, 903 .clean_range = l2x0_clean_range, 904 .flush_range = l2x0_flush_range, 905 .flush_all = l2x0_flush_all, 906 .disable = l2x0_disable, 907 .sync = l2x0_cache_sync, 908 .resume = l2x0_resume, 909 }, 910 }; 911 912 static void __init l2c310_of_parse(const struct device_node *np, 913 u32 *aux_val, u32 *aux_mask) 914 { 915 u32 data[3] = { 0, 0, 0 }; 916 u32 tag[3] = { 0, 0, 0 }; 917 u32 filter[2] = { 0, 0 }; 918 919 of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag)); 920 if (tag[0] && tag[1] && tag[2]) 921 writel_relaxed( 922 ((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) | 923 ((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) | 924 ((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT), 925 l2x0_base + L2X0_TAG_LATENCY_CTRL); 926 927 of_property_read_u32_array(np, "arm,data-latency", 928 data, ARRAY_SIZE(data)); 929 if (data[0] && data[1] && data[2]) 930 writel_relaxed( 931 ((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) | 932 ((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) | 933 ((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT), 934 l2x0_base + L2X0_DATA_LATENCY_CTRL); 935 936 of_property_read_u32_array(np, "arm,filter-ranges", 937 filter, ARRAY_SIZE(filter)); 938 if (filter[1]) { 939 writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M), 940 l2x0_base + L2X0_ADDR_FILTER_END); 941 writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN, 942 l2x0_base + L2X0_ADDR_FILTER_START); 943 } 944 } 945 946 static const struct l2c_init_data of_l2c310_data __initconst = { 947 .num_lock = 8, 948 .of_parse = l2c310_of_parse, 949 .enable = l2c_enable, 950 .fixup = l2c310_fixup, 951 .save = l2c310_save, 952 .outer_cache = { 953 .inv_range = l2c210_inv_range, 954 .clean_range = l2c210_clean_range, 955 .flush_range = l2c210_flush_range, 956 .flush_all = l2c210_flush_all, 957 .disable = l2c_disable, 958 .sync = l2c210_sync, 959 .set_debug = l2c310_set_debug, 960 .resume = l2c310_resume, 961 }, 962 }; 963 964 /* 965 * Note that the end addresses passed to Linux primitives are 966 * noninclusive, while the hardware cache range operations use 967 * inclusive start and end addresses. 968 */ 969 static unsigned long calc_range_end(unsigned long start, unsigned long end) 970 { 971 /* 972 * Limit the number of cache lines processed at once, 973 * since cache range operations stall the CPU pipeline 974 * until completion. 975 */ 976 if (end > start + MAX_RANGE_SIZE) 977 end = start + MAX_RANGE_SIZE; 978 979 /* 980 * Cache range operations can't straddle a page boundary. 981 */ 982 if (end > PAGE_ALIGN(start+1)) 983 end = PAGE_ALIGN(start+1); 984 985 return end; 986 } 987 988 /* 989 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT 990 * and range operations only do a TLB lookup on the start address. 991 */ 992 static void aurora_pa_range(unsigned long start, unsigned long end, 993 unsigned long offset) 994 { 995 unsigned long flags; 996 997 raw_spin_lock_irqsave(&l2x0_lock, flags); 998 writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG); 999 writel_relaxed(end, l2x0_base + offset); 1000 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 1001 1002 cache_sync(); 1003 } 1004 1005 static void aurora_inv_range(unsigned long start, unsigned long end) 1006 { 1007 /* 1008 * round start and end adresses up to cache line size 1009 */ 1010 start &= ~(CACHE_LINE_SIZE - 1); 1011 end = ALIGN(end, CACHE_LINE_SIZE); 1012 1013 /* 1014 * Invalidate all full cache lines between 'start' and 'end'. 1015 */ 1016 while (start < end) { 1017 unsigned long range_end = calc_range_end(start, end); 1018 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 1019 AURORA_INVAL_RANGE_REG); 1020 start = range_end; 1021 } 1022 } 1023 1024 static void aurora_clean_range(unsigned long start, unsigned long end) 1025 { 1026 /* 1027 * If L2 is forced to WT, the L2 will always be clean and we 1028 * don't need to do anything here. 1029 */ 1030 if (!l2_wt_override) { 1031 start &= ~(CACHE_LINE_SIZE - 1); 1032 end = ALIGN(end, CACHE_LINE_SIZE); 1033 while (start != end) { 1034 unsigned long range_end = calc_range_end(start, end); 1035 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 1036 AURORA_CLEAN_RANGE_REG); 1037 start = range_end; 1038 } 1039 } 1040 } 1041 1042 static void aurora_flush_range(unsigned long start, unsigned long end) 1043 { 1044 start &= ~(CACHE_LINE_SIZE - 1); 1045 end = ALIGN(end, CACHE_LINE_SIZE); 1046 while (start != end) { 1047 unsigned long range_end = calc_range_end(start, end); 1048 /* 1049 * If L2 is forced to WT, the L2 will always be clean and we 1050 * just need to invalidate. 1051 */ 1052 if (l2_wt_override) 1053 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 1054 AURORA_INVAL_RANGE_REG); 1055 else 1056 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 1057 AURORA_FLUSH_RANGE_REG); 1058 start = range_end; 1059 } 1060 } 1061 1062 static void aurora_save(void __iomem *base) 1063 { 1064 l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL); 1065 l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL); 1066 } 1067 1068 static void aurora_resume(void) 1069 { 1070 void __iomem *base = l2x0_base; 1071 1072 if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) { 1073 writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL); 1074 writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL); 1075 } 1076 } 1077 1078 /* 1079 * For Aurora cache in no outer mode, enable via the CP15 coprocessor 1080 * broadcasting of cache commands to L2. 1081 */ 1082 static void __init aurora_enable_no_outer(void __iomem *base, u32 aux, 1083 unsigned num_lock) 1084 { 1085 u32 u; 1086 1087 asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u)); 1088 u |= AURORA_CTRL_FW; /* Set the FW bit */ 1089 asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u)); 1090 1091 isb(); 1092 1093 l2c_enable(base, aux, num_lock); 1094 } 1095 1096 static void __init aurora_fixup(void __iomem *base, u32 cache_id, 1097 struct outer_cache_fns *fns) 1098 { 1099 sync_reg_offset = AURORA_SYNC_REG; 1100 } 1101 1102 static void __init aurora_of_parse(const struct device_node *np, 1103 u32 *aux_val, u32 *aux_mask) 1104 { 1105 u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU; 1106 u32 mask = AURORA_ACR_REPLACEMENT_MASK; 1107 1108 of_property_read_u32(np, "cache-id-part", 1109 &cache_id_part_number_from_dt); 1110 1111 /* Determine and save the write policy */ 1112 l2_wt_override = of_property_read_bool(np, "wt-override"); 1113 1114 if (l2_wt_override) { 1115 val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY; 1116 mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK; 1117 } 1118 1119 *aux_val &= ~mask; 1120 *aux_val |= val; 1121 *aux_mask &= ~mask; 1122 } 1123 1124 static const struct l2c_init_data of_aurora_with_outer_data __initconst = { 1125 .num_lock = 4, 1126 .of_parse = aurora_of_parse, 1127 .enable = l2c_enable, 1128 .fixup = aurora_fixup, 1129 .save = aurora_save, 1130 .outer_cache = { 1131 .inv_range = aurora_inv_range, 1132 .clean_range = aurora_clean_range, 1133 .flush_range = aurora_flush_range, 1134 .flush_all = l2x0_flush_all, 1135 .disable = l2x0_disable, 1136 .sync = l2x0_cache_sync, 1137 .resume = aurora_resume, 1138 }, 1139 }; 1140 1141 static const struct l2c_init_data of_aurora_no_outer_data __initconst = { 1142 .num_lock = 4, 1143 .of_parse = aurora_of_parse, 1144 .enable = aurora_enable_no_outer, 1145 .fixup = aurora_fixup, 1146 .save = aurora_save, 1147 .outer_cache = { 1148 .resume = aurora_resume, 1149 }, 1150 }; 1151 1152 /* 1153 * For certain Broadcom SoCs, depending on the address range, different offsets 1154 * need to be added to the address before passing it to L2 for 1155 * invalidation/clean/flush 1156 * 1157 * Section Address Range Offset EMI 1158 * 1 0x00000000 - 0x3FFFFFFF 0x80000000 VC 1159 * 2 0x40000000 - 0xBFFFFFFF 0x40000000 SYS 1160 * 3 0xC0000000 - 0xFFFFFFFF 0x80000000 VC 1161 * 1162 * When the start and end addresses have crossed two different sections, we 1163 * need to break the L2 operation into two, each within its own section. 1164 * For example, if we need to invalidate addresses starts at 0xBFFF0000 and 1165 * ends at 0xC0001000, we need do invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and 2) 1166 * 0xC0000000 - 0xC0001000 1167 * 1168 * Note 1: 1169 * By breaking a single L2 operation into two, we may potentially suffer some 1170 * performance hit, but keep in mind the cross section case is very rare 1171 * 1172 * Note 2: 1173 * We do not need to handle the case when the start address is in 1174 * Section 1 and the end address is in Section 3, since it is not a valid use 1175 * case 1176 * 1177 * Note 3: 1178 * Section 1 in practical terms can no longer be used on rev A2. Because of 1179 * that the code does not need to handle section 1 at all. 1180 * 1181 */ 1182 #define BCM_SYS_EMI_START_ADDR 0x40000000UL 1183 #define BCM_VC_EMI_SEC3_START_ADDR 0xC0000000UL 1184 1185 #define BCM_SYS_EMI_OFFSET 0x40000000UL 1186 #define BCM_VC_EMI_OFFSET 0x80000000UL 1187 1188 static inline int bcm_addr_is_sys_emi(unsigned long addr) 1189 { 1190 return (addr >= BCM_SYS_EMI_START_ADDR) && 1191 (addr < BCM_VC_EMI_SEC3_START_ADDR); 1192 } 1193 1194 static inline unsigned long bcm_l2_phys_addr(unsigned long addr) 1195 { 1196 if (bcm_addr_is_sys_emi(addr)) 1197 return addr + BCM_SYS_EMI_OFFSET; 1198 else 1199 return addr + BCM_VC_EMI_OFFSET; 1200 } 1201 1202 static void bcm_inv_range(unsigned long start, unsigned long end) 1203 { 1204 unsigned long new_start, new_end; 1205 1206 BUG_ON(start < BCM_SYS_EMI_START_ADDR); 1207 1208 if (unlikely(end <= start)) 1209 return; 1210 1211 new_start = bcm_l2_phys_addr(start); 1212 new_end = bcm_l2_phys_addr(end); 1213 1214 /* normal case, no cross section between start and end */ 1215 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { 1216 l2x0_inv_range(new_start, new_end); 1217 return; 1218 } 1219 1220 /* They cross sections, so it can only be a cross from section 1221 * 2 to section 3 1222 */ 1223 l2x0_inv_range(new_start, 1224 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); 1225 l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), 1226 new_end); 1227 } 1228 1229 static void bcm_clean_range(unsigned long start, unsigned long end) 1230 { 1231 unsigned long new_start, new_end; 1232 1233 BUG_ON(start < BCM_SYS_EMI_START_ADDR); 1234 1235 if (unlikely(end <= start)) 1236 return; 1237 1238 if ((end - start) >= l2x0_size) { 1239 l2x0_clean_all(); 1240 return; 1241 } 1242 1243 new_start = bcm_l2_phys_addr(start); 1244 new_end = bcm_l2_phys_addr(end); 1245 1246 /* normal case, no cross section between start and end */ 1247 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { 1248 l2x0_clean_range(new_start, new_end); 1249 return; 1250 } 1251 1252 /* They cross sections, so it can only be a cross from section 1253 * 2 to section 3 1254 */ 1255 l2x0_clean_range(new_start, 1256 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); 1257 l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), 1258 new_end); 1259 } 1260 1261 static void bcm_flush_range(unsigned long start, unsigned long end) 1262 { 1263 unsigned long new_start, new_end; 1264 1265 BUG_ON(start < BCM_SYS_EMI_START_ADDR); 1266 1267 if (unlikely(end <= start)) 1268 return; 1269 1270 if ((end - start) >= l2x0_size) { 1271 l2x0_flush_all(); 1272 return; 1273 } 1274 1275 new_start = bcm_l2_phys_addr(start); 1276 new_end = bcm_l2_phys_addr(end); 1277 1278 /* normal case, no cross section between start and end */ 1279 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { 1280 l2x0_flush_range(new_start, new_end); 1281 return; 1282 } 1283 1284 /* They cross sections, so it can only be a cross from section 1285 * 2 to section 3 1286 */ 1287 l2x0_flush_range(new_start, 1288 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); 1289 l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), 1290 new_end); 1291 } 1292 1293 static const struct l2c_init_data of_bcm_l2x0_data __initconst = { 1294 .num_lock = 8, 1295 .of_parse = l2c310_of_parse, 1296 .enable = l2c_enable, 1297 .fixup = l2c310_fixup, 1298 .save = l2c310_save, 1299 .outer_cache = { 1300 .inv_range = bcm_inv_range, 1301 .clean_range = bcm_clean_range, 1302 .flush_range = bcm_flush_range, 1303 .flush_all = l2c210_flush_all, 1304 .disable = l2c_disable, 1305 .sync = l2c210_sync, 1306 .resume = l2c310_resume, 1307 }, 1308 }; 1309 1310 static void __init tauros3_save(void __iomem *base) 1311 { 1312 l2x0_saved_regs.aux2_ctrl = 1313 readl_relaxed(base + TAUROS3_AUX2_CTRL); 1314 l2x0_saved_regs.prefetch_ctrl = 1315 readl_relaxed(base + L2X0_PREFETCH_CTRL); 1316 } 1317 1318 static void tauros3_resume(void) 1319 { 1320 void __iomem *base = l2x0_base; 1321 1322 if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) { 1323 writel_relaxed(l2x0_saved_regs.aux2_ctrl, 1324 base + TAUROS3_AUX2_CTRL); 1325 writel_relaxed(l2x0_saved_regs.prefetch_ctrl, 1326 base + L2X0_PREFETCH_CTRL); 1327 1328 l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8); 1329 } 1330 } 1331 1332 static const struct l2c_init_data of_tauros3_data __initconst = { 1333 .num_lock = 8, 1334 .enable = l2c_enable, 1335 .save = tauros3_save, 1336 /* Tauros3 broadcasts L1 cache operations to L2 */ 1337 .outer_cache = { 1338 .resume = tauros3_resume, 1339 }, 1340 }; 1341 1342 #define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns } 1343 static const struct of_device_id l2x0_ids[] __initconst = { 1344 L2C_ID("arm,l210-cache", of_l2c210_data), 1345 L2C_ID("arm,l220-cache", of_l2x0_data), 1346 L2C_ID("arm,pl310-cache", of_l2c310_data), 1347 L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data), 1348 L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data), 1349 L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data), 1350 L2C_ID("marvell,tauros3-cache", of_tauros3_data), 1351 /* Deprecated IDs */ 1352 L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data), 1353 {} 1354 }; 1355 1356 int __init l2x0_of_init(u32 aux_val, u32 aux_mask) 1357 { 1358 const struct l2c_init_data *data; 1359 struct device_node *np; 1360 struct resource res; 1361 u32 cache_id; 1362 1363 np = of_find_matching_node(NULL, l2x0_ids); 1364 if (!np) 1365 return -ENODEV; 1366 1367 if (of_address_to_resource(np, 0, &res)) 1368 return -ENODEV; 1369 1370 l2x0_base = ioremap(res.start, resource_size(&res)); 1371 if (!l2x0_base) 1372 return -ENOMEM; 1373 1374 l2x0_saved_regs.phy_base = res.start; 1375 1376 data = of_match_node(l2x0_ids, np)->data; 1377 1378 /* L2 configuration can only be changed if the cache is disabled */ 1379 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) 1380 if (data->of_parse) 1381 data->of_parse(np, &aux_val, &aux_mask); 1382 1383 if (cache_id_part_number_from_dt) 1384 cache_id = cache_id_part_number_from_dt; 1385 else 1386 cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); 1387 1388 __l2c_init(data, aux_val, aux_mask, cache_id); 1389 1390 return 0; 1391 } 1392 #endif 1393