/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"

struct l2c_init_data {
	const char *type;
	unsigned way_size_0;
	unsigned num_lock;
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*enable)(void __iomem *, u32, unsigned);
	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
	void (*save)(void __iomem *);
	struct outer_cache_fns outer_cache;
};

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

/*
 * Common code for all cache controllers.
 */
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

/*
 * This should only be called when we have a requirement that the
 * register be written due to a work-around, as platforms running
 * in non-secure mode may not be able to access this register.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	outer_cache.set_debug(val);
}

static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	l2c_wait_mask(reg, l2x0_way_mask);
}

static inline void l2c_unlock(void __iomem *base, unsigned num)
{
	unsigned i;

	for (i = 0; i < num; i++) {
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}
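/*
 * Illustrative example (not from the original source): on an 8-way
 * cache, l2x0_way_mask is (1 << 8) - 1 = 0xff, so
 * __l2c_op_way(base + L2X0_CLEAN_INV_WAY) writes 0xff to the
 * clean+invalidate-by-way register and then spins until the hardware
 * has cleared all eight way bits, i.e. until the background operation
 * has completed on every way.
 */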
/*
 * Enable the L2 cache controller.  This function must only be
 * called when the cache controller is known to be disabled.
 */
static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned long flags;

	/* Only write the aux register if it needs changing */
	if (readl_relaxed(base + L2X0_AUX_CTRL) != aux)
		writel_relaxed(aux, base + L2X0_AUX_CTRL);

	l2c_unlock(base, num_lock);

	local_irq_save(flags);
	__l2c_op_way(base + L2X0_INV_WAY);
	writel_relaxed(0, base + sync_reg_offset);
	l2c_wait_mask(base + sync_reg_offset, 1);
	local_irq_restore(flags);

	writel_relaxed(L2X0_CTRL_EN, base + L2X0_CTRL);
}

static void l2c_disable(void)
{
	void __iomem *base = l2x0_base;

	outer_cache.flush_all();
	writel_relaxed(0, base + L2X0_CTRL);
	dsb(st);
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	l2c_wait_mask
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug)
		l2c_set_debug(l2x0_base, val);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
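/*
 * Note (illustrative, based on the generic ARM outer cache interface):
 * none of the methods above is called directly.  They are published
 * through the global 'outer_cache' structure by __l2c_init(), so that
 * e.g. outer_flush_all() in core code ends up invoking the flush_all
 * handler installed from one of the init data tables below.
 */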
/*
 * L2C-210 specific code.
 *
 * The L2C-2x0 PA, set/way and sync operations are atomic, but we must
 * ensure that no background operation is running.  The way operations
 * are all background tasks.
 *
 * While a background operation is in progress, any new operation is
 * ignored (unspecified whether this causes an error.)  Thankfully, the
 * L2C-210 is never used on SMP systems.
 *
 * The sync register is always L2X0_CACHE_SYNC, but we use
 * sync_reg_offset here so we can share some of this code with L2C-310.
 */
static void __l2c210_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + sync_reg_offset);
}

static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end)
{
	while (start < end) {
		writel_relaxed(start, reg);
		start += CACHE_LINE_SIZE;
	}
}

static void l2c210_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	/*
	 * Partial cache lines at either end of the range must be
	 * cleaned+invalidated rather than just invalidated, so that
	 * unrelated dirty data sharing those lines is not discarded.
	 */
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_all(void)
{
	void __iomem *base = l2x0_base;

	BUG_ON(!irqs_disabled());

	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	__l2c210_cache_sync(base);
}

static void l2c210_sync(void)
{
	__l2c210_cache_sync(l2x0_base);
}

static void l2c210_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1);
}

static const struct l2c_init_data l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c_enable,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c210_resume,
	},
};
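/*
 * Worked example (illustrative only): l2c210_inv_range(0x1008, 0x2010)
 * clean+invalidates the partially covered lines at PA 0x1000 and
 * PA 0x2000, then invalidates the whole lines 0x1020, 0x1040, ...,
 * 0x1fe0, and finally issues a sync.
 */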
/*
 * L2C-220 specific code.
 *
 * All operations are background operations: they have to be waited for.
 * Conflicting requests generate a slave error (which will cause an
 * imprecise abort.)  Never uses sync_reg_offset, so we hard-code the
 * sync register here.
 *
 * However, we can re-use the l2c210_resume call.
 */
static inline void __l2c220_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
	l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
}

static void l2c220_op_way(void __iomem *base, unsigned reg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + reg);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end, unsigned long flags)
{
	raw_spinlock_t *lock = &l2x0_lock;

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2c_wait_mask(reg, 1);
			writel_relaxed(start, reg);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}

	return flags;
}

static void l2c220_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
			writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
		}
	}

	flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
				   start, end, flags);
	/* Wait on the register the clean operations were issued to */
	l2c_wait_mask(base + L2X0_CLEAN_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_all(void)
{
	l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
}

static void l2c220_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c220_cache_sync(l2x0_base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
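/*
 * Note on l2c220_op_pa_range() above (describing the code, not the
 * hardware): at most 4096 bytes (128 lines of 32 bytes) are processed
 * per pass, and l2x0_lock is dropped and re-acquired between passes,
 * restoring the caller's interrupt state in the window.  Since every
 * L2C-220 line operation must be polled for completion, this bounds
 * the lock-held/IRQs-off latency of a large operation to one 4K block
 * rather than the whole range.
 */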
static const struct l2c_init_data l2c220_data __initconst = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c_enable,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c210_resume,
	},
};

/*
 * L2C-310 specific code.
 *
 * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
 * and the way operations are all background tasks.  However, issuing an
 * operation while a background operation is in progress results in a
 * SLVERR response.  We can reuse:
 *
 *	__l2c210_cache_sync (using sync_reg_offset)
 *	l2c210_sync
 *	l2c210_inv_range (if 588369 is not applicable)
 *	l2c210_clean_range
 *	l2c210_flush_range (if 588369 is not applicable)
 *	l2c210_flush_all (if 727915 is not applicable)
 *
 * Errata:
 * 588369: PL310 R0P0->R1P0, fixed R2P0.
 *	Affects: all clean+invalidate operations
 *	clean and invalidate skips the invalidate step, so we need to issue
 *	separate operations.  We also require the above debug workaround
 *	enclosing this code fragment on affected parts.  On unaffected parts,
 *	we must not use this workaround without the debug register writes
 *	to avoid exposing a problem similar to 727915.
 *
 * 727915: PL310 R2P0->R3P0, fixed R3P1.
 *	Affects: clean+invalidate by way
 *	clean and invalidate by way runs in the background, and a store can
 *	hit the line between the clean operation and invalidate operation,
 *	resulting in the store being lost.
 *
 * 753970: PL310 R3P0, fixed R3P1.
 *	Affects: sync
 *	prevents merging writes after the sync operation, until another L2C
 *	operation is performed (or a number of other conditions.)
 *
 * 769419: PL310 R0P0->R3P1, fixed R3P2.
 *	Affects: store buffer
 *	store buffer is not automatically drained.
 */
static void l2c310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}

static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		unsigned long flags;

		/* Erratum 588369 for both clean+invalidate operations */
		raw_spin_lock_irqsave(&l2x0_lock, flags);
		l2c_set_debug(base, 0x03);

		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(end, base + L2X0_INV_LINE_PA);
		}

		l2c_set_debug(base, 0x00);
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}
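/*
 * Worked example (illustrative only): with erratum 588369, flushing a
 * partial line at PA 0x1008 cannot use the clean+invalidate-by-PA
 * register, since the invalidate step would be skipped.  Instead, with
 * the debug register set to 0x03, the line at 0x1000 is cleaned via
 * L2X0_CLEAN_LINE_PA and then invalidated via L2X0_INV_LINE_PA as two
 * separate writes, as done above.
 */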
static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
{
	raw_spinlock_t *lock = &l2x0_lock;
	unsigned long flags;
	void __iomem *base = l2x0_base;

	raw_spin_lock_irqsave(lock, flags);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		l2c_set_debug(base, 0x03);
		while (start < blk_end) {
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}
		l2c_set_debug(base, 0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}
	raw_spin_unlock_irqrestore(lock, flags);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_all_erratum(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	l2c_set_debug(base, 0x03);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	l2c_set_debug(base, 0x00);
	__l2c210_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __init l2c310_save(void __iomem *base)
{
	unsigned revision;

	l2x0_saved_regs.tag_latency = readl_relaxed(base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(base +
		L2X0_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

	/* From r2p0, there is Prefetch offset/control register */
	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
			L2X0_PREFETCH_CTRL);

	/* From r3p0, there is Power control register */
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
			L2X0_POWER_CTRL);
}

static void l2c310_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		unsigned revision;

		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			       base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			       base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			       base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			       base + L2X0_ADDR_FILTER_START);

		revision = readl_relaxed(base + L2X0_CACHE_ID) &
				L2X0_CACHE_ID_RTL_MASK;

		if (revision >= L310_CACHE_ID_RTL_R2P0)
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				       base + L2X0_PREFETCH_CTRL);
		if (revision >= L310_CACHE_ID_RTL_R3P0)
			writel_relaxed(l2x0_saved_regs.pwr_ctrl,
				       base + L2X0_POWER_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
	}
}
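/*
 * l2c310_fixup() below patches the copied outer_cache function table
 * according to the RTL revision read from the CACHE_ID register:
 * erratum-safe handlers are substituted only on the revisions that
 * each enabled CONFIG_PL310_ERRATA_* option actually affects, and the
 * active workarounds are reported at boot.
 */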
"a" : "um"); 652 for (i = 0; i < n; i++) 653 pr_cont(" %s", errata[i]); 654 pr_cont(" enabled\n"); 655 } 656 } 657 658 static const struct l2c_init_data l2c310_init_fns __initconst = { 659 .type = "L2C-310", 660 .way_size_0 = SZ_8K, 661 .num_lock = 8, 662 .enable = l2c_enable, 663 .fixup = l2c310_fixup, 664 .save = l2c310_save, 665 .outer_cache = { 666 .inv_range = l2c210_inv_range, 667 .clean_range = l2c210_clean_range, 668 .flush_range = l2c210_flush_range, 669 .flush_all = l2c210_flush_all, 670 .disable = l2c_disable, 671 .sync = l2c210_sync, 672 .set_debug = l2c310_set_debug, 673 .resume = l2c310_resume, 674 }, 675 }; 676 677 static void __init __l2c_init(const struct l2c_init_data *data, 678 u32 aux_val, u32 aux_mask, u32 cache_id) 679 { 680 struct outer_cache_fns fns; 681 unsigned way_size_bits, ways; 682 u32 aux; 683 684 /* 685 * It is strange to save the register state before initialisation, 686 * but hey, this is what the DT implementations decided to do. 687 */ 688 if (data->save) 689 data->save(l2x0_base); 690 691 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 692 693 aux &= aux_mask; 694 aux |= aux_val; 695 696 /* Determine the number of ways */ 697 switch (cache_id & L2X0_CACHE_ID_PART_MASK) { 698 case L2X0_CACHE_ID_PART_L310: 699 if (aux & (1 << 16)) 700 ways = 16; 701 else 702 ways = 8; 703 break; 704 705 case L2X0_CACHE_ID_PART_L210: 706 case L2X0_CACHE_ID_PART_L220: 707 ways = (aux >> 13) & 0xf; 708 break; 709 710 case AURORA_CACHE_ID: 711 ways = (aux >> 13) & 0xf; 712 ways = 2 << ((ways + 1) >> 2); 713 break; 714 715 default: 716 /* Assume unknown chips have 8 ways */ 717 ways = 8; 718 break; 719 } 720 721 l2x0_way_mask = (1 << ways) - 1; 722 723 /* 724 * way_size_0 is the size that a way_size value of zero would be 725 * given the calculation: way_size = way_size_0 << way_size_bits. 726 * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k, 727 * then way_size_0 would be 8k. 728 * 729 * L2 cache size = number of ways * way size. 730 */ 731 way_size_bits = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17; 732 l2x0_size = ways * (data->way_size_0 << way_size_bits); 733 734 fns = data->outer_cache; 735 if (data->fixup) 736 data->fixup(l2x0_base, cache_id, &fns); 737 738 /* 739 * Check if l2x0 controller is already enabled. If we are booting 740 * in non-secure mode accessing the below registers will fault. 741 */ 742 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) 743 data->enable(l2x0_base, aux, data->num_lock); 744 745 /* Re-read it in case some bits are reserved. */ 746 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 747 748 /* Save the value for resuming. 
void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	default:
	case L2X0_CACHE_ID_PART_L210:
		data = &l2c210_data;
		break;

	case L2X0_CACHE_ID_PART_L220:
		data = &l2c220_data;
		break;

	case L2X0_CACHE_ID_PART_L310:
		data = &l2c310_init_fns;
		break;
	}

	__l2c_init(data, aux_val, aux_mask, cache_id);
}

#ifdef CONFIG_OF
static int l2_wt_override;

/*
 * Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree.
 */
static u32 cache_id_part_number_from_dt;

static void __init l2x0_of_parse(const struct device_node *np,
	u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c210_resume,
	},
};

static const struct l2c_init_data of_l2c220_data __initconst = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c210_resume,
	},
};
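/*
 * Illustrative (hypothetical) device tree node consumed by the parser
 * below; the arm,* property names match what is parsed here, the
 * address and values are made up:
 *
 *	L2: cache-controller@fff12000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xfff12000 0x1000>;
 *		arm,tag-latency = <2 2 1>;
 *		arm,data-latency = <3 3 1>;
 *		arm,filter-ranges = <0x80000000 0x10000000>;
 *		cache-unified;
 *		cache-level = <2>;
 *	};
 */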
"arm,data-latency", 877 data, ARRAY_SIZE(data)); 878 if (data[0] && data[1] && data[2]) 879 writel_relaxed( 880 ((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) | 881 ((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) | 882 ((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT), 883 l2x0_base + L2X0_DATA_LATENCY_CTRL); 884 885 of_property_read_u32_array(np, "arm,filter-ranges", 886 filter, ARRAY_SIZE(filter)); 887 if (filter[1]) { 888 writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M), 889 l2x0_base + L2X0_ADDR_FILTER_END); 890 writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN, 891 l2x0_base + L2X0_ADDR_FILTER_START); 892 } 893 } 894 895 static const struct l2c_init_data of_l2c310_data __initconst = { 896 .type = "L2C-310", 897 .way_size_0 = SZ_8K, 898 .num_lock = 8, 899 .of_parse = l2c310_of_parse, 900 .enable = l2c_enable, 901 .fixup = l2c310_fixup, 902 .save = l2c310_save, 903 .outer_cache = { 904 .inv_range = l2c210_inv_range, 905 .clean_range = l2c210_clean_range, 906 .flush_range = l2c210_flush_range, 907 .flush_all = l2c210_flush_all, 908 .disable = l2c_disable, 909 .sync = l2c210_sync, 910 .set_debug = l2c310_set_debug, 911 .resume = l2c310_resume, 912 }, 913 }; 914 915 /* 916 * Note that the end addresses passed to Linux primitives are 917 * noninclusive, while the hardware cache range operations use 918 * inclusive start and end addresses. 919 */ 920 static unsigned long calc_range_end(unsigned long start, unsigned long end) 921 { 922 /* 923 * Limit the number of cache lines processed at once, 924 * since cache range operations stall the CPU pipeline 925 * until completion. 926 */ 927 if (end > start + MAX_RANGE_SIZE) 928 end = start + MAX_RANGE_SIZE; 929 930 /* 931 * Cache range operations can't straddle a page boundary. 932 */ 933 if (end > PAGE_ALIGN(start+1)) 934 end = PAGE_ALIGN(start+1); 935 936 return end; 937 } 938 939 /* 940 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT 941 * and range operations only do a TLB lookup on the start address. 942 */ 943 static void aurora_pa_range(unsigned long start, unsigned long end, 944 unsigned long offset) 945 { 946 unsigned long flags; 947 948 raw_spin_lock_irqsave(&l2x0_lock, flags); 949 writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG); 950 writel_relaxed(end, l2x0_base + offset); 951 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 952 953 cache_sync(); 954 } 955 956 static void aurora_inv_range(unsigned long start, unsigned long end) 957 { 958 /* 959 * round start and end adresses up to cache line size 960 */ 961 start &= ~(CACHE_LINE_SIZE - 1); 962 end = ALIGN(end, CACHE_LINE_SIZE); 963 964 /* 965 * Invalidate all full cache lines between 'start' and 'end'. 966 */ 967 while (start < end) { 968 unsigned long range_end = calc_range_end(start, end); 969 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 970 AURORA_INVAL_RANGE_REG); 971 start = range_end; 972 } 973 } 974 975 static void aurora_clean_range(unsigned long start, unsigned long end) 976 { 977 /* 978 * If L2 is forced to WT, the L2 will always be clean and we 979 * don't need to do anything here. 
static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}

static void aurora_save(void __iomem *base)
{
	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}

static void aurora_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL);
		writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL);
	}
}

/*
 * For Aurora cache in no outer mode, enable via the CP15 coprocessor
 * broadcasting of cache commands to L2.
 */
static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
	unsigned num_lock)
{
	u32 u;

	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));

	isb();

	l2c_enable(base, aux, num_lock);
}

static void __init aurora_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	sync_reg_offset = AURORA_SYNC_REG;
}

static void __init aurora_of_parse(const struct device_node *np,
	u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			     &cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = l2c_enable,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = aurora_resume,
	},
};
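/*
 * In "system cache" (no outer) mode, the CP15 FW bit set by
 * aurora_enable_no_outer() makes the hardware broadcast L1 cache
 * maintenance operations to L2, so no explicit outer_cache range
 * methods are needed; only the resume hook is populated below.
 */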
static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = aurora_enable_no_outer,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.resume = aurora_resume,
	},
};

/*
 * For certain Broadcom SoCs, depending on the address range, different offsets
 * need to be added to the address before passing it to L2 for
 * invalidation/clean/flush
 *
 * Section Address Range              Offset        EMI
 *   1     0x00000000 - 0x3FFFFFFF    0x80000000    VC
 *   2     0x40000000 - 0xBFFFFFFF    0x40000000    SYS
 *   3     0xC0000000 - 0xFFFFFFFF    0x80000000    VC
 *
 * When the start and end addresses fall in two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, if we need to invalidate addresses starting at 0xBFFF0000
 * and ending at 0xC0001000, we need to invalidate 1) 0xBFFF0000 - 0xBFFFFFFF
 * and 2) 0xC0000000 - 0xC0001000.
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer some
 * performance hit, but keep in mind the cross section case is very rare.
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid use
 * case.
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2.  Because of
 * that the code does not need to handle section 1 at all.
 */
#define BCM_SYS_EMI_START_ADDR		0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR	0xC0000000UL

#define BCM_SYS_EMI_OFFSET		0x40000000UL
#define BCM_VC_EMI_OFFSET		0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}

static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_inv_range(new_start, new_end);
		return;
	}

	/*
	 * They cross sections, so it can only be a cross from section
	 * 2 to section 3.
	 */
	l2c210_inv_range(new_start,
			 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
			 new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_clean_range(new_start, new_end);
		return;
	}

	/*
	 * They cross sections, so it can only be a cross from section
	 * 2 to section 3.
	 */
	l2c210_clean_range(new_start,
			   bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
			   new_end);
}
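/*
 * Worked example (illustrative only): for the crossing case above,
 * bcm_inv_range(0xBFFF0000, 0xC0001000) translates the endpoints with
 * bcm_l2_phys_addr() and issues two L2C operations: one starting at
 * 0xFFFF0000 (section 2 plus the 0x40000000 SYS offset) and one for
 * 0x40000000..0x40001000 (section 3 plus the 0x80000000 VC offset,
 * which wraps modulo 2^32 in 32-bit unsigned arithmetic).
 */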
static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		outer_cache.flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_flush_range(new_start, new_end);
		return;
	}

	/*
	 * They cross sections, so it can only be a cross from section
	 * 2 to section 3.
	 */
	l2c210_flush_range(new_start,
			   bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
			   new_end);
}

/* Broadcom L2C-310s are built on ARM's r3p2 or later RTL, and require no fixups */
static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.type = "BCM-L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c_enable,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};

static void __init tauros3_save(void __iomem *base)
{
	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L2X0_PREFETCH_CTRL);
}

static void tauros3_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
			       base + TAUROS3_AUX2_CTRL);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       base + L2X0_PREFETCH_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
	}
}

static const struct l2c_init_data of_tauros3_data __initconst = {
	.type = "Tauros3",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c_enable,
	.save = tauros3_save,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = tauros3_resume,
	},
};

#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2c210_data),
	L2C_ID("arm,l220-cache", of_l2c220_data),
	L2C_ID("arm,pl310-cache", of_l2c310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};
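/*
 * l2x0_of_init() locates the first matching L2 cache controller node
 * in the device tree, maps its registers, and initialises it.
 * Illustrative (hypothetical) caller: a platform that does not need to
 * override any auxiliary control bits would typically do
 *
 *	l2x0_of_init(0, ~0UL);
 *
 * from its machine init code, passing a nonzero aux_val (and a mask
 * with the corresponding bits cleared) only when specific AUX_CTRL
 * fields must be forced.
 */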
int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	__l2c_init(data, aux_val, aux_mask, cache_id);

	return 0;
}
#endif