1 /* 2 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support 3 * 4 * Copyright (C) 2007 ARM Limited 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program; if not, write to the Free Software 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 */ 19 #include <linux/err.h> 20 #include <linux/init.h> 21 #include <linux/spinlock.h> 22 #include <linux/io.h> 23 #include <linux/of.h> 24 #include <linux/of_address.h> 25 26 #include <asm/cacheflush.h> 27 #include <asm/hardware/cache-l2x0.h> 28 #include "cache-tauros3.h" 29 #include "cache-aurora-l2.h" 30 31 struct l2c_init_data { 32 const char *type; 33 unsigned num_lock; 34 void (*of_parse)(const struct device_node *, u32 *, u32 *); 35 void (*enable)(void __iomem *, u32, unsigned); 36 void (*fixup)(void __iomem *, u32, struct outer_cache_fns *); 37 void (*save)(void __iomem *); 38 struct outer_cache_fns outer_cache; 39 }; 40 41 #define CACHE_LINE_SIZE 32 42 43 static void __iomem *l2x0_base; 44 static DEFINE_RAW_SPINLOCK(l2x0_lock); 45 static u32 l2x0_way_mask; /* Bitmask of active ways */ 46 static u32 l2x0_size; 47 static unsigned long sync_reg_offset = L2X0_CACHE_SYNC; 48 49 struct l2x0_regs l2x0_saved_regs; 50 51 /* 52 * Common code for all cache controllers. 53 */ 54 static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask) 55 { 56 /* wait for cache operation by line or way to complete */ 57 while (readl_relaxed(reg) & mask) 58 cpu_relax(); 59 } 60 61 /* 62 * This should only be called when we have a requirement that the 63 * register be written due to a work-around, as platforms running 64 * in non-secure mode may not be able to access this register. 65 */ 66 static inline void l2c_set_debug(void __iomem *base, unsigned long val) 67 { 68 outer_cache.set_debug(val); 69 } 70 71 static void __l2c_op_way(void __iomem *reg) 72 { 73 writel_relaxed(l2x0_way_mask, reg); 74 l2c_wait_mask(reg, l2x0_way_mask); 75 } 76 77 static inline void l2c_unlock(void __iomem *base, unsigned num) 78 { 79 unsigned i; 80 81 for (i = 0; i < num; i++) { 82 writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE + 83 i * L2X0_LOCKDOWN_STRIDE); 84 writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE + 85 i * L2X0_LOCKDOWN_STRIDE); 86 } 87 } 88 89 /* 90 * Enable the L2 cache controller. This function must only be 91 * called when the cache controller is known to be disabled. 92 */ 93 static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock) 94 { 95 unsigned long flags; 96 97 /* Only write the aux register if it needs changing */ 98 if (readl_relaxed(base + L2X0_AUX_CTRL) != aux) 99 writel_relaxed(aux, base + L2X0_AUX_CTRL); 100 101 l2c_unlock(base, num_lock); 102 103 local_irq_save(flags); 104 __l2c_op_way(base + L2X0_INV_WAY); 105 writel_relaxed(0, base + sync_reg_offset); 106 l2c_wait_mask(base + sync_reg_offset, 1); 107 local_irq_restore(flags); 108 109 writel_relaxed(L2X0_CTRL_EN, base + L2X0_CTRL); 110 } 111 112 static void l2c_disable(void) 113 { 114 void __iomem *base = l2x0_base; 115 116 outer_cache.flush_all(); 117 writel_relaxed(0, base + L2X0_CTRL); 118 dsb(st); 119 } 120 121 #ifdef CONFIG_CACHE_PL310 122 static inline void cache_wait(void __iomem *reg, unsigned long mask) 123 { 124 /* cache operations by line are atomic on PL310 */ 125 } 126 #else 127 #define cache_wait l2c_wait_mask 128 #endif 129 130 static inline void cache_sync(void) 131 { 132 void __iomem *base = l2x0_base; 133 134 writel_relaxed(0, base + sync_reg_offset); 135 cache_wait(base + L2X0_CACHE_SYNC, 1); 136 } 137 138 #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915) 139 static inline void debug_writel(unsigned long val) 140 { 141 if (outer_cache.set_debug) 142 l2c_set_debug(l2x0_base, val); 143 } 144 #else 145 /* Optimised out for non-errata case */ 146 static inline void debug_writel(unsigned long val) 147 { 148 } 149 #endif 150 151 static void l2x0_cache_sync(void) 152 { 153 unsigned long flags; 154 155 raw_spin_lock_irqsave(&l2x0_lock, flags); 156 cache_sync(); 157 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 158 } 159 160 static void __l2x0_flush_all(void) 161 { 162 debug_writel(0x03); 163 __l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY); 164 cache_sync(); 165 debug_writel(0x00); 166 } 167 168 static void l2x0_flush_all(void) 169 { 170 unsigned long flags; 171 172 /* clean all ways */ 173 raw_spin_lock_irqsave(&l2x0_lock, flags); 174 __l2x0_flush_all(); 175 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 176 } 177 178 static void l2x0_disable(void) 179 { 180 unsigned long flags; 181 182 raw_spin_lock_irqsave(&l2x0_lock, flags); 183 __l2x0_flush_all(); 184 writel_relaxed(0, l2x0_base + L2X0_CTRL); 185 dsb(st); 186 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 187 } 188 189 /* 190 * L2C-210 specific code. 191 * 192 * The L2C-2x0 PA, set/way and sync operations are atomic, but we must 193 * ensure that no background operation is running. The way operations 194 * are all background tasks. 195 * 196 * While a background operation is in progress, any new operation is 197 * ignored (unspecified whether this causes an error.) Thankfully, not 198 * used on SMP. 199 * 200 * Never has a different sync register other than L2X0_CACHE_SYNC, but 201 * we use sync_reg_offset here so we can share some of this with L2C-310. 202 */ 203 static void __l2c210_cache_sync(void __iomem *base) 204 { 205 writel_relaxed(0, base + sync_reg_offset); 206 } 207 208 static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start, 209 unsigned long end) 210 { 211 while (start < end) { 212 writel_relaxed(start, reg); 213 start += CACHE_LINE_SIZE; 214 } 215 } 216 217 static void l2c210_inv_range(unsigned long start, unsigned long end) 218 { 219 void __iomem *base = l2x0_base; 220 221 if (start & (CACHE_LINE_SIZE - 1)) { 222 start &= ~(CACHE_LINE_SIZE - 1); 223 writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA); 224 start += CACHE_LINE_SIZE; 225 } 226 227 if (end & (CACHE_LINE_SIZE - 1)) { 228 end &= ~(CACHE_LINE_SIZE - 1); 229 writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA); 230 } 231 232 __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end); 233 __l2c210_cache_sync(base); 234 } 235 236 static void l2c210_clean_range(unsigned long start, unsigned long end) 237 { 238 void __iomem *base = l2x0_base; 239 240 start &= ~(CACHE_LINE_SIZE - 1); 241 __l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end); 242 __l2c210_cache_sync(base); 243 } 244 245 static void l2c210_flush_range(unsigned long start, unsigned long end) 246 { 247 void __iomem *base = l2x0_base; 248 249 start &= ~(CACHE_LINE_SIZE - 1); 250 __l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end); 251 __l2c210_cache_sync(base); 252 } 253 254 static void l2c210_flush_all(void) 255 { 256 void __iomem *base = l2x0_base; 257 258 BUG_ON(!irqs_disabled()); 259 260 __l2c_op_way(base + L2X0_CLEAN_INV_WAY); 261 __l2c210_cache_sync(base); 262 } 263 264 static void l2c210_sync(void) 265 { 266 __l2c210_cache_sync(l2x0_base); 267 } 268 269 static void l2c210_resume(void) 270 { 271 void __iomem *base = l2x0_base; 272 273 if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) 274 l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1); 275 } 276 277 static const struct l2c_init_data l2c210_data __initconst = { 278 .type = "L2C-210", 279 .num_lock = 1, 280 .enable = l2c_enable, 281 .outer_cache = { 282 .inv_range = l2c210_inv_range, 283 .clean_range = l2c210_clean_range, 284 .flush_range = l2c210_flush_range, 285 .flush_all = l2c210_flush_all, 286 .disable = l2c_disable, 287 .sync = l2c210_sync, 288 .resume = l2c210_resume, 289 }, 290 }; 291 292 /* 293 * L2C-220 specific code. 294 * 295 * All operations are background operations: they have to be waited for. 296 * Conflicting requests generate a slave error (which will cause an 297 * imprecise abort.) Never uses sync_reg_offset, so we hard-code the 298 * sync register here. 299 * 300 * However, we can re-use the l2c210_resume call. 301 */ 302 static inline void __l2c220_cache_sync(void __iomem *base) 303 { 304 writel_relaxed(0, base + L2X0_CACHE_SYNC); 305 l2c_wait_mask(base + L2X0_CACHE_SYNC, 1); 306 } 307 308 static void l2c220_op_way(void __iomem *base, unsigned reg) 309 { 310 unsigned long flags; 311 312 raw_spin_lock_irqsave(&l2x0_lock, flags); 313 __l2c_op_way(base + reg); 314 __l2c220_cache_sync(base); 315 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 316 } 317 318 static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start, 319 unsigned long end, unsigned long flags) 320 { 321 raw_spinlock_t *lock = &l2x0_lock; 322 323 while (start < end) { 324 unsigned long blk_end = start + min(end - start, 4096UL); 325 326 while (start < blk_end) { 327 l2c_wait_mask(reg, 1); 328 writel_relaxed(start, reg); 329 start += CACHE_LINE_SIZE; 330 } 331 332 if (blk_end < end) { 333 raw_spin_unlock_irqrestore(lock, flags); 334 raw_spin_lock_irqsave(lock, flags); 335 } 336 } 337 338 return flags; 339 } 340 341 static void l2c220_inv_range(unsigned long start, unsigned long end) 342 { 343 void __iomem *base = l2x0_base; 344 unsigned long flags; 345 346 raw_spin_lock_irqsave(&l2x0_lock, flags); 347 if ((start | end) & (CACHE_LINE_SIZE - 1)) { 348 if (start & (CACHE_LINE_SIZE - 1)) { 349 start &= ~(CACHE_LINE_SIZE - 1); 350 writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA); 351 start += CACHE_LINE_SIZE; 352 } 353 354 if (end & (CACHE_LINE_SIZE - 1)) { 355 end &= ~(CACHE_LINE_SIZE - 1); 356 l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1); 357 writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA); 358 } 359 } 360 361 flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA, 362 start, end, flags); 363 l2c_wait_mask(base + L2X0_INV_LINE_PA, 1); 364 __l2c220_cache_sync(base); 365 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 366 } 367 368 static void l2c220_clean_range(unsigned long start, unsigned long end) 369 { 370 void __iomem *base = l2x0_base; 371 unsigned long flags; 372 373 start &= ~(CACHE_LINE_SIZE - 1); 374 if ((end - start) >= l2x0_size) { 375 l2c220_op_way(base, L2X0_CLEAN_WAY); 376 return; 377 } 378 379 raw_spin_lock_irqsave(&l2x0_lock, flags); 380 flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA, 381 start, end, flags); 382 l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1); 383 __l2c220_cache_sync(base); 384 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 385 } 386 387 static void l2c220_flush_range(unsigned long start, unsigned long end) 388 { 389 void __iomem *base = l2x0_base; 390 unsigned long flags; 391 392 start &= ~(CACHE_LINE_SIZE - 1); 393 if ((end - start) >= l2x0_size) { 394 l2c220_op_way(base, L2X0_CLEAN_INV_WAY); 395 return; 396 } 397 398 raw_spin_lock_irqsave(&l2x0_lock, flags); 399 flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, 400 start, end, flags); 401 l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1); 402 __l2c220_cache_sync(base); 403 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 404 } 405 406 static void l2c220_flush_all(void) 407 { 408 l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY); 409 } 410 411 static void l2c220_sync(void) 412 { 413 unsigned long flags; 414 415 raw_spin_lock_irqsave(&l2x0_lock, flags); 416 __l2c220_cache_sync(l2x0_base); 417 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 418 } 419 420 static const struct l2c_init_data l2c220_data = { 421 .type = "L2C-220", 422 .num_lock = 1, 423 .enable = l2c_enable, 424 .outer_cache = { 425 .inv_range = l2c220_inv_range, 426 .clean_range = l2c220_clean_range, 427 .flush_range = l2c220_flush_range, 428 .flush_all = l2c220_flush_all, 429 .disable = l2c_disable, 430 .sync = l2c220_sync, 431 .resume = l2c210_resume, 432 }, 433 }; 434 435 /* 436 * L2C-310 specific code. 437 * 438 * Very similar to L2C-210, the PA, set/way and sync operations are atomic, 439 * and the way operations are all background tasks. However, issuing an 440 * operation while a background operation is in progress results in a 441 * SLVERR response. We can reuse: 442 * 443 * __l2c210_cache_sync (using sync_reg_offset) 444 * l2c210_sync 445 * l2c210_inv_range (if 588369 is not applicable) 446 * l2c210_clean_range 447 * l2c210_flush_range (if 588369 is not applicable) 448 * l2c210_flush_all (if 727915 is not applicable) 449 * 450 * Errata: 451 * 588369: PL310 R0P0->R1P0, fixed R2P0. 452 * Affects: all clean+invalidate operations 453 * clean and invalidate skips the invalidate step, so we need to issue 454 * separate operations. We also require the above debug workaround 455 * enclosing this code fragment on affected parts. On unaffected parts, 456 * we must not use this workaround without the debug register writes 457 * to avoid exposing a problem similar to 727915. 458 * 459 * 727915: PL310 R2P0->R3P0, fixed R3P1. 460 * Affects: clean+invalidate by way 461 * clean and invalidate by way runs in the background, and a store can 462 * hit the line between the clean operation and invalidate operation, 463 * resulting in the store being lost. 464 * 465 * 753970: PL310 R3P0, fixed R3P1. 466 * Affects: sync 467 * prevents merging writes after the sync operation, until another L2C 468 * operation is performed (or a number of other conditions.) 469 * 470 * 769419: PL310 R0P0->R3P1, fixed R3P2. 471 * Affects: store buffer 472 * store buffer is not automatically drained. 473 */ 474 static void l2c310_set_debug(unsigned long val) 475 { 476 writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL); 477 } 478 479 static void l2c310_inv_range_erratum(unsigned long start, unsigned long end) 480 { 481 void __iomem *base = l2x0_base; 482 483 if ((start | end) & (CACHE_LINE_SIZE - 1)) { 484 unsigned long flags; 485 486 /* Erratum 588369 for both clean+invalidate operations */ 487 raw_spin_lock_irqsave(&l2x0_lock, flags); 488 l2c_set_debug(base, 0x03); 489 490 if (start & (CACHE_LINE_SIZE - 1)) { 491 start &= ~(CACHE_LINE_SIZE - 1); 492 writel_relaxed(start, base + L2X0_CLEAN_LINE_PA); 493 writel_relaxed(start, base + L2X0_INV_LINE_PA); 494 start += CACHE_LINE_SIZE; 495 } 496 497 if (end & (CACHE_LINE_SIZE - 1)) { 498 end &= ~(CACHE_LINE_SIZE - 1); 499 writel_relaxed(end, base + L2X0_CLEAN_LINE_PA); 500 writel_relaxed(end, base + L2X0_INV_LINE_PA); 501 } 502 503 l2c_set_debug(base, 0x00); 504 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 505 } 506 507 __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end); 508 __l2c210_cache_sync(base); 509 } 510 511 static void l2c310_flush_range_erratum(unsigned long start, unsigned long end) 512 { 513 raw_spinlock_t *lock = &l2x0_lock; 514 unsigned long flags; 515 void __iomem *base = l2x0_base; 516 517 raw_spin_lock_irqsave(lock, flags); 518 while (start < end) { 519 unsigned long blk_end = start + min(end - start, 4096UL); 520 521 l2c_set_debug(base, 0x03); 522 while (start < blk_end) { 523 writel_relaxed(start, base + L2X0_CLEAN_LINE_PA); 524 writel_relaxed(start, base + L2X0_INV_LINE_PA); 525 start += CACHE_LINE_SIZE; 526 } 527 l2c_set_debug(base, 0x00); 528 529 if (blk_end < end) { 530 raw_spin_unlock_irqrestore(lock, flags); 531 raw_spin_lock_irqsave(lock, flags); 532 } 533 } 534 raw_spin_unlock_irqrestore(lock, flags); 535 __l2c210_cache_sync(base); 536 } 537 538 static void l2c310_flush_all_erratum(void) 539 { 540 void __iomem *base = l2x0_base; 541 unsigned long flags; 542 543 raw_spin_lock_irqsave(&l2x0_lock, flags); 544 l2c_set_debug(base, 0x03); 545 __l2c_op_way(base + L2X0_CLEAN_INV_WAY); 546 l2c_set_debug(base, 0x00); 547 __l2c210_cache_sync(base); 548 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 549 } 550 551 static void __init l2c310_save(void __iomem *base) 552 { 553 unsigned revision; 554 555 l2x0_saved_regs.tag_latency = readl_relaxed(base + 556 L2X0_TAG_LATENCY_CTRL); 557 l2x0_saved_regs.data_latency = readl_relaxed(base + 558 L2X0_DATA_LATENCY_CTRL); 559 l2x0_saved_regs.filter_end = readl_relaxed(base + 560 L2X0_ADDR_FILTER_END); 561 l2x0_saved_regs.filter_start = readl_relaxed(base + 562 L2X0_ADDR_FILTER_START); 563 564 revision = readl_relaxed(base + L2X0_CACHE_ID) & 565 L2X0_CACHE_ID_RTL_MASK; 566 567 /* From r2p0, there is Prefetch offset/control register */ 568 if (revision >= L310_CACHE_ID_RTL_R2P0) 569 l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base + 570 L2X0_PREFETCH_CTRL); 571 572 /* From r3p0, there is Power control register */ 573 if (revision >= L310_CACHE_ID_RTL_R3P0) 574 l2x0_saved_regs.pwr_ctrl = readl_relaxed(base + 575 L2X0_POWER_CTRL); 576 } 577 578 static void l2c310_resume(void) 579 { 580 void __iomem *base = l2x0_base; 581 582 if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) { 583 unsigned revision; 584 585 /* restore pl310 setup */ 586 writel_relaxed(l2x0_saved_regs.tag_latency, 587 base + L2X0_TAG_LATENCY_CTRL); 588 writel_relaxed(l2x0_saved_regs.data_latency, 589 base + L2X0_DATA_LATENCY_CTRL); 590 writel_relaxed(l2x0_saved_regs.filter_end, 591 base + L2X0_ADDR_FILTER_END); 592 writel_relaxed(l2x0_saved_regs.filter_start, 593 base + L2X0_ADDR_FILTER_START); 594 595 revision = readl_relaxed(base + L2X0_CACHE_ID) & 596 L2X0_CACHE_ID_RTL_MASK; 597 598 if (revision >= L310_CACHE_ID_RTL_R2P0) 599 writel_relaxed(l2x0_saved_regs.prefetch_ctrl, 600 base + L2X0_PREFETCH_CTRL); 601 if (revision >= L310_CACHE_ID_RTL_R3P0) 602 writel_relaxed(l2x0_saved_regs.pwr_ctrl, 603 base + L2X0_POWER_CTRL); 604 605 l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8); 606 } 607 } 608 609 static void __init l2c310_fixup(void __iomem *base, u32 cache_id, 610 struct outer_cache_fns *fns) 611 { 612 unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK; 613 const char *errata[4]; 614 unsigned n = 0; 615 616 /* For compatibility */ 617 if (revision <= L310_CACHE_ID_RTL_R3P0) 618 fns->set_debug = l2c310_set_debug; 619 620 if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) && 621 revision < L310_CACHE_ID_RTL_R2P0 && 622 /* For bcm compatibility */ 623 fns->inv_range == l2c210_inv_range) { 624 fns->inv_range = l2c310_inv_range_erratum; 625 fns->flush_range = l2c310_flush_range_erratum; 626 errata[n++] = "588369"; 627 } 628 629 if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) && 630 revision >= L310_CACHE_ID_RTL_R2P0 && 631 revision < L310_CACHE_ID_RTL_R3P1) { 632 fns->flush_all = l2c310_flush_all_erratum; 633 errata[n++] = "727915"; 634 } 635 636 if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) && 637 revision == L310_CACHE_ID_RTL_R3P0) { 638 sync_reg_offset = L2X0_DUMMY_REG; 639 errata[n++] = "753970"; 640 } 641 642 if (IS_ENABLED(CONFIG_PL310_ERRATA_769419)) 643 errata[n++] = "769419"; 644 645 if (n) { 646 unsigned i; 647 648 pr_info("L2C-310 errat%s", n > 1 ? "a" : "um"); 649 for (i = 0; i < n; i++) 650 pr_cont(" %s", errata[i]); 651 pr_cont(" enabled\n"); 652 } 653 } 654 655 static const struct l2c_init_data l2c310_init_fns __initconst = { 656 .type = "L2C-310", 657 .num_lock = 8, 658 .enable = l2c_enable, 659 .fixup = l2c310_fixup, 660 .save = l2c310_save, 661 .outer_cache = { 662 .inv_range = l2c210_inv_range, 663 .clean_range = l2c210_clean_range, 664 .flush_range = l2c210_flush_range, 665 .flush_all = l2c210_flush_all, 666 .disable = l2c_disable, 667 .sync = l2c210_sync, 668 .set_debug = l2c310_set_debug, 669 .resume = l2c310_resume, 670 }, 671 }; 672 673 static void __init __l2c_init(const struct l2c_init_data *data, 674 u32 aux_val, u32 aux_mask, u32 cache_id) 675 { 676 struct outer_cache_fns fns; 677 u32 aux; 678 u32 way_size = 0; 679 int ways; 680 int way_size_shift = L2X0_WAY_SIZE_SHIFT; 681 682 /* 683 * It is strange to save the register state before initialisation, 684 * but hey, this is what the DT implementations decided to do. 685 */ 686 if (data->save) 687 data->save(l2x0_base); 688 689 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 690 691 aux &= aux_mask; 692 aux |= aux_val; 693 694 /* Determine the number of ways */ 695 switch (cache_id & L2X0_CACHE_ID_PART_MASK) { 696 case L2X0_CACHE_ID_PART_L310: 697 if (aux & (1 << 16)) 698 ways = 16; 699 else 700 ways = 8; 701 break; 702 703 case L2X0_CACHE_ID_PART_L210: 704 case L2X0_CACHE_ID_PART_L220: 705 ways = (aux >> 13) & 0xf; 706 break; 707 708 case AURORA_CACHE_ID: 709 ways = (aux >> 13) & 0xf; 710 ways = 2 << ((ways + 1) >> 2); 711 way_size_shift = AURORA_WAY_SIZE_SHIFT; 712 break; 713 714 default: 715 /* Assume unknown chips have 8 ways */ 716 ways = 8; 717 break; 718 } 719 720 l2x0_way_mask = (1 << ways) - 1; 721 722 /* 723 * L2 cache Size = Way size * Number of ways 724 */ 725 way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17; 726 way_size = 1 << (way_size + way_size_shift); 727 728 l2x0_size = ways * way_size * SZ_1K; 729 730 fns = data->outer_cache; 731 if (data->fixup) 732 data->fixup(l2x0_base, cache_id, &fns); 733 734 /* 735 * Check if l2x0 controller is already enabled. If we are booting 736 * in non-secure mode accessing the below registers will fault. 737 */ 738 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) 739 data->enable(l2x0_base, aux, data->num_lock); 740 741 /* Re-read it in case some bits are reserved. */ 742 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 743 744 /* Save the value for resuming. */ 745 l2x0_saved_regs.aux_ctrl = aux; 746 747 outer_cache = fns; 748 749 pr_info("%s cache controller enabled, %d ways, %d kB\n", 750 data->type, ways, l2x0_size >> 10); 751 pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n", 752 data->type, cache_id, aux); 753 } 754 755 void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) 756 { 757 const struct l2c_init_data *data; 758 u32 cache_id; 759 760 l2x0_base = base; 761 762 cache_id = readl_relaxed(base + L2X0_CACHE_ID); 763 764 switch (cache_id & L2X0_CACHE_ID_PART_MASK) { 765 default: 766 case L2X0_CACHE_ID_PART_L210: 767 data = &l2c210_data; 768 break; 769 770 case L2X0_CACHE_ID_PART_L220: 771 data = &l2c220_data; 772 break; 773 774 case L2X0_CACHE_ID_PART_L310: 775 data = &l2c310_init_fns; 776 break; 777 } 778 779 __l2c_init(data, aux_val, aux_mask, cache_id); 780 } 781 782 #ifdef CONFIG_OF 783 static int l2_wt_override; 784 785 /* Aurora don't have the cache ID register available, so we have to 786 * pass it though the device tree */ 787 static u32 cache_id_part_number_from_dt; 788 789 static void __init l2x0_of_parse(const struct device_node *np, 790 u32 *aux_val, u32 *aux_mask) 791 { 792 u32 data[2] = { 0, 0 }; 793 u32 tag = 0; 794 u32 dirty = 0; 795 u32 val = 0, mask = 0; 796 797 of_property_read_u32(np, "arm,tag-latency", &tag); 798 if (tag) { 799 mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK; 800 val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT; 801 } 802 803 of_property_read_u32_array(np, "arm,data-latency", 804 data, ARRAY_SIZE(data)); 805 if (data[0] && data[1]) { 806 mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK | 807 L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK; 808 val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) | 809 ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT); 810 } 811 812 of_property_read_u32(np, "arm,dirty-latency", &dirty); 813 if (dirty) { 814 mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK; 815 val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT; 816 } 817 818 *aux_val &= ~mask; 819 *aux_val |= val; 820 *aux_mask &= ~mask; 821 } 822 823 static const struct l2c_init_data of_l2c210_data __initconst = { 824 .type = "L2C-210", 825 .num_lock = 1, 826 .of_parse = l2x0_of_parse, 827 .enable = l2c_enable, 828 .outer_cache = { 829 .inv_range = l2c210_inv_range, 830 .clean_range = l2c210_clean_range, 831 .flush_range = l2c210_flush_range, 832 .flush_all = l2c210_flush_all, 833 .disable = l2c_disable, 834 .sync = l2c210_sync, 835 .resume = l2c210_resume, 836 }, 837 }; 838 839 static const struct l2c_init_data of_l2c220_data __initconst = { 840 .type = "L2C-220", 841 .num_lock = 1, 842 .of_parse = l2x0_of_parse, 843 .enable = l2c_enable, 844 .outer_cache = { 845 .inv_range = l2c220_inv_range, 846 .clean_range = l2c220_clean_range, 847 .flush_range = l2c220_flush_range, 848 .flush_all = l2c220_flush_all, 849 .disable = l2c_disable, 850 .sync = l2c220_sync, 851 .resume = l2c210_resume, 852 }, 853 }; 854 855 static void __init l2c310_of_parse(const struct device_node *np, 856 u32 *aux_val, u32 *aux_mask) 857 { 858 u32 data[3] = { 0, 0, 0 }; 859 u32 tag[3] = { 0, 0, 0 }; 860 u32 filter[2] = { 0, 0 }; 861 862 of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag)); 863 if (tag[0] && tag[1] && tag[2]) 864 writel_relaxed( 865 ((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) | 866 ((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) | 867 ((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT), 868 l2x0_base + L2X0_TAG_LATENCY_CTRL); 869 870 of_property_read_u32_array(np, "arm,data-latency", 871 data, ARRAY_SIZE(data)); 872 if (data[0] && data[1] && data[2]) 873 writel_relaxed( 874 ((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) | 875 ((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) | 876 ((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT), 877 l2x0_base + L2X0_DATA_LATENCY_CTRL); 878 879 of_property_read_u32_array(np, "arm,filter-ranges", 880 filter, ARRAY_SIZE(filter)); 881 if (filter[1]) { 882 writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M), 883 l2x0_base + L2X0_ADDR_FILTER_END); 884 writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN, 885 l2x0_base + L2X0_ADDR_FILTER_START); 886 } 887 } 888 889 static const struct l2c_init_data of_l2c310_data __initconst = { 890 .type = "L2C-310", 891 .num_lock = 8, 892 .of_parse = l2c310_of_parse, 893 .enable = l2c_enable, 894 .fixup = l2c310_fixup, 895 .save = l2c310_save, 896 .outer_cache = { 897 .inv_range = l2c210_inv_range, 898 .clean_range = l2c210_clean_range, 899 .flush_range = l2c210_flush_range, 900 .flush_all = l2c210_flush_all, 901 .disable = l2c_disable, 902 .sync = l2c210_sync, 903 .set_debug = l2c310_set_debug, 904 .resume = l2c310_resume, 905 }, 906 }; 907 908 /* 909 * Note that the end addresses passed to Linux primitives are 910 * noninclusive, while the hardware cache range operations use 911 * inclusive start and end addresses. 912 */ 913 static unsigned long calc_range_end(unsigned long start, unsigned long end) 914 { 915 /* 916 * Limit the number of cache lines processed at once, 917 * since cache range operations stall the CPU pipeline 918 * until completion. 919 */ 920 if (end > start + MAX_RANGE_SIZE) 921 end = start + MAX_RANGE_SIZE; 922 923 /* 924 * Cache range operations can't straddle a page boundary. 925 */ 926 if (end > PAGE_ALIGN(start+1)) 927 end = PAGE_ALIGN(start+1); 928 929 return end; 930 } 931 932 /* 933 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT 934 * and range operations only do a TLB lookup on the start address. 935 */ 936 static void aurora_pa_range(unsigned long start, unsigned long end, 937 unsigned long offset) 938 { 939 unsigned long flags; 940 941 raw_spin_lock_irqsave(&l2x0_lock, flags); 942 writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG); 943 writel_relaxed(end, l2x0_base + offset); 944 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 945 946 cache_sync(); 947 } 948 949 static void aurora_inv_range(unsigned long start, unsigned long end) 950 { 951 /* 952 * round start and end adresses up to cache line size 953 */ 954 start &= ~(CACHE_LINE_SIZE - 1); 955 end = ALIGN(end, CACHE_LINE_SIZE); 956 957 /* 958 * Invalidate all full cache lines between 'start' and 'end'. 959 */ 960 while (start < end) { 961 unsigned long range_end = calc_range_end(start, end); 962 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 963 AURORA_INVAL_RANGE_REG); 964 start = range_end; 965 } 966 } 967 968 static void aurora_clean_range(unsigned long start, unsigned long end) 969 { 970 /* 971 * If L2 is forced to WT, the L2 will always be clean and we 972 * don't need to do anything here. 973 */ 974 if (!l2_wt_override) { 975 start &= ~(CACHE_LINE_SIZE - 1); 976 end = ALIGN(end, CACHE_LINE_SIZE); 977 while (start != end) { 978 unsigned long range_end = calc_range_end(start, end); 979 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 980 AURORA_CLEAN_RANGE_REG); 981 start = range_end; 982 } 983 } 984 } 985 986 static void aurora_flush_range(unsigned long start, unsigned long end) 987 { 988 start &= ~(CACHE_LINE_SIZE - 1); 989 end = ALIGN(end, CACHE_LINE_SIZE); 990 while (start != end) { 991 unsigned long range_end = calc_range_end(start, end); 992 /* 993 * If L2 is forced to WT, the L2 will always be clean and we 994 * just need to invalidate. 995 */ 996 if (l2_wt_override) 997 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 998 AURORA_INVAL_RANGE_REG); 999 else 1000 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 1001 AURORA_FLUSH_RANGE_REG); 1002 start = range_end; 1003 } 1004 } 1005 1006 static void aurora_save(void __iomem *base) 1007 { 1008 l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL); 1009 l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL); 1010 } 1011 1012 static void aurora_resume(void) 1013 { 1014 void __iomem *base = l2x0_base; 1015 1016 if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) { 1017 writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL); 1018 writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL); 1019 } 1020 } 1021 1022 /* 1023 * For Aurora cache in no outer mode, enable via the CP15 coprocessor 1024 * broadcasting of cache commands to L2. 1025 */ 1026 static void __init aurora_enable_no_outer(void __iomem *base, u32 aux, 1027 unsigned num_lock) 1028 { 1029 u32 u; 1030 1031 asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u)); 1032 u |= AURORA_CTRL_FW; /* Set the FW bit */ 1033 asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u)); 1034 1035 isb(); 1036 1037 l2c_enable(base, aux, num_lock); 1038 } 1039 1040 static void __init aurora_fixup(void __iomem *base, u32 cache_id, 1041 struct outer_cache_fns *fns) 1042 { 1043 sync_reg_offset = AURORA_SYNC_REG; 1044 } 1045 1046 static void __init aurora_of_parse(const struct device_node *np, 1047 u32 *aux_val, u32 *aux_mask) 1048 { 1049 u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU; 1050 u32 mask = AURORA_ACR_REPLACEMENT_MASK; 1051 1052 of_property_read_u32(np, "cache-id-part", 1053 &cache_id_part_number_from_dt); 1054 1055 /* Determine and save the write policy */ 1056 l2_wt_override = of_property_read_bool(np, "wt-override"); 1057 1058 if (l2_wt_override) { 1059 val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY; 1060 mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK; 1061 } 1062 1063 *aux_val &= ~mask; 1064 *aux_val |= val; 1065 *aux_mask &= ~mask; 1066 } 1067 1068 static const struct l2c_init_data of_aurora_with_outer_data __initconst = { 1069 .type = "Aurora", 1070 .num_lock = 4, 1071 .of_parse = aurora_of_parse, 1072 .enable = l2c_enable, 1073 .fixup = aurora_fixup, 1074 .save = aurora_save, 1075 .outer_cache = { 1076 .inv_range = aurora_inv_range, 1077 .clean_range = aurora_clean_range, 1078 .flush_range = aurora_flush_range, 1079 .flush_all = l2x0_flush_all, 1080 .disable = l2x0_disable, 1081 .sync = l2x0_cache_sync, 1082 .resume = aurora_resume, 1083 }, 1084 }; 1085 1086 static const struct l2c_init_data of_aurora_no_outer_data __initconst = { 1087 .type = "Aurora", 1088 .num_lock = 4, 1089 .of_parse = aurora_of_parse, 1090 .enable = aurora_enable_no_outer, 1091 .fixup = aurora_fixup, 1092 .save = aurora_save, 1093 .outer_cache = { 1094 .resume = aurora_resume, 1095 }, 1096 }; 1097 1098 /* 1099 * For certain Broadcom SoCs, depending on the address range, different offsets 1100 * need to be added to the address before passing it to L2 for 1101 * invalidation/clean/flush 1102 * 1103 * Section Address Range Offset EMI 1104 * 1 0x00000000 - 0x3FFFFFFF 0x80000000 VC 1105 * 2 0x40000000 - 0xBFFFFFFF 0x40000000 SYS 1106 * 3 0xC0000000 - 0xFFFFFFFF 0x80000000 VC 1107 * 1108 * When the start and end addresses have crossed two different sections, we 1109 * need to break the L2 operation into two, each within its own section. 1110 * For example, if we need to invalidate addresses starts at 0xBFFF0000 and 1111 * ends at 0xC0001000, we need do invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and 2) 1112 * 0xC0000000 - 0xC0001000 1113 * 1114 * Note 1: 1115 * By breaking a single L2 operation into two, we may potentially suffer some 1116 * performance hit, but keep in mind the cross section case is very rare 1117 * 1118 * Note 2: 1119 * We do not need to handle the case when the start address is in 1120 * Section 1 and the end address is in Section 3, since it is not a valid use 1121 * case 1122 * 1123 * Note 3: 1124 * Section 1 in practical terms can no longer be used on rev A2. Because of 1125 * that the code does not need to handle section 1 at all. 1126 * 1127 */ 1128 #define BCM_SYS_EMI_START_ADDR 0x40000000UL 1129 #define BCM_VC_EMI_SEC3_START_ADDR 0xC0000000UL 1130 1131 #define BCM_SYS_EMI_OFFSET 0x40000000UL 1132 #define BCM_VC_EMI_OFFSET 0x80000000UL 1133 1134 static inline int bcm_addr_is_sys_emi(unsigned long addr) 1135 { 1136 return (addr >= BCM_SYS_EMI_START_ADDR) && 1137 (addr < BCM_VC_EMI_SEC3_START_ADDR); 1138 } 1139 1140 static inline unsigned long bcm_l2_phys_addr(unsigned long addr) 1141 { 1142 if (bcm_addr_is_sys_emi(addr)) 1143 return addr + BCM_SYS_EMI_OFFSET; 1144 else 1145 return addr + BCM_VC_EMI_OFFSET; 1146 } 1147 1148 static void bcm_inv_range(unsigned long start, unsigned long end) 1149 { 1150 unsigned long new_start, new_end; 1151 1152 BUG_ON(start < BCM_SYS_EMI_START_ADDR); 1153 1154 if (unlikely(end <= start)) 1155 return; 1156 1157 new_start = bcm_l2_phys_addr(start); 1158 new_end = bcm_l2_phys_addr(end); 1159 1160 /* normal case, no cross section between start and end */ 1161 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { 1162 l2c210_inv_range(new_start, new_end); 1163 return; 1164 } 1165 1166 /* They cross sections, so it can only be a cross from section 1167 * 2 to section 3 1168 */ 1169 l2c210_inv_range(new_start, 1170 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); 1171 l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), 1172 new_end); 1173 } 1174 1175 static void bcm_clean_range(unsigned long start, unsigned long end) 1176 { 1177 unsigned long new_start, new_end; 1178 1179 BUG_ON(start < BCM_SYS_EMI_START_ADDR); 1180 1181 if (unlikely(end <= start)) 1182 return; 1183 1184 new_start = bcm_l2_phys_addr(start); 1185 new_end = bcm_l2_phys_addr(end); 1186 1187 /* normal case, no cross section between start and end */ 1188 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { 1189 l2c210_clean_range(new_start, new_end); 1190 return; 1191 } 1192 1193 /* They cross sections, so it can only be a cross from section 1194 * 2 to section 3 1195 */ 1196 l2c210_clean_range(new_start, 1197 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); 1198 l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), 1199 new_end); 1200 } 1201 1202 static void bcm_flush_range(unsigned long start, unsigned long end) 1203 { 1204 unsigned long new_start, new_end; 1205 1206 BUG_ON(start < BCM_SYS_EMI_START_ADDR); 1207 1208 if (unlikely(end <= start)) 1209 return; 1210 1211 if ((end - start) >= l2x0_size) { 1212 outer_cache.flush_all(); 1213 return; 1214 } 1215 1216 new_start = bcm_l2_phys_addr(start); 1217 new_end = bcm_l2_phys_addr(end); 1218 1219 /* normal case, no cross section between start and end */ 1220 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { 1221 l2c210_flush_range(new_start, new_end); 1222 return; 1223 } 1224 1225 /* They cross sections, so it can only be a cross from section 1226 * 2 to section 3 1227 */ 1228 l2c210_flush_range(new_start, 1229 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); 1230 l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), 1231 new_end); 1232 } 1233 1234 /* Broadcom L2C-310 start from ARMs R3P2 or later, and require no fixups */ 1235 static const struct l2c_init_data of_bcm_l2x0_data __initconst = { 1236 .type = "BCM-L2C-310", 1237 .num_lock = 8, 1238 .of_parse = l2c310_of_parse, 1239 .enable = l2c_enable, 1240 .save = l2c310_save, 1241 .outer_cache = { 1242 .inv_range = bcm_inv_range, 1243 .clean_range = bcm_clean_range, 1244 .flush_range = bcm_flush_range, 1245 .flush_all = l2c210_flush_all, 1246 .disable = l2c_disable, 1247 .sync = l2c210_sync, 1248 .resume = l2c310_resume, 1249 }, 1250 }; 1251 1252 static void __init tauros3_save(void __iomem *base) 1253 { 1254 l2x0_saved_regs.aux2_ctrl = 1255 readl_relaxed(base + TAUROS3_AUX2_CTRL); 1256 l2x0_saved_regs.prefetch_ctrl = 1257 readl_relaxed(base + L2X0_PREFETCH_CTRL); 1258 } 1259 1260 static void tauros3_resume(void) 1261 { 1262 void __iomem *base = l2x0_base; 1263 1264 if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) { 1265 writel_relaxed(l2x0_saved_regs.aux2_ctrl, 1266 base + TAUROS3_AUX2_CTRL); 1267 writel_relaxed(l2x0_saved_regs.prefetch_ctrl, 1268 base + L2X0_PREFETCH_CTRL); 1269 1270 l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8); 1271 } 1272 } 1273 1274 static const struct l2c_init_data of_tauros3_data __initconst = { 1275 .type = "Tauros3", 1276 .num_lock = 8, 1277 .enable = l2c_enable, 1278 .save = tauros3_save, 1279 /* Tauros3 broadcasts L1 cache operations to L2 */ 1280 .outer_cache = { 1281 .resume = tauros3_resume, 1282 }, 1283 }; 1284 1285 #define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns } 1286 static const struct of_device_id l2x0_ids[] __initconst = { 1287 L2C_ID("arm,l210-cache", of_l2c210_data), 1288 L2C_ID("arm,l220-cache", of_l2c220_data), 1289 L2C_ID("arm,pl310-cache", of_l2c310_data), 1290 L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data), 1291 L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data), 1292 L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data), 1293 L2C_ID("marvell,tauros3-cache", of_tauros3_data), 1294 /* Deprecated IDs */ 1295 L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data), 1296 {} 1297 }; 1298 1299 int __init l2x0_of_init(u32 aux_val, u32 aux_mask) 1300 { 1301 const struct l2c_init_data *data; 1302 struct device_node *np; 1303 struct resource res; 1304 u32 cache_id; 1305 1306 np = of_find_matching_node(NULL, l2x0_ids); 1307 if (!np) 1308 return -ENODEV; 1309 1310 if (of_address_to_resource(np, 0, &res)) 1311 return -ENODEV; 1312 1313 l2x0_base = ioremap(res.start, resource_size(&res)); 1314 if (!l2x0_base) 1315 return -ENOMEM; 1316 1317 l2x0_saved_regs.phy_base = res.start; 1318 1319 data = of_match_node(l2x0_ids, np)->data; 1320 1321 /* L2 configuration can only be changed if the cache is disabled */ 1322 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) 1323 if (data->of_parse) 1324 data->of_parse(np, &aux_val, &aux_mask); 1325 1326 if (cache_id_part_number_from_dt) 1327 cache_id = cache_id_part_number_from_dt; 1328 else 1329 cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); 1330 1331 __l2c_init(data, aux_val, aux_mask, cache_id); 1332 1333 return 0; 1334 } 1335 #endif 1336