/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"

struct l2c_init_data {
	unsigned num_lock;
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*enable)(void __iomem *, u32, unsigned);
	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
	void (*save)(void __iomem *);
	struct outer_cache_fns outer_cache;
};

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

/*
 * Common code for all cache controllers.
 */
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

/*
 * This should only be called when we have a requirement that the
 * register be written due to a work-around, as platforms running
 * in non-secure mode may not be able to access this register.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	outer_cache.set_debug(val);
}

static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	l2c_wait_mask(reg, l2x0_way_mask);
}

static inline void l2c_unlock(void __iomem *base, unsigned num)
{
	unsigned i;

	for (i = 0; i < num; i++) {
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}
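/*
 * To illustrate the way-based operations above (a worked example, not
 * extra behaviour): on an 8-way cache, l2x0_way_mask is (1 << 8) - 1 =
 * 0xff, so __l2c_op_way() writes 0xff to the given way-operation
 * register and l2c_wait_mask() then polls until the hardware has
 * cleared all eight bits, i.e. finished the operation on every way.
 */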
/*
 * Enable the L2 cache controller.  This function must only be
 * called when the cache controller is known to be disabled.
 */
static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned long flags;

	/* Only write the aux register if it needs changing */
	if (readl_relaxed(base + L2X0_AUX_CTRL) != aux)
		writel_relaxed(aux, base + L2X0_AUX_CTRL);

	l2c_unlock(base, num_lock);

	local_irq_save(flags);
	__l2c_op_way(base + L2X0_INV_WAY);
	writel_relaxed(0, base + sync_reg_offset);
	l2c_wait_mask(base + sync_reg_offset, 1);
	local_irq_restore(flags);

	writel_relaxed(L2X0_CTRL_EN, base + L2X0_CTRL);
}

static void l2c_disable(void)
{
	void __iomem *base = l2x0_base;

	outer_cache.flush_all();
	writel_relaxed(0, base + L2X0_CTRL);
	dsb(st);
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	l2c_wait_mask
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug)
		l2c_set_debug(l2x0_base, val);
}

static void pl310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define pl310_set_debug	NULL
#endif

#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_WAY);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a no-no */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
	__l2c_op_way(l2x0_base + L2X0_INV_WAY);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
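/*
 * The range operations below work on whole cache lines and drop
 * l2x0_lock after each 4096 byte block: a 16 KiB range, for example, is
 * processed as four blocks with the lock released (and interrupts
 * re-enabled) in between, which bounds the interrupt latency added by
 * a long range operation.
 */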
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_unlock(u32 cache_id)
{
	int lockregs;

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		lockregs = 8;
		break;
	default:
		/* L210 and unknown types */
		lockregs = 1;
		break;
	}

	l2c_unlock(l2x0_base, lockregs);
}
static void l2x0_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	/* l2x0 controller is disabled */
	writel_relaxed(aux, base + L2X0_AUX_CTRL);

	/* Make sure that I&D is not locked down when starting */
	l2x0_unlock(readl_relaxed(base + L2X0_CACHE_ID));

	l2x0_inv_all();

	/* enable L2X0 */
	writel_relaxed(L2X0_CTRL_EN, base + L2X0_CTRL);
}

static void l2x0_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore aux ctrl and enable l2 */
		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));

		writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
			L2X0_AUX_CTRL);

		l2x0_inv_all();

		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
	}
}

static const struct l2c_init_data l2x0_init_fns __initconst = {
	.enable = l2x0_enable,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = l2x0_resume,
	},
};

/*
 * L2C-310 specific code.
 *
 * Errata:
 * 588369: PL310 R0P0->R1P0, fixed R2P0.
 *	Affects: all clean+invalidate operations
 *	clean and invalidate skips the invalidate step, so we need to issue
 *	separate operations.  We also require the above debug workaround
 *	enclosing this code fragment on affected parts.  On unaffected parts,
 *	we must not use this workaround without the debug register writes
 *	to avoid exposing a problem similar to 727915.
 *
 * 727915: PL310 R2P0->R3P0, fixed R3P1.
 *	Affects: clean+invalidate by way
 *	clean and invalidate by way runs in the background, and a store can
 *	hit the line between the clean operation and invalidate operation,
 *	resulting in the store being lost.
 *
 * 753970: PL310 R3P0, fixed R3P1.
 *	Affects: sync
 *	prevents merging writes after the sync operation, until another L2C
 *	operation is performed (or a number of other conditions.)
 *
 * 769419: PL310 R0P0->R3P1, fixed R3P2.
 *	Affects: store buffer
 *	store buffer is not automatically drained.
 */
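/*
 * A sketch of the 588369 workaround shape, as it appears in the range
 * operations above (illustrative, not additional code):
 *
 *	debug_writel(0x03);	-- disable linefills and write-back
 *	l2x0_flush_line(addr);	-- separate clean-by-PA then inv-by-PA
 *	debug_writel(0x00);	-- restore normal behaviour
 */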
static void __init pl310_save(void __iomem *base)
{
	u32 l2x0_revision = readl_relaxed(base + L2X0_CACHE_ID) &
		L2X0_CACHE_ID_RTL_MASK;

	l2x0_saved_regs.tag_latency = readl_relaxed(base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(base +
		L2X0_ADDR_FILTER_START);

	if (l2x0_revision >= L310_CACHE_ID_RTL_R2P0) {
		/*
		 * From r2p0, there is Prefetch offset/control register
		 */
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
			L2X0_PREFETCH_CTRL);
		/*
		 * From r3p0, there is Power control register
		 */
		if (l2x0_revision >= L310_CACHE_ID_RTL_R3P0)
			l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
				L2X0_POWER_CTRL);
	}
}

static void pl310_resume(void)
{
	u32 l2x0_revision;

	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			l2x0_base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			l2x0_base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			l2x0_base + L2X0_ADDR_FILTER_START);

		l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

		if (l2x0_revision >= L310_CACHE_ID_RTL_R2P0) {
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				l2x0_base + L2X0_PREFETCH_CTRL);
			if (l2x0_revision >= L310_CACHE_ID_RTL_R3P0)
				writel_relaxed(l2x0_saved_regs.pwr_ctrl,
					l2x0_base + L2X0_POWER_CTRL);
		}
	}

	l2x0_resume();
}

static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
	const char *errata[4];
	unsigned n = 0;

	if (revision <= L310_CACHE_ID_RTL_R3P0)
		fns->set_debug = pl310_set_debug;

	if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
	    revision == L310_CACHE_ID_RTL_R3P0) {
		sync_reg_offset = L2X0_DUMMY_REG;
		errata[n++] = "753970";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
		errata[n++] = "769419";

	if (n) {
		unsigned i;

		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
		for (i = 0; i < n; i++)
			pr_cont(" %s", errata[i]);
		pr_cont(" enabled\n");
	}
}

static const struct l2c_init_data l2c310_init_fns __initconst = {
	.num_lock = 8,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = pl310_save,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = pl310_resume,
	},
};
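/*
 * A worked example for the size computation in __l2c_init() below
 * (illustrative values): on an 8-way L310 whose AUX_CTRL way-size field
 * reads 3, way_size = 1 << (3 + L2X0_WAY_SIZE_SHIFT) = 1 << 6 = 64,
 * so l2x0_size = 8 * 64 * SZ_1K = 512 KiB.
 */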
static void __init __l2c_init(const struct l2c_init_data *data,
	u32 aux_val, u32 aux_mask, u32 cache_id)
{
	struct outer_cache_fns fns;
	u32 aux;
	u32 way_size = 0;
	int ways;
	int way_size_shift = L2X0_WAY_SIZE_SHIFT;
	const char *type;

	/*
	 * It is strange to save the register state before initialisation,
	 * but hey, this is what the DT implementations decided to do.
	 */
	if (data->save)
		data->save(l2x0_base);

	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
		break;

	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;

	case AURORA_CACHE_ID:
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
		way_size_shift = AURORA_WAY_SIZE_SHIFT;
		type = "Aurora";
		break;

	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache Size = Way size * Number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + way_size_shift);

	l2x0_size = ways * way_size * SZ_1K;

	fns = data->outer_cache;
	if (data->fixup)
		data->fixup(l2x0_base, cache_id, &fns);

	/*
	 * Check if the l2x0 controller is already enabled.  If we are
	 * booting in non-secure mode, accessing the below registers will
	 * fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		data->enable(l2x0_base, aux, data->num_lock);

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	/* Save the value for resuming. */
	l2x0_saved_regs.aux_ctrl = aux;

	outer_cache = fns;

	pr_info("%s cache controller enabled, %d ways, %d kB\n",
		type, ways, l2x0_size >> 10);
	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
		type, cache_id, aux);
}

void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	default:
		data = &l2x0_init_fns;
		break;

	case L2X0_CACHE_ID_PART_L310:
		data = &l2c310_init_fns;
		break;
	}

	__l2c_init(data, aux_val, aux_mask, cache_id);
}

#ifdef CONFIG_OF
static int l2_wt_override;

/*
 * Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree
 */
static u32 cache_id_part_number_from_dt;

static void __init l2x0_of_parse(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}
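/*
 * An illustrative (made-up) device tree node consumed by the parser
 * above; latencies are given in cycles and converted to the 0-based
 * AUX_CTRL encoding by subtracting one:
 *
 *	L2: cache-controller@fff12000 {
 *		compatible = "arm,l220-cache";
 *		reg = <0xfff12000 0x1000>;
 *		arm,tag-latency = <2>;
 *		arm,data-latency = <2 2>;
 *		arm,dirty-latency = <1>;
 *		cache-unified;
 *		cache-level = <2>;
 *	};
 */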
static const struct l2c_init_data of_l2x0_data __initconst = {
	.of_parse = l2x0_of_parse,
	.enable = l2x0_enable,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = l2x0_resume,
	},
};

static void __init pl310_of_parse(const struct device_node *np,
				  u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}

static const struct l2c_init_data of_pl310_data __initconst = {
	.num_lock = 8,
	.of_parse = pl310_of_parse,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = pl310_save,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = pl310_resume,
	},
};

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}
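/*
 * Worked example (assuming 4 KiB pages and MAX_RANGE_SIZE >= SZ_4K):
 * calc_range_end(0x1f40, 0x3000) returns 0x2000, because the operation
 * may not cross the page boundary at 0x2000 even though the requested
 * range extends further; the caller then loops again with start set to
 * 0x2000.
 */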
/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			unsigned long offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
	writel_relaxed(end, l2x0_base + offset);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	cache_sync();
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * Round the start address down and the end address up to
	 * cache line boundaries.
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
				AURORA_INVAL_RANGE_REG);
		start = range_end;
	}
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}

static void aurora_save(void __iomem *base)
{
	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}

static void aurora_resume(void)
{
	if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl,
			       l2x0_base + L2X0_AUX_CTRL);
		writel_relaxed(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL);
	}
}
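/*
 * In "no outer" mode (below), ordinary CP15 cache maintenance issued
 * by the CPU, for example
 *
 *	mcr	p15, 0, r0, c7, c10, 1	@ DCCMVAC: clean D-line by MVA
 *
 * is also broadcast to the L2 once the FW bit is set, so no explicit
 * outer_cache range callbacks are installed for that configuration.
 * (The mcr encoding above is standard ARMv7, shown only to illustrate
 * the broadcast mechanism.)
 */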
/*
 * For Aurora cache in no outer mode, enable via the CP15 coprocessor
 * broadcasting of cache commands to L2.
 */
static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
	unsigned num_lock)
{
	u32 u;

	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));

	isb();

	l2c_enable(base, aux, num_lock);
}

static void __init aurora_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	sync_reg_offset = AURORA_SYNC_REG;
}

static void __init aurora_of_parse(const struct device_node *np,
				u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			&cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = l2c_enable,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = aurora_resume,
	},
};

static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = aurora_enable_no_outer,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.resume = aurora_resume,
	},
};

/*
 * For certain Broadcom SoCs, depending on the address range, different
 * offsets need to be added to the address before passing it to L2 for
 * invalidation/clean/flush
 *
 * Section	Address Range			Offset		EMI
 *   1		0x00000000 - 0x3FFFFFFF		0x80000000	VC
 *   2		0x40000000 - 0xBFFFFFFF		0x40000000	SYS
 *   3		0xC0000000 - 0xFFFFFFFF		0x80000000	VC
 *
 * When the start and end addresses fall in two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, if we need to invalidate a range that starts at 0xBFFF0000
 * and ends at 0xC0001000, we need to invalidate 1) 0xBFFF0000 - 0xBFFFFFFF
 * and 2) 0xC0000000 - 0xC0001000.
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer some
 * performance hit, but keep in mind the cross-section case is very rare.
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid use
 * case.
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2.  Because of
 * that, the code does not need to handle section 1 at all.
 */
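/*
 * Illustrative results of the mapping implemented below; the section 3
 * case relies on 32-bit unsigned wraparound:
 *
 *	bcm_l2_phys_addr(0x40000000) == 0x80000000  (section 2, +0x40000000)
 *	bcm_l2_phys_addr(0xBFFFFFFF) == 0xFFFFFFFF  (section 2, +0x40000000)
 *	bcm_l2_phys_addr(0xC0000000) == 0x40000000  (section 3, +0x80000000)
 */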
#define BCM_SYS_EMI_START_ADDR		0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR	0xC0000000UL

#define BCM_SYS_EMI_OFFSET		0x40000000UL
#define BCM_VC_EMI_OFFSET		0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}

static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_inv_range(new_start, new_end);
		return;
	}

	/*
	 * They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_clean_range(new_start, new_end);
		return;
	}

	/*
	 * They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_flush_range(new_start, new_end);
		return;
	}

	/*
	 * They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.num_lock = 8,
	.of_parse = pl310_of_parse,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = pl310_save,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = pl310_resume,
	},
};

static void __init tauros3_save(void __iomem *base)
{
	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L2X0_PREFETCH_CTRL);
}

static void tauros3_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
			       l2x0_base + TAUROS3_AUX2_CTRL);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       l2x0_base + L2X0_PREFETCH_CTRL);
	}

	l2x0_resume();
}

static const struct l2c_init_data of_tauros3_data __initconst = {
	.num_lock = 8,
	.enable = l2c_enable,
	.save = tauros3_save,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = tauros3_resume,
	},
};

#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2x0_data),
	L2C_ID("arm,l220-cache", of_l2x0_data),
	L2C_ID("arm,pl310-cache", of_pl310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};
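/*
 * A typical (illustrative) call from a machine's init code, passing no
 * extra AUX_CTRL overrides so that only the DT-specified bits are
 * changed:
 *
 *	l2x0_of_init(0, ~0);
 */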
int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	__l2c_init(data, aux_val, aux_mask, cache_id);

	return 0;
}
#endif