1 /* 2 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support 3 * 4 * Copyright (C) 2007 ARM Limited 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program; if not, write to the Free Software 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 */ 19 #include <linux/err.h> 20 #include <linux/init.h> 21 #include <linux/spinlock.h> 22 #include <linux/io.h> 23 #include <linux/of.h> 24 #include <linux/of_address.h> 25 26 #include <asm/cacheflush.h> 27 #include <asm/hardware/cache-l2x0.h> 28 #include "cache-tauros3.h" 29 #include "cache-aurora-l2.h" 30 31 struct l2c_init_data { 32 void (*of_parse)(const struct device_node *, u32 *, u32 *); 33 void (*save)(void __iomem *); 34 struct outer_cache_fns outer_cache; 35 }; 36 37 #define CACHE_LINE_SIZE 32 38 39 static void __iomem *l2x0_base; 40 static DEFINE_RAW_SPINLOCK(l2x0_lock); 41 static u32 l2x0_way_mask; /* Bitmask of active ways */ 42 static u32 l2x0_size; 43 static unsigned long sync_reg_offset = L2X0_CACHE_SYNC; 44 45 struct l2x0_regs l2x0_saved_regs; 46 47 /* 48 * Common code for all cache controllers. 49 */ 50 static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask) 51 { 52 /* wait for cache operation by line or way to complete */ 53 while (readl_relaxed(reg) & mask) 54 cpu_relax(); 55 } 56 57 /* 58 * This should only be called when we have a requirement that the 59 * register be written due to a work-around, as platforms running 60 * in non-secure mode may not be able to access this register. 61 */ 62 static inline void l2c_set_debug(void __iomem *base, unsigned long val) 63 { 64 outer_cache.set_debug(val); 65 } 66 67 static void __l2c_op_way(void __iomem *reg) 68 { 69 writel_relaxed(l2x0_way_mask, reg); 70 l2c_wait_mask(reg, l2x0_way_mask); 71 } 72 73 static inline void l2c_unlock(void __iomem *base, unsigned num) 74 { 75 unsigned i; 76 77 for (i = 0; i < num; i++) { 78 writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE + 79 i * L2X0_LOCKDOWN_STRIDE); 80 writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE + 81 i * L2X0_LOCKDOWN_STRIDE); 82 } 83 } 84 85 #ifdef CONFIG_CACHE_PL310 86 static inline void cache_wait(void __iomem *reg, unsigned long mask) 87 { 88 /* cache operations by line are atomic on PL310 */ 89 } 90 #else 91 #define cache_wait l2c_wait_mask 92 #endif 93 94 static inline void cache_sync(void) 95 { 96 void __iomem *base = l2x0_base; 97 98 writel_relaxed(0, base + sync_reg_offset); 99 cache_wait(base + L2X0_CACHE_SYNC, 1); 100 } 101 102 static inline void l2x0_clean_line(unsigned long addr) 103 { 104 void __iomem *base = l2x0_base; 105 cache_wait(base + L2X0_CLEAN_LINE_PA, 1); 106 writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA); 107 } 108 109 static inline void l2x0_inv_line(unsigned long addr) 110 { 111 void __iomem *base = l2x0_base; 112 cache_wait(base + L2X0_INV_LINE_PA, 1); 113 writel_relaxed(addr, base + L2X0_INV_LINE_PA); 114 } 115 116 #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915) 117 static inline void debug_writel(unsigned long val) 118 { 119 if (outer_cache.set_debug) 120 l2c_set_debug(l2x0_base, val); 121 } 122 123 static void pl310_set_debug(unsigned long val) 124 { 125 writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL); 126 } 127 #else 128 /* Optimised out for non-errata case */ 129 static inline void debug_writel(unsigned long val) 130 { 131 } 132 133 #define pl310_set_debug NULL 134 #endif 135 136 #ifdef CONFIG_PL310_ERRATA_588369 137 static inline void l2x0_flush_line(unsigned long addr) 138 { 139 void __iomem *base = l2x0_base; 140 141 /* Clean by PA followed by Invalidate by PA */ 142 cache_wait(base + L2X0_CLEAN_LINE_PA, 1); 143 writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA); 144 cache_wait(base + L2X0_INV_LINE_PA, 1); 145 writel_relaxed(addr, base + L2X0_INV_LINE_PA); 146 } 147 #else 148 149 static inline void l2x0_flush_line(unsigned long addr) 150 { 151 void __iomem *base = l2x0_base; 152 cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); 153 writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA); 154 } 155 #endif 156 157 static void l2x0_cache_sync(void) 158 { 159 unsigned long flags; 160 161 raw_spin_lock_irqsave(&l2x0_lock, flags); 162 cache_sync(); 163 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 164 } 165 166 static void __l2x0_flush_all(void) 167 { 168 debug_writel(0x03); 169 __l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY); 170 cache_sync(); 171 debug_writel(0x00); 172 } 173 174 static void l2x0_flush_all(void) 175 { 176 unsigned long flags; 177 178 /* clean all ways */ 179 raw_spin_lock_irqsave(&l2x0_lock, flags); 180 __l2x0_flush_all(); 181 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 182 } 183 184 static void l2x0_clean_all(void) 185 { 186 unsigned long flags; 187 188 /* clean all ways */ 189 raw_spin_lock_irqsave(&l2x0_lock, flags); 190 __l2c_op_way(l2x0_base + L2X0_CLEAN_WAY); 191 cache_sync(); 192 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 193 } 194 195 static void l2x0_inv_all(void) 196 { 197 unsigned long flags; 198 199 /* invalidate all ways */ 200 raw_spin_lock_irqsave(&l2x0_lock, flags); 201 /* Invalidating when L2 is enabled is a nono */ 202 BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN); 203 __l2c_op_way(l2x0_base + L2X0_INV_WAY); 204 cache_sync(); 205 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 206 } 207 208 static void l2x0_inv_range(unsigned long start, unsigned long end) 209 { 210 void __iomem *base = l2x0_base; 211 unsigned long flags; 212 213 raw_spin_lock_irqsave(&l2x0_lock, flags); 214 if (start & (CACHE_LINE_SIZE - 1)) { 215 start &= ~(CACHE_LINE_SIZE - 1); 216 debug_writel(0x03); 217 l2x0_flush_line(start); 218 debug_writel(0x00); 219 start += CACHE_LINE_SIZE; 220 } 221 222 if (end & (CACHE_LINE_SIZE - 1)) { 223 end &= ~(CACHE_LINE_SIZE - 1); 224 debug_writel(0x03); 225 l2x0_flush_line(end); 226 debug_writel(0x00); 227 } 228 229 while (start < end) { 230 unsigned long blk_end = start + min(end - start, 4096UL); 231 232 while (start < blk_end) { 233 l2x0_inv_line(start); 234 start += CACHE_LINE_SIZE; 235 } 236 237 if (blk_end < end) { 238 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 239 raw_spin_lock_irqsave(&l2x0_lock, flags); 240 } 241 } 242 cache_wait(base + L2X0_INV_LINE_PA, 1); 243 cache_sync(); 244 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 245 } 246 247 static void l2x0_clean_range(unsigned long start, unsigned long end) 248 { 249 void __iomem *base = l2x0_base; 250 unsigned long flags; 251 252 if ((end - start) >= l2x0_size) { 253 l2x0_clean_all(); 254 return; 255 } 256 257 raw_spin_lock_irqsave(&l2x0_lock, flags); 258 start &= ~(CACHE_LINE_SIZE - 1); 259 while (start < end) { 260 unsigned long blk_end = start + min(end - start, 4096UL); 261 262 while (start < blk_end) { 263 l2x0_clean_line(start); 264 start += CACHE_LINE_SIZE; 265 } 266 267 if (blk_end < end) { 268 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 269 raw_spin_lock_irqsave(&l2x0_lock, flags); 270 } 271 } 272 cache_wait(base + L2X0_CLEAN_LINE_PA, 1); 273 cache_sync(); 274 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 275 } 276 277 static void l2x0_flush_range(unsigned long start, unsigned long end) 278 { 279 void __iomem *base = l2x0_base; 280 unsigned long flags; 281 282 if ((end - start) >= l2x0_size) { 283 l2x0_flush_all(); 284 return; 285 } 286 287 raw_spin_lock_irqsave(&l2x0_lock, flags); 288 start &= ~(CACHE_LINE_SIZE - 1); 289 while (start < end) { 290 unsigned long blk_end = start + min(end - start, 4096UL); 291 292 debug_writel(0x03); 293 while (start < blk_end) { 294 l2x0_flush_line(start); 295 start += CACHE_LINE_SIZE; 296 } 297 debug_writel(0x00); 298 299 if (blk_end < end) { 300 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 301 raw_spin_lock_irqsave(&l2x0_lock, flags); 302 } 303 } 304 cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); 305 cache_sync(); 306 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 307 } 308 309 static void l2x0_disable(void) 310 { 311 unsigned long flags; 312 313 raw_spin_lock_irqsave(&l2x0_lock, flags); 314 __l2x0_flush_all(); 315 writel_relaxed(0, l2x0_base + L2X0_CTRL); 316 dsb(st); 317 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 318 } 319 320 static void l2x0_unlock(u32 cache_id) 321 { 322 int lockregs; 323 324 switch (cache_id & L2X0_CACHE_ID_PART_MASK) { 325 case L2X0_CACHE_ID_PART_L310: 326 lockregs = 8; 327 break; 328 case AURORA_CACHE_ID: 329 lockregs = 4; 330 break; 331 default: 332 /* L210 and unknown types */ 333 lockregs = 1; 334 break; 335 } 336 337 l2c_unlock(l2x0_base, lockregs); 338 } 339 340 static const struct l2c_init_data l2x0_init_fns __initconst = { 341 .outer_cache = { 342 .inv_range = l2x0_inv_range, 343 .clean_range = l2x0_clean_range, 344 .flush_range = l2x0_flush_range, 345 .flush_all = l2x0_flush_all, 346 .disable = l2x0_disable, 347 .sync = l2x0_cache_sync, 348 }, 349 }; 350 351 static void __init __l2c_init(const struct l2c_init_data *data, 352 u32 aux_val, u32 aux_mask, u32 cache_id) 353 { 354 u32 aux; 355 u32 way_size = 0; 356 int ways; 357 int way_size_shift = L2X0_WAY_SIZE_SHIFT; 358 const char *type; 359 360 /* 361 * It is strange to save the register state before initialisation, 362 * but hey, this is what the DT implementations decided to do. 363 */ 364 if (data->save) 365 data->save(l2x0_base); 366 367 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 368 369 aux &= aux_mask; 370 aux |= aux_val; 371 372 /* Determine the number of ways */ 373 switch (cache_id & L2X0_CACHE_ID_PART_MASK) { 374 case L2X0_CACHE_ID_PART_L310: 375 if (aux & (1 << 16)) 376 ways = 16; 377 else 378 ways = 8; 379 type = "L310"; 380 #ifdef CONFIG_PL310_ERRATA_753970 381 /* Unmapped register. */ 382 sync_reg_offset = L2X0_DUMMY_REG; 383 #endif 384 break; 385 case L2X0_CACHE_ID_PART_L210: 386 ways = (aux >> 13) & 0xf; 387 type = "L210"; 388 break; 389 390 case AURORA_CACHE_ID: 391 sync_reg_offset = AURORA_SYNC_REG; 392 ways = (aux >> 13) & 0xf; 393 ways = 2 << ((ways + 1) >> 2); 394 way_size_shift = AURORA_WAY_SIZE_SHIFT; 395 type = "Aurora"; 396 break; 397 default: 398 /* Assume unknown chips have 8 ways */ 399 ways = 8; 400 type = "L2x0 series"; 401 break; 402 } 403 404 l2x0_way_mask = (1 << ways) - 1; 405 406 /* 407 * L2 cache Size = Way size * Number of ways 408 */ 409 way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17; 410 way_size = 1 << (way_size + way_size_shift); 411 412 l2x0_size = ways * way_size * SZ_1K; 413 414 /* 415 * Check if l2x0 controller is already enabled. 416 * If you are booting from non-secure mode 417 * accessing the below registers will fault. 418 */ 419 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { 420 /* Make sure that I&D is not locked down when starting */ 421 l2x0_unlock(cache_id); 422 423 /* l2x0 controller is disabled */ 424 writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL); 425 426 l2x0_inv_all(); 427 428 /* enable L2X0 */ 429 writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL); 430 } 431 432 /* Re-read it in case some bits are reserved. */ 433 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 434 435 /* Save the value for resuming. */ 436 l2x0_saved_regs.aux_ctrl = aux; 437 438 outer_cache = data->outer_cache; 439 440 if ((cache_id & L2X0_CACHE_ID_PART_MASK) == L2X0_CACHE_ID_PART_L310 && 441 (cache_id & L2X0_CACHE_ID_RTL_MASK) <= L310_CACHE_ID_RTL_R3P0) 442 outer_cache.set_debug = pl310_set_debug; 443 444 pr_info("%s cache controller enabled\n", type); 445 pr_info("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d kB\n", 446 ways, cache_id, aux, l2x0_size >> 10); 447 } 448 449 void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) 450 { 451 u32 cache_id; 452 453 l2x0_base = base; 454 455 cache_id = readl_relaxed(base + L2X0_CACHE_ID); 456 457 __l2c_init(&l2x0_init_fns, aux_val, aux_mask, cache_id); 458 } 459 460 #ifdef CONFIG_OF 461 static int l2_wt_override; 462 463 /* Aurora don't have the cache ID register available, so we have to 464 * pass it though the device tree */ 465 static u32 cache_id_part_number_from_dt; 466 467 static void __init l2x0_of_parse(const struct device_node *np, 468 u32 *aux_val, u32 *aux_mask) 469 { 470 u32 data[2] = { 0, 0 }; 471 u32 tag = 0; 472 u32 dirty = 0; 473 u32 val = 0, mask = 0; 474 475 of_property_read_u32(np, "arm,tag-latency", &tag); 476 if (tag) { 477 mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK; 478 val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT; 479 } 480 481 of_property_read_u32_array(np, "arm,data-latency", 482 data, ARRAY_SIZE(data)); 483 if (data[0] && data[1]) { 484 mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK | 485 L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK; 486 val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) | 487 ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT); 488 } 489 490 of_property_read_u32(np, "arm,dirty-latency", &dirty); 491 if (dirty) { 492 mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK; 493 val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT; 494 } 495 496 *aux_val &= ~mask; 497 *aux_val |= val; 498 *aux_mask &= ~mask; 499 } 500 501 static void l2x0_resume(void) 502 { 503 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { 504 /* restore aux ctrl and enable l2 */ 505 l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID)); 506 507 writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base + 508 L2X0_AUX_CTRL); 509 510 l2x0_inv_all(); 511 512 writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL); 513 } 514 } 515 516 static const struct l2c_init_data of_l2x0_data __initconst = { 517 .of_parse = l2x0_of_parse, 518 .outer_cache = { 519 .inv_range = l2x0_inv_range, 520 .clean_range = l2x0_clean_range, 521 .flush_range = l2x0_flush_range, 522 .flush_all = l2x0_flush_all, 523 .disable = l2x0_disable, 524 .sync = l2x0_cache_sync, 525 .resume = l2x0_resume, 526 }, 527 }; 528 529 static void __init pl310_of_parse(const struct device_node *np, 530 u32 *aux_val, u32 *aux_mask) 531 { 532 u32 data[3] = { 0, 0, 0 }; 533 u32 tag[3] = { 0, 0, 0 }; 534 u32 filter[2] = { 0, 0 }; 535 536 of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag)); 537 if (tag[0] && tag[1] && tag[2]) 538 writel_relaxed( 539 ((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) | 540 ((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) | 541 ((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT), 542 l2x0_base + L2X0_TAG_LATENCY_CTRL); 543 544 of_property_read_u32_array(np, "arm,data-latency", 545 data, ARRAY_SIZE(data)); 546 if (data[0] && data[1] && data[2]) 547 writel_relaxed( 548 ((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) | 549 ((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) | 550 ((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT), 551 l2x0_base + L2X0_DATA_LATENCY_CTRL); 552 553 of_property_read_u32_array(np, "arm,filter-ranges", 554 filter, ARRAY_SIZE(filter)); 555 if (filter[1]) { 556 writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M), 557 l2x0_base + L2X0_ADDR_FILTER_END); 558 writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN, 559 l2x0_base + L2X0_ADDR_FILTER_START); 560 } 561 } 562 563 static void __init pl310_save(void __iomem *base) 564 { 565 u32 l2x0_revision = readl_relaxed(base + L2X0_CACHE_ID) & 566 L2X0_CACHE_ID_RTL_MASK; 567 568 l2x0_saved_regs.tag_latency = readl_relaxed(base + 569 L2X0_TAG_LATENCY_CTRL); 570 l2x0_saved_regs.data_latency = readl_relaxed(base + 571 L2X0_DATA_LATENCY_CTRL); 572 l2x0_saved_regs.filter_end = readl_relaxed(base + 573 L2X0_ADDR_FILTER_END); 574 l2x0_saved_regs.filter_start = readl_relaxed(base + 575 L2X0_ADDR_FILTER_START); 576 577 if (l2x0_revision >= L310_CACHE_ID_RTL_R2P0) { 578 /* 579 * From r2p0, there is Prefetch offset/control register 580 */ 581 l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base + 582 L2X0_PREFETCH_CTRL); 583 /* 584 * From r3p0, there is Power control register 585 */ 586 if (l2x0_revision >= L310_CACHE_ID_RTL_R3P0) 587 l2x0_saved_regs.pwr_ctrl = readl_relaxed(base + 588 L2X0_POWER_CTRL); 589 } 590 } 591 592 static void pl310_resume(void) 593 { 594 u32 l2x0_revision; 595 596 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { 597 /* restore pl310 setup */ 598 writel_relaxed(l2x0_saved_regs.tag_latency, 599 l2x0_base + L2X0_TAG_LATENCY_CTRL); 600 writel_relaxed(l2x0_saved_regs.data_latency, 601 l2x0_base + L2X0_DATA_LATENCY_CTRL); 602 writel_relaxed(l2x0_saved_regs.filter_end, 603 l2x0_base + L2X0_ADDR_FILTER_END); 604 writel_relaxed(l2x0_saved_regs.filter_start, 605 l2x0_base + L2X0_ADDR_FILTER_START); 606 607 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) & 608 L2X0_CACHE_ID_RTL_MASK; 609 610 if (l2x0_revision >= L310_CACHE_ID_RTL_R2P0) { 611 writel_relaxed(l2x0_saved_regs.prefetch_ctrl, 612 l2x0_base + L2X0_PREFETCH_CTRL); 613 if (l2x0_revision >= L310_CACHE_ID_RTL_R3P0) 614 writel_relaxed(l2x0_saved_regs.pwr_ctrl, 615 l2x0_base + L2X0_POWER_CTRL); 616 } 617 } 618 619 l2x0_resume(); 620 } 621 622 static const struct l2c_init_data of_pl310_data __initconst = { 623 .of_parse = pl310_of_parse, 624 .save = pl310_save, 625 .outer_cache = { 626 .inv_range = l2x0_inv_range, 627 .clean_range = l2x0_clean_range, 628 .flush_range = l2x0_flush_range, 629 .flush_all = l2x0_flush_all, 630 .disable = l2x0_disable, 631 .sync = l2x0_cache_sync, 632 .resume = pl310_resume, 633 }, 634 }; 635 636 /* 637 * Note that the end addresses passed to Linux primitives are 638 * noninclusive, while the hardware cache range operations use 639 * inclusive start and end addresses. 640 */ 641 static unsigned long calc_range_end(unsigned long start, unsigned long end) 642 { 643 /* 644 * Limit the number of cache lines processed at once, 645 * since cache range operations stall the CPU pipeline 646 * until completion. 647 */ 648 if (end > start + MAX_RANGE_SIZE) 649 end = start + MAX_RANGE_SIZE; 650 651 /* 652 * Cache range operations can't straddle a page boundary. 653 */ 654 if (end > PAGE_ALIGN(start+1)) 655 end = PAGE_ALIGN(start+1); 656 657 return end; 658 } 659 660 /* 661 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT 662 * and range operations only do a TLB lookup on the start address. 663 */ 664 static void aurora_pa_range(unsigned long start, unsigned long end, 665 unsigned long offset) 666 { 667 unsigned long flags; 668 669 raw_spin_lock_irqsave(&l2x0_lock, flags); 670 writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG); 671 writel_relaxed(end, l2x0_base + offset); 672 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 673 674 cache_sync(); 675 } 676 677 static void aurora_inv_range(unsigned long start, unsigned long end) 678 { 679 /* 680 * round start and end adresses up to cache line size 681 */ 682 start &= ~(CACHE_LINE_SIZE - 1); 683 end = ALIGN(end, CACHE_LINE_SIZE); 684 685 /* 686 * Invalidate all full cache lines between 'start' and 'end'. 687 */ 688 while (start < end) { 689 unsigned long range_end = calc_range_end(start, end); 690 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 691 AURORA_INVAL_RANGE_REG); 692 start = range_end; 693 } 694 } 695 696 static void aurora_clean_range(unsigned long start, unsigned long end) 697 { 698 /* 699 * If L2 is forced to WT, the L2 will always be clean and we 700 * don't need to do anything here. 701 */ 702 if (!l2_wt_override) { 703 start &= ~(CACHE_LINE_SIZE - 1); 704 end = ALIGN(end, CACHE_LINE_SIZE); 705 while (start != end) { 706 unsigned long range_end = calc_range_end(start, end); 707 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 708 AURORA_CLEAN_RANGE_REG); 709 start = range_end; 710 } 711 } 712 } 713 714 static void aurora_flush_range(unsigned long start, unsigned long end) 715 { 716 start &= ~(CACHE_LINE_SIZE - 1); 717 end = ALIGN(end, CACHE_LINE_SIZE); 718 while (start != end) { 719 unsigned long range_end = calc_range_end(start, end); 720 /* 721 * If L2 is forced to WT, the L2 will always be clean and we 722 * just need to invalidate. 723 */ 724 if (l2_wt_override) 725 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 726 AURORA_INVAL_RANGE_REG); 727 else 728 aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 729 AURORA_FLUSH_RANGE_REG); 730 start = range_end; 731 } 732 } 733 734 static void aurora_save(void __iomem *base) 735 { 736 l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL); 737 l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL); 738 } 739 740 static void aurora_resume(void) 741 { 742 if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { 743 writel_relaxed(l2x0_saved_regs.aux_ctrl, 744 l2x0_base + L2X0_AUX_CTRL); 745 writel_relaxed(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL); 746 } 747 } 748 749 static void __init aurora_broadcast_l2_commands(void) 750 { 751 __u32 u; 752 /* Enable Broadcasting of cache commands to L2*/ 753 __asm__ __volatile__("mrc p15, 1, %0, c15, c2, 0" : "=r"(u)); 754 u |= AURORA_CTRL_FW; /* Set the FW bit */ 755 __asm__ __volatile__("mcr p15, 1, %0, c15, c2, 0\n" : : "r"(u)); 756 isb(); 757 } 758 759 static void __init aurora_of_parse(const struct device_node *np, 760 u32 *aux_val, u32 *aux_mask) 761 { 762 u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU; 763 u32 mask = AURORA_ACR_REPLACEMENT_MASK; 764 765 of_property_read_u32(np, "cache-id-part", 766 &cache_id_part_number_from_dt); 767 768 /* Determine and save the write policy */ 769 l2_wt_override = of_property_read_bool(np, "wt-override"); 770 771 if (l2_wt_override) { 772 val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY; 773 mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK; 774 } 775 776 *aux_val &= ~mask; 777 *aux_val |= val; 778 *aux_mask &= ~mask; 779 } 780 781 static const struct l2c_init_data of_aurora_with_outer_data __initconst = { 782 .of_parse = aurora_of_parse, 783 .save = aurora_save, 784 .outer_cache = { 785 .inv_range = aurora_inv_range, 786 .clean_range = aurora_clean_range, 787 .flush_range = aurora_flush_range, 788 .flush_all = l2x0_flush_all, 789 .disable = l2x0_disable, 790 .sync = l2x0_cache_sync, 791 .resume = aurora_resume, 792 }, 793 }; 794 795 static const struct l2c_init_data of_aurora_no_outer_data __initconst = { 796 .of_parse = aurora_of_parse, 797 .save = aurora_save, 798 .outer_cache = { 799 .resume = aurora_resume, 800 }, 801 }; 802 803 /* 804 * For certain Broadcom SoCs, depending on the address range, different offsets 805 * need to be added to the address before passing it to L2 for 806 * invalidation/clean/flush 807 * 808 * Section Address Range Offset EMI 809 * 1 0x00000000 - 0x3FFFFFFF 0x80000000 VC 810 * 2 0x40000000 - 0xBFFFFFFF 0x40000000 SYS 811 * 3 0xC0000000 - 0xFFFFFFFF 0x80000000 VC 812 * 813 * When the start and end addresses have crossed two different sections, we 814 * need to break the L2 operation into two, each within its own section. 815 * For example, if we need to invalidate addresses starts at 0xBFFF0000 and 816 * ends at 0xC0001000, we need do invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and 2) 817 * 0xC0000000 - 0xC0001000 818 * 819 * Note 1: 820 * By breaking a single L2 operation into two, we may potentially suffer some 821 * performance hit, but keep in mind the cross section case is very rare 822 * 823 * Note 2: 824 * We do not need to handle the case when the start address is in 825 * Section 1 and the end address is in Section 3, since it is not a valid use 826 * case 827 * 828 * Note 3: 829 * Section 1 in practical terms can no longer be used on rev A2. Because of 830 * that the code does not need to handle section 1 at all. 831 * 832 */ 833 #define BCM_SYS_EMI_START_ADDR 0x40000000UL 834 #define BCM_VC_EMI_SEC3_START_ADDR 0xC0000000UL 835 836 #define BCM_SYS_EMI_OFFSET 0x40000000UL 837 #define BCM_VC_EMI_OFFSET 0x80000000UL 838 839 static inline int bcm_addr_is_sys_emi(unsigned long addr) 840 { 841 return (addr >= BCM_SYS_EMI_START_ADDR) && 842 (addr < BCM_VC_EMI_SEC3_START_ADDR); 843 } 844 845 static inline unsigned long bcm_l2_phys_addr(unsigned long addr) 846 { 847 if (bcm_addr_is_sys_emi(addr)) 848 return addr + BCM_SYS_EMI_OFFSET; 849 else 850 return addr + BCM_VC_EMI_OFFSET; 851 } 852 853 static void bcm_inv_range(unsigned long start, unsigned long end) 854 { 855 unsigned long new_start, new_end; 856 857 BUG_ON(start < BCM_SYS_EMI_START_ADDR); 858 859 if (unlikely(end <= start)) 860 return; 861 862 new_start = bcm_l2_phys_addr(start); 863 new_end = bcm_l2_phys_addr(end); 864 865 /* normal case, no cross section between start and end */ 866 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { 867 l2x0_inv_range(new_start, new_end); 868 return; 869 } 870 871 /* They cross sections, so it can only be a cross from section 872 * 2 to section 3 873 */ 874 l2x0_inv_range(new_start, 875 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); 876 l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), 877 new_end); 878 } 879 880 static void bcm_clean_range(unsigned long start, unsigned long end) 881 { 882 unsigned long new_start, new_end; 883 884 BUG_ON(start < BCM_SYS_EMI_START_ADDR); 885 886 if (unlikely(end <= start)) 887 return; 888 889 if ((end - start) >= l2x0_size) { 890 l2x0_clean_all(); 891 return; 892 } 893 894 new_start = bcm_l2_phys_addr(start); 895 new_end = bcm_l2_phys_addr(end); 896 897 /* normal case, no cross section between start and end */ 898 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { 899 l2x0_clean_range(new_start, new_end); 900 return; 901 } 902 903 /* They cross sections, so it can only be a cross from section 904 * 2 to section 3 905 */ 906 l2x0_clean_range(new_start, 907 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); 908 l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), 909 new_end); 910 } 911 912 static void bcm_flush_range(unsigned long start, unsigned long end) 913 { 914 unsigned long new_start, new_end; 915 916 BUG_ON(start < BCM_SYS_EMI_START_ADDR); 917 918 if (unlikely(end <= start)) 919 return; 920 921 if ((end - start) >= l2x0_size) { 922 l2x0_flush_all(); 923 return; 924 } 925 926 new_start = bcm_l2_phys_addr(start); 927 new_end = bcm_l2_phys_addr(end); 928 929 /* normal case, no cross section between start and end */ 930 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { 931 l2x0_flush_range(new_start, new_end); 932 return; 933 } 934 935 /* They cross sections, so it can only be a cross from section 936 * 2 to section 3 937 */ 938 l2x0_flush_range(new_start, 939 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); 940 l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), 941 new_end); 942 } 943 944 static const struct l2c_init_data of_bcm_l2x0_data __initconst = { 945 .of_parse = pl310_of_parse, 946 .save = pl310_save, 947 .outer_cache = { 948 .inv_range = bcm_inv_range, 949 .clean_range = bcm_clean_range, 950 .flush_range = bcm_flush_range, 951 .flush_all = l2x0_flush_all, 952 .disable = l2x0_disable, 953 .sync = l2x0_cache_sync, 954 .resume = pl310_resume, 955 }, 956 }; 957 958 static void __init tauros3_save(void __iomem *base) 959 { 960 l2x0_saved_regs.aux2_ctrl = 961 readl_relaxed(base + TAUROS3_AUX2_CTRL); 962 l2x0_saved_regs.prefetch_ctrl = 963 readl_relaxed(base + L2X0_PREFETCH_CTRL); 964 } 965 966 static void tauros3_resume(void) 967 { 968 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { 969 writel_relaxed(l2x0_saved_regs.aux2_ctrl, 970 l2x0_base + TAUROS3_AUX2_CTRL); 971 writel_relaxed(l2x0_saved_regs.prefetch_ctrl, 972 l2x0_base + L2X0_PREFETCH_CTRL); 973 } 974 975 l2x0_resume(); 976 } 977 978 static const struct l2c_init_data of_tauros3_data __initconst = { 979 .save = tauros3_save, 980 /* Tauros3 broadcasts L1 cache operations to L2 */ 981 .outer_cache = { 982 .resume = tauros3_resume, 983 }, 984 }; 985 986 #define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns } 987 static const struct of_device_id l2x0_ids[] __initconst = { 988 L2C_ID("arm,l210-cache", of_l2x0_data), 989 L2C_ID("arm,l220-cache", of_l2x0_data), 990 L2C_ID("arm,pl310-cache", of_pl310_data), 991 L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data), 992 L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data), 993 L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data), 994 L2C_ID("marvell,tauros3-cache", of_tauros3_data), 995 /* Deprecated IDs */ 996 L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data), 997 {} 998 }; 999 1000 int __init l2x0_of_init(u32 aux_val, u32 aux_mask) 1001 { 1002 const struct l2c_init_data *data; 1003 struct device_node *np; 1004 struct resource res; 1005 u32 cache_id; 1006 1007 np = of_find_matching_node(NULL, l2x0_ids); 1008 if (!np) 1009 return -ENODEV; 1010 1011 if (of_address_to_resource(np, 0, &res)) 1012 return -ENODEV; 1013 1014 l2x0_base = ioremap(res.start, resource_size(&res)); 1015 if (!l2x0_base) 1016 return -ENOMEM; 1017 1018 l2x0_saved_regs.phy_base = res.start; 1019 1020 data = of_match_node(l2x0_ids, np)->data; 1021 1022 /* L2 configuration can only be changed if the cache is disabled */ 1023 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { 1024 if (data->of_parse) 1025 data->of_parse(np, &aux_val, &aux_mask); 1026 1027 /* For aurora cache in no outer mode select the 1028 * correct mode using the coprocessor*/ 1029 if (data == &of_aurora_no_outer_data) 1030 aurora_broadcast_l2_commands(); 1031 } 1032 1033 if (cache_id_part_number_from_dt) 1034 cache_id = cache_id_part_number_from_dt; 1035 else 1036 cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); 1037 1038 __l2c_init(data, aux_val, aux_mask, cache_id); 1039 1040 return 0; 1041 } 1042 #endif 1043