/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
static uint32_t l2x0_size;

struct l2x0_regs l2x0_saved_regs;

struct l2x0_of_data {
	void (*setup)(const struct device_node *, __u32 *, __u32 *);
	void (*save)(void);
	void (*resume)(void);
};

static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	cache_wait_way
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

#ifdef CONFIG_PL310_ERRATA_753970
	/* write to an unmapped register */
	writel_relaxed(0, base + L2X0_DUMMY_REG);
#else
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
#endif
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)

#define debug_writel(val)	outer_cache.set_debug(val)

static void l2x0_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define l2x0_set_debug	NULL
#endif

#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif
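
/*
 * The functions below are wired up as the outer_cache callbacks in
 * l2x0_init().  Way-based and multi-line maintenance is serialised by
 * l2x0_lock with interrupts disabled; the range operations drop and
 * re-take the lock between 4K blocks to bound interrupt latency.
 */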
static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a no-no */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
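
/*
 * Clean and invalidate all ways, then turn the controller off.  The
 * flush and the disable are performed as one sequence under l2x0_lock
 * with interrupts disabled.
 */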
static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_unlock(__u32 cache_id)
{
	int lockregs;
	int i;

	if (cache_id == L2X0_CACHE_ID_PART_L310)
		lockregs = 8;
	else
		/* L210 and unknown types */
		lockregs = 1;

	for (i = 0; i < lockregs; i++) {
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}
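
/*
 * l2x0_init - probe and enable an L2x0/PL310 cache controller
 * @base:	virtual address of the controller's register block
 * @aux_val:	bits to set in the auxiliary control register
 * @aux_mask:	mask of bits to preserve from the current auxiliary value
 *
 * If the controller is not already enabled, the auxiliary control
 * register is programmed, all ways are invalidated and the controller
 * is switched on.  The outer_cache callbacks are installed either way.
 */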
void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
	__u32 aux;
	__u32 cache_id;
	__u32 way_size = 0;
	int ways;
	const char *type;

	l2x0_base = base;

	cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
		break;
	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache Size = Way size * Number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + 3);
	l2x0_size = ways * way_size * SZ_1K;

	/*
	 * Check if l2x0 controller is already enabled.
	 * If you are booting from non-secure mode
	 * accessing the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* Make sure that I&D is not locked down when starting */
		l2x0_unlock(cache_id);

		/* l2x0 controller is disabled */
		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

		l2x0_saved_regs.aux_ctrl = aux;

		l2x0_inv_all();

		/* enable L2X0 */
		writel_relaxed(1, l2x0_base + L2X0_CTRL);
	}

	outer_cache.inv_range = l2x0_inv_range;
	outer_cache.clean_range = l2x0_clean_range;
	outer_cache.flush_range = l2x0_flush_range;
	outer_cache.sync = l2x0_cache_sync;
	outer_cache.flush_all = l2x0_flush_all;
	outer_cache.inv_all = l2x0_inv_all;
	outer_cache.disable = l2x0_disable;
	outer_cache.set_debug = l2x0_set_debug;

	printk(KERN_INFO "%s cache controller enabled\n", type);
	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
			ways, cache_id, aux, l2x0_size);
}

#ifdef CONFIG_OF
static void __init l2x0_of_setup(const struct device_node *np,
				 __u32 *aux_val, __u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static void __init pl310_of_setup(const struct device_node *np,
				  __u32 *aux_val, __u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}
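
/*
 * Save the PL310 latency, address filter, prefetch and power control
 * settings so that pl310_resume() can restore them after the controller
 * has been powered down.  The prefetch and power control registers only
 * exist from r2p0 and r3p0 onwards, respectively.
 */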
static void __init pl310_save(void)
{
	u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
		L2X0_CACHE_ID_RTL_MASK;

	l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_START);

	if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
		/*
		 * From r2p0, there is Prefetch offset/control register
		 */
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
			L2X0_PREFETCH_CTRL);
		/*
		 * From r3p0, there is Power control register
		 */
		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
			l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
				L2X0_POWER_CTRL);
	}
}

static void l2x0_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* restore aux ctrl and enable l2 */
		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));

		writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
			L2X0_AUX_CTRL);

		l2x0_inv_all();

		writel_relaxed(1, l2x0_base + L2X0_CTRL);
	}
}

static void pl310_resume(void)
{
	u32 l2x0_revision;

	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			l2x0_base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			l2x0_base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			l2x0_base + L2X0_ADDR_FILTER_START);

		l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				l2x0_base + L2X0_PREFETCH_CTRL);
			if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
				writel_relaxed(l2x0_saved_regs.pwr_ctrl,
					l2x0_base + L2X0_POWER_CTRL);
		}
	}

	l2x0_resume();
}

static const struct l2x0_of_data pl310_data = {
	pl310_of_setup,
	pl310_save,
	pl310_resume,
};

static const struct l2x0_of_data l2x0_data = {
	l2x0_of_setup,
	NULL,
	l2x0_resume,
};

static const struct of_device_id l2x0_ids[] __initconst = {
	{ .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
	{ .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
	{ .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
	{}
};

int __init l2x0_of_init(__u32 aux_val, __u32 aux_mask)
{
	struct device_node *np;
	struct l2x0_of_data *data;
	struct resource res;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		if (data->setup)
			data->setup(np, &aux_val, &aux_mask);
	}

	if (data->save)
		data->save();

	l2x0_init(l2x0_base, aux_val, aux_mask);

	outer_cache.resume = data->resume;
	return 0;
}
#endif