/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

struct l2x0_of_data {
	void (*setup)(const struct device_node *, u32 *, u32 *);
	void (*save)(void);
	void (*resume)(void);
};

static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	cache_wait_way
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)

#define debug_writel(val)	outer_cache.set_debug(val)

static void l2x0_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define l2x0_set_debug	NULL
#endif

#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

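/*
 * Note on the helpers above: every background operation is started by a
 * register write and completed by polling the same register, so each
 * line helper waits for any previous operation on that register before
 * issuing its own.  On the PL310 line operations complete atomically,
 * hence the empty cache_wait() stub.  A minimal sketch of the pattern,
 * using a hypothetical line-op register REG (illustrative only, not
 * part of this driver):
 *
 *	while (readl_relaxed(base + REG) & 1)	(bit 0: op still pending)
 *		cpu_relax();
 *	writel_relaxed(pa, base + REG);		(kick off the next op)
 *
 * The debug_writel(0x03)/debug_writel(0x00) pairs used on the errata
 * paths set and then clear the PL310 debug control DCL and DWB bits
 * (disable cache linefill, disable write-back) around the workaround;
 * see the ARM errata notices for 588369 and 727915 for the rationale.
 */
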
static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean and invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating while the L2 is enabled must never be done */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

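/*
 * The range operations above and below deliberately process at most
 * 4096 bytes (one page) per lock hold: the spinlock is dropped and
 * immediately re-acquired between blocks so that interrupts and other
 * CPUs are not locked out for the duration of a large operation.
 * Roughly, for a 1 MiB range with the 32-byte line size used here:
 *
 *	1 MiB / 4 KiB = 256 locked sections, each issuing
 *	4 KiB / 32 B  = 128 line operations.
 */
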
static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_unlock(u32 cache_id)
{
	int lockregs;
	int i;

	/*
	 * Mask off the implementer/RTL revision bits: the raw CACHE_ID
	 * value would otherwise never compare equal to the part number.
	 */
	if ((cache_id & L2X0_CACHE_ID_PART_MASK) == L2X0_CACHE_ID_PART_L310)
		lockregs = 8;
	else
		/* L210 and unknown types */
		lockregs = 1;

	for (i = 0; i < lockregs; i++) {
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

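/*
 * Why eight lockdown register pairs on the L310: the PL310 provides one
 * D/I lockdown pair per potential master, eight pairs in total, while
 * the L210 has a single pair.  Clearing them all before enabling the
 * cache ensures no ways are locked out of allocation by whatever ran
 * before the kernel (boot ROM, secure firmware).
 */
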
void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	u32 aux;
	u32 cache_id;
	u32 way_size = 0;
	int ways;
	const char *type;

	l2x0_base = base;

	cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
#ifdef CONFIG_PL310_ERRATA_753970
		/* Unmapped register. */
		sync_reg_offset = L2X0_DUMMY_REG;
#endif
		break;
	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache size = way size * number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + 3);
	l2x0_size = ways * way_size * SZ_1K;

	/*
	 * Check whether the l2x0 controller is already enabled.  When
	 * booting in non-secure mode with the controller enabled by
	 * secure firmware, accessing the registers below will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* Make sure that I&D is not locked down when starting */
		l2x0_unlock(cache_id);

		/* l2x0 controller is disabled */
		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

		l2x0_saved_regs.aux_ctrl = aux;

		l2x0_inv_all();

		/* enable L2X0 */
		writel_relaxed(1, l2x0_base + L2X0_CTRL);
	}

	outer_cache.inv_range = l2x0_inv_range;
	outer_cache.clean_range = l2x0_clean_range;
	outer_cache.flush_range = l2x0_flush_range;
	outer_cache.sync = l2x0_cache_sync;
	outer_cache.flush_all = l2x0_flush_all;
	outer_cache.inv_all = l2x0_inv_all;
	outer_cache.disable = l2x0_disable;
	outer_cache.set_debug = l2x0_set_debug;

	printk(KERN_INFO "%s cache controller enabled\n", type);
	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
	       ways, cache_id, aux, l2x0_size);
}

#ifdef CONFIG_OF
static void __init l2x0_of_setup(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static void __init pl310_of_setup(const struct device_node *np,
				  u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}

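/*
 * An illustrative device-tree fragment consumed by the setup hooks
 * above (the node name, address and latency values are hypothetical,
 * board-specific examples, not taken from this file):
 *
 *	L2: cache-controller@fff12000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xfff12000 0x1000>;
 *		arm,tag-latency = <2 2 1>;
 *		arm,data-latency = <3 3 1>;
 *		arm,filter-ranges = <0x80000000 0x8000000>;
 *	};
 *
 * Note that latencies are programmed as (cycles - 1), and the filter
 * range is given as <start size> with 1 MiB granularity.
 */
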
static void __init pl310_save(void)
{
	u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
		L2X0_CACHE_ID_RTL_MASK;

	l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_START);

	if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
		/*
		 * From r2p0, there is a Prefetch offset/control register
		 */
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
			L2X0_PREFETCH_CTRL);
		/*
		 * From r3p0, there is a Power control register
		 */
		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
			l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
				L2X0_POWER_CTRL);
	}
}

static void l2x0_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* restore aux ctrl and enable l2 */
		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));

		writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
			L2X0_AUX_CTRL);

		l2x0_inv_all();

		writel_relaxed(1, l2x0_base + L2X0_CTRL);
	}
}

static void pl310_resume(void)
{
	u32 l2x0_revision;

	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			l2x0_base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			l2x0_base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			l2x0_base + L2X0_ADDR_FILTER_START);

		l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				l2x0_base + L2X0_PREFETCH_CTRL);
			if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
				writel_relaxed(l2x0_saved_regs.pwr_ctrl,
					l2x0_base + L2X0_POWER_CTRL);
		}
	}

	l2x0_resume();
}

static const struct l2x0_of_data pl310_data = {
	.setup	= pl310_of_setup,
	.save	= pl310_save,
	.resume	= pl310_resume,
};

static const struct l2x0_of_data l2x0_data = {
	.setup	= l2x0_of_setup,
	.save	= NULL,
	.resume	= l2x0_resume,
};

static const struct of_device_id l2x0_ids[] __initconst = {
	{ .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
	{ .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
	{ .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
	{}
};

int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	struct device_node *np;
	const struct l2x0_of_data *data;
	struct resource res;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		if (data->setup)
			data->setup(np, &aux_val, &aux_mask);
	}

	if (data->save)
		data->save();

	l2x0_init(l2x0_base, aux_val, aux_mask);

	outer_cache.resume = data->resume;
	return 0;
}
#endif
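
/*
 * Typical platform usage, sketched for reference.  The physical address
 * and the AUX_CTRL value/mask below are hypothetical, board-specific
 * values, not taken from this file:
 *
 *	DT-based boards, from the machine init code:
 *		l2x0_of_init(0, ~0UL);
 *
 *	Non-DT boards, with an explicit mapping and AUX_CTRL setting:
 *		l2x0_init(ioremap(0xfff12000, SZ_4K),
 *			  0x00400000, 0xc20f0fff);
 */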