1 /* 2 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support 3 * 4 * Copyright (C) 2007 ARM Limited 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program; if not, write to the Free Software 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 */ 19 #include <linux/err.h> 20 #include <linux/init.h> 21 #include <linux/spinlock.h> 22 #include <linux/io.h> 23 #include <linux/of.h> 24 #include <linux/of_address.h> 25 26 #include <asm/cacheflush.h> 27 #include <asm/hardware/cache-l2x0.h> 28 29 #define CACHE_LINE_SIZE 32 30 31 static void __iomem *l2x0_base; 32 static DEFINE_SPINLOCK(l2x0_lock); 33 static uint32_t l2x0_way_mask; /* Bitmask of active ways */ 34 static uint32_t l2x0_size; 35 36 static inline void cache_wait_way(void __iomem *reg, unsigned long mask) 37 { 38 /* wait for cache operation by line or way to complete */ 39 while (readl_relaxed(reg) & mask) 40 cpu_relax(); 41 } 42 43 #ifdef CONFIG_CACHE_PL310 44 static inline void cache_wait(void __iomem *reg, unsigned long mask) 45 { 46 /* cache operations by line are atomic on PL310 */ 47 } 48 #else 49 #define cache_wait cache_wait_way 50 #endif 51 52 static inline void cache_sync(void) 53 { 54 void __iomem *base = l2x0_base; 55 56 #ifdef CONFIG_ARM_ERRATA_753970 57 /* write to an unmmapped register */ 58 writel_relaxed(0, base + L2X0_DUMMY_REG); 59 #else 60 writel_relaxed(0, base + L2X0_CACHE_SYNC); 61 #endif 62 cache_wait(base + L2X0_CACHE_SYNC, 1); 63 } 64 65 static inline void l2x0_clean_line(unsigned long addr) 66 { 67 void __iomem *base = l2x0_base; 68 cache_wait(base + L2X0_CLEAN_LINE_PA, 1); 69 writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA); 70 } 71 72 static inline void l2x0_inv_line(unsigned long addr) 73 { 74 void __iomem *base = l2x0_base; 75 cache_wait(base + L2X0_INV_LINE_PA, 1); 76 writel_relaxed(addr, base + L2X0_INV_LINE_PA); 77 } 78 79 #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915) 80 81 #define debug_writel(val) outer_cache.set_debug(val) 82 83 static void l2x0_set_debug(unsigned long val) 84 { 85 writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL); 86 } 87 #else 88 /* Optimised out for non-errata case */ 89 static inline void debug_writel(unsigned long val) 90 { 91 } 92 93 #define l2x0_set_debug NULL 94 #endif 95 96 #ifdef CONFIG_PL310_ERRATA_588369 97 static inline void l2x0_flush_line(unsigned long addr) 98 { 99 void __iomem *base = l2x0_base; 100 101 /* Clean by PA followed by Invalidate by PA */ 102 cache_wait(base + L2X0_CLEAN_LINE_PA, 1); 103 writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA); 104 cache_wait(base + L2X0_INV_LINE_PA, 1); 105 writel_relaxed(addr, base + L2X0_INV_LINE_PA); 106 } 107 #else 108 109 static inline void l2x0_flush_line(unsigned long addr) 110 { 111 void __iomem *base = l2x0_base; 112 cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); 113 writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA); 114 } 115 #endif 116 117 static void l2x0_cache_sync(void) 118 { 119 unsigned long flags; 120 121 spin_lock_irqsave(&l2x0_lock, flags); 122 cache_sync(); 123 spin_unlock_irqrestore(&l2x0_lock, flags); 124 } 125 126 static void __l2x0_flush_all(void) 127 { 128 debug_writel(0x03); 129 writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY); 130 cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask); 131 cache_sync(); 132 debug_writel(0x00); 133 } 134 135 static void l2x0_flush_all(void) 136 { 137 unsigned long flags; 138 139 /* clean all ways */ 140 spin_lock_irqsave(&l2x0_lock, flags); 141 __l2x0_flush_all(); 142 spin_unlock_irqrestore(&l2x0_lock, flags); 143 } 144 145 static void l2x0_clean_all(void) 146 { 147 unsigned long flags; 148 149 /* clean all ways */ 150 spin_lock_irqsave(&l2x0_lock, flags); 151 writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY); 152 cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask); 153 cache_sync(); 154 spin_unlock_irqrestore(&l2x0_lock, flags); 155 } 156 157 static void l2x0_inv_all(void) 158 { 159 unsigned long flags; 160 161 /* invalidate all ways */ 162 spin_lock_irqsave(&l2x0_lock, flags); 163 /* Invalidating when L2 is enabled is a nono */ 164 BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1); 165 writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY); 166 cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask); 167 cache_sync(); 168 spin_unlock_irqrestore(&l2x0_lock, flags); 169 } 170 171 static void l2x0_inv_range(unsigned long start, unsigned long end) 172 { 173 void __iomem *base = l2x0_base; 174 unsigned long flags; 175 176 spin_lock_irqsave(&l2x0_lock, flags); 177 if (start & (CACHE_LINE_SIZE - 1)) { 178 start &= ~(CACHE_LINE_SIZE - 1); 179 debug_writel(0x03); 180 l2x0_flush_line(start); 181 debug_writel(0x00); 182 start += CACHE_LINE_SIZE; 183 } 184 185 if (end & (CACHE_LINE_SIZE - 1)) { 186 end &= ~(CACHE_LINE_SIZE - 1); 187 debug_writel(0x03); 188 l2x0_flush_line(end); 189 debug_writel(0x00); 190 } 191 192 while (start < end) { 193 unsigned long blk_end = start + min(end - start, 4096UL); 194 195 while (start < blk_end) { 196 l2x0_inv_line(start); 197 start += CACHE_LINE_SIZE; 198 } 199 200 if (blk_end < end) { 201 spin_unlock_irqrestore(&l2x0_lock, flags); 202 spin_lock_irqsave(&l2x0_lock, flags); 203 } 204 } 205 cache_wait(base + L2X0_INV_LINE_PA, 1); 206 cache_sync(); 207 spin_unlock_irqrestore(&l2x0_lock, flags); 208 } 209 210 static void l2x0_clean_range(unsigned long start, unsigned long end) 211 { 212 void __iomem *base = l2x0_base; 213 unsigned long flags; 214 215 if ((end - start) >= l2x0_size) { 216 l2x0_clean_all(); 217 return; 218 } 219 220 spin_lock_irqsave(&l2x0_lock, flags); 221 start &= ~(CACHE_LINE_SIZE - 1); 222 while (start < end) { 223 unsigned long blk_end = start + min(end - start, 4096UL); 224 225 while (start < blk_end) { 226 l2x0_clean_line(start); 227 start += CACHE_LINE_SIZE; 228 } 229 230 if (blk_end < end) { 231 spin_unlock_irqrestore(&l2x0_lock, flags); 232 spin_lock_irqsave(&l2x0_lock, flags); 233 } 234 } 235 cache_wait(base + L2X0_CLEAN_LINE_PA, 1); 236 cache_sync(); 237 spin_unlock_irqrestore(&l2x0_lock, flags); 238 } 239 240 static void l2x0_flush_range(unsigned long start, unsigned long end) 241 { 242 void __iomem *base = l2x0_base; 243 unsigned long flags; 244 245 if ((end - start) >= l2x0_size) { 246 l2x0_flush_all(); 247 return; 248 } 249 250 spin_lock_irqsave(&l2x0_lock, flags); 251 start &= ~(CACHE_LINE_SIZE - 1); 252 while (start < end) { 253 unsigned long blk_end = start + min(end - start, 4096UL); 254 255 debug_writel(0x03); 256 while (start < blk_end) { 257 l2x0_flush_line(start); 258 start += CACHE_LINE_SIZE; 259 } 260 debug_writel(0x00); 261 262 if (blk_end < end) { 263 spin_unlock_irqrestore(&l2x0_lock, flags); 264 spin_lock_irqsave(&l2x0_lock, flags); 265 } 266 } 267 cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); 268 cache_sync(); 269 spin_unlock_irqrestore(&l2x0_lock, flags); 270 } 271 272 static void l2x0_disable(void) 273 { 274 unsigned long flags; 275 276 spin_lock_irqsave(&l2x0_lock, flags); 277 __l2x0_flush_all(); 278 writel_relaxed(0, l2x0_base + L2X0_CTRL); 279 dsb(); 280 spin_unlock_irqrestore(&l2x0_lock, flags); 281 } 282 283 static void __init l2x0_unlock(__u32 cache_id) 284 { 285 int lockregs; 286 int i; 287 288 if (cache_id == L2X0_CACHE_ID_PART_L310) 289 lockregs = 8; 290 else 291 /* L210 and unknown types */ 292 lockregs = 1; 293 294 for (i = 0; i < lockregs; i++) { 295 writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE + 296 i * L2X0_LOCKDOWN_STRIDE); 297 writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE + 298 i * L2X0_LOCKDOWN_STRIDE); 299 } 300 } 301 302 void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) 303 { 304 __u32 aux; 305 __u32 cache_id; 306 __u32 way_size = 0; 307 int ways; 308 const char *type; 309 310 l2x0_base = base; 311 312 cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); 313 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 314 315 aux &= aux_mask; 316 aux |= aux_val; 317 318 /* Determine the number of ways */ 319 switch (cache_id & L2X0_CACHE_ID_PART_MASK) { 320 case L2X0_CACHE_ID_PART_L310: 321 if (aux & (1 << 16)) 322 ways = 16; 323 else 324 ways = 8; 325 type = "L310"; 326 break; 327 case L2X0_CACHE_ID_PART_L210: 328 ways = (aux >> 13) & 0xf; 329 type = "L210"; 330 break; 331 default: 332 /* Assume unknown chips have 8 ways */ 333 ways = 8; 334 type = "L2x0 series"; 335 break; 336 } 337 338 l2x0_way_mask = (1 << ways) - 1; 339 340 /* 341 * L2 cache Size = Way size * Number of ways 342 */ 343 way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17; 344 way_size = 1 << (way_size + 3); 345 l2x0_size = ways * way_size * SZ_1K; 346 347 /* 348 * Check if l2x0 controller is already enabled. 349 * If you are booting from non-secure mode 350 * accessing the below registers will fault. 351 */ 352 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) { 353 /* Make sure that I&D is not locked down when starting */ 354 l2x0_unlock(cache_id); 355 356 /* l2x0 controller is disabled */ 357 writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL); 358 359 l2x0_inv_all(); 360 361 /* enable L2X0 */ 362 writel_relaxed(1, l2x0_base + L2X0_CTRL); 363 } 364 365 outer_cache.inv_range = l2x0_inv_range; 366 outer_cache.clean_range = l2x0_clean_range; 367 outer_cache.flush_range = l2x0_flush_range; 368 outer_cache.sync = l2x0_cache_sync; 369 outer_cache.flush_all = l2x0_flush_all; 370 outer_cache.inv_all = l2x0_inv_all; 371 outer_cache.disable = l2x0_disable; 372 outer_cache.set_debug = l2x0_set_debug; 373 374 printk(KERN_INFO "%s cache controller enabled\n", type); 375 printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n", 376 ways, cache_id, aux, l2x0_size); 377 } 378 379 #ifdef CONFIG_OF 380 static void __init l2x0_of_setup(const struct device_node *np, 381 __u32 *aux_val, __u32 *aux_mask) 382 { 383 u32 data[2] = { 0, 0 }; 384 u32 tag = 0; 385 u32 dirty = 0; 386 u32 val = 0, mask = 0; 387 388 of_property_read_u32(np, "arm,tag-latency", &tag); 389 if (tag) { 390 mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK; 391 val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT; 392 } 393 394 of_property_read_u32_array(np, "arm,data-latency", 395 data, ARRAY_SIZE(data)); 396 if (data[0] && data[1]) { 397 mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK | 398 L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK; 399 val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) | 400 ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT); 401 } 402 403 of_property_read_u32(np, "arm,dirty-latency", &dirty); 404 if (dirty) { 405 mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK; 406 val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT; 407 } 408 409 *aux_val &= ~mask; 410 *aux_val |= val; 411 *aux_mask &= ~mask; 412 } 413 414 static void __init pl310_of_setup(const struct device_node *np, 415 __u32 *aux_val, __u32 *aux_mask) 416 { 417 u32 data[3] = { 0, 0, 0 }; 418 u32 tag[3] = { 0, 0, 0 }; 419 u32 filter[2] = { 0, 0 }; 420 421 of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag)); 422 if (tag[0] && tag[1] && tag[2]) 423 writel_relaxed( 424 ((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) | 425 ((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) | 426 ((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT), 427 l2x0_base + L2X0_TAG_LATENCY_CTRL); 428 429 of_property_read_u32_array(np, "arm,data-latency", 430 data, ARRAY_SIZE(data)); 431 if (data[0] && data[1] && data[2]) 432 writel_relaxed( 433 ((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) | 434 ((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) | 435 ((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT), 436 l2x0_base + L2X0_DATA_LATENCY_CTRL); 437 438 of_property_read_u32_array(np, "arm,filter-ranges", 439 filter, ARRAY_SIZE(filter)); 440 if (filter[0] && filter[1]) { 441 writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M), 442 l2x0_base + L2X0_ADDR_FILTER_END); 443 writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN, 444 l2x0_base + L2X0_ADDR_FILTER_START); 445 } 446 } 447 448 static const struct of_device_id l2x0_ids[] __initconst = { 449 { .compatible = "arm,pl310-cache", .data = pl310_of_setup }, 450 { .compatible = "arm,l220-cache", .data = l2x0_of_setup }, 451 { .compatible = "arm,l210-cache", .data = l2x0_of_setup }, 452 {} 453 }; 454 455 int __init l2x0_of_init(__u32 aux_val, __u32 aux_mask) 456 { 457 struct device_node *np; 458 void (*l2_setup)(const struct device_node *np, 459 __u32 *aux_val, __u32 *aux_mask); 460 461 np = of_find_matching_node(NULL, l2x0_ids); 462 if (!np) 463 return -ENODEV; 464 l2x0_base = of_iomap(np, 0); 465 if (!l2x0_base) 466 return -ENOMEM; 467 468 /* L2 configuration can only be changed if the cache is disabled */ 469 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) { 470 l2_setup = of_match_node(l2x0_ids, np)->data; 471 if (l2_setup) 472 l2_setup(np, &aux_val, &aux_mask); 473 } 474 l2x0_init(l2x0_base, aux_val, aux_mask); 475 return 0; 476 } 477 #endif 478