/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_SPINLOCK(l2x0_lock);
static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
static uint32_t l2x0_size;

static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		;
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	cache_wait_way
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

#ifdef CONFIG_ARM_ERRATA_753970
	/* write to an unmapped register */
	writel_relaxed(0, base + L2X0_DUMMY_REG);
#else
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
#endif
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
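
/*
 * Editor's summary of the erratum handling below: when
 * CONFIG_PL310_ERRATA_588369 is enabled, the combined Clean & Invalidate
 * by PA register is avoided and l2x0_flush_line() is implemented as an
 * explicit clean followed by an invalidate, with the PL310 Debug Control
 * Register written around the sequence through the platform's secure
 * monitor call (the debug register is presumed to be writable only from
 * the secure side, hence omap_smc1()).  Without the erratum workaround,
 * debug_writel() is a no-op and the combined Clean & Invalidate by PA
 * register is used directly.
 */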

#ifdef CONFIG_PL310_ERRATA_588369
static void debug_writel(unsigned long val)
{
	extern void omap_smc1(u32 fn, u32 arg);

	/*
	 * Texas Instruments secure monitor API to modify the
	 * PL310 Debug Control Register.
	 */
	omap_smc1(0x100, val);
}

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean and invalidate all ways */
	spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a no-no */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
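
/*
 * Editor's note on the range operations below: start/end are physical
 * addresses.  Each operation processes at most 4096 bytes per pass and
 * releases/re-acquires the spinlock between passes, so interrupts are not
 * held off for the length of an arbitrarily large range.  l2x0_inv_range()
 * flushes (cleans and invalidates) any partially covered line at either
 * boundary instead of simply invalidating it, so data outside the requested
 * range is not thrown away.  Clean and flush ranges at least as large as
 * the cache fall back to the corresponding whole-cache operation.
 */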

static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	writel(0, l2x0_base + L2X0_CTRL);
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
	__u32 aux;
	__u32 cache_id;
	__u32 way_size = 0;
	int ways;
	const char *type;

	l2x0_base = base;

	cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
		break;
	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache size = way size * number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + 3);
	l2x0_size = ways * way_size * SZ_1K;

	/*
	 * Check if the l2x0 controller is already enabled.
	 * If you are booting from non-secure mode,
	 * accessing the registers below will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {

		/* l2x0 controller is disabled */
		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

		l2x0_inv_all();

		/* enable L2X0 */
		writel_relaxed(1, l2x0_base + L2X0_CTRL);
	}

	outer_cache.inv_range = l2x0_inv_range;
	outer_cache.clean_range = l2x0_clean_range;
	outer_cache.flush_range = l2x0_flush_range;
	outer_cache.sync = l2x0_cache_sync;
	outer_cache.flush_all = l2x0_flush_all;
	outer_cache.inv_all = l2x0_inv_all;
	outer_cache.disable = l2x0_disable;

	printk(KERN_INFO "%s cache controller enabled\n", type);
	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
			ways, cache_id, aux, l2x0_size);
}
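
/*
 * Editor's usage sketch (illustrative only; l2cache_base and the AUX_CTRL
 * values below are hypothetical and platform specific).  A platform maps
 * the controller and enables it from its early init code with something
 * like:
 *
 *	l2x0_init(l2cache_base, 0x00000000, 0xffffffff);
 *
 * The programmed AUX_CTRL value is (hw_value & aux_mask) | aux_val, so an
 * aux_mask of 0xffffffff with an aux_val of 0 keeps the hardware defaults,
 * while narrower masks let the platform override individual fields such as
 * the way size or associativity bits decoded above.
 */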