/*
 * (C) Copyright 2010
 * Texas Instruments, <www.ti.com>
 * Aneesh V <aneesh@ti.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
#include <linux/types.h>
#include <common.h>
#include <asm/armv7.h>
#include <asm/utils.h>

#define ARMV7_DCACHE_INVAL_ALL		1
#define ARMV7_DCACHE_CLEAN_INVAL_ALL	2
#define ARMV7_DCACHE_INVAL_RANGE	3
#define ARMV7_DCACHE_CLEAN_INVAL_RANGE	4

#ifndef CONFIG_SYS_DCACHE_OFF
static int check_cache_range(unsigned long start, unsigned long stop)
{
	int ok = 1;

	if (start & (CONFIG_SYS_CACHELINE_SIZE - 1))
		ok = 0;

	if (stop & (CONFIG_SYS_CACHELINE_SIZE - 1))
		ok = 0;

	if (!ok)
		debug("CACHE: Misaligned operation at range [%08lx, %08lx]\n",
		      start, stop);

	return ok;
}

/*
 * Write the level and type you want to Cache Size Selection Register(CSSELR)
 * to get size details from Current Cache Size ID Register(CCSIDR)
 */
static void set_csselr(u32 level, u32 type)
{
	u32 csselr = level << 1 | type;

	/* Write to Cache Size Selection Register(CSSELR) */
	asm volatile ("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr));
}

static u32 get_ccsidr(void)
{
	u32 ccsidr;

	/* Read current CP15 Cache Size ID Register */
	asm volatile ("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr));
	return ccsidr;
}

static u32 get_clidr(void)
{
	u32 clidr;

	/* Read current CP15 Cache Level ID Register */
	asm volatile ("mrc p15, 1, %0, c0, c0, 1" : "=r" (clidr));
	return clidr;
}
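/*
 * Set/way operand format (per the ARMv7 ARM): bits [3:1] hold the 0-based
 * cache level, the set index starts at bit log2(line length in bytes), and
 * the way index occupies the top bits from (32 - log2(number of ways,
 * rounded up)) upwards.  The helpers below receive log2_line_len and
 * way_shift already computed in that form.
 */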
static void v7_inval_dcache_level_setway(u32 level, u32 num_sets,
					 u32 num_ways, u32 way_shift,
					 u32 log2_line_len)
{
	int way, set;
	u32 setway;

	/*
	 * For optimal assembly code:
	 *	a. count down
	 *	b. have bigger loop inside
	 */
	for (way = num_ways - 1; way >= 0; way--) {
		for (set = num_sets - 1; set >= 0; set--) {
			setway = (level << 1) | (set << log2_line_len) |
				 (way << way_shift);
			/* Invalidate data/unified cache line by set/way */
			asm volatile ("mcr p15, 0, %0, c7, c6, 2"
					: : "r" (setway));
		}
	}
	/* DSB to make sure the operation is complete */
	DSB;
}

static void v7_clean_inval_dcache_level_setway(u32 level, u32 num_sets,
					       u32 num_ways, u32 way_shift,
					       u32 log2_line_len)
{
	int way, set;
	u32 setway;

	/*
	 * For optimal assembly code:
	 *	a. count down
	 *	b. have bigger loop inside
	 */
	for (way = num_ways - 1; way >= 0; way--) {
		for (set = num_sets - 1; set >= 0; set--) {
			setway = (level << 1) | (set << log2_line_len) |
				 (way << way_shift);
			/*
			 * Clean & Invalidate data/unified
			 * cache line by set/way
			 */
			asm volatile ("mcr p15, 0, %0, c7, c14, 2"
					: : "r" (setway));
		}
	}
	/* DSB to make sure the operation is complete */
	DSB;
}

static void v7_maint_dcache_level_setway(u32 level, u32 operation)
{
	u32 ccsidr;
	u32 num_sets, num_ways, log2_line_len, log2_num_ways;
	u32 way_shift;

	set_csselr(level, ARMV7_CSSELR_IND_DATA_UNIFIED);

	ccsidr = get_ccsidr();

	/* CCSIDR LineSize field is log2(words per line) - 2 */
	log2_line_len = ((ccsidr & CCSIDR_LINE_SIZE_MASK) >>
				CCSIDR_LINE_SIZE_OFFSET) + 2;
	/* Converting from words to bytes */
	log2_line_len += 2;

	num_ways = ((ccsidr & CCSIDR_ASSOCIATIVITY_MASK) >>
			CCSIDR_ASSOCIATIVITY_OFFSET) + 1;
	num_sets = ((ccsidr & CCSIDR_NUM_SETS_MASK) >>
			CCSIDR_NUM_SETS_OFFSET) + 1;
	/*
	 * According to ARMv7 ARM number of sets and number of ways need
	 * not be a power of 2
	 */
	log2_num_ways = log_2_n_round_up(num_ways);

	way_shift = (32 - log2_num_ways);
	if (operation == ARMV7_DCACHE_INVAL_ALL) {
		v7_inval_dcache_level_setway(level, num_sets, num_ways,
					     way_shift, log2_line_len);
	} else if (operation == ARMV7_DCACHE_CLEAN_INVAL_ALL) {
		v7_clean_inval_dcache_level_setway(level, num_sets, num_ways,
						   way_shift, log2_line_len);
	}
}

static void v7_maint_dcache_all(u32 operation)
{
	u32 level, cache_type, level_start_bit = 0;
	u32 clidr = get_clidr();

	/* CLIDR holds a 3-bit cache type field per level, starting at bit 0 */
	for (level = 0; level < 7; level++) {
		cache_type = (clidr >> level_start_bit) & 0x7;
		if ((cache_type == ARMV7_CLIDR_CTYPE_DATA_ONLY) ||
		    (cache_type == ARMV7_CLIDR_CTYPE_INSTRUCTION_DATA) ||
		    (cache_type == ARMV7_CLIDR_CTYPE_UNIFIED))
			v7_maint_dcache_level_setway(level, operation);
		level_start_bit += 3;
	}
}
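/*
 * The range helpers below operate one cache line at a time using the
 * by-MVA maintenance operations to the Point of Coherency (DCCIMVAC and
 * DCIMVAC).  The line length is derived by v7_dcache_maint_range() from
 * the CCSIDR of the currently selected cache level.
 */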
static void v7_dcache_clean_inval_range(u32 start, u32 stop, u32 line_len)
{
	u32 mva;

	/* Align start to cache line boundary */
	start &= ~(line_len - 1);
	for (mva = start; mva < stop; mva = mva + line_len) {
		/* DCCIMVAC - Clean & Invalidate data cache by MVA to PoC */
		asm volatile ("mcr p15, 0, %0, c7, c14, 1" : : "r" (mva));
	}
}

static void v7_dcache_inval_range(u32 start, u32 stop, u32 line_len)
{
	u32 mva;

	/*
	 * If start address is not aligned to cache-line do not
	 * invalidate the first cache-line
	 */
	if (start & (line_len - 1)) {
		printf("ERROR: %s - start address is not aligned - 0x%08x\n",
		       __func__, start);
		/* move to next cache line */
		start = (start + line_len - 1) & ~(line_len - 1);
	}

	/*
	 * If stop address is not aligned to cache-line do not
	 * invalidate the last cache-line
	 */
	if (stop & (line_len - 1)) {
		printf("ERROR: %s - stop address is not aligned - 0x%08x\n",
		       __func__, stop);
		/* align to the beginning of this cache line */
		stop &= ~(line_len - 1);
	}

	for (mva = start; mva < stop; mva = mva + line_len) {
		/* DCIMVAC - Invalidate data cache by MVA to PoC */
		asm volatile ("mcr p15, 0, %0, c7, c6, 1" : : "r" (mva));
	}
}

static void v7_dcache_maint_range(u32 start, u32 stop, u32 range_op)
{
	u32 line_len, ccsidr;

	ccsidr = get_ccsidr();
	/* CCSIDR LineSize field is log2(words per line) - 2 */
	line_len = ((ccsidr & CCSIDR_LINE_SIZE_MASK) >>
			CCSIDR_LINE_SIZE_OFFSET) + 2;
	/* Converting from words to bytes */
	line_len += 2;
	/* converting from log2(linelen) to linelen */
	line_len = 1 << line_len;

	switch (range_op) {
	case ARMV7_DCACHE_CLEAN_INVAL_RANGE:
		v7_dcache_clean_inval_range(start, stop, line_len);
		break;
	case ARMV7_DCACHE_INVAL_RANGE:
		v7_dcache_inval_range(start, stop, line_len);
		break;
	}

	/* DSB to make sure the operation is complete */
	DSB;
}

/* Invalidate TLB */
static void v7_inval_tlb(void)
{
	/* Invalidate entire unified TLB */
	asm volatile ("mcr p15, 0, %0, c8, c7, 0" : : "r" (0));
	/* Invalidate entire data TLB */
	asm volatile ("mcr p15, 0, %0, c8, c6, 0" : : "r" (0));
	/* Invalidate entire instruction TLB */
	asm volatile ("mcr p15, 0, %0, c8, c5, 0" : : "r" (0));
	/* Full system DSB - make sure that the invalidation is complete */
	DSB;
	/* Full system ISB - make sure the instruction stream sees it */
	ISB;
}

void invalidate_dcache_all(void)
{
	v7_maint_dcache_all(ARMV7_DCACHE_INVAL_ALL);

	v7_outer_cache_inval_all();
}

/*
 * Performs a clean & invalidation of the entire data cache
 * at all levels
 */
void flush_dcache_all(void)
{
	v7_maint_dcache_all(ARMV7_DCACHE_CLEAN_INVAL_ALL);

	v7_outer_cache_flush_all();
}

/*
 * Invalidates the range in all levels of D-cache/unified cache used:
 * Affects the range [start, stop - 1]
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	check_cache_range(start, stop);

	v7_dcache_maint_range(start, stop, ARMV7_DCACHE_INVAL_RANGE);

	v7_outer_cache_inval_range(start, stop);
}

/*
 * Flush range (clean & invalidate) from all levels of D-cache/unified
 * cache used:
 * Affects the range [start, stop - 1]
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
	check_cache_range(start, stop);

	v7_dcache_maint_range(start, stop, ARMV7_DCACHE_CLEAN_INVAL_RANGE);

	v7_outer_cache_flush_range(start, stop);
}
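/*
 * Preparation step before the MMU is turned on: enable the outer cache,
 * invalidate the entire data cache hierarchy and drop all TLB entries so
 * that no stale translations or cache lines survive.
 */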
void arm_init_before_mmu(void)
{
	v7_outer_cache_enable();
	invalidate_dcache_all();
	v7_inval_tlb();
}

void mmu_page_table_flush(unsigned long start, unsigned long stop)
{
	flush_dcache_range(start, stop);
	v7_inval_tlb();
}
#else /* #ifndef CONFIG_SYS_DCACHE_OFF */
void invalidate_dcache_all(void)
{
}

void flush_dcache_all(void)
{
}

void arm_init_before_mmu(void)
{
}

void mmu_page_table_flush(unsigned long start, unsigned long stop)
{
}

void arm_init_domains(void)
{
}
#endif /* #ifndef CONFIG_SYS_DCACHE_OFF */

#ifndef CONFIG_SYS_ICACHE_OFF
/* Invalidate entire I-cache and branch predictor array */
void invalidate_icache_all(void)
{
	/*
	 * Invalidate all instruction caches to PoU.
	 * Also flushes branch target cache.
	 */
	asm volatile ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));

	/* Invalidate entire branch predictor array */
	asm volatile ("mcr p15, 0, %0, c7, c5, 6" : : "r" (0));

	/* Full system DSB - make sure that the invalidation is complete */
	DSB;

	/* ISB - make sure the instruction stream sees it */
	ISB;
}
#else
void invalidate_icache_all(void)
{
}
#endif

/* Stub implementations for outer cache operations */
__weak void v7_outer_cache_enable(void) {}
__weak void v7_outer_cache_disable(void) {}
__weak void v7_outer_cache_flush_all(void) {}
__weak void v7_outer_cache_inval_all(void) {}
__weak void v7_outer_cache_flush_range(u32 start, u32 end) {}
__weak void v7_outer_cache_inval_range(u32 start, u32 end) {}