/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue-cache.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#include <asm/outercache.h>

#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1

/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/core-api/cachetlb.rst for more information.
 *	Please note that the implementation of these, and the required
 *	effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_icache_all()
 *
 *		Unconditionally clean and invalidate the entire icache.
 *		Currently only needed for cache-v6.S and cache-v7.S, see
 *		__flush_icache_all for the generic implementation.
 *
 *	flush_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_kern_louis()
 *
 *		Flush data cache levels up to the level of unification
 *		inner shareable and invalidate the I-cache.
 *		Only needed from v7 onwards, falls back to flush_cache_all()
 *		for all other processor versions.
 *
 *	flush_user_all()
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	coherent_user_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	flush_kern_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in the page is written back.
 *		- kaddr  - page address
 *		- size   - region size
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 */
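/*
 * Illustrative sketch, not part of this header: per the convention above,
 * a caller handing an arbitrary byte range [addr, addr + len) to
 * flush_user_range() would first round it out to page boundaries, e.g.:
 *
 *	unsigned long start = addr & PAGE_MASK;        (round start down)
 *	unsigned long end   = PAGE_ALIGN(addr + len);  (round end up)
 *
 *	__cpuc_flush_user_range(start, end, vma->vm_flags);
 */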
struct cpu_cache_fns {
	void (*flush_icache_all)(void);
	void (*flush_kern_all)(void);
	void (*flush_kern_louis)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	int  (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_area)(void *, size_t);

	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);

	void (*dma_flush_range)(const void *, const void *);
} __no_randomize_layout;

/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_icache_all		cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_kern_louis		cpu_cache.flush_kern_louis
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_kern_louis(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern int  __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
extern void dmac_flush_range(const void *, const void *);

#endif
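/*
 * Illustrative sketch, not part of this header: with MULTI_CACHE enabled,
 * the __cpuc_* names above expand to indirect calls through the cpu_cache
 * function table, so a single kernel image can support several cache
 * implementations selected at boot.  For example:
 *
 *	__cpuc_flush_kern_all();
 *
 * becomes
 *
 *	cpu_cache.flush_kern_all();
 *
 * whereas a single-cache build resolves the same name directly to the
 * matching assembly routine in arch/arm/mm/cache-*.S.
 */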
/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)

/*
 * Convert calls to our calling convention.
 */

/* Invalidate I-cache */
#define __flush_icache_all_generic()				\
	asm("mcr	p15, 0, %0, c7, c5, 0"			\
	    : : "r" (0));

/* Invalidate I-cache inner shareable */
#define __flush_icache_all_v7_smp()				\
	asm("mcr	p15, 0, %0, c7, c1, 0"			\
	    : : "r" (0));

/*
 * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
 * will fall through to use __flush_icache_all_generic.
 */
#if (defined(CONFIG_CPU_V7) &&					\
     (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) ||	\
	defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define __flush_icache_preferred	__flush_icache_all_v7_smp
#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#else
#define __flush_icache_preferred	__flush_icache_all_generic
#endif

static inline void __flush_icache_all(void)
{
	__flush_icache_preferred();
	dsb(ishst);
}

/*
 * Flush caches up to Level of Unification Inner Shareable
 */
#define flush_cache_louis()		__cpuc_flush_kern_louis()

#define flush_cache_all()		__cpuc_flush_kern_all()

static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}

static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void vivt_flush_cache_pages(struct vm_area_struct *vma,
		unsigned long user_addr, unsigned long pfn, unsigned int nr)
{
	struct mm_struct *mm = vma->vm_mm;

	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + nr * PAGE_SIZE,
					vma->vm_flags);
	}
}

#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
		vivt_flush_cache_range(vma,start,end)
#define flush_cache_pages(vma, addr, pfn, nr) \
		vivt_flush_cache_pages(vma, addr, pfn, nr)
#else
void flush_cache_mm(struct mm_struct *mm);
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
void flush_cache_pages(struct vm_area_struct *vma, unsigned long user_addr,
		unsigned long pfn, unsigned int nr);
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
#define flush_cache_page(vma, addr, pfn) flush_cache_pages(vma, addr, pfn, 1)

/*
 * flush_icache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_icache_user_range(s,e)	__cpuc_coherent_user_range(s,e)

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)	__cpuc_coherent_kern_range(s,e)

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)
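/*
 * Illustrative sketch, not part of this header: the classic user of
 * flush_icache_range() is code that writes instructions through a kernel
 * mapping and then executes them.  example_install_insns() is a
 * hypothetical helper:
 *
 *	static void example_install_insns(void *dst, const void *src,
 *					  size_t len)
 *	{
 *		memcpy(dst, src, len);
 *		flush_icache_range((unsigned long)dst,
 *				   (unsigned long)dst + len);
 *	}
 *
 * Without the flush, stale I-cache lines, or dirty D-cache lines not yet
 * visible to instruction fetch on Harvard caches, could be executed.
 */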
/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *);
void flush_dcache_folio(struct folio *folio);
#define flush_dcache_folio flush_dcache_folio

#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
static inline void flush_kernel_vmap_range(void *addr, int size)
{
	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
		__cpuc_flush_dcache_area(addr, (size_t)size);
}

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}

#define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)

/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a DSB after cleaning the cache line.
		 */
		dsb(ishst);
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
}

/*
 * Memory synchronization helpers for mixed cached vs non-cached accesses.
 *
 * Some synchronization algorithms have to set states in memory with the
 * cache enabled or disabled depending on the code path.  It is crucial
 * to always ensure proper cache maintenance to update main memory right
 * away in that case.
 *
 * Any cached write must be followed by a cache clean operation.
 * Any cached read must be preceded by a cache invalidate operation.
 * Yet, in the read case, a cache flush, i.e. an atomic clean + invalidate
 * operation, is needed to avoid discarding possible concurrent writes to
 * the accessed memory.
 *
 * Also, in order to prevent a cached writer from interfering with an
 * adjacent non-cached writer, each state variable must be located in
 * a separate cache line.
 */

/*
 * This needs to be >= the max cache writeback size of all
 * supported platforms included in the current kernel configuration.
 * This is used to align state variables to their own cache lines.
 */
#define __CACHE_WRITEBACK_ORDER 6  /* guessed from existing platforms */
#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)
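/*
 * Illustrative sketch, not part of this header: a state variable shared
 * between a cached and a non-cached observer would typically be padded
 * to its own writeback granule and accessed through the sync_cache_w()/
 * sync_cache_r() helpers defined below.  The struct and field names are
 * hypothetical:
 *
 *	struct example_handshake {
 *		int flag;
 *	} __aligned(__CACHE_WRITEBACK_GRANULE);
 *
 *	static struct example_handshake hs;
 *
 *	// Cached writer: store, then clean so the update reaches memory.
 *	hs.flag = 1;
 *	sync_cache_w(&hs.flag);
 *
 *	// Cached reader: flush first so a non-cached writer's update
 *	// is observed instead of a stale cached copy.
 *	sync_cache_r(&hs.flag);
 *	if (hs.flag)
 *		...
 */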
/*
 * There is no __cpuc_clean_dcache_area but we use it anyway for
 * code intent clarity, and alias it to __cpuc_flush_dcache_area.
 */
#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area

/*
 * Ensure preceding writes to *p by this CPU are visible to
 * subsequent reads by other CPUs:
 */
static inline void __sync_cache_range_w(volatile void *p, size_t size)
{
	char *_p = (char *)p;

	__cpuc_clean_dcache_area(_p, size);
	outer_clean_range(__pa(_p), __pa(_p + size));
}

/*
 * Ensure preceding writes to *p by other CPUs are visible to
 * subsequent reads by this CPU.  We must be careful not to
 * discard data simultaneously written by another CPU, hence the
 * usage of flush rather than invalidate operations.
 */
static inline void __sync_cache_range_r(volatile void *p, size_t size)
{
	char *_p = (char *)p;

#ifdef CONFIG_OUTER_CACHE
	if (outer_cache.flush_range) {
		/*
		 * Ensure dirty data migrated from other CPUs into our cache
		 * are cleaned out safely before the outer cache is cleaned:
		 */
		__cpuc_clean_dcache_area(_p, size);

		/* Clean and invalidate stale data for *p from outer ... */
		outer_flush_range(__pa(_p), __pa(_p + size));
	}
#endif

	/* ... and inner cache: */
	__cpuc_flush_dcache_area(_p, size);
}

#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))

/*
 * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
 * To do so we must:
 *
 * - Clear the SCTLR.C bit to prevent further cache allocations
 * - Flush the desired level of cache
 * - Clear the ACTLR "SMP" bit to disable local coherency
 *
 * ... and do so without any intervening memory access between those steps,
 * not even to the stack.
 *
 * WARNING -- After this has been called:
 *
 * - No ldrex/strex (and similar) instructions must be used.
 * - The CPU is obviously no longer coherent with the other CPUs.
 * - This is unlikely to work as expected if Linux is running non-secure.
 *
 * Note:
 *
 * - This is known to apply to several ARMv7 processor implementations,
 *   however some exceptions may exist.  Caveat emptor.
 *
 * - The clobber list is dictated by the call to v7_flush_dcache_*.
 */
#define v7_exit_coherency_flush(level) \
	asm volatile( \
	".arch	armv7-a \n\t" \
	"mrc	p15, 0, r0, c1, c0, 0	@ get SCTLR \n\t" \
	"bic	r0, r0, #"__stringify(CR_C)" \n\t" \
	"mcr	p15, 0, r0, c1, c0, 0	@ set SCTLR \n\t" \
	"isb	\n\t" \
	"bl	v7_flush_dcache_"__stringify(level)" \n\t" \
	"mrc	p15, 0, r0, c1, c0, 1	@ get ACTLR \n\t" \
	"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t" \
	"mcr	p15, 0, r0, c1, c0, 1	@ set ACTLR \n\t" \
	"isb	\n\t" \
	"dsb" \
	: : : "r0","r1","r2","r3","r4","r5","r6", \
	      "r9","r10","ip","lr","memory" )

void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
			     void *kaddr, unsigned long len);


#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
void check_cpu_icache_size(int cpuid);
#else
static inline void check_cpu_icache_size(int cpuid) { }
#endif

#endif