/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_CACHEFLUSH_H
#define _ASM_POWERPC_CACHEFLUSH_H

#ifdef __KERNEL__

#include <linux/mm.h>
#include <asm/cputable.h>

/*
 * No cache flushing is required when address mappings are changed,
 * because the caches on PowerPCs are physically addressed.
 */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_icache_page(vma, page)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Book3s has no ptesync after setting a pte, so without this ptesync it's
 * possible for a kernel virtual mapping access to return a spurious fault
 * if it's accessed right after the pte is set. The page fault handler does
 * not expect this type of fault. flush_cache_vmap is not exactly the right
 * place to put this, but it seems to work well enough.
 */
#define flush_cache_vmap(start, end)	do { asm volatile("ptesync" ::: "memory"); } while (0)
#else
#define flush_cache_vmap(start, end)	do { } while (0)
#endif

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	1
extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

extern void flush_icache_range(unsigned long, unsigned long);
extern void flush_icache_user_range(struct vm_area_struct *vma,
				    struct page *page, unsigned long addr,
				    int len);
extern void __flush_dcache_icache(void *page_va);
extern void flush_dcache_icache_page(struct page *page);
#if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
extern void __flush_dcache_icache_phys(unsigned long physaddr);
#else
static inline void __flush_dcache_icache_phys(unsigned long physaddr)
{
	BUG();
}
#endif

#ifdef CONFIG_PPC32
/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 */
static inline void flush_dcache_range(unsigned long start, unsigned long stop)
{
	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
	unsigned long i;

	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
		dcbf(addr);
	mb();	/* sync */
}

/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 */
static inline void clean_dcache_range(unsigned long start, unsigned long stop)
{
	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
	unsigned long i;

	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
		dcbst(addr);
	mb();	/* sync */
}

/*
 * Like above, but invalidate the D-cache.  This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
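 *
 * The CPM does DMA to memory without participating in cache coherency,
 * so the core must invalidate any cached copies of a buffer before
 * reading data the CPM has placed there.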
 */
static inline void invalidate_dcache_range(unsigned long start,
					   unsigned long stop)
{
	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
	unsigned long i;

	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
		dcbi(addr);
	mb();	/* sync */
}

#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64
extern void flush_dcache_range(unsigned long start, unsigned long stop);
extern void flush_inval_dcache_range(unsigned long start, unsigned long stop);
#endif

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do { \
		memcpy(dst, src, len); \
		flush_icache_user_range(vma, page, vaddr, len); \
	} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

#endif /* __KERNEL__ */

#endif /* _ASM_POWERPC_CACHEFLUSH_H */
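
/*
 * Usage sketches (editorial illustration, not part of the upstream
 * header; "buf", "len", "code" and "insns" are hypothetical driver
 * variables). A driver on a non-coherent part such as the 8xx would
 * invalidate stale lines before reading a CPM DMA buffer:
 *
 *	invalidate_dcache_range((unsigned long)buf,
 *				(unsigned long)buf + len);
 *
 * and code that writes instructions into memory must make the I-cache
 * coherent with the D-cache before executing them:
 *
 *	memcpy(code, insns, len);
 *	flush_icache_range((unsigned long)code,
 *			   (unsigned long)code + len);
 */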