/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_CACHEFLUSH_H
#define _ASM_GENERIC_CACHEFLUSH_H

struct mm_struct;
struct vm_area_struct;
struct page;
struct address_space;

/*
 * The cache does not need to be flushed when TLB entries change if the
 * cache is indexed by physical address rather than by virtual address,
 * so every routine below defaults to a no-op.  An architecture that
 * needs real flushing supplies its own implementation and defines the
 * matching macro before including this file, which suppresses the
 * generic fallback.
 */
#ifndef flush_cache_all
static inline void flush_cache_all(void)
{
}
#endif

#ifndef flush_cache_mm
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
#endif

/* Called by dup_mmap() when an address space is copied at fork() time. */
#ifndef flush_cache_dup_mm
static inline void flush_cache_dup_mm(struct mm_struct *mm)
{
}
#endif

#ifndef flush_cache_range
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
}
#endif

#ifndef flush_cache_page
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr,
				    unsigned long pfn)
{
}
#endif

/*
 * Architectures whose data caches can alias provide their own
 * flush_dcache_page() and define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE to 1.
 */
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
static inline void flush_dcache_page(struct page *page)
{
}
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#endif

#ifndef flush_dcache_mmap_lock
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}
#endif

#ifndef flush_dcache_mmap_unlock
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}
#endif

/* Make instruction fetches coherent after the kernel writes code. */
#ifndef flush_icache_range
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
}
#endif

#ifndef flush_icache_user_range
#define flush_icache_user_range flush_icache_range
#endif

#ifndef flush_icache_page
static inline void flush_icache_page(struct vm_area_struct *vma,
				     struct page *page)
{
}
#endif

#ifndef flush_icache_user_page
static inline void flush_icache_user_page(struct vm_area_struct *vma,
					  struct page *page,
					  unsigned long addr, int len)
{
}
#endif

#ifndef flush_cache_vmap
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}
#endif

#ifndef flush_cache_vunmap
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
#endif

/*
 * Used by ptrace and friends when writing into another process's pages
 * through a kernel mapping; the I-cache flush keeps instruction fetches
 * coherent after e.g. breakpoint insertion.
 */
#ifndef copy_to_user_page
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do {							\
		memcpy(dst, src, len);				\
		flush_icache_user_page(vma, page, vaddr, len);	\
	} while (0)
#endif

#ifndef copy_from_user_page
#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
	memcpy(dst, src, len)
#endif

#endif /* _ASM_GENERIC_CACHEFLUSH_H */
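
/*
 * Usage sketch (illustrative only, not part of this header): an
 * architecture overrides a fallback by supplying its own implementation
 * and defining the matching macro before including this file.  The
 * "foo" architecture and its functions below are hypothetical.
 *
 *	In arch/foo/include/asm/cacheflush.h:
 *
 *	#ifndef _ASM_FOO_CACHEFLUSH_H
 *	#define _ASM_FOO_CACHEFLUSH_H
 *
 *	struct page;
 *
 *	Aliasing D-cache: provide flush_dcache_page() and advertise it:
 *	#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 *	void flush_dcache_page(struct page *page);
 *
 *	Self-defining macro suppresses the generic no-op stub:
 *	void flush_icache_range(unsigned long start, unsigned long end);
 *	#define flush_icache_range flush_icache_range
 *
 *	#include <asm-generic/cacheflush.h>
 *
 *	#endif
 */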
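
/*
 * Caller's-eye sketch of the copy_to_user_page() contract, modelled
 * loosely on what __access_remote_vm() in mm/memory.c does when ptrace
 * writes into another task's page (simplified, error handling omitted;
 * "offset", "buf" and "bytes" are local variables of that caller):
 *
 *	maddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
 *	set_page_dirty_lock(page);
 *	kunmap(page);
 *
 * The matching read path uses copy_from_user_page(), which needs no
 * I-cache maintenance because nothing executable was modified.
 */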