/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_CACHEFLUSH_H
#define _ASM_GENERIC_CACHEFLUSH_H

#include <linux/instrumented.h>

struct mm_struct;
struct vm_area_struct;
struct page;
struct address_space;

/*
 * The cache does not need to be flushed when TLB entries change if the
 * cache is indexed by physical rather than virtual addresses, so these
 * fallbacks are all no-ops.
 */
#ifndef flush_cache_all
static inline void flush_cache_all(void)
{
}
#endif

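/*
 * Flush all cache lines associated with @mm; used for whole address
 * space operations such as exit and exec.
 */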
#ifndef flush_cache_mm
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
#endif

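/*
 * As flush_cache_mm(), but called when the address space is duplicated
 * at fork(); kept separate so VIPT caches can optimize this case.
 */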
#ifndef flush_cache_dup_mm
static inline void flush_cache_dup_mm(struct mm_struct *mm)
{
}
#endif

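/*
 * Flush cache lines for the user virtual address range [start, end)
 * in @vma before the page tables covering it are changed.
 */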
#ifndef flush_cache_range
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
}
#endif

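/*
 * Single-page variant of flush_cache_range(); @pfn identifies the
 * backing page frame so that physically indexed caches can locate the
 * lines to flush.
 */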
#ifndef flush_cache_page
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr,
				    unsigned long pfn)
{
}
#endif

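/*
 * Called when the kernel writes to or reads from a page cache page so
 * that aliasing D-caches can keep the kernel and user views coherent.
 * ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE tells generic code whether the
 * architecture provides a real implementation.
 */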
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
static inline void flush_dcache_page(struct page *page)
{
}

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#endif

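/*
 * Taken around updates to a mapping's i_mmap tree so that
 * flush_dcache_page() implementations can walk a page's user mappings
 * safely.
 */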
#ifndef flush_dcache_mmap_lock
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}
#endif

#ifndef flush_dcache_mmap_unlock
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}
#endif

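/*
 * Make instructions the kernel has just written to [start, end) visible
 * to instruction fetch, e.g. after loading a module or patching code.
 */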
#ifndef flush_icache_range
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
}
#endif

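/* As flush_icache_range(), but for a range of user virtual addresses. */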
#ifndef flush_icache_user_range
#define flush_icache_user_range flush_icache_range
#endif

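/*
 * Called after writing instructions into a user page through its kernel
 * mapping (e.g. for ptrace breakpoints) so that user instruction
 * fetches see the new code.
 */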
#ifndef flush_icache_user_page
static inline void flush_icache_user_page(struct vm_area_struct *vma,
					   struct page *page,
					   unsigned long addr, int len)
{
}
#endif

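/*
 * Called after new kernel page table entries for [start, end) have been
 * installed by vmap()/vmalloc().
 */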
#ifndef flush_cache_vmap
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}
#endif

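/*
 * As flush_cache_vmap(), but callable very early in boot (e.g. for the
 * first per-CPU chunk mappings), before the normal deferred flush
 * machinery is available.
 */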
#ifndef flush_cache_vmap_early
static inline void flush_cache_vmap_early(unsigned long start, unsigned long end)
{
}
#endif

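/*
 * Called before vunmap() tears down the kernel page table entries for
 * [start, end).
 */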
#ifndef flush_cache_vunmap
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
#endif

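/*
 * Copy data into a page mapped in user space through its kernel-side
 * mapping (e.g. for ptrace): record the access for the sanitizers via
 * the instrumentation hook, do the copy, then flush the I-cache in case
 * the bytes written are instructions.
 */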
#ifndef copy_to_user_page
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do { \
		instrument_copy_to_user((void __user *)dst, src, len); \
		memcpy(dst, src, len); \
		flush_icache_user_page(vma, page, vaddr, len); \
	} while (0)
#endif

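/*
 * The read-side counterpart: copy data out of a user page through its
 * kernel-side mapping, with the matching before/after sanitizer hooks.
 */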
#ifndef copy_from_user_page
#define copy_from_user_page(vma, page, vaddr, dst, src, len)		  \
	do {								  \
		instrument_copy_from_user_before(dst, (void __user *)src, \
						 len);			  \
		memcpy(dst, src, len);					  \
		instrument_copy_from_user_after(dst, (void __user *)src, len, \
						0);			  \
	} while (0)
#endif

#endif /* _ASM_GENERIC_CACHEFLUSH_H */