/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H

/* Keep includes the same across arches.  */
#include <linux/mm.h>

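/* Tell the core kernel that flush_dcache_page() below is a no-op. */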
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0

/*
 * The cache doesn't need to be flushed when TLB entries change,
 * because the cache is mapped to physical memory, not virtual
 * memory, so every flush hook below can be a no-op.
 */
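/* Flush the entire CPU cache, e.g. around wholesale kernel page table changes. */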
static inline void flush_cache_all(void)
{
}

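/* Flush all cached user mappings of an address space before its page tables are torn down. */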
static inline void flush_cache_mm(struct mm_struct *mm)
{
}

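/* Like flush_cache_mm(), but called at fork() time before the page tables are copied. */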
static inline void flush_cache_dup_mm(struct mm_struct *mm)
{
}

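/*
 * Flush user mappings in [start, end) of the VMA before the corresponding
 * page table entries are changed or removed.
 */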
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
}

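/* Single-page variant of flush_cache_range(), used e.g. on COW faults. */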
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr,
				    unsigned long pfn)
{
}

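/*
 * Keep the kernel's view of a page cache page coherent with user space
 * mappings on aliasing D-caches; nothing to do with a physically mapped
 * cache.
 */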
static inline void flush_dcache_page(struct page *page)
{
}

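/*
 * Taken around mapping->i_mmap updates so that flush_dcache_page()
 * implementations that walk i_mmap are serialized; no-ops here.
 */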
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}

static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}

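/*
 * Make instructions the kernel just wrote to [start, end) visible to the
 * I-cache, e.g. after loading a module or patching kernel text.
 */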
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
}

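/*
 * Legacy hook, called when a page is mapped into an executable VMA; its
 * work can be done in flush_dcache_page() and update_mmu_cache() instead.
 */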
static inline void flush_icache_page(struct vm_area_struct *vma,
				     struct page *page)
{
}

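/*
 * Flush the I-cache for a user page the kernel wrote instructions into;
 * used by copy_to_user_page() below.
 */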
static inline void flush_icache_user_range(struct vm_area_struct *vma,
					   struct page *page,
					   unsigned long addr, int len)
{
}

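/*
 * Called after new kernel virtual mappings are installed (vmap/vmalloc)
 * and before they are torn down (vunmap).
 */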
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}

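/*
 * Used by access_process_vm() (e.g. ptrace inserting a breakpoint) to copy
 * data to or from a user page through its kernel mapping.  The written data
 * may be instructions, so the I-cache must be synchronized afterwards;
 * reads dirty nothing, so copy_from_user_page() is a plain memcpy().
 */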
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do { \
		memcpy(dst, src, len); \
		flush_icache_user_range(vma, page, vaddr, len); \
	} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

#endif /* __ASM_CACHEFLUSH_H */