/*
 * Based on arch/arm/include/asm/cacheflush.h
 *
 * Copyright (C) 1999-2002 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H

#include <linux/mm.h>

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1
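
/*
 * Illustrative sketch (not the actual implementation): the flag lets the
 * cache maintenance code skip maintenance that has already been performed
 * for a page, along the lines of:
 *
 *	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 *		sync_icache_aliases(page_address(page), PAGE_SIZE);
 */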

/*
 *	MM Cache Management
 *	===================
 *
 *	These methods are implemented in arch/arm64/mm/cache.S.
 *
 *	Start addresses are inclusive and end addresses are exclusive; start
 *	addresses should be rounded down, end addresses up.
 *
 *	See Documentation/core-api/cachetlb.rst for more information. Please note that
 *	the implementation assumes non-aliasing VIPT D-cache and (aliasing)
 *	VIPT I-cache.
 *
 *	flush_cache_mm(mm)
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_icache_range(start, end)
 *
 *		Ensure coherency between the I-cache and the D-cache in the
 *		region described by start, end.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	invalidate_icache_range(start, end)
 *
 *		Invalidate the I-cache in the region described by start, end.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	__flush_cache_user_range(start, end)
 *
 *		Ensure coherency between the I-cache and the D-cache in the
 *		region described by start, end.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	__flush_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in the region [kaddr, kaddr + size)
 *		is written back.
 *		- kaddr  - kernel virtual start address
 *		- size   - region size
 */
extern void flush_icache_range(unsigned long start, unsigned long end);
extern int  invalidate_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
extern void __inval_dcache_area(void *addr, size_t len);
extern void __clean_dcache_area_poc(void *addr, size_t len);
extern void __clean_dcache_area_pop(void *addr, size_t len);
extern void __clean_dcache_area_pou(void *addr, size_t len);
extern long __flush_cache_user_range(unsigned long start, unsigned long end);
extern void sync_icache_aliases(void *kaddr, unsigned long len);
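
/*
 * Example (illustrative only; the helper below is hypothetical): after
 * writing new instructions into a kernel mapping, e.g. when patching
 * code, the D-cache must be cleaned and the I-cache invalidated before
 * the new instructions may safely be executed:
 *
 *	static void patch_insn(u32 *addr, u32 insn)
 *	{
 *		WRITE_ONCE(*addr, insn);
 *		flush_icache_range((unsigned long)addr,
 *				   (unsigned long)(addr + 1));
 *	}
 */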

static inline void flush_cache_mm(struct mm_struct *mm)
{
}

static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long user_addr, unsigned long pfn)
{
}

static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end)
{
}

/*
 * Cache maintenance functions used by the DMA API. Not to be used directly.
 */
extern void __dma_map_area(const void *, size_t, int);
extern void __dma_unmap_area(const void *, size_t, int);
extern void __dma_flush_area(const void *, size_t);
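
/*
 * Illustrative sketch (the wrapper below is hypothetical; the real
 * callers live in the arch DMA mapping code): for a transfer to the
 * device the buffer is cleaned so the device observes the CPU's writes,
 * while for a transfer from the device it is invalidated so the CPU
 * observes the device's writes:
 *
 *	static void sync_buf_for_device(void *cpu_addr, size_t size,
 *					enum dma_data_direction dir)
 *	{
 *		__dma_map_area(cpu_addr, size, dir);
 *	}
 */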

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
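
/*
 * Illustrative use (a sketch of callers such as the ptrace access path):
 * the caller copies through the kernel mapping of the target page and
 * relies on copy_to_user_page() to keep the target process's view of the
 * page, including its I-cache, coherent with the new data:
 *
 *	kaddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, kaddr + offset, buf, len);
 *	kunmap(page);
 */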

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
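
/*
 * Illustrative sketch (hypothetical caller): a filesystem or driver that
 * writes to a page cache page through its kernel mapping should call
 * flush_dcache_page() so that any user space mappings of the page observe
 * the new data:
 *
 *	void *kaddr = kmap_atomic(page);
 *	memcpy(kaddr, data, len);
 *	kunmap_atomic(kaddr);
 *	flush_dcache_page(page);
 */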

static inline void __flush_icache_all(void)
{
	/*
	 * CTR_EL0.DIC: I-cache invalidation is not required for
	 * instruction to data coherence, so there is nothing to do.
	 */
	if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
		return;

	/* Invalidate all I-caches to the PoU, Inner Shareable domain. */
	asm("ic	ialluis");
	dsb(ish);
}

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)

/*
 * Not required on AArch64 (PIPT or VIPT non-aliasing D-cache).
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}

int set_memory_valid(unsigned long addr, int numpages, int enable);
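
/*
 * Illustrative sketch (hypothetical caller): set_memory_valid() can be
 * used to temporarily remove pages from the kernel linear mapping, for
 * example to catch stray accesses, and to restore them afterwards:
 *
 *	set_memory_valid(addr, nr_pages, 0);
 *	... the region now faults on access ...
 *	set_memory_valid(addr, nr_pages, 1);
 */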

#endif