/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#ifndef __ASSEMBLY__

#include <linux/mm_types.h>
#include <linux/sched.h>
#include <asm/cputype.h>
#include <asm/mmu.h>

/*
 * Raw TLBI operations.
 *
 * Where necessary, use the __tlbi() macro to avoid asm()
 * boilerplate. Drivers and most kernel code should use the TLB
 * management routines in preference to the macro below.
 *
 * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
 * on whether a particular TLBI operation takes an argument or
 * not. The macro handles invoking the asm with or without the
 * register argument as appropriate (the standalone sketch further
 * below illustrates how the argument counting works).
 */
#define __TLBI_0(op, arg) asm ("tlbi " #op "\n"				       \
		   ALTERNATIVE("nop\n			nop",		       \
			       "dsb ish\n		tlbi " #op,	       \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	       \
			    : : )

#define __TLBI_1(op, arg) asm ("tlbi " #op ", %0\n"			       \
		   ALTERNATIVE("nop\n			nop",		       \
			       "dsb ish\n		tlbi " #op ", %0",     \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	       \
			    : : "r" (arg))

#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)

#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)
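
/*
 * Editor's illustration (not part of this header): the argument-counting
 * trick behind __tlbi() can be exercised as a standalone userspace program,
 * with printf() standing in for the asm.  All names prefixed DEMO_ or demo_
 * are hypothetical.  Calling demo_tlbi(op) selects DEMO_0 (no operand);
 * calling demo_tlbi(op, arg) selects DEMO_1, exactly as __TLBI_N() picks
 * __TLBI_0/__TLBI_1 above.  Guarded out so the header is unaffected; build
 * the body on its own to run it.
 */
#if 0
#include <stdio.h>

#define DEMO_0(op, arg)		printf("tlbi %s\n", #op)
#define DEMO_1(op, arg)		printf("tlbi %s, %#lx\n", #op, (unsigned long)(arg))
#define DEMO_N(op, arg, n, ...)	DEMO_##n(op, arg)
#define demo_tlbi(op, ...)	DEMO_N(op, ##__VA_ARGS__, 1, 0)

int main(void)
{
	demo_tlbi(vmalle1is);		/* prints "tlbi vmalle1is"        */
	demo_tlbi(vale1is, 0x1234UL);	/* prints "tlbi vale1is, 0x1234"  */
	return 0;
}
#endif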

/*
 * If the kernel is running with kpti enabled, userspace is mapped under a
 * separate ASID, so repeat the operation with the user ASID encoded into
 * the TLBI operand.
 */
#define __tlbi_user(op, arg) do {						\
	if (arm64_kernel_unmapped_at_el0())					\
		__tlbi(op, (arg) | USER_ASID_FLAG);				\
} while (0)

/*
 * This macro creates a properly formatted VA operand for the TLBI: bits
 * [43:0] hold bits [55:12] of the virtual address and bits [63:48] hold
 * the ASID.
 */
#define __TLBI_VADDR(addr, asid)				\
	({							\
		unsigned long __ta = (addr) >> 12;		\
		__ta &= GENMASK_ULL(43, 0);			\
		__ta |= (unsigned long)(asid) << 48;		\
		__ta;						\
	})
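
/*
 * Editor's illustration (not part of this header): a standalone sketch of
 * the encoding performed by __TLBI_VADDR().  GENMASK_ULL() is open-coded
 * and all demo_ names are hypothetical.  Guarded out; build the body on
 * its own to run it.
 */
#if 0
#include <stdio.h>

#define DEMO_GENMASK_ULL(h, l)	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

static unsigned long long demo_tlbi_vaddr(unsigned long long addr,
					  unsigned int asid)
{
	unsigned long long ta = addr >> 12;		/* page number: VA[55:12] */

	ta &= DEMO_GENMASK_ULL(43, 0);			/* keep bits [43:0]       */
	ta |= (unsigned long long)asid << 48;		/* ASID in bits [63:48]   */
	return ta;
}

int main(void)
{
	/* a user address with ASID 0x42: prints 0x42000ffff8000a */
	printf("%#llx\n", demo_tlbi_vaddr(0xffff8000a000ULL, 0x42));
	return 0;
}
#endif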

/*
 *	TLB Invalidation
 *	================
 *
 *	This header file implements the low-level TLB invalidation routines
 *	(sometimes referred to as "flushing" in the kernel) for arm64.
 *
 *	Every invalidation operation uses the following template:
 *
 *	DSB ISHST	// Ensure prior page-table updates have completed
 *	TLBI ...	// Invalidate the TLB
 *	DSB ISH		// Ensure the TLB invalidation has completed
 *	if (invalidated kernel mappings)
 *		ISB	// Discard any instructions fetched from the old mapping
 *
 *
 *	The following functions form part of the "core" TLB invalidation API,
 *	as documented in Documentation/core-api/cachetlb.rst:
 *
 *	flush_tlb_all()
 *		Invalidate the entire TLB (kernel + user) on all CPUs.
 *
 *	flush_tlb_mm(mm)
 *		Invalidate an entire user address space on all CPUs.
 *		The 'mm' argument identifies the ASID to invalidate.
 *
 *	flush_tlb_range(vma, start, end)
 *		Invalidate the virtual-address range '[start, end)' on all
 *		CPUs for the user address space corresponding to 'vma->vm_mm'.
 *		Note that this operation also invalidates any walk-cache
 *		entries associated with translations for the specified address
 *		range.
 *
 *	flush_tlb_kernel_range(start, end)
 *		Same as flush_tlb_range(..., start, end), but applies to
 *		kernel mappings rather than a particular user address space.
 *		Whilst not explicitly documented, this function is used when
 *		unmapping pages from vmalloc/io space.
 *
 *	flush_tlb_page(vma, addr)
 *		Invalidate a single user mapping for address 'addr' in the
 *		address space corresponding to 'vma->vm_mm'.  Note that this
 *		operation only invalidates a single, last-level page-table
 *		entry and therefore does not affect any walk-caches.
 *
 *
 *	Next, we have some undocumented invalidation routines that you probably
 *	don't want to call unless you know what you're doing:
 *
 *	local_flush_tlb_all()
 *		Same as flush_tlb_all(), but only applies to the calling CPU.
 *
 *	__flush_tlb_kernel_pgtable(addr)
 *		Invalidate a single kernel mapping for address 'addr' on all
 *		CPUs, ensuring that any walk-cache entries associated with the
 *		translation are also invalidated.
 *
 *	__flush_tlb_range(vma, start, end, stride, last_level)
 *		Invalidate the virtual-address range '[start, end)' on all
 *		CPUs for the user address space corresponding to 'vma->vm_mm'.
 *		The invalidation operations are issued at a granularity
 *		determined by 'stride' and only affect any walk-cache entries
 *		if 'last_level' is false.
 *
 *
 *	Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented
 *	on top of these routines, since that is our interface to the mmu_gather
 *	API as used by munmap() and friends.
 */
static inline void local_flush_tlb_all(void)
{
	dsb(nshst);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();
}

static inline void flush_tlb_all(void)
{
	dsb(ishst);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid = __TLBI_VADDR(0, ASID(mm));

	dsb(ishst);
	__tlbi(aside1is, asid);
	__tlbi_user(aside1is, asid);
	dsb(ish);
}

/*
 * Issue, but do not wait for, the invalidation of a single user mapping.
 * The caller is responsible for the trailing DSB; see flush_tlb_page().
 */
static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
					 unsigned long uaddr)
{
	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));

	dsb(ishst);
	__tlbi(vale1is, addr);
	__tlbi_user(vale1is, addr);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	flush_tlb_page_nosync(vma, uaddr);
	dsb(ish);
}
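
/*
 * Editor's illustration (not part of this header): a typical caller pairs a
 * last-level PTE update with flush_tlb_page(), relying on the
 * DSB ISHST / TLBI / DSB ISH sequence described in the comment block above.
 * This is a sketch, not kernel code: it assumes set_pte_at() from
 * asm/pgtable.h and the function name is hypothetical.  Guarded out of the
 * build.
 */
#if 0
static inline void demo_update_user_pte(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t newpte)
{
	set_pte_at(vma->vm_mm, addr, ptep, newpte);	/* update the page table    */
	flush_tlb_page(vma, addr);			/* drop the stale TLB entry */
}
#endif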

/*
 * Capping the number of TLBI operations issued for a single range is meant to
 * avoid soft lock-ups on large TLB flushing ranges, and is not necessarily a
 * performance improvement: past the cap we fall back to invalidating the
 * whole ASID (or, for kernel ranges, the whole TLB).
 */
#define MAX_TLBI_OPS	PTRS_PER_PTE
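
/*
 * Editor's illustration (not part of this header): a standalone sketch of
 * the fallback heuristic used by __flush_tlb_range() below, assuming 4KiB
 * pages (PTRS_PER_PTE == 512).  All DEMO_/demo_ names are hypothetical.
 * Guarded out; build the body on its own to run it.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE		4096UL
#define DEMO_MAX_TLBI_OPS	512UL	/* PTRS_PER_PTE for 4KiB pages */

/* true when the range is big enough that a full-ASID flush is used instead */
static bool demo_flush_whole_asid(unsigned long start, unsigned long end,
				  unsigned long stride)
{
	return (end - start) >= (DEMO_MAX_TLBI_OPS * stride);
}

int main(void)
{
	/* 1MiB of 4KiB pages: 256 TLBIs, below the cap -> prints 0 */
	printf("%d\n", demo_flush_whole_asid(0, 1UL << 20, DEMO_PAGE_SIZE));
	/* 2MiB of 4KiB pages: hits the 512-operation cap -> prints 1 */
	printf("%d\n", demo_flush_whole_asid(0, 2UL << 20, DEMO_PAGE_SIZE));
	return 0;
}
#endif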

static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     unsigned long stride, bool last_level)
{
	unsigned long asid = ASID(vma->vm_mm);
	unsigned long addr;

	if ((end - start) >= (MAX_TLBI_OPS * stride)) {
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	/*
	 * Convert the stride into units of 4k, the granularity of the
	 * TLBI VA operand.
	 */
	stride >>= 12;

	start = __TLBI_VADDR(start, asid);
	end = __TLBI_VADDR(end, asid);

	dsb(ishst);
	for (addr = start; addr < end; addr += stride) {
		if (last_level) {
			__tlbi(vale1is, addr);
			__tlbi_user(vale1is, addr);
		} else {
			__tlbi(vae1is, addr);
			__tlbi_user(vae1is, addr);
		}
	}
	dsb(ish);
}
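
/*
 * Editor's illustration (not part of this header): a standalone sketch of
 * the address arithmetic in __flush_tlb_range() above.  The loop walks TLBI
 * operands, whose address field is in 4KiB units, so the byte stride is
 * first converted with 'stride >>= 12'.  All demo_ names are hypothetical.
 * Guarded out; build the body on its own to run it.
 */
#if 0
#include <stdio.h>

static unsigned long demo_count_tlbis(unsigned long start, unsigned long end,
				      unsigned long stride_bytes)
{
	unsigned long stride = stride_bytes >> 12;	/* bytes -> 4KiB units */
	unsigned long addr, nr = 0;

	/* mirrors the loop in __flush_tlb_range() above: one TLBI per stride */
	for (addr = start >> 12; addr < (end >> 12); addr += stride)
		nr++;

	return nr;
}

int main(void)
{
	/* 4MiB of 2MiB (PMD-sized) mappings: prints 2 */
	printf("%lu\n", demo_count_tlbis(0, 4UL << 20, 2UL << 20));
	/* 64KiB of 4KiB pages: prints 16 */
	printf("%lu\n", demo_count_tlbis(0, 64UL << 10, 4096UL));
	return 0;
}
#endif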

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	/*
	 * We cannot use leaf-only invalidation here, since we may be invalidating
	 * table entries as part of collapsing hugepages or moving page tables.
	 */
	__flush_tlb_range(vma, start, end, PAGE_SIZE, false);
}

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if ((end - start) > (MAX_TLBI_OPS * PAGE_SIZE)) {
		flush_tlb_all();
		return;
	}

	start = __TLBI_VADDR(start, 0);
	end = __TLBI_VADDR(end, 0);

	dsb(ishst);
	/* step one page at a time, expressed in 4KiB TLBI-operand units */
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		__tlbi(vaale1is, addr);
	dsb(ish);
	isb();
}
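
/*
 * Editor's illustration (not part of this header): the loop increment in
 * flush_tlb_kernel_range() above is one page expressed in 4KiB TLBI-operand
 * units, i.e. 1 << (PAGE_SHIFT - 12).  Standalone sketch; guarded out, build
 * the body on its own to run it.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int page_shift;

	/* arm64 supports 4KiB, 16KiB and 64KiB pages */
	for (page_shift = 12; page_shift <= 16; page_shift += 2)
		printf("PAGE_SHIFT=%u -> operand step %u\n",
		       page_shift, 1U << (page_shift - 12));
	return 0;
}
#endif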

/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).
 */
static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
{
	unsigned long addr = __TLBI_VADDR(kaddr, 0);

	dsb(ishst);
	__tlbi(vaae1is, addr);
	dsb(ish);
}
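
/*
 * Editor's illustration (not part of this header): when a kernel intermediate
 * table entry is torn down (e.g. a pmd pointing to a page-table page that is
 * being freed), the cached walk entry must go with it, which is what
 * __flush_tlb_kernel_pgtable() is for.  This sketch assumes pmd_clear() from
 * asm/pgtable.h; the function name is hypothetical.  Guarded out of the build.
 */
#if 0
static inline void demo_clear_kernel_pmd(pmd_t *pmdp, unsigned long addr)
{
	pmd_clear(pmdp);			/* unhook the next-level table    */
	__flush_tlb_kernel_pgtable(addr);	/* invalidate cached walk entries */
}
#endif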
#endif /* !__ASSEMBLY__ */

#endif /* __ASM_TLBFLUSH_H */