xref: /openbmc/linux/arch/arm64/include/asm/tlbflush.h (revision fed8b7e366e7c8f81e957ef91aa8f0a38e038c66)
/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <asm/cputype.h>
#include <asm/mmu.h>

/*
 * Raw TLBI operations.
 *
 * Where necessary, use the __tlbi() macro to avoid asm()
 * boilerplate. Drivers and most kernel code should use the TLB
 * management routines in preference to the macro below.
 *
 * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
 * on whether a particular TLBI operation takes an argument or
 * not. The macro handles invoking the asm with or without the
 * register argument as appropriate.
 */
#define __TLBI_0(op, arg) asm ("tlbi " #op "\n"				       \
		   ALTERNATIVE("nop\n			nop",		       \
			       "dsb ish\n		tlbi " #op,	       \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	       \
			    : : )

#define __TLBI_1(op, arg) asm ("tlbi " #op ", %0\n"			       \
		   ALTERNATIVE("nop\n			nop",		       \
			       "dsb ish\n		tlbi " #op ", %0",     \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	       \
			    : : "r" (arg))

#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)

#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)
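
/*
 * For example (illustrative only):
 *
 *	__tlbi(vmalle1is);		// no argument: dispatched to __TLBI_0
 *	__tlbi(vale1is, addr);		// register argument: dispatched to __TLBI_1
 *
 * The trailing "1, 0" arguments to __TLBI_N are how the dispatch works:
 * any caller-supplied argument shifts them along so that 'n' is 1 when a
 * register argument is present and 0 when it is not.
 */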

#define __tlbi_user(op, arg) do {						\
	if (arm64_kernel_unmapped_at_el0())					\
		__tlbi(op, (arg) | USER_ASID_FLAG);				\
} while (0)
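
/*
 * A note on __tlbi_user() (explanatory, based on the kpti scheme used by
 * this port): when the kernel is unmapped at EL0, each address space runs
 * under two ASIDs, the user one tagged with USER_ASID_FLAG in the TLBI
 * operand. Invalidations must therefore be issued twice, once per ASID,
 * which is what pairing __tlbi() with __tlbi_user() achieves. When kpti
 * is disabled, __tlbi_user() compiles away to nothing.
 */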

/* This macro creates a properly formatted VA operand for the TLBI */
#define __TLBI_VADDR(addr, asid)				\
	({							\
		unsigned long __ta = (addr) >> 12;		\
		__ta &= GENMASK_ULL(43, 0);			\
		__ta |= (unsigned long)(asid) << 48;		\
		__ta;						\
	})
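
/*
 * The resulting operand has the following layout (illustrative):
 *
 *	 63       48 47    44 43                           0
 *	+-----------+--------+------------------------------+
 *	|   ASID    |   0    |          VA[55:12]           |
 *	+-----------+--------+------------------------------+
 *
 * i.e. the address is expressed in units of 4KiB regardless of the
 * configured page size, with the ASID in the upper sixteen bits.
 */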

/*
 *	TLB Invalidation
 *	================
 *
 *	This header file implements the low-level TLB invalidation routines
 *	(sometimes referred to as "flushing" in the kernel) for arm64.
 *
 *	Every invalidation operation uses the following template:
 *
 *	DSB ISHST	// Ensure prior page-table updates have completed
 *	TLBI ...	// Invalidate the TLB
 *	DSB ISH		// Ensure the TLB invalidation has completed
 *	if (invalidated kernel mappings)
 *		ISB	// Discard any instructions fetched from the old mapping
 *
 *
 *	The following functions form part of the "core" TLB invalidation API,
 *	as documented in Documentation/core-api/cachetlb.rst:
 *
 *	flush_tlb_all()
 *		Invalidate the entire TLB (kernel + user) on all CPUs
 *
 *	flush_tlb_mm(mm)
 *		Invalidate an entire user address space on all CPUs.
 *		The 'mm' argument identifies the ASID to invalidate.
 *
 *	flush_tlb_range(vma, start, end)
 *		Invalidate the virtual-address range '[start, end)' on all
 *		CPUs for the user address space corresponding to 'vma->mm'.
 *		Note that this operation also invalidates any walk-cache
 *		entries associated with translations for the specified address
 *		range.
 *
 *	flush_tlb_kernel_range(start, end)
 *		Same as flush_tlb_range(..., start, end), but applies to
 *		kernel mappings rather than a particular user address space.
 *		Whilst not explicitly documented, this function is used when
 *		unmapping pages from vmalloc/io space.
 *
 *	flush_tlb_page(vma, addr)
 *		Invalidate a single user mapping for address 'addr' in the
 *		address space corresponding to 'vma->mm'.  Note that this
 *		operation only invalidates a single, last-level page-table
 *		entry and therefore does not affect any walk-caches.
 *
 *
 *	Next, we have some undocumented invalidation routines that you probably
 *	don't want to call unless you know what you're doing:
 *
 *	local_flush_tlb_all()
 *		Same as flush_tlb_all(), but only applies to the calling CPU.
 *
 *	__flush_tlb_kernel_pgtable(addr)
 *		Invalidate a single kernel mapping for address 'addr' on all
 *		CPUs, ensuring that any walk-cache entries associated with the
 *		translation are also invalidated.
 *
 *	__flush_tlb_range(vma, start, end, stride, last_level)
 *		Invalidate the virtual-address range '[start, end)' on all
 *		CPUs for the user address space corresponding to 'vma->mm'.
 *		The invalidation operations are issued at a granularity
 *		determined by 'stride' and only affect any walk-cache entries
 *		if 'last_level' is equal to false.
 *
 *
 *	Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented
 *	on top of these routines, since that is our interface to the mmu_gather
 *	API as used by munmap() and friends.
 */
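
/*
 * Illustrative usage (not part of the API itself): after clearing a user
 * PTE mapping 'addr' in 'vma', the stale translation is removed with
 *
 *	flush_tlb_page(vma, addr);
 *
 * whereas tearing down a range of kernel mappings in the vmalloc area
 * would be followed by
 *
 *	flush_tlb_kernel_range(start, end);
 */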
static inline void local_flush_tlb_all(void)
{
	dsb(nshst);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();
}
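
/*
 * Note the barrier scopes: local_flush_tlb_all() pairs a non-broadcast
 * TLBI (vmalle1) with non-shareable "nsh" barriers, so only the calling
 * CPU is affected, whereas flush_tlb_all() below uses the inner-shareable
 * variant (vmalle1is) with "ish" barriers to broadcast the invalidation
 * to every CPU in the inner-shareable domain.
 */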

static inline void flush_tlb_all(void)
{
	dsb(ishst);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid = __TLBI_VADDR(0, ASID(mm));

	dsb(ishst);
	__tlbi(aside1is, asid);
	__tlbi_user(aside1is, asid);
	dsb(ish);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));

	dsb(ishst);
	__tlbi(vale1is, addr);
	__tlbi_user(vale1is, addr);
	dsb(ish);
}

/*
 * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
 * necessarily a performance improvement.
 */
#define MAX_TLBI_OPS	1024UL
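
/*
 * For example, with a PAGE_SIZE stride and 4KiB pages, the cap works out
 * at 1024 * 4KiB = 4MiB of address space: ranges larger than that are
 * punted to a full ASID (or full TLB) flush below rather than being
 * invalidated page by page.
 */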

static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     unsigned long stride, bool last_level)
{
	unsigned long asid = ASID(vma->vm_mm);
	unsigned long addr;

	if ((end - start) > (MAX_TLBI_OPS * stride)) {
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	/* Convert the stride into units of 4k */
	stride >>= 12;

	start = __TLBI_VADDR(start, asid);
	end = __TLBI_VADDR(end, asid);

	dsb(ishst);
	for (addr = start; addr < end; addr += stride) {
		if (last_level) {
			/* Leaf-only: spare any cached intermediate walks */
			__tlbi(vale1is, addr);
			__tlbi_user(vale1is, addr);
		} else {
			/* Invalidate walk-cache entries as well */
			__tlbi(vae1is, addr);
			__tlbi_user(vae1is, addr);
		}
	}
	dsb(ish);
}
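
/*
 * Illustrative example (hypothetical caller): invalidating a range of
 * PMD-sized leaf entries, e.g. after remapping hugepages in place, could
 * raise the stride and skip the walk-cache:
 *
 *	__flush_tlb_range(vma, start, end, PMD_SIZE, true);
 */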

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	/*
	 * We cannot use leaf-only invalidation here, since we may be invalidating
	 * table entries as part of collapsing hugepages or moving page tables.
	 */
	__flush_tlb_range(vma, start, end, PAGE_SIZE, false);
}

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if ((end - start) > (MAX_TLBI_OPS * PAGE_SIZE)) {
		flush_tlb_all();
		return;
	}

	start = __TLBI_VADDR(start, 0);
	end = __TLBI_VADDR(end, 0);

	dsb(ishst);
	/* __TLBI_VADDR() operands are in 4KiB units, so step a page at a time */
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		__tlbi(vaale1is, addr);
	dsb(ish);
	isb();
}

/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).
 */
static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
{
	unsigned long addr = __TLBI_VADDR(kaddr, 0);

	dsb(ishst);
	__tlbi(vaae1is, addr);
	dsb(ish);
}
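
/*
 * A typical caller (for example, code freeing an intermediate level of
 * kernel page table) must issue the invalidation above before the
 * page-table page is reused, so that no cached partial walk can still
 * reference it.
 */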
#endif

#endif