/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <asm/cputype.h>
#include <asm/mmu.h>

/*
 * Raw TLBI operations.
 *
 * Where necessary, use the __tlbi() macro to avoid asm()
 * boilerplate. Drivers and most kernel code should use the TLB
 * management routines in preference to the macros below.
 *
 * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
 * on whether a particular TLBI operation takes an argument or
 * not. The macro handles invoking the asm with or without the
 * register argument as appropriate.
 */
#define __TLBI_0(op, arg) asm ("tlbi " #op "\n"				       \
		   ALTERNATIVE("nop\n			nop",		       \
			       "dsb ish\n		tlbi " #op,	       \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_QCOM_FALKOR_ERRATUM_1009)		       \
			    : : )

#define __TLBI_1(op, arg) asm ("tlbi " #op ", %0\n"			       \
		   ALTERNATIVE("nop\n			nop",		       \
			       "dsb ish\n		tlbi " #op ", %0",     \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_QCOM_FALKOR_ERRATUM_1009)		       \
			    : : "r" (arg))

#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)

#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)
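
/*
 * Illustrative expansion (a sketch of the preprocessing, not verbatim
 * compiler output): __TLBI_N() counts the trailing arguments, so
 *
 *	__tlbi(vmalle1is)	-> __TLBI_0(vmalle1is, 1)
 *	__tlbi(vae1is, addr)	-> __TLBI_1(vae1is, addr)
 *
 * i.e. the no-argument form emits a bare "tlbi vmalle1is", while the
 * one-argument form emits "tlbi vae1is, %0" with addr in a general
 * purpose register. The ALTERNATIVE is patched to a "dsb ish" plus a
 * repeated TLBI only on parts needing ARM64_WORKAROUND_REPEAT_TLBI.
 */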

#define __tlbi_user(op, arg) do {						\
	if (arm64_kernel_unmapped_at_el0())					\
		__tlbi(op, (arg) | USER_ASID_FLAG);				\
} while (0)
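
/*
 * A sketch of why the user variant exists: with kpti
 * (ARM64_UNMAP_KERNEL_AT_EL0), userspace runs under a second ASID whose
 * bottom bit is set (USER_ASID_FLAG, bit 48 of the TLBI operand), so an
 * invalidation visible to userspace must be issued for both ASIDs.
 * Conceptually, flush_tlb_page() below boils down to:
 *
 *	tlbi vale1is, (asid | va)			// kernel ASID
 *	tlbi vale1is, (asid | USER_ASID_FLAG | va)	// user ASID, kpti only
 */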

/*
 *	TLB Management
 *	==============
 *
 *	The TLB specific code is expected to perform whatever tests it needs
 *	to determine if it should invalidate the TLB for each call.  Start
 *	addresses are inclusive and end addresses are exclusive; it is safe to
 *	round these addresses down.
 *
 *	flush_tlb_all()
 *
 *		Invalidate the entire TLB.
 *
 *	flush_tlb_mm(mm)
 *
 *		Invalidate all TLB entries in a particular address space.
 *		- mm	- mm_struct describing address space
 *
 *	flush_tlb_range(vma,start,end)
 *
 *		Invalidate a range of TLB entries in the specified address
 *		space.
 *		- vma	- vma_struct describing address range
 *		- start - start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 *
 *	flush_tlb_page(vma,vaddr)
 *
 *		Invalidate the specified page in the specified address space.
 *		- vma	- vma_struct describing address space
 *		- vaddr - virtual address (may not be aligned)
 *
 *	flush_tlb_kernel_range(start,end)
 *
 *		Invalidate the TLB entries for the specified range of kernel
 *		virtual addresses.
 *		- start - start address (inclusive, may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 */
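
/*
 * Typical usage from generic mm code (a hypothetical sketch; real
 * callers live in mm/ and reach these helpers via the generic TLB
 * flushing hooks): after updating a PTE, the stale entry for that page
 * is dropped with flush_tlb_page():
 *
 *	static void example_update_pte(struct vm_area_struct *vma,
 *				       unsigned long addr, pte_t *ptep,
 *				       pte_t pte)
 *	{
 *		set_pte_at(vma->vm_mm, addr, ptep, pte);
 *		flush_tlb_page(vma, addr);
 *	}
 *
 * example_update_pte() is illustrative only and not part of this header.
 */
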
static inline void local_flush_tlb_all(void)
{
	dsb(nshst);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();
}
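
/*
 * Note on the barrier pattern used throughout this file: the leading
 * dsb(nshst)/dsb(ishst) makes prior page-table updates visible to the
 * table walker before the invalidation, the trailing dsb(nsh)/dsb(ish)
 * waits for the TLBI to complete, and isb() (where present)
 * resynchronises the local instruction stream. The nsh variants above
 * act on this CPU only; the "is" (inner shareable) variants below are
 * broadcast to all CPUs in the inner shareable domain.
 */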

static inline void flush_tlb_all(void)
{
	dsb(ishst);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid = ASID(mm) << 48;

	dsb(ishst);
	__tlbi(aside1is, asid);
	__tlbi_user(aside1is, asid);
	dsb(ish);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48);

	dsb(ishst);
	__tlbi(vale1is, addr);
	__tlbi_user(vale1is, addr);
	dsb(ish);
}
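
/*
 * Worked example of the operand encoding above (values are
 * illustrative): with uaddr == 0x0000ffff8000a000 and ASID 0x42,
 *
 *	(0x0000ffff8000a000 >> 12) | (0x42UL << 48) == 0x0042000ffff8000a
 *
 * i.e. VA[55:12] in the low bits and the ASID in bits [63:48], which is
 * the operand format expected by TLBI VALE1IS/VAE1IS.
 */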

/*
 * This is meant to avoid soft lock-ups on large TLB flushing ranges,
 * not necessarily to improve performance.
 */
#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
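
/*
 * For example, with 4K pages (PAGE_SHIFT == 12) MAX_TLB_RANGE is 1024
 * pages, i.e. 4MiB; with 64K pages it is 64MiB. Ranges larger than this
 * fall back to a full ASID or full TLB invalidation instead of a long
 * run of per-page TLBIs.
 */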

static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     bool last_level)
{
	unsigned long asid = ASID(vma->vm_mm) << 48;
	unsigned long addr;

	if ((end - start) > MAX_TLB_RANGE) {
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	start = asid | (start >> 12);
	end = asid | (end >> 12);

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
		if (last_level) {
			__tlbi(vale1is, addr);
			__tlbi_user(vale1is, addr);
		} else {
			__tlbi(vae1is, addr);
			__tlbi_user(vae1is, addr);
		}
	}
	dsb(ish);
}
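
/*
 * Note on the loop stride above: the TLBI operand encodes VA >> 12, so
 * stepping one page means adding PAGE_SIZE >> 12, i.e.
 * 1 << (PAGE_SHIFT - 12): 1 for 4K pages, 4 for 16K and 16 for 64K.
 * The last_level form (VALE1IS) only drops leaf entries, leaving cached
 * intermediate walk entries intact.
 */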

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__flush_tlb_range(vma, start, end, false);
}

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if ((end - start) > MAX_TLB_RANGE) {
		flush_tlb_all();
		return;
	}

	start >>= 12;
	end >>= 12;

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		__tlbi(vaae1is, addr);
	dsb(ish);
	isb();
}
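
/*
 * Usage sketch (hypothetical caller): after tearing down a kernel VA
 * mapping, e.g. in a vmalloc-style unmap path:
 *
 *	unmap_kernel_range_noflush(addr, size);
 *	flush_tlb_kernel_range(addr, addr + size);
 *
 * VAAE1IS is used above because kernel mappings are global and carry no
 * ASID, so the invalidation must match entries for any ASID.
 */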

/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).
 */
static inline void __flush_tlb_pgtable(struct mm_struct *mm,
				       unsigned long uaddr)
{
	unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);

	__tlbi(vae1is, addr);
	__tlbi_user(vae1is, addr);
	dsb(ish);
}
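
/*
 * A minimal usage sketch (hypothetical caller): after unlinking a page
 * table page, drop any cached walk entries for it before the page is
 * freed:
 *
 *	pmd_clear(pmdp);
 *	__flush_tlb_pgtable(mm, addr);
 *	pte_free(mm, pte_page);
 */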

#endif /* __ASSEMBLY__ */

#endif /* __ASM_TLBFLUSH_H */