xref: /openbmc/linux/arch/arm64/include/asm/tlbflush.h (revision bbde9fc1824aab58bc78c084163007dd6c03fe5b)
/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <asm/cputype.h>

/*
 *	TLB Management
 *	==============
 *
 *	The TLB specific code is expected to perform whatever tests it needs
 *	to determine if it should invalidate the TLB for each call.  Start
 *	addresses are inclusive and end addresses are exclusive; it is safe to
 *	round these addresses down.
 *
 *	flush_tlb_all()
 *
 *		Invalidate the entire TLB.
 *
 *	flush_tlb_mm(mm)
 *
 *		Invalidate all TLB entries in a particular address space.
 *		- mm	- mm_struct describing address space
 *
 *	flush_tlb_range(vma,start,end)
 *
 *		Invalidate a range of TLB entries in the specified address
 *		space.
 *		- vma	- vm_area_struct describing address range
 *		- start - start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 *
 *	flush_tlb_page(vma,vaddr)
 *
 *		Invalidate the specified page in the specified address
 *		space.
 *		- vma	- vm_area_struct describing address range
 *		- vaddr - virtual address (may not be aligned)
 *
 *	flush_tlb_kernel_range(start,end)
 *
 *		Invalidate a range of TLB entries for kernel mappings.  The
 *		addresses are in the kernel's virtual memory space.
 *		- start - start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 */
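/*
 * All of the TLBI operations below use the Inner Shareable ("is") variants,
 * so the invalidation is broadcast to every CPU in the Inner Shareable
 * domain.  The surrounding barriers follow the usual ARMv8 pattern: a
 * DSB ISHST makes prior page table updates visible before the TLBI, a
 * DSB ISH waits for the invalidation to complete, and an ISB (where
 * present) synchronises the instruction stream with the new translations.
 */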
static inline void flush_tlb_all(void)
{
	dsb(ishst);
	asm("tlbi	vmalle1is");
	dsb(ish);
	isb();
}

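/*
 * Invalidate by ASID: the TLBI ASIDE1IS operand carries the ASID in
 * bits [63:48], which is what the shift below constructs.
 */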
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid = (unsigned long)ASID(mm) << 48;

	dsb(ishst);
	asm("tlbi	aside1is, %0" : : "r" (asid));
	dsb(ish);
}

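/*
 * Invalidate a single user page by VA and ASID: the TLBI VAE1IS operand
 * takes VA[55:12] in its low bits (hence the shift right by 12) and the
 * ASID in bits [63:48].
 */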
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	unsigned long addr = uaddr >> 12 |
		((unsigned long)ASID(vma->vm_mm) << 48);

	dsb(ishst);
	asm("tlbi	vae1is, %0" : : "r" (addr));
	dsb(ish);
}

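/*
 * Invalidate a user address range one page at a time.  start/end are
 * converted to the TLBI VAE1IS operand format (VA >> 12 with the ASID in
 * bits [63:48]), so the loop steps in units of one page, i.e.
 * PAGE_SIZE >> 12 in that encoding.
 */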
static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end)
{
	unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
	unsigned long addr;

	start = asid | (start >> 12);
	end = asid | (end >> 12);

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		asm("tlbi vae1is, %0" : : "r"(addr));
	dsb(ish);
}

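/*
 * Kernel mappings are global rather than ASID-tagged, so the kernel range
 * variant uses TLBI VAAE1IS, which invalidates the VA for all ASIDs.  The
 * trailing ISB ensures subsequent instructions see the completed
 * invalidation.
 */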
static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start >>= 12;
	end >>= 12;

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		asm("tlbi vaae1is, %0" : : "r"(addr));
	dsb(ish);
	isb();
}

/*
 * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
 * necessarily a performance improvement.
 */
#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)

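/*
 * Ranges larger than MAX_TLB_RANGE (1024 pages) fall back to a full
 * invalidation: by ASID for user mappings, or of the entire TLB for
 * kernel mappings.
 */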
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if ((end - start) <= MAX_TLB_RANGE)
		__flush_tlb_range(vma, start, end);
	else
		flush_tlb_mm(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if ((end - start) <= MAX_TLB_RANGE)
		__flush_tlb_kernel_range(start, end);
	else
		flush_tlb_all();
}

/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).
 */
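/*
 * TLB maintenance by VA is also required to remove cached copies of the
 * intermediate translation table entries used to translate that address,
 * so a single VAE1IS tagged with the ASID covers the walk caches here.
 */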
static inline void __flush_tlb_pgtable(struct mm_struct *mm,
				       unsigned long uaddr)
{
	unsigned long addr = uaddr >> 12 | ((unsigned long)ASID(mm) << 48);

	dsb(ishst);
	asm("tlbi	vae1is, %0" : : "r" (addr));
	dsb(ish);
}

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * set_pte() does not have a DSB for user mappings, so make sure that
	 * the page table write is visible.
	 */
	dsb(ishst);
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#endif

#endif