xref: /openbmc/linux/arch/ia64/include/asm/tlb.h (revision 791d3ef2)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_TLB_H
#define _ASM_IA64_TLB_H
/*
 * Based on <asm-generic/tlb.h>.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
/*
 * Removing a translation from a page table (including TLB-shootdown) is a four-step
 * procedure:
 *
 *	(1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
 *	    (this is a no-op on ia64).
 *	(2) Clear the relevant portions of the page-table
 *	(3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs
 *	(4) Release the pages that were freed up in step (2).
 *
 * Note that the ordering of these steps is crucial to avoid races on MP machines.
 *
 * The Linux kernel defines several platform-specific hooks for TLB-shootdown.  When
 * unmapping a portion of the virtual address space, these hooks are called according to
 * the following template:
 *
 *	tlb <- tlb_gather_mmu(mm, start, end);		// start unmap for address space MM
 *	{
 *	  for each vma that needs a shootdown do {
 *	    tlb_start_vma(tlb, vma);
 *	      for each page-table-entry PTE that needs to be removed do {
 *		tlb_remove_tlb_entry(tlb, pte, address);
 *		if (pte refers to a normal page) {
 *		  tlb_remove_page(tlb, page);
 *		}
 *	      }
 *	    tlb_end_vma(tlb, vma);
 *	  }
 *	}
 *	tlb_finish_mmu(tlb, start, end);	// finish unmap for address space MM
 */
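/*
 * For orientation, an informal mapping of the template above onto the helpers
 * defined below: tlb_remove_tlb_entry() records the affected range via
 * __tlb_remove_tlb_entry(), tlb_remove_page() batches pages for later release
 * via __tlb_remove_page(), step (3) is carried out by
 * ia64_tlb_flush_mmu_tlbonly(), step (4) by ia64_tlb_flush_mmu_free(), and
 * tlb_finish_mmu() ends up in arch_tlb_finish_mmu(), which performs both.
 */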
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/machvec.h>

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define	IA64_GATHER_BUNDLE	8

struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		nr;
	unsigned int		max;
	unsigned char		fullmm;		/* non-zero means full mm flush */
	unsigned char		need_flush;	/* really unmapped some PTEs? */
	unsigned long		start, end;
	unsigned long		start_addr;
	unsigned long		end_addr;
	struct page		**pages;
	struct page		*local[IA64_GATHER_BUNDLE];
};

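/*
 * A short sketch of how the batching fields above fit together (see the
 * helpers below): pages initially points at the IA64_GATHER_BUNDLE-entry
 * local[] array, __tlb_alloc_page() may later upgrade it to a whole page of
 * pointers, nr counts the pages gathered so far, and max is the capacity at
 * which tlb_remove_page() forces a flush.
 */
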
struct ia64_tr_entry {
	u64 ifa;
	u64 itir;
	u64 pte;
	u64 rr;
}; /* Record of a translation register (TR) entry. */

extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
extern void ia64_ptr_entry(u64 target_mask, int slot);

extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];

/*
 * Region register macros.
 */
#define RR_TO_VE(val)   (((val) >> 0) & 0x0000000000000001)
#define RR_VE(val)	(((val) & 0x0000000000000001) << 0)
#define RR_VE_MASK	0x0000000000000001L
#define RR_VE_SHIFT	0
#define RR_TO_PS(val)	(((val) >> 2) & 0x000000000000003f)
#define RR_PS(val)	(((val) & 0x000000000000003f) << 2)
#define RR_PS_MASK	0x00000000000000fcL
#define RR_PS_SHIFT	2
#define RR_RID_MASK	0x00000000ffffff00L
#define RR_TO_RID(val)	(((val) >> 8) & 0xffffff)

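/*
 * Illustrative only (a sketch of how the accessors above decode a region
 * register value, assuming the ia64_get_rr() intrinsic):
 *
 *	u64 rr  = ia64_get_rr(addr);	// RR covering virtual address 'addr'
 *	int ve  = RR_TO_VE(rr);		// VHPT walker enable bit
 *	int ps  = RR_TO_PS(rr);		// preferred page size, log2 bytes
 *	u32 rid = RR_TO_RID(rr);	// region ID
 */
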
static inline void
ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb->need_flush = 0;

	if (tlb->fullmm) {
		/*
		 * Tearing down the entire address space.  This happens both as a result
		 * of exit() and execve().  The latter case necessitates the call to
		 * flush_tlb_mm() here.
		 */
		flush_tlb_mm(tlb->mm);
	} else if (unlikely (end - start >= 1024*1024*1024*1024UL
			     || REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
	{
		/*
		 * If we flush more than a tera-byte or across regions, we're probably
		 * better off just flushing the entire TLB(s).  This should be very rare
		 * and is not worth optimizing for.
		 */
		flush_tlb_all();
	} else {
		/*
		 * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
		 * vma pointer.
		 */
		struct vm_area_struct vma;

		vma.vm_mm = tlb->mm;
		/* flush the address range from the tlb: */
		flush_tlb_range(&vma, start, end);
		/* now flush the virt. page-table area mapping the address range: */
		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
	}

}

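/*
 * For example: a partial munmap() of a range within one region takes the
 * flush_tlb_range() path above (plus the virtual page-table flush); tearing
 * down a whole address space at exit()/execve() takes flush_tlb_mm(); and a
 * range of a terabyte or more, or one spanning regions, falls back to
 * flush_tlb_all().
 */
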
static inline void
ia64_tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	unsigned long i;
	unsigned int nr;

	/* lastly, release the freed pages */
	nr = tlb->nr;

	tlb->nr = 0;
	tlb->start_addr = ~0UL;
	for (i = 0; i < nr; ++i)
		free_page_and_swap_cache(tlb->pages[i]);
}

/*
 * Flush the TLB for address range START to END and release the freed pages
 * that were gathered up to this point.
 */
static inline void
ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (!tlb->need_flush)
		return;
	ia64_tlb_flush_mmu_tlbonly(tlb, start, end);
	ia64_tlb_flush_mmu_free(tlb);
}

/*
 * Try to replace the small on-stack page-pointer bundle with a full page of
 * pointers; if the allocation fails, we simply keep batching into tlb->local[].
 */
static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(void *);
	}
}


static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
	/* A full-mm flush is requested as start == 0 && end == ~0UL (i.e., -1). */
	tlb->fullmm = !(start | (end+1));
	tlb->start = start;
	tlb->end = end;
	tlb->start_addr = ~0UL;
}

/*
 * Called at the end of the shootdown operation to free up any resources that were
 * collected.
 */
static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
			unsigned long start, unsigned long end, bool force)
{
	if (force)
		tlb->need_flush = 1;
	/*
	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
	 * tlb->end_addr.
	 */
	ia64_tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}

/*
 * Logically, this routine frees PAGE.  On MP machines, the actual freeing of the page
 * must be delayed until after the TLB has been flushed (see comments at the beginning of
 * this file).
 */
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;

	if (!tlb->nr && tlb->pages == tlb->local)
		__tlb_alloc_page(tlb);

	tlb->pages[tlb->nr++] = page;
	VM_WARN_ON(tlb->nr > tlb->max);
	if (tlb->nr == tlb->max)
		return true;
	return false;
}

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr);
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu_free(tlb);
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	return tlb_remove_page(tlb, page);
}

/*
 * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
 * PTE, not just those pointing to (normal) physical memory.
 */
static inline void
__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
	if (tlb->start_addr == ~0UL)
		tlb->start_addr = address;
	tlb->end_addr = address + PAGE_SIZE;
}

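/*
 * Note that tlb->start_addr and tlb->end_addr accumulate the range of mappings
 * removed so far; tlb_flush_mmu() and tlb_flush_mmu_tlbonly() above hand that
 * range to the ia64_tlb_flush_mmu*() helpers.
 */
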
#define tlb_migrate_finish(mm)	platform_tlb_migrate_finish(mm)

#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)

#define tlb_remove_tlb_entry(tlb, ptep, addr)		\
do {							\
	tlb->need_flush = 1;				\
	__tlb_remove_tlb_entry(tlb, ptep, addr);	\
} while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)

#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
}

#define pte_free_tlb(tlb, ptep, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pte_free_tlb(tlb, ptep, address);		\
} while (0)

#define pmd_free_tlb(tlb, ptep, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pmd_free_tlb(tlb, ptep, address);		\
} while (0)

#define pud_free_tlb(tlb, pudp, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pud_free_tlb(tlb, pudp, address);		\
} while (0)

#endif /* _ASM_IA64_TLB_H */