/* include/asm-generic/tlb.h
 *
 *	Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, free the page. Since the disabling of
 * IRQs delays the completion of the TLB flush we can never observe an already
 * freed page.
 *
 * Architectures that do not have this (PPC) need to delay the freeing by some
 * other means; this is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage; this
 * allocation is deep inside the MM code and can thus easily fail on memory
 * pressure. To guarantee progress we fall back to single table freeing, see
 * the implementation of tlb_remove_table_one().
 *
 */
struct mmu_table_batch {
	struct rcu_head		rcu;
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#endif
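
/*
 * A hedged usage sketch (not part of this header): an architecture selecting
 * CONFIG_HAVE_RCU_TABLE_FREE typically routes its page-table frees through
 * tlb_remove_table() instead of freeing immediately, so the real free is
 * deferred past an RCU-sched grace period. The __pte_free_tlb() shape and the
 * pgtable_page_dtor() call below are illustrative only; each architecture has
 * its own helpers and decides what the opaque 'table' pointer means to its
 * __tlb_remove_table() callback.
 *
 *	#define __pte_free_tlb(tlb, pte, address)	\
 *	do {						\
 *		pgtable_page_dtor(pte);			\
 *		tlb_remove_table((tlb), (pte));		\
 *	} while (0)
 */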

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[0];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce the risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
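
/*
 * Worked example (an illustrative calculation, not a definition used by the
 * code): on a 64-bit kernel with 4K pages, sizeof(struct mmu_gather_batch)
 * is 16 bytes, so MAX_GATHER_BATCH is (4096 - 16) / 8 = 510 pages per batch
 * and MAX_GATHER_BATCH_COUNT is 10000 / 510 = 19 batches, i.e. roughly 10K
 * pages gathered before a forced flush, as the comment above intends. The
 * exact values vary with PAGE_SIZE and pointer size.
 */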

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif
	unsigned long		start;
	unsigned long		end;
	/* we are in the middle of an operation to clear
	 * a full mm and can make some optimizations */
	unsigned int		fullmm : 1,
	/* we have performed an operation which
	 * requires a complete flush of the tlb */
				need_flush_all : 1;

	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];
	unsigned int		batch_count;
	int page_size;
};

#define HAVE_GENERIC_MMU_GATHER

void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
void tlb_flush_mmu(struct mmu_gather *tlb);
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
							unsigned long end);
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   int page_size);
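
/*
 * A minimal lifecycle sketch (illustrative only; 'mm', 'vma', 'start' and
 * 'end' are the caller's, and the middle step is filled in by the real unmap
 * code, see the per-PTE sketch after tlb_remove_tlb_entry() below):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	tlb_start_vma(&tlb, vma);
 *	... clear PTEs and queue the underlying pages on &tlb ...
 *	tlb_end_vma(&tlb, vma);
 *	tlb_finish_mmu(&tlb, start, end);
 *
 * tlb_finish_mmu() performs the final TLB flush, if one is still pending,
 * and frees the gathered pages.
 */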

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

#ifndef tlb_remove_check_page_size_change
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
	/*
	 * We don't care about the page size change itself; just update the
	 * mmu_gather page size here so that the debug checks don't throw
	 * false warnings.
	 */
#ifdef CONFIG_DEBUG_VM
	tlb->page_size = page_size;
#endif
}
#endif

/*
 * For TLB vma handling, we can optimise these away when we're doing a
 * full MM flush.  When we're doing a munmap, the vmas are adjusted to only
 * cover the region to be torn down.
 */
#ifndef tlb_start_vma
#define tlb_start_vma(tlb, vma) do { } while (0)
#endif

#define __tlb_end_vma(tlb, vma)					\
	do {							\
		if (!tlb->fullmm && tlb->end) {			\
			tlb_flush(tlb);				\
			__tlb_reset_range(tlb);			\
		}						\
	} while (0)

#ifndef tlb_end_vma
#define tlb_end_vma	__tlb_end_vma
#endif
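
/*
 * A hedged sketch of why tlb_start_vma()/tlb_end_vma() are override points:
 * an architecture with a virtually indexed cache might define something like
 * the following (illustrative only, not a definition provided here), while
 * its tlb_end_vma() would flush the TLB range for the vma, which is what
 * __tlb_end_vma() above already does for the accumulated range.
 *
 *	#define tlb_start_vma(tlb, vma)					\
 *	do {								\
 *		if (!(tlb)->fullmm)					\
 *			flush_cache_range(vma, (vma)->vm_start,		\
 *					  (vma)->vm_end);		\
 *	} while (0)
 */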

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate.  This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	     \
	do {							     \
		__tlb_adjust_range(tlb, address, huge_page_size(h)); \
		__tlb_remove_tlb_entry(tlb, ptep, address);	     \
	} while (0)
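
/*
 * A hedged per-PTE sketch (the ptep_get_and_clear_full() and pte_page()
 * steps belong to the caller's zap loop, not to this header; only the tlb_*
 * calls are defined here): clear the PTE first, record the address so the
 * flush range grows, then hand the page to the gather batch and flush when
 * the batch fills up.
 *
 *	pte = ptep_get_and_clear_full(mm, addr, ptep, tlb->fullmm);
 *	tlb_remove_tlb_entry(tlb, ptep, addr);
 *	page = pte_page(pte);
 *	if (__tlb_remove_page(tlb, page))
 *		tlb_flush_mmu(tlb);
 */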

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE);	\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		__tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE);	\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)

/*
 * For things like page table caches (i.e. caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it. This
 * is definitely how x86 works, for example. And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion.
 *
 * So if we ever find an architecture that would want something that odd,
 * I think it is up to that architecture to do its own odd thing, not
 * cause pain for others.
 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now, w.r.t. the page table cache, mark the range_size as PAGE_SIZE.
 */

#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)

#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
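
/*
 * A hedged sketch of the architecture side (illustrative; an architecture
 * that does not use CONFIG_HAVE_RCU_TABLE_FREE commonly makes
 * __pte_free_tlb() hand the page-table page to the ordinary gather batch,
 * so it is freed together with the data pages after the TLB flush):
 *
 *	#define __pte_free_tlb(tlb, pte, address)	\
 *	do {						\
 *		pgtable_page_dtor(pte);			\
 *		tlb_remove_page((tlb), (pte));		\
 *	} while (0)
 */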

#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */