/*
 *  arch/arm/include/asm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Experimentation shows that on a StrongARM, it appears to be faster
 *  to use the "invalidate whole tlb" rather than "invalidate single
 *  tlb" for this.
 *
 *  This appears true for both the process fork+exit case and the
 *  munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>

#ifndef CONFIG_MMU

#include <linux/pagemap.h>

#define tlb_flush(tlb)	((void) tlb)

#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * We need to delay page freeing for SMP as other CPUs can access pages
 * which have been removed but not yet had their TLB entries invalidated.
 * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
 * we need to apply this same delaying tactic to ensure correct operation.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
#define tlb_fast_mode(tlb)	0
#else
#define tlb_fast_mode(tlb)	1
#endif
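
/*
 * When tlb_fast_mode() is 1, pages can be freed as soon as they are
 * unmapped: __tlb_remove_page() calls free_page_and_swap_cache()
 * directly.  When it is 0, pages are batched in tlb->pages and only
 * freed by tlb_flush_mmu(), after the TLB flush has made them
 * unreachable to other CPUs and to speculative prefetch.
 */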

#define MMU_GATHER_BUNDLE	8

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		fullmm;
	struct vm_area_struct	*vma;
	unsigned long		range_start;
	unsigned long		range_end;
	unsigned int		nr;
	unsigned int		max;
	struct page		**pages;
	struct page		*local[MMU_GATHER_BUNDLE];
};
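
/*
 * Field summary:
 *   mm          - the address space being torn down.
 *   fullmm      - non-zero when the whole mm is going away (exit_mmap()).
 *   vma         - current vma for ranged flushes; NULL in the
 *                 shift_arg_pages() case.
 *   range_start/range_end - accumulated virtual range still to be flushed.
 *   nr/max      - number of batched pages and the batch capacity.
 *   pages       - points at local[] until __tlb_alloc_page() upgrades it
 *                 to a whole page, raising max from MMU_GATHER_BUNDLE to
 *                 PAGE_SIZE / sizeof(struct page *).
 */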

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * This is unnecessarily complex.  There are three ways the TLB shootdown
 * code is used:
 *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
 *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.
 *  2. Unmapping all vmas.  See exit_mmap().
 *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.  Additionally, page tables will be freed.
 *  3. Unmapping argument pages.  See shift_arg_pages().
 *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
 *     tlb->vma will be NULL.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || !tlb->vma)
		flush_tlb_mm(tlb->mm);
	else if (tlb->range_end > 0) {
		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
	if (!tlb->fullmm) {
		if (addr < tlb->range_start)
			tlb->range_start = addr;
		if (addr + PAGE_SIZE > tlb->range_end)
			tlb->range_end = addr + PAGE_SIZE;
	}
}
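
/*
 * tlb_add_flush() grows the pending range monotonically: range_start
 * only ever moves down, range_end only up.  The (TASK_SIZE, 0) pair set
 * by tlb_start_vma() and reset by tlb_flush() is the "empty" state, so a
 * single unmapped page at addr yields the range [addr, addr + PAGE_SIZE).
 * Full-mm teardowns skip the bookkeeping, since tlb_flush() will use
 * flush_tlb_mm() anyway.
 */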

static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(struct page *);
	}
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush(tlb);
	if (!tlb_fast_mode(tlb)) {
		free_pages_and_swap_cache(tlb->pages, tlb->nr);
		tlb->nr = 0;
		if (tlb->pages == tlb->local)
			__tlb_alloc_page(tlb);
	}
}

static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
{
	tlb->mm = mm;
	tlb->fullmm = fullmm;
	tlb->vma = NULL;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
	__tlb_alloc_page(tlb);
}

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}
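
/*
 * Illustrative caller sequence (a sketch of use 1 above, as driven by
 * the generic mm code; not part of this header itself):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, 0);
 *	tlb_start_vma(&tlb, vma);
 *	tlb_remove_tlb_entry(&tlb, ptep, addr);	(once per unmapped pte)
 *	tlb_remove_page(&tlb, page);		(once per unmapped page)
 *	tlb_end_vma(&tlb, vma);
 *	tlb_finish_mmu(&tlb, start, end);
 */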

/*
 * Memorize the range for the TLB flush.
 */
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

/*
 * For tlb vma handling, we can optimise these away when we're doing a
 * full MM flush.  When we're doing a munmap, the vmas are adjusted to
 * only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm) {
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
		tlb->vma = vma;
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		tlb_flush(tlb);
}

static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (tlb_fast_mode(tlb)) {
		free_page_and_swap_cache(page);
		return 1; /* avoid calling tlb_flush_mmu */
	}

	tlb->pages[tlb->nr++] = page;
	VM_BUG_ON(tlb->nr > tlb->max);
	return tlb->max - tlb->nr;
}
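
/*
 * __tlb_remove_page() returns the number of batch slots still free (or
 * a non-zero dummy value in fast mode), so a zero return means "batch
 * full"; tlb_remove_page() uses that to trigger tlb_flush_mmu().
 */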

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (!__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
	unsigned long addr)
{
	pgtable_page_dtor(pte);

#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
#else
	/*
	 * With the classic ARM MMU, a pte page has two corresponding pmd
	 * entries, each covering 1MB.
	 */
	addr &= PMD_MASK;
	tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
	tlb_add_flush(tlb, addr + SZ_1M);
#endif

	tlb_remove_page(tlb, pte);
}
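
/*
 * Worked example for the classic MMU path: after addr &= PMD_MASK, the
 * pte page covers two 1MB sections starting at addr.  The two
 * tlb_add_flush() calls mark the last page of the first section and the
 * first page of the second, so the eventual flush_tlb_range() covers an
 * address in each section and hence both pmd entries' translations.
 */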

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long addr)
{
#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
	tlb_remove_page(tlb, virt_to_page(pmdp));
#endif
}
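
/*
 * Without LPAE the pmd level is folded into the pgd on ARM's two-level
 * tables, so there is no separate pmd page to free: __pmd_free_tlb() is
 * deliberately empty in that configuration.
 */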

#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr)	__pmd_free_tlb(tlb, pmdp, addr)
#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)

#define tlb_migrate_finish(mm)		do { } while (0)

#endif /* CONFIG_MMU */
#endif