/*
 *  arch/arm/include/asm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Experimentation shows that on a StrongARM, it appears to be faster
 *  to use the "invalidate whole tlb" rather than "invalidate single
 *  tlb" for this.
 *
 *  This appears true for both the process fork+exit case, as well as
 *  the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>

#ifndef CONFIG_MMU

#include <linux/pagemap.h>

#define tlb_flush(tlb)	((void) tlb)

#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define MMU_GATHER_BUNDLE	8

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		fullmm;
	struct vm_area_struct	*vma;
	unsigned long		range_start;
	unsigned long		range_end;
	unsigned int		nr;
	unsigned int		max;
	struct page		**pages;
	struct page		*local[MMU_GATHER_BUNDLE];
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * This is unnecessarily complex.  There are three ways the TLB shootdown
 * code is used:
 *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
 *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.
 *  2. Unmapping all vmas.  See exit_mmap().
 *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.  Additionally, page tables will be freed.
 *  3. Unmapping argument pages.  See shift_arg_pages().
 *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
 *     tlb->vma will be NULL.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || !tlb->vma)
		flush_tlb_mm(tlb->mm);
	else if (tlb->range_end > 0) {
		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}
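
/*
 * Rough sketch of case 3 above (variable names are illustrative only;
 * see shift_arg_pages() for the real caller).  Since tlb_start_vma()
 * is never called, tlb->vma stays NULL and tlb_flush() falls back to
 * flush_tlb_mm():
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, 0);
 *	... tear down the old argument page tables ...
 *	tlb_finish_mmu(&tlb, old_start, old_end);
 */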

static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
	if (!tlb->fullmm) {
		if (addr < tlb->range_start)
			tlb->range_start = addr;
		if (addr + PAGE_SIZE > tlb->range_end)
			tlb->range_end = addr + PAGE_SIZE;
	}
}
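
/*
 * Worked example (assuming 4KB pages): removing entries at 0x8000 and
 * 0xa000 leaves range_start = 0x8000 and range_end = 0xb000, so the
 * eventual flush_tlb_range() in tlb_flush() covers both pages with a
 * single call.
 */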

static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(struct page *);
	}
}
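
/*
 * With 4KB pages and 4-byte struct page pointers (the usual 32-bit ARM
 * case), a successful allocation here grows the batch from the 8-entry
 * local[] bundle to 1024 entries; on failure we simply keep batching
 * through local[].
 */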

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush(tlb);
	free_pages_and_swap_cache(tlb->pages, tlb->nr);
	tlb->nr = 0;
	if (tlb->pages == tlb->local)
		__tlb_alloc_page(tlb);
}

static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
{
	tlb->mm = mm;
	tlb->fullmm = fullmm;
	tlb->vma = NULL;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
	__tlb_alloc_page(tlb);
}

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}
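
/*
 * A minimal usage sketch for case 1 above (zap_page_range()-style
 * unmapping; names in the loop body are illustrative).  tlb_start_vma()
 * and tlb_end_vma() are defined further down:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, vma->vm_mm, 0);
 *	tlb_start_vma(&tlb, vma);
 *	for each pte in [start, end):
 *		tlb_remove_tlb_entry(&tlb, ptep, addr);
 *		tlb_remove_page(&tlb, page);
 *	tlb_end_vma(&tlb, vma);
 *	tlb_finish_mmu(&tlb, start, end);
 */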

/*
 * Memorize the range for the TLB flush.
 */
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

/*
 * For TLB vma handling, we can optimise these away when we're doing a
 * full MM flush.  When we're doing a munmap, the vmas are adjusted to
 * cover only the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm) {
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
		tlb->vma = vma;
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		tlb_flush(tlb);
}
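
/*
 * Note that in the fullmm case (exit_mmap(), case 2 above) both hooks
 * are no-ops here: the whole address space is being torn down, so a
 * single flush_tlb_mm() from tlb_flush() covers everything.
 */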

static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->pages[tlb->nr++] = page;
	VM_BUG_ON(tlb->nr > tlb->max);
	return tlb->max - tlb->nr;
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (!__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}
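
/*
 * When the batch array fills up, __tlb_remove_page() returns 0 and
 * tlb_remove_page() immediately flushes the TLB and frees the gathered
 * pages via tlb_flush_mmu() before batching continues.
 */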

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
	unsigned long addr)
{
	pgtable_page_dtor(pte);

#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
#else
	/*
	 * With the classic ARM MMU, a pte page has two corresponding pmd
	 * entries, each covering 1MB.
	 */
	addr &= PMD_MASK;
	tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
	tlb_add_flush(tlb, addr + SZ_1M);
#endif

	tlb_remove_page(tlb, pte);
}
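
/*
 * Worked example for the classic MMU path above (illustrative addresses,
 * assuming 4KB pages and 2MB pmd coverage): for a pte page covering
 * 0x00200000-0x003fffff, addr &= PMD_MASK gives 0x00200000, and the two
 * tlb_add_flush() calls add 0x002ff000 and 0x00300000, one page in each
 * of the two 1MB halves, so the eventual flush_tlb_range() spans both
 * of the corresponding pmd entries.
 */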

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long addr)
{
#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
	tlb_remove_page(tlb, virt_to_page(pmdp));
#endif
}

#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr)	__pmd_free_tlb(tlb, pmdp, addr)
#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)

#define tlb_migrate_finish(mm)		do { } while (0)

#endif /* CONFIG_MMU */
#endif