xref: /openbmc/linux/arch/arm/include/asm/tlb.h (revision 06824ba824b3e9f2fedb38bee79af0643198ed7f)
/*
 *  arch/arm/include/asm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Experimentation shows that on a StrongARM, it appears to be faster
 *  to use the "invalidate whole tlb" rather than "invalidate single
 *  tlb" for this.
 *
 *  This appears true for both the process fork+exit case, as well as
 *  the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>

#ifndef CONFIG_MMU

#include <linux/pagemap.h>
#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * We need to delay page freeing for SMP as other CPUs can access pages
 * which have been removed but not yet had their TLB entries invalidated.
 * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
 * we need to apply this same delaying tactic to ensure correct operation.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
#define tlb_fast_mode(tlb)	0
#define FREE_PTE_NR		500
#else
#define tlb_fast_mode(tlb)	1
#define FREE_PTE_NR		0
#endif

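/*
 * Illustrative sketch of the ordering problem that the batching above
 * avoids (an assumed scenario, not code from this file): on SMP another
 * CPU may still hold a stale TLB entry for a page we have just unmapped,
 * so the page must not be handed back to the allocator until the flush
 * has been done:
 *
 *	CPU0				CPU1
 *	pte_clear(...)
 *	free the page			access via stale TLB entry (bad)
 *	flush_tlb_range(...)
 *
 * Queueing up to FREE_PTE_NR pages and freeing them only once
 * tlb_flush_mmu() has run closes this window; the same ordering also
 * guards against ARMv7 speculative prefetch re-populating the TLB.
 */
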
/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		fullmm;
	struct vm_area_struct	*vma;
	unsigned long		range_start;
	unsigned long		range_end;
	unsigned int		nr;
	struct page		*pages[FREE_PTE_NR];
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

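/*
 * A minimal usage sketch (an assumed caller, for illustration only; in
 * practice this interface is driven by generic mm code such as
 * unmap_region()):
 *
 *	struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);
 *
 *	tlb_start_vma(tlb, vma);
 *	...					   for each pte torn down:
 *	tlb_remove_tlb_entry(tlb, ptep, addr);	// record the address
 *	tlb_remove_page(tlb, page);		// free or queue the page
 *	...
 *	tlb_end_vma(tlb, vma);			// flush the recorded range
 *	tlb_finish_mmu(tlb, start, end);	// final flush, release gather
 */
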
/*
 * This is unnecessarily complex.  There are three ways the TLB shootdown
 * code is used:
 *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
 *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.
 *  2. Unmapping all vmas.  See exit_mmap().
 *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.  Additionally, page tables will be freed.
 *  3. Unmapping argument pages.  See shift_arg_pages().
 *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
 *     tlb->vma will be NULL.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || !tlb->vma)
		flush_tlb_mm(tlb->mm);
	else if (tlb->range_end > 0) {
		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

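/*
 * For example (an illustrative mapping of the three cases above onto
 * tlb_flush()):
 *
 *	zap_page_range():  fullmm == 0, vma != NULL  ->  flush_tlb_range()
 *	exit_mmap():       fullmm == 1               ->  flush_tlb_mm()
 *	shift_arg_pages(): fullmm == 0, vma == NULL  ->  flush_tlb_mm()
 */
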
static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
	if (!tlb->fullmm) {
		if (addr < tlb->range_start)
			tlb->range_start = addr;
		if (addr + PAGE_SIZE > tlb->range_end)
			tlb->range_end = addr + PAGE_SIZE;
	}
}

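/*
 * Worked example (illustrative addresses, 4K pages): after tlb_start_vma()
 * has reset the range to [TASK_SIZE, 0), recording 0x8000 and then 0xb000
 * leaves range_start == 0x8000 and range_end == 0xc000, so tlb_flush()
 * only needs flush_tlb_range() over that window rather than the whole mm.
 */
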
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush(tlb);
	if (!tlb_fast_mode(tlb)) {
		free_pages_and_swap_cache(tlb->pages, tlb->nr);
		tlb->nr = 0;
	}
}

static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

	tlb->mm = mm;
	tlb->fullmm = full_mm_flush;
	tlb->vma = NULL;
	tlb->nr = 0;

	return tlb;
}

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	put_cpu_var(mmu_gathers);
}

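/*
 * Note on the per-CPU gather (an assumed constraint on callers, implied by
 * get_cpu_var()/put_cpu_var() above, which disable and re-enable
 * preemption): each tlb_gather_mmu() must be paired with a matching
 * tlb_finish_mmu() on the same CPU, and the code in between must not sleep.
 */
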
/*
 * Memorize the range for the TLB flush.
 */
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

/*
 * For vma handling, we can optimise these away when doing a full MM
 * flush.  When doing a munmap, the vmas are adjusted to cover only the
 * region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm) {
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
		tlb->vma = vma;
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		tlb_flush(tlb);
}

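/*
 * By contrast, for a full-mm teardown (exit_mmap(), tlb->fullmm == 1) both
 * tlb_start_vma() and tlb_end_vma() are no-ops: no per-vma cache or TLB
 * range flushing is done, and the single flush_tlb_mm() in tlb_flush()
 * covers the whole address space.
 */
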
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (tlb_fast_mode(tlb)) {
		free_page_and_swap_cache(page);
	} else {
		tlb->pages[tlb->nr++] = page;
		if (tlb->nr >= FREE_PTE_NR)
			tlb_flush_mmu(tlb);
	}
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
	unsigned long addr)
{
	pgtable_page_dtor(pte);
	tlb_add_flush(tlb, addr);
	tlb_remove_page(tlb, pte);
}

#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)

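/*
 * Illustrative use (an assumed caller from generic page-table teardown,
 * e.g. free_pte_range()): a pte page being freed gets the same treatment
 * as ordinary pages.  Its address is recorded for the TLB flush and the
 * page itself goes through tlb_remove_page():
 *
 *	pte_free_tlb(tlb, token, addr);
 */
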
#define tlb_migrate_finish(mm)		do { } while (0)

#endif /* CONFIG_MMU */
#endif