xref: /openbmc/linux/arch/arm64/include/asm/tlb.h (revision 3c6a73cc)
/*
 * Based on arch/arm/include/asm/tlb.h
 *
 * Copyright (C) 2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_TLB_H
#define __ASM_TLB_H

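/*
 * Define __tlb_remove_pmd_tlb_entry before pulling in asm-generic/tlb.h so
 * that the generic header uses the huge-PMD variant provided at the bottom
 * of this file rather than its own fallback.
 */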
#define __tlb_remove_pmd_tlb_entry __tlb_remove_pmd_tlb_entry

#include <asm-generic/tlb.h>

#include <linux/pagemap.h>
#include <linux/swap.h>

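/*
 * tlb_remove_entry() is how page-table pages are handed back to the
 * mmu_gather batching code.  With CONFIG_HAVE_RCU_TABLE_FREE the page is
 * queued via tlb_remove_table() and only freed once it is safe with
 * respect to concurrent lockless page-table walkers; otherwise it is
 * freed like any other page via tlb_remove_page().
 */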
#ifdef CONFIG_HAVE_RCU_TABLE_FREE

#define tlb_remove_entry(tlb, entry)	tlb_remove_table(tlb, entry)
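/*
 * Called by the RCU table-free code once it is safe to actually release a
 * page-table page: drop any swap cache reference and free the page.
 */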
static inline void __tlb_remove_table(void *_table)
{
	free_page_and_swap_cache((struct page *)_table);
}
#else
#define tlb_remove_entry(tlb, entry)	tlb_remove_page(tlb, entry)
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */

/*
 * There are three ways the TLB shootdown code is used:
 *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
 *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
 *  2. Unmapping all vmas.  See exit_mmap().
 *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
 *     Page tables will be freed.
 *  3. Unmapping argument pages.  See shift_arg_pages().
 *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
 */
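/*
 * Flush whatever the gather has accumulated: the whole mm for a full-mm
 * teardown, otherwise just the recorded [start, end) range, using a
 * temporary on-stack vma purely to carry tlb->mm into flush_tlb_range().
 * The range is then reset so a later gather starts from scratch.
 */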
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end > 0) {
		struct vm_area_struct vma = { .vm_mm = tlb->mm, };
		flush_tlb_range(&vma, tlb->start, tlb->end);
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
}

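/*
 * Grow the range that the next tlb_flush() will invalidate so that it
 * covers @addr.  For a full-mm flush there is nothing to track, since the
 * whole address space gets flushed anyway.
 */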
static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
	if (!tlb->fullmm) {
		tlb->start = min(tlb->start, addr);
		tlb->end = max(tlb->end, addr + PAGE_SIZE);
	}
}

/*
 * Memorize the range for the TLB flush.
 */
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

/*
 * For tlb vma handling we can optimise these away when doing a full MM
 * flush.  When doing a munmap, the vmas are adjusted to cover only the
 * region being torn down.
 */
static inline void tlb_start_vma(struct mmu_gather *tlb,
				 struct vm_area_struct *vma)
{
	if (!tlb->fullmm) {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
}

static inline void tlb_end_vma(struct mmu_gather *tlb,
			       struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		tlb_flush(tlb);
}

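/*
 * Tear down a PTE-level page-table page: run the pgtable destructor, make
 * sure the address range it covered gets flushed, then hand the page to
 * tlb_remove_entry() for (possibly deferred) freeing.
 */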
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				  unsigned long addr)
{
	pgtable_page_dtor(pte);
	tlb_add_flush(tlb, addr);
	tlb_remove_entry(tlb, pte);
}

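/*
 * PMD and PUD tables only exist as separate pages when the kernel is
 * configured with more than two (respectively three) page-table levels;
 * otherwise these levels are folded and there is nothing to free here.
 */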
#if CONFIG_ARM64_PGTABLE_LEVELS > 2
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long addr)
{
	tlb_add_flush(tlb, addr);
	tlb_remove_entry(tlb, virt_to_page(pmdp));
}
#endif

#if CONFIG_ARM64_PGTABLE_LEVELS > 3
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
				  unsigned long addr)
{
	tlb_add_flush(tlb, addr);
	tlb_remove_entry(tlb, virt_to_page(pudp));
}
#endif

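/*
 * Huge-page (PMD-mapped) counterpart of __tlb_remove_tlb_entry(): just
 * record the address so the covering range gets flushed.
 */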
static inline void __tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp,
					      unsigned long address)
{
	tlb_add_flush(tlb, address);
}

#endif