xref: /openbmc/linux/arch/arm64/include/asm/tlb.h (revision 3932b9ca)
/*
 * Based on arch/arm/include/asm/tlb.h
 *
 * Copyright (C) 2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_TLB_H
#define __ASM_TLB_H

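/*
 * Advertise, before pulling in the generic mmu_gather code, that this
 * architecture supplies its own __tlb_remove_pmd_tlb_entry() hook for
 * huge (PMD-level) mappings; the implementation is at the bottom of
 * this file.
 */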
#define __tlb_remove_pmd_tlb_entry __tlb_remove_pmd_tlb_entry

#include <asm-generic/tlb.h>

/*
 * There are three ways the TLB shootdown code is used:
 *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
 *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
 *  2. Unmapping all vmas.  See exit_mmap().
 *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
 *     Page tables will be freed.
 *  3. Unmapping argument pages.  See shift_arg_pages().
 *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
 */
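/*
 * A minimal sketch of how callers drive the gather (modelled on
 * mm/memory.c and mm/mmap.c of this kernel; illustrative only):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);	   fullmm when the whole mm goes
 *	unmap_vmas(&tlb, vma, start, end);	   calls tlb_start_vma()/tlb_end_vma()
 *	free_pgtables(&tlb, vma, floor, ceiling);  frees tables via __p*_free_tlb()
 *	tlb_finish_mmu(&tlb, start, end);	   final tlb_flush(), pages released
 */

/*
 * Flush the TLB for the gathered range.  A full-mm teardown invalidates
 * the whole context; otherwise only the accumulated [start, end) window
 * is invalidated and then reset.  flush_tlb_range() only looks at
 * vma->vm_mm, so a dummy on-stack vma is sufficient here.
 */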
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end > 0) {
		struct vm_area_struct vma = { .vm_mm = tlb->mm, };
		flush_tlb_range(&vma, tlb->start, tlb->end);
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
}

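/*
 * Grow the pending flush window to cover [addr, addr + PAGE_SIZE).
 * E.g. after tlb_start_vma() resets the window (start = TASK_SIZE,
 * end = 0), unmapping entries at 0x2000 and 0x5000 leaves
 * start = 0x2000, end = 0x6000.  For a full-mm gather the whole
 * context is flushed anyway, so no range is maintained.
 */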
static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
	if (!tlb->fullmm) {
		tlb->start = min(tlb->start, addr);
		tlb->end = max(tlb->end, addr + PAGE_SIZE);
	}
}

/*
 * Record the range for the deferred TLB flush when a PTE is unmapped.
 */
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

/*
 * For vma handling, these can be optimised away when we're doing a
 * full-MM flush.  When we're doing a munmap, the vmas have already been
 * adjusted to cover only the region being torn down.
 */
static inline void tlb_start_vma(struct mmu_gather *tlb,
				 struct vm_area_struct *vma)
{
	if (!tlb->fullmm) {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
}

static inline void tlb_end_vma(struct mmu_gather *tlb,
			       struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		tlb_flush(tlb);
}

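/*
 * Free a PTE table page: run the page-table destructor, extend the
 * pending flush range, then queue the page on the gather so it is only
 * released once the TLB has been invalidated.
 */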
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				  unsigned long addr)
{
	pgtable_page_dtor(pte);
	tlb_add_flush(tlb, addr);
	tlb_remove_page(tlb, pte);
}

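/*
 * PMD and PUD table pages are separate allocations only when enough
 * translation levels are configured; otherwise the level is folded and
 * there is nothing to free at that level.
 */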
#if CONFIG_ARM64_PGTABLE_LEVELS > 2
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long addr)
{
	tlb_add_flush(tlb, addr);
	tlb_remove_page(tlb, virt_to_page(pmdp));
}
#endif

#if CONFIG_ARM64_PGTABLE_LEVELS > 3
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
				  unsigned long addr)
{
	tlb_add_flush(tlb, addr);
	tlb_remove_page(tlb, virt_to_page(pudp));
}
#endif

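/*
 * Huge page (PMD-level) counterpart of __tlb_remove_tlb_entry(); this
 * is the hook advertised to asm-generic/tlb.h at the top of this file.
 */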
static inline void __tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp,
					      unsigned long address)
{
	tlb_add_flush(tlb, address);
}

#endif