/*
 * Based on arch/arm/include/asm/tlb.h
 *
 * Copyright (C) 2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_TLB_H
#define __ASM_TLB_H

#include <asm-generic/tlb.h>
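
/*
 * <asm-generic/tlb.h> supplies struct mmu_gather and the generic
 * gather/flush machinery (tlb_gather_mmu(), tlb_finish_mmu(),
 * tlb_remove_page(), ...); the hooks below provide the arm64-specific
 * behaviour it expects.
 */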

/*
 * There are three ways the TLB shootdown code is used:
 *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
 *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
 *  2. Unmapping all vmas.  See exit_mmap().
 *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
 *     Page tables will be freed.
 *  3. Unmapping argument pages.  See shift_arg_pages().
 *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
 * A sketch of the resulting gather/flush lifecycle follows tlb_flush()
 * below.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		/* The whole address space is going away: flush everything. */
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end > 0) {
		/*
		 * flush_tlb_range() takes a vma rather than an mm, so wrap
		 * the gathered range in a stack vma, then reset the range
		 * ready for the next gather.
		 */
		struct vm_area_struct vma = { .vm_mm = tlb->mm, };
		flush_tlb_range(&vma, tlb->start, tlb->end);
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
}
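
/*
 * Illustrative sketch (not compiled; a minimal outline of the generic
 * mmu_gather API of this era, with mm, vma, start and end standing in
 * for the caller's values) of how the cases above drive the hooks in
 * this file:
 */
#if 0
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* fullmm for a whole-mm teardown */
	tlb_start_vma(&tlb, vma);		/* reset the gathered range */
	/* ptes are cleared here, each clear calling tlb_remove_tlb_entry() */
	tlb_end_vma(&tlb, vma);			/* ranged flush of what was gathered */
	tlb_finish_mmu(&tlb, start, end);	/* final flush and freeing of pages */
#endif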

/*
 * Grow the range pending for the next ranged flush to cover @addr.
 * For example (hypothetical values, 4K pages): after tlb_add_flush(tlb,
 * 0x1000) and tlb_add_flush(tlb, 0x3000), the pending range is
 * [0x1000, 0x4000). Full-mm teardowns flush the whole mm instead, so no
 * range is tracked for them.
 */
static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
	if (!tlb->fullmm) {
		tlb->start = min(tlb->start, addr);
		tlb->end = max(tlb->end, addr + PAGE_SIZE);
	}
}

/*
 * Record the range for the TLB flush: each removed pte widens the range
 * that the eventual tlb_flush() must cover.
 */
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}
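
/*
 * Illustrative sketch (not compiled) of how the hook above is reached:
 * the generic tlb_remove_tlb_entry() in <asm-generic/tlb.h> calls
 * __tlb_remove_tlb_entry() once a pte has been cleared, roughly:
 */
#if 0
	ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
	tlb_remove_tlb_entry(tlb, pte, addr);	/* records addr via the hook */
#endif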

/*
 * For the vma handling hooks, we can optimise these away when doing a
 * full MM flush.  When doing a munmap, the vmas are adjusted to cover
 * only the region being torn down.
 */
static inline void tlb_start_vma(struct mmu_gather *tlb,
				 struct vm_area_struct *vma)
{
	if (!tlb->fullmm) {
		/* Empty range: start above any possible address, end at 0. */
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
}

static inline void tlb_end_vma(struct mmu_gather *tlb,
			       struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		tlb_flush(tlb);
}
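
/*
 * Illustrative sketch (not compiled) of the per-vma pattern from case 1
 * above, with tlb, vma, start and end standing in for the caller's
 * values:
 */
#if 0
	tlb_start_vma(&tlb, vma);	/* reset the gathered range */
	/* ... zap the ptes in [start, end); each widens tlb->start/end ... */
	tlb_end_vma(&tlb, vma);		/* ranged flush, if anything was zapped */
#endif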

/*
 * Free a pte page: undo the pgtable_page_ctor() accounting, note the
 * address for the pending flush and queue the page so it is only freed
 * once the TLB no longer references it.
 */
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				  unsigned long addr)
{
	pgtable_page_dtor(pte);
	tlb_add_flush(tlb, addr);
	tlb_remove_page(tlb, pte);
}
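
/*
 * Illustrative sketch (not compiled) of the call site in the generic
 * free_pte_range() of this era, which reaches the hook above through
 * the pte_free_tlb() wrapper:
 */
#if 0
	pgtable_t token = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);	/* expands to __pte_free_tlb() */
#endif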

/*
 * With 64K pages arm64 has only two translation levels at this point in
 * time, so the pmd is folded into the pgd and there is no pmd page to
 * free.
 */
#ifndef CONFIG_ARM64_64K_PAGES
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long addr)
{
	tlb_add_flush(tlb, addr);
	tlb_remove_page(tlb, virt_to_page(pmdp));
}
#endif

#endif