/*
 * Copyright (C) 2016 - ARM Ltd
 *
 * stage2 page table helpers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_S2_PGTABLE_H_
#define __ARM64_S2_PGTABLE_H_

#include <linux/hugetlb.h>
#include <asm/pgtable.h>

/*
 * PGDIR_SHIFT determines the size a top-level page table entry can map
 * and depends on the number of levels in the page table. Compute the
 * PGDIR_SHIFT for a given number of levels.
 */
#define pt_levels_pgdir_shift(lvls)	ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - (lvls))

/*
 * The hardware supports concatenation of up to 16 tables at the stage2
 * entry level and we use the feature whenever possible, which means we
 * resolve 4 additional bits of address at the entry level.
 *
 * This implies that the total number of page table levels required for
 * IPA_SHIFT at stage2 can be calculated using the same logic as for the
 * (non-collapsible) stage1 page tables, but applied to (IPA_SHIFT - 4).
 */
#define stage2_pgtable_levels(ipa)	ARM64_HW_PGTABLE_LEVELS((ipa) - 4)
#define kvm_stage2_levels(kvm)		VTCR_EL2_LVLS((kvm)->arch.vtcr)
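
/*
 * Worked example (assuming a 4K page size, i.e. PAGE_SHIFT == 12):
 * a 40-bit IPA space needs
 *	stage2_pgtable_levels(40) == ARM64_HW_PGTABLE_LEVELS(36)
 *				  == (36 - 4) / (PAGE_SHIFT - 3) == 3
 * levels, where a 40-bit VA at stage1 (no concatenation) would need 4.
 */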

/*
 * stage2_pgdir_shift() is the log2 of the size mapped by a single
 * top-level stage2 entry for the VM.
 */
#define stage2_pgdir_shift(kvm)		pt_levels_pgdir_shift(kvm_stage2_levels(kvm))
#define stage2_pgdir_size(kvm)		(1ULL << stage2_pgdir_shift(kvm))
#define stage2_pgdir_mask(kvm)		~(stage2_pgdir_size(kvm) - 1)
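
/*
 * E.g., with 4K pages and 3 levels, stage2_pgdir_shift() evaluates to
 * 30, so each entry-level descriptor maps 1GB of IPA space.
 */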

/*
 * The number of pointers across all concatenated stage2 tables is given
 * by the number of bits resolved at the initial level.
 * If we force more levels than necessary, we may have
 * (stage2_pgdir_shift > IPA), in which case stage2_pgd_ptrs evaluates
 * to a single entry.
 */
#define pgd_ptrs_shift(ipa, pgdir_shift)	\
	((ipa) > (pgdir_shift) ? ((ipa) - (pgdir_shift)) : 0)
#define __s2_pgd_ptrs(ipa, lvls)		\
	(1 << (pgd_ptrs_shift((ipa), pt_levels_pgdir_shift(lvls))))
#define __s2_pgd_size(ipa, lvls)	(__s2_pgd_ptrs((ipa), (lvls)) * sizeof(pgd_t))

#define stage2_pgd_ptrs(kvm)		__s2_pgd_ptrs(kvm_phys_shift(kvm), kvm_stage2_levels(kvm))
#define stage2_pgd_size(kvm)		__s2_pgd_size(kvm_phys_shift(kvm), kvm_stage2_levels(kvm))
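
/*
 * Worked example (assuming 4K pages): a 40-bit IPA space with 3 levels
 * resolves 40 - 30 = 10 bits at the entry level, i.e. 1024 pointers.
 * With 8-byte pgd_t entries that is an 8K pgd, i.e. two concatenated
 * 4K tables.
 */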

/*
 * kvm_mmu_cache_min_pages() is the number of pages required to install
 * a stage-2 translation. We pre-allocate the entry level page table at
 * VM creation, so at most one page is needed for each of the remaining
 * levels.
 */
#define kvm_mmu_cache_min_pages(kvm)	(kvm_stage2_levels(kvm) - 1)
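
/*
 * For instance, a VM using 3 stage2 levels needs at most two extra
 * table pages to complete a mapping (one PMD-level and one PTE-level
 * table), hence kvm_mmu_cache_min_pages() == 2.
 */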

/* Stage2 PUD definitions when the level is present */
static inline bool kvm_stage2_has_pud(struct kvm *kvm)
{
	return (CONFIG_PGTABLE_LEVELS > 3) && (kvm_stage2_levels(kvm) > 3);
}

#define S2_PUD_SHIFT			ARM64_HW_PGTABLE_LEVEL_SHIFT(1)
#define S2_PUD_SIZE			(1UL << S2_PUD_SHIFT)
#define S2_PUD_MASK			(~(S2_PUD_SIZE - 1))
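
/*
 * E.g., with 4K pages S2_PUD_SHIFT is 30, so a stage2 PUD entry covers
 * 1GB. For VMs with 3 or fewer levels the PUD level is folded into the
 * pgd by the helpers below.
 */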

static inline bool stage2_pgd_none(struct kvm *kvm, pgd_t pgd)
{
	if (kvm_stage2_has_pud(kvm))
		return pgd_none(pgd);
	else
		return false;
}

static inline void stage2_pgd_clear(struct kvm *kvm, pgd_t *pgdp)
{
	if (kvm_stage2_has_pud(kvm))
		pgd_clear(pgdp);
}

static inline bool stage2_pgd_present(struct kvm *kvm, pgd_t pgd)
{
	if (kvm_stage2_has_pud(kvm))
		return pgd_present(pgd);
	else
		return true;
}

static inline void stage2_pgd_populate(struct kvm *kvm, pgd_t *pgd, pud_t *pud)
{
	if (kvm_stage2_has_pud(kvm))
		pgd_populate(NULL, pgd, pud);
}

static inline pud_t *stage2_pud_offset(struct kvm *kvm,
				       pgd_t *pgd, unsigned long address)
{
	if (kvm_stage2_has_pud(kvm))
		return pud_offset(pgd, address);
	else
		return (pud_t *)pgd;
}

static inline void stage2_pud_free(struct kvm *kvm, pud_t *pud)
{
	if (kvm_stage2_has_pud(kvm))
		pud_free(NULL, pud);
}

static inline bool stage2_pud_table_empty(struct kvm *kvm, pud_t *pudp)
{
	if (kvm_stage2_has_pud(kvm))
		return kvm_page_empty(pudp);
	else
		return false;
}

static inline phys_addr_t
stage2_pud_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
	if (kvm_stage2_has_pud(kvm)) {
		phys_addr_t boundary = (addr + S2_PUD_SIZE) & S2_PUD_MASK;

		return (boundary - 1 < end - 1) ? boundary : end;
	} else {
		return end;
	}
}
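
/*
 * Note on the addr_end helpers here and below: comparing
 * (boundary - 1 < end - 1) rather than (boundary < end) keeps the
 * result correct if the boundary calculation wraps to 0 at the very
 * top of the address range.
 */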

/* Stage2 PMD definitions when the level is present */
static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
{
	return (CONFIG_PGTABLE_LEVELS > 2) && (kvm_stage2_levels(kvm) > 2);
}

#define S2_PMD_SHIFT			ARM64_HW_PGTABLE_LEVEL_SHIFT(2)
#define S2_PMD_SIZE			(1UL << S2_PMD_SHIFT)
#define S2_PMD_MASK			(~(S2_PMD_SIZE - 1))
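
/*
 * E.g., with 4K pages S2_PMD_SHIFT is 21, so a stage2 PMD entry covers
 * 2MB. For 2-level VMs the PMD level is folded into the PUD by the
 * helpers below.
 */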

static inline bool stage2_pud_none(struct kvm *kvm, pud_t pud)
{
	if (kvm_stage2_has_pmd(kvm))
		return pud_none(pud);
	else
		return false;
}

static inline void stage2_pud_clear(struct kvm *kvm, pud_t *pud)
{
	if (kvm_stage2_has_pmd(kvm))
		pud_clear(pud);
}

static inline bool stage2_pud_present(struct kvm *kvm, pud_t pud)
{
	if (kvm_stage2_has_pmd(kvm))
		return pud_present(pud);
	else
		return true;
}

static inline void stage2_pud_populate(struct kvm *kvm, pud_t *pud, pmd_t *pmd)
{
	if (kvm_stage2_has_pmd(kvm))
		pud_populate(NULL, pud, pmd);
}

static inline pmd_t *stage2_pmd_offset(struct kvm *kvm,
				       pud_t *pud, unsigned long address)
{
	if (kvm_stage2_has_pmd(kvm))
		return pmd_offset(pud, address);
	else
		return (pmd_t *)pud;
}

static inline void stage2_pmd_free(struct kvm *kvm, pmd_t *pmd)
{
	if (kvm_stage2_has_pmd(kvm))
		pmd_free(NULL, pmd);
}

static inline bool stage2_pud_huge(struct kvm *kvm, pud_t pud)
{
	if (kvm_stage2_has_pmd(kvm))
		return pud_huge(pud);
	else
		return false;
}

static inline bool stage2_pmd_table_empty(struct kvm *kvm, pmd_t *pmdp)
{
	if (kvm_stage2_has_pmd(kvm))
		return kvm_page_empty(pmdp);
	else
		return false;
}

static inline phys_addr_t
stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
	if (kvm_stage2_has_pmd(kvm)) {
		phys_addr_t boundary = (addr + S2_PMD_SIZE) & S2_PMD_MASK;

		return (boundary - 1 < end - 1) ? boundary : end;
	} else {
		return end;
	}
}

static inline bool stage2_pte_table_empty(struct kvm *kvm, pte_t *ptep)
{
	return kvm_page_empty(ptep);
}

static inline unsigned long stage2_pgd_index(struct kvm *kvm, phys_addr_t addr)
{
	return (addr >> stage2_pgdir_shift(kvm)) & (stage2_pgd_ptrs(kvm) - 1);
}

static inline phys_addr_t
stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t boundary = (addr + stage2_pgdir_size(kvm)) & stage2_pgdir_mask(kvm);

	return (boundary - 1 < end - 1) ? boundary : end;
}
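
/*
 * Minimal sketch of how the helpers above compose into a range walk,
 * in the style of the stage2 table walkers in virt/kvm/arm/mmu.c.
 * Illustration only; walk_puds() stands in for a hypothetical handler
 * of the next level down:
 *
 *	phys_addr_t next;
 *	pgd_t *pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
 *
 *	do {
 *		next = stage2_pgd_addr_end(kvm, addr, end);
 *		if (!stage2_pgd_none(kvm, *pgd))
 *			walk_puds(kvm, pgd, addr, next);
 *	} while (pgd++, addr = next, addr != end);
 */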

#endif	/* __ARM64_S2_PGTABLE_H_ */