/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2016 - ARM Ltd
 *
 * stage2 page table helpers
 */

#ifndef __ARM64_S2_PGTABLE_H_
#define __ARM64_S2_PGTABLE_H_

#include <linux/hugetlb.h>
#include <asm/pgtable.h>

/*
 * PGDIR_SHIFT determines the size a top-level page table entry can map
 * and depends on the number of levels in the page table. Compute the
 * PGDIR_SHIFT for a given number of levels.
 */
#define pt_levels_pgdir_shift(lvls)	ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - (lvls))
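
/*
 * As an illustration (assuming 4K pages, i.e. PAGE_SHIFT == 12, where
 * ARM64_HW_PGTABLE_LEVEL_SHIFT(n) expands to (PAGE_SHIFT - 3) * (4 - (n)) + 3):
 *
 *	pt_levels_pgdir_shift(3) == ARM64_HW_PGTABLE_LEVEL_SHIFT(1)
 *				 == 9 * 3 + 3 == 30
 *
 * i.e. with 3 levels, each top-level entry maps a 1GB (2^30) range.
 */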

/*
 * The hardware supports concatenation of up to 16 tables at the stage2
 * entry level and we use the feature whenever possible, which means we
 * resolve 4 additional bits of address at the entry level.
 *
 * This implies that the total number of page table levels required for
 * IPA_SHIFT at stage2, as expected by the hardware, can be calculated
 * using the same logic used for the (non-collapsible) stage1 page tables,
 * but for (IPA_SHIFT - 4).
 */
#define stage2_pgtable_levels(ipa)	ARM64_HW_PGTABLE_LEVELS((ipa) - 4)
#define kvm_stage2_levels(kvm)		VTCR_EL2_LVLS(kvm->arch.vtcr)
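
/*
 * For example (assuming 4K pages), a 40-bit IPA space would need
 * ARM64_HW_PGTABLE_LEVELS(40) == 4 levels at stage1, but thanks to the
 * 4 bits resolved by concatenation it only needs
 * stage2_pgtable_levels(40) == ARM64_HW_PGTABLE_LEVELS(36) == 3 levels
 * at stage2.
 */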

/*
 * stage2_pgdir_shift() is the log2 of the range mapped by a top-level
 * stage2 entry for the VM.
 */
#define stage2_pgdir_shift(kvm)		pt_levels_pgdir_shift(kvm_stage2_levels(kvm))
#define stage2_pgdir_size(kvm)		(1ULL << stage2_pgdir_shift(kvm))
#define stage2_pgdir_mask(kvm)		~(stage2_pgdir_size(kvm) - 1)
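
/*
 * Continuing the 4K-page, 3-level example above: stage2_pgdir_shift()
 * is 30, so stage2_pgdir_size() is 1GB and stage2_pgdir_mask() clears
 * the low 30 bits of an address.
 */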

/*
 * The number of PTRS across all concatenated stage2 tables is given by
 * the number of bits resolved at the initial level.
 * If we force more levels than necessary, we may have (stage2_pgdir_shift > IPA),
 * in which case the stage2 pgd has a single entry.
 */
#define pgd_ptrs_shift(ipa, pgdir_shift)	\
	((ipa) > (pgdir_shift) ? ((ipa) - (pgdir_shift)) : 0)
#define __s2_pgd_ptrs(ipa, lvls)		\
	(1 << (pgd_ptrs_shift((ipa), pt_levels_pgdir_shift(lvls))))
#define __s2_pgd_size(ipa, lvls)	(__s2_pgd_ptrs((ipa), (lvls)) * sizeof(pgd_t))

#define stage2_pgd_ptrs(kvm)		__s2_pgd_ptrs(kvm_phys_shift(kvm), kvm_stage2_levels(kvm))
#define stage2_pgd_size(kvm)		__s2_pgd_size(kvm_phys_shift(kvm), kvm_stage2_levels(kvm))
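
/*
 * Worked example (assuming 4K pages and a 40-bit IPA, hence 3 levels):
 * pgd_ptrs_shift(40, 30) == 10, so the entry level has 1024 entries and
 * stage2_pgd_size() == 8KB, i.e. two 4K tables concatenated out of the
 * 16 the hardware allows.
 */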

/*
 * kvm_mmu_cache_min_pages() is the number of pages required to install
 * a stage-2 translation. We pre-allocate the entry level page table at
 * VM creation.
 */
#define kvm_mmu_cache_min_pages(kvm)	(kvm_stage2_levels(kvm) - 1)
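
/*
 * E.g. with 3 levels of stage2 tables, installing a PTE mapping may
 * require allocating up to 2 intermediate tables (the entry level
 * itself having been pre-allocated), hence a minimum of 2 cache pages.
 */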

/* Stage2 PUD definitions when the level is present */
static inline bool kvm_stage2_has_pud(struct kvm *kvm)
{
	return (CONFIG_PGTABLE_LEVELS > 3) && (kvm_stage2_levels(kvm) > 3);
}

#define S2_PUD_SHIFT			ARM64_HW_PGTABLE_LEVEL_SHIFT(1)
#define S2_PUD_SIZE			(1UL << S2_PUD_SHIFT)
#define S2_PUD_MASK			(~(S2_PUD_SIZE - 1))
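
/*
 * E.g. with 4K pages, S2_PUD_SHIFT is 30, so a stage2 PUD entry covers
 * a 1GB range (these constants are only meaningful when
 * kvm_stage2_has_pud() is true).
 */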

#define stage2_pgd_none(kvm, pgd)		pgd_none(pgd)
#define stage2_pgd_clear(kvm, pgd)		pgd_clear(pgd)
#define stage2_pgd_present(kvm, pgd)		pgd_present(pgd)
#define stage2_pgd_populate(kvm, pgd, p4d)	pgd_populate(NULL, pgd, p4d)

static inline p4d_t *stage2_p4d_offset(struct kvm *kvm,
				       pgd_t *pgd, unsigned long address)
{
	return p4d_offset(pgd, address);
}

static inline void stage2_p4d_free(struct kvm *kvm, p4d_t *p4d)
{
	/* The p4d level is always folded into the pgd on arm64; nothing to free. */
}

static inline bool stage2_p4d_table_empty(struct kvm *kvm, p4d_t *p4dp)
{
	return false;
}

static inline phys_addr_t stage2_p4d_addr_end(struct kvm *kvm,
					      phys_addr_t addr, phys_addr_t end)
{
	return end;
}

static inline bool stage2_p4d_none(struct kvm *kvm, p4d_t p4d)
{
	if (kvm_stage2_has_pud(kvm))
		return p4d_none(p4d);
	else
		return false;
}

static inline void stage2_p4d_clear(struct kvm *kvm, p4d_t *p4dp)
{
	if (kvm_stage2_has_pud(kvm))
		p4d_clear(p4dp);
}

static inline bool stage2_p4d_present(struct kvm *kvm, p4d_t p4d)
{
	if (kvm_stage2_has_pud(kvm))
		return p4d_present(p4d);
	else
		return true;
}

static inline void stage2_p4d_populate(struct kvm *kvm, p4d_t *p4d, pud_t *pud)
{
	if (kvm_stage2_has_pud(kvm))
		p4d_populate(NULL, p4d, pud);
}

static inline pud_t *stage2_pud_offset(struct kvm *kvm,
				       p4d_t *p4d, unsigned long address)
{
	if (kvm_stage2_has_pud(kvm))
		return pud_offset(p4d, address);
	else
		return (pud_t *)p4d;
}
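
/*
 * Putting the helpers above together, a minimal walk down to the PUD
 * level would look something like the sketch below (illustrative only;
 * 'pgd_base' stands in for the VM's stage2 pgd base pointer):
 *
 *	pgd_t *pgd = pgd_base + stage2_pgd_index(kvm, addr);
 *	p4d_t *p4d = stage2_p4d_offset(kvm, pgd, addr);
 *	pud_t *pud = stage2_pud_offset(kvm, p4d, addr);
 *
 * When the PUD level is not present, stage2_pud_offset() simply hands
 * back the p4d entry, so the walk degenerates gracefully.
 */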

static inline void stage2_pud_free(struct kvm *kvm, pud_t *pud)
{
	if (kvm_stage2_has_pud(kvm))
		free_page((unsigned long)pud);
}

static inline bool stage2_pud_table_empty(struct kvm *kvm, pud_t *pudp)
{
	if (kvm_stage2_has_pud(kvm))
		return kvm_page_empty(pudp);
	else
		return false;
}

static inline phys_addr_t
stage2_pud_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
	if (kvm_stage2_has_pud(kvm)) {
		phys_addr_t boundary = (addr + S2_PUD_SIZE) & S2_PUD_MASK;

		/*
		 * The (x - 1) comparison keeps the result correct when
		 * boundary wraps to 0 at the top of the address space.
		 */
		return (boundary - 1 < end - 1) ? boundary : end;
	} else {
		return end;
	}
}
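
/*
 * The *_addr_end() helpers clamp each step of a range walk to the end
 * of the current entry. A typical (illustrative) use, with do_pud()
 * standing in for whatever per-entry work the caller does:
 *
 *	do {
 *		next = stage2_pud_addr_end(kvm, addr, end);
 *		do_pud(kvm, pud, addr, next);
 *	} while (pud++, addr = next, addr != end);
 */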

/* Stage2 PMD definitions when the level is present */
static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
{
	return (CONFIG_PGTABLE_LEVELS > 2) && (kvm_stage2_levels(kvm) > 2);
}

#define S2_PMD_SHIFT			ARM64_HW_PGTABLE_LEVEL_SHIFT(2)
#define S2_PMD_SIZE			(1UL << S2_PMD_SHIFT)
#define S2_PMD_MASK			(~(S2_PMD_SIZE - 1))
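
/*
 * E.g. with 4K pages, S2_PMD_SHIFT is 21, so a stage2 PMD entry covers
 * a 2MB range.
 */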

static inline bool stage2_pud_none(struct kvm *kvm, pud_t pud)
{
	if (kvm_stage2_has_pmd(kvm))
		return pud_none(pud);
	else
		return false;
}

static inline void stage2_pud_clear(struct kvm *kvm, pud_t *pud)
{
	if (kvm_stage2_has_pmd(kvm))
		pud_clear(pud);
}

static inline bool stage2_pud_present(struct kvm *kvm, pud_t pud)
{
	if (kvm_stage2_has_pmd(kvm))
		return pud_present(pud);
	else
		return true;
}

static inline void stage2_pud_populate(struct kvm *kvm, pud_t *pud, pmd_t *pmd)
{
	if (kvm_stage2_has_pmd(kvm))
		pud_populate(NULL, pud, pmd);
}

static inline pmd_t *stage2_pmd_offset(struct kvm *kvm,
				       pud_t *pud, unsigned long address)
{
	if (kvm_stage2_has_pmd(kvm))
		return pmd_offset(pud, address);
	else
		return (pmd_t *)pud;
}

static inline void stage2_pmd_free(struct kvm *kvm, pmd_t *pmd)
{
	if (kvm_stage2_has_pmd(kvm))
		free_page((unsigned long)pmd);
}

static inline bool stage2_pud_huge(struct kvm *kvm, pud_t pud)
{
	if (kvm_stage2_has_pmd(kvm))
		return pud_huge(pud);
	else
		return false;
}

static inline bool stage2_pmd_table_empty(struct kvm *kvm, pmd_t *pmdp)
{
	if (kvm_stage2_has_pmd(kvm))
		return kvm_page_empty(pmdp);
	else
		return false;
}

static inline phys_addr_t
stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
	if (kvm_stage2_has_pmd(kvm)) {
		phys_addr_t boundary = (addr + S2_PMD_SIZE) & S2_PMD_MASK;

		return (boundary - 1 < end - 1) ? boundary : end;
	} else {
		return end;
	}
}

static inline bool stage2_pte_table_empty(struct kvm *kvm, pte_t *ptep)
{
	return kvm_page_empty(ptep);
}

static inline unsigned long stage2_pgd_index(struct kvm *kvm, phys_addr_t addr)
{
	return (addr >> stage2_pgdir_shift(kvm)) & (stage2_pgd_ptrs(kvm) - 1);
}
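
/*
 * E.g. for the 4K-page, 40-bit IPA configuration used in the examples
 * above, stage2_pgd_index() reduces to (addr >> 30) & 1023, indexing
 * the 1024-entry concatenated pgd.
 */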

static inline phys_addr_t
stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t boundary = (addr + stage2_pgdir_size(kvm)) & stage2_pgdir_mask(kvm);

	return (boundary - 1 < end - 1) ? boundary : end;
}

#endif	/* __ARM64_S2_PGTABLE_H_ */