/*
 * Copyright (C) 2016 - ARM Ltd
 *
 * stage2 page table helpers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_S2_PGTABLE_H_
#define __ARM64_S2_PGTABLE_H_

#include <linux/hugetlb.h>
#include <asm/pgtable.h>

/*
 * The hardware supports concatenation of up to 16 tables at the stage2 entry
 * level and we use the feature whenever possible.
 *
 * Now, the minimum number of bits resolved at any level is (PAGE_SHIFT - 3).
 * On arm64, the smallest PAGE_SIZE supported is 4K, which means
 *             (PAGE_SHIFT - 3) > 4 holds for all page sizes.
 * This implies that the total number of page table levels expected by the
 * hardware at stage2 is the number of levels required for (KVM_PHYS_SHIFT - 4)
 * in normal translations (e.g., stage1), since we cannot have another level
 * in the range (KVM_PHYS_SHIFT - 4, KVM_PHYS_SHIFT].
 */
#define STAGE2_PGTABLE_LEVELS		ARM64_HW_PGTABLE_LEVELS(KVM_PHYS_SHIFT - 4)
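/*
 * For example, assuming the usual definition from asm/pgtable-hwdef.h,
 * ARM64_HW_PGTABLE_LEVELS(bits) = ((bits) - 4) / (PAGE_SHIFT - 3), a
 * 40bit KVM_PHYS_SHIFT works out as:
 *
 *	 4K pages: (36 - 4) / 9  = 3 levels
 *	16K pages: (36 - 4) / 11 = 2 levels
 *	64K pages: (36 - 4) / 13 = 2 levels
 */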

/*
 * With all the supported VA_BITS values and a 40bit guest IPA, the following
 * condition is always true:
 *
 *       STAGE2_PGTABLE_LEVELS <= CONFIG_PGTABLE_LEVELS
 *
 * We base our stage2 page table walker helpers on this assumption and
 * fall back to using the host version of the helper wherever possible.
 * i.e., if a particular level is not folded (e.g., PUD) at stage2, we fall
 * back to the host version, since it is guaranteed not to be folded at the
 * host.
 *
 * If the condition breaks in the future, we can rearrange the host level
 * definitions and reuse them for stage2. Till then...
 */
#if STAGE2_PGTABLE_LEVELS > CONFIG_PGTABLE_LEVELS
#error "Unsupported combination of guest IPA and host VA_BITS."
#endif
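
/*
 * For example, a 4K page host with VA_BITS == 39 has
 * CONFIG_PGTABLE_LEVELS == 3, while a 40bit guest IPA needs
 * STAGE2_PGTABLE_LEVELS == 3 (see above), so the condition holds.
 */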

/* S2_PGDIR_SHIFT determines the size mapped by a top-level stage2 entry */
#define S2_PGDIR_SHIFT			ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - STAGE2_PGTABLE_LEVELS)
#define S2_PGDIR_SIZE			(1UL << S2_PGDIR_SHIFT)
#define S2_PGDIR_MASK			(~(S2_PGDIR_SIZE - 1))

/*
 * The number of pointers across all concatenated stage2 tables, given by
 * the number of bits resolved at the initial (entry) level.
 */
#define PTRS_PER_S2_PGD			(1 << (KVM_PHYS_SHIFT - S2_PGDIR_SHIFT))
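
/*
 * e.g., with 4K pages, STAGE2_PGTABLE_LEVELS == 3 gives
 * S2_PGDIR_SHIFT == ARM64_HW_PGTABLE_LEVEL_SHIFT(1) == 30, so
 * PTRS_PER_S2_PGD == 1 << (40 - 30) == 1024: two concatenated
 * 512-entry tables at the entry level.
 */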

/*
 * kvm_mmu_cache_min_pages() is the number of pages required to install
 * a stage2 translation. We pre-allocate the entry level page table at
 * VM creation.
 */
#define kvm_mmu_cache_min_pages(kvm)	(STAGE2_PGTABLE_LEVELS - 1)
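
/*
 * e.g., with 3 levels at stage2, installing a translation needs at most
 * one PMD table and one PTE table (2 pages); the entry level table is
 * already in place.
 */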

/* Stage2 PUD definitions when the level is present */
#define STAGE2_PGTABLE_HAS_PUD		(STAGE2_PGTABLE_LEVELS > 3)
#define S2_PUD_SHIFT			ARM64_HW_PGTABLE_LEVEL_SHIFT(1)
#define S2_PUD_SIZE			(1UL << S2_PUD_SHIFT)
#define S2_PUD_MASK			(~(S2_PUD_SIZE - 1))
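/* e.g., with 4K pages, S2_PUD_SHIFT == 30, i.e., 1GB per PUD entry */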

static inline bool stage2_pgd_none(struct kvm *kvm, pgd_t pgd)
{
	if (STAGE2_PGTABLE_HAS_PUD)
		return pgd_none(pgd);
	else
		return false;
}

static inline void stage2_pgd_clear(struct kvm *kvm, pgd_t *pgdp)
{
	if (STAGE2_PGTABLE_HAS_PUD)
		pgd_clear(pgdp);
}

static inline bool stage2_pgd_present(struct kvm *kvm, pgd_t pgd)
{
	if (STAGE2_PGTABLE_HAS_PUD)
		return pgd_present(pgd);
	else
		return true;
}

static inline void stage2_pgd_populate(struct kvm *kvm, pgd_t *pgd, pud_t *pud)
{
	if (STAGE2_PGTABLE_HAS_PUD)
		pgd_populate(NULL, pgd, pud);
}

static inline pud_t *stage2_pud_offset(struct kvm *kvm,
				       pgd_t *pgd, unsigned long address)
{
	if (STAGE2_PGTABLE_HAS_PUD)
		return pud_offset(pgd, address);
	else
		return (pud_t *)pgd;
}

static inline void stage2_pud_free(struct kvm *kvm, pud_t *pud)
{
	if (STAGE2_PGTABLE_HAS_PUD)
		pud_free(NULL, pud);
}

static inline bool stage2_pud_table_empty(struct kvm *kvm, pud_t *pudp)
{
	if (STAGE2_PGTABLE_HAS_PUD)
		return kvm_page_empty(pudp);
	else
		return false;
}

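/*
 * The *_addr_end() helpers below compare (boundary - 1) with (end - 1)
 * rather than the raw values, so that a boundary which wraps past the
 * top of the address space (i.e., becomes 0) still compares as the
 * larger address.
 */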
static inline phys_addr_t
stage2_pud_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
	if (STAGE2_PGTABLE_HAS_PUD) {
		phys_addr_t boundary = (addr + S2_PUD_SIZE) & S2_PUD_MASK;

		return (boundary - 1 < end - 1) ? boundary : end;
	} else {
		return end;
	}
}

/* Stage2 PMD definitions when the level is present */
#define STAGE2_PGTABLE_HAS_PMD		(STAGE2_PGTABLE_LEVELS > 2)
#define S2_PMD_SHIFT			ARM64_HW_PGTABLE_LEVEL_SHIFT(2)
#define S2_PMD_SIZE			(1UL << S2_PMD_SHIFT)
#define S2_PMD_MASK			(~(S2_PMD_SIZE - 1))
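/* e.g., with 4K pages, S2_PMD_SHIFT == 21, i.e., 2MB per PMD entry */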

static inline bool stage2_pud_none(struct kvm *kvm, pud_t pud)
{
	if (STAGE2_PGTABLE_HAS_PMD)
		return pud_none(pud);
	else
		return false;
}

static inline void stage2_pud_clear(struct kvm *kvm, pud_t *pud)
{
	if (STAGE2_PGTABLE_HAS_PMD)
		pud_clear(pud);
}

static inline bool stage2_pud_present(struct kvm *kvm, pud_t pud)
{
	if (STAGE2_PGTABLE_HAS_PMD)
		return pud_present(pud);
	else
		return true;
}

static inline void stage2_pud_populate(struct kvm *kvm, pud_t *pud, pmd_t *pmd)
{
	if (STAGE2_PGTABLE_HAS_PMD)
		pud_populate(NULL, pud, pmd);
}

static inline pmd_t *stage2_pmd_offset(struct kvm *kvm,
				       pud_t *pud, unsigned long address)
{
	if (STAGE2_PGTABLE_HAS_PMD)
		return pmd_offset(pud, address);
	else
		return (pmd_t *)pud;
}

static inline void stage2_pmd_free(struct kvm *kvm, pmd_t *pmd)
{
	if (STAGE2_PGTABLE_HAS_PMD)
		pmd_free(NULL, pmd);
}

static inline bool stage2_pud_huge(struct kvm *kvm, pud_t pud)
{
	if (STAGE2_PGTABLE_HAS_PMD)
		return pud_huge(pud);
	else
		return false;
}

static inline bool stage2_pmd_table_empty(struct kvm *kvm, pmd_t *pmdp)
{
	if (STAGE2_PGTABLE_HAS_PMD)
		return kvm_page_empty(pmdp);
	else
		return false;
}

static inline phys_addr_t
stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
	if (STAGE2_PGTABLE_HAS_PMD) {
		phys_addr_t boundary = (addr + S2_PMD_SIZE) & S2_PMD_MASK;

		return (boundary - 1 < end - 1) ? boundary : end;
	} else {
		return end;
	}
}

static inline bool stage2_pte_table_empty(struct kvm *kvm, pte_t *ptep)
{
	return kvm_page_empty(ptep);
}

#define stage2_pgd_size(kvm)	(PTRS_PER_S2_PGD * sizeof(pgd_t))
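
/*
 * e.g., with 4K pages and a 40bit IPA this is 1024 * 8 == 8K, i.e., the
 * two concatenated entry level pages noted above.
 */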

static inline unsigned long stage2_pgd_index(struct kvm *kvm, phys_addr_t addr)
{
	return (addr >> S2_PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1);
}
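
/*
 * A minimal sketch of how the helpers above compose into a stage2 table
 * walk. This is illustrative only and not part of this header's API: it
 * assumes struct kvm from <linux/kvm_host.h> is visible and that
 * kvm->arch.pgd points at the (concatenated) entry level table, as in
 * the KVM/arm64 MMU code.
 */
static inline pmd_t *stage2_pmd_lookup_example(struct kvm *kvm,
					       phys_addr_t addr)
{
	pgd_t *pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
	pud_t *pud;

	/* A folded PUD level is never "none"; see stage2_pgd_none() above */
	if (stage2_pgd_none(kvm, *pgd))
		return NULL;

	pud = stage2_pud_offset(kvm, pgd, addr);
	if (stage2_pud_none(kvm, *pud))
		return NULL;

	return stage2_pmd_offset(kvm, pud, addr);
}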

static inline phys_addr_t
stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t boundary = (addr + S2_PGDIR_SIZE) & S2_PGDIR_MASK;

	return (boundary - 1 < end - 1) ? boundary : end;
}

#endif	/* __ARM64_S2_PGTABLE_H_ */