xref: /openbmc/linux/arch/arm64/include/asm/kvm_mmu.h (revision 68198dca)
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __pa_symbol(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  //idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * This of course assumes that the trampoline page exists within the
 * VA_BITS range. If it doesn't, then it means we're in the odd case
 * where the kernel idmap (as well as HYP) uses more levels than the
 * kernel runtime page tables (as seen when the kernel is configured
 * for 4k pages, a 39-bit VA, and yet memory lives just above that
 * limit, forcing the idmap to use 4 levels of page tables while the
 * kernel itself only uses 3). In this particular case, it doesn't
 * matter which side of VA_BITS we use, as we're guaranteed not to
 * conflict with anything.
 *
 * When using VHE, there are no separate hyp mappings and all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, so none of this applies in that case.
 */

#define HYP_PAGE_OFFSET_HIGH_MASK	((UL(1) << VA_BITS) - 1)
#define HYP_PAGE_OFFSET_LOW_MASK	((UL(1) << (VA_BITS - 1)) - 1)
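
/*
 * Worked example (illustrative): with VA_BITS == 48,
 * HYP_PAGE_OFFSET_HIGH_MASK == 0x0000ffffffffffff and
 * HYP_PAGE_OFFSET_LOW_MASK  == 0x00007fffffffffff. A kernel VA such as
 * 0xffff800008001000 masks down to 0x0000800008001000 under the high
 * mask, or to 0x0000000008001000 if the low mask is applied on top.
 */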

#ifdef __ASSEMBLY__

#include <asm/alternative.h>
#include <asm/cpufeature.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * This generates the following sequences:
 * - High mask:
 *		and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
 *		nop
 * - Low mask:
 *		and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
 *		and x0, x0, #HYP_PAGE_OFFSET_LOW_MASK
 * - VHE:
 *		nop
 *		nop
 *
 * The "low mask" version works because the mask is a strict subset of
 * the "high mask", which makes the first AND redundant but harmless.
 * The extra instruction should be completely invisible on any viable CPU.
 */
.macro kern_hyp_va	reg
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	and     \reg, \reg, #HYP_PAGE_OFFSET_HIGH_MASK
alternative_else_nop_endif
alternative_if ARM64_HYP_OFFSET_LOW
	and     \reg, \reg, #HYP_PAGE_OFFSET_LOW_MASK
alternative_else_nop_endif
.endm
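
/*
 * Illustrative use from EL2 assembly (hypothetical, not part of this
 * header):
 *
 *	adr_l	x0, some_kernel_object	// x0 = kernel VA of the object
 *	kern_hyp_va x0			// x0 = HYP alias of that VA
 */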

#else

#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

/* C counterpart of the kern_hyp_va assembly macro above. */
static inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE("and %0, %0, %1",
				 "nop",
				 ARM64_HAS_VIRT_HOST_EXTN)
		     : "+r" (v)
		     : "i" (HYP_PAGE_OFFSET_HIGH_MASK));
	asm volatile(ALTERNATIVE("nop",
				 "and %0, %0, %1",
				 ARM64_HYP_OFFSET_LOW)
		     : "+r" (v)
		     : "i" (HYP_PAGE_OFFSET_LOW_MASK));
	return v;
}

#define kern_hyp_va(v)	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
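
/*
 * Illustrative use (hypothetical caller): pointers handed to code
 * running at EL2 must be converted first, e.g.
 *
 *	struct kvm *hyp_kvm = kern_hyp_va(vcpu->kvm);
 *
 * The typeof() cast lets the result keep the pointer type of its
 * argument.
 */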

/*
 * We currently only support a 40-bit IPA.
 */
#define KVM_PHYS_SHIFT	(40)
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)
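
/*
 * A minimal sketch (hypothetical caller, not from this file) of how
 * these limits bound guest physical addresses before mapping them:
 *
 *	if (guest_ipa + size > KVM_PHYS_SIZE)
 *		return -EFAULT;		// beyond the 40-bit IPA space
 */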

#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define	kvm_set_pte(ptep, pte)		set_pte(ptep, pte)
#define	kvm_set_pmd(pmdp, pmd)		set_pmd(pmdp, pmd)

static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= PTE_S2_RDWR;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= PMD_S2_RDWR;
	return pmd;
}

static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pteval_t old_pteval, pteval;

	/*
	 * Clear the write permission bits atomically so that we cannot
	 * lose access/dirty state updated concurrently by the hardware:
	 * retry the cmpxchg until no update has intervened.
	 */
	pteval = READ_ONCE(pte_val(*pte));
	do {
		old_pteval = pteval;
		pteval &= ~PTE_S2_RDWR;
		pteval |= PTE_S2_RDONLY;
		pteval = cmpxchg_relaxed(&pte_val(*pte), old_pteval, pteval);
	} while (pteval != old_pteval);
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	kvm_set_s2pte_readonly((pte_t *)pmd);
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return kvm_s2pte_readonly((pte_t *)pmd);
}
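
/*
 * A minimal sketch (hypothetical walker, modelled on the stage-2
 * write-protection path used for dirty logging) showing how the
 * helpers above combine:
 *
 *	static void wp_ptes(pte_t *pte, int nr)
 *	{
 *		int i;
 *
 *		for (i = 0; i < nr; i++)
 *			if (!pte_none(pte[i]) && !kvm_s2pte_readonly(&pte[i]))
 *				kvm_set_s2pte_readonly(&pte[i]);
 *	}
 */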

static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);

	/* A count of 1 means only the table page's own reference is left. */
	return page_count(ptr_page) == 1;
}

#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define hyp_pmd_table_empty(pmdp) (0)
#else
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define hyp_pud_table_empty(pudp) (0)
#else
#define hyp_pud_table_empty(pudp) kvm_page_empty(pudp)
#endif
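
/*
 * Illustrative (hypothetical) teardown use: once the last entry in a
 * table page is cleared, its refcount drops back to 1 and the table
 * page itself can be freed:
 *
 *	if (hyp_pte_table_empty(first_pte))
 *		free_hyp_table(pmd);	// assumed helper, not defined here
 */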

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	/* Check both SCTLR_EL1.M (bit 0) and SCTLR_EL1.C (bit 2). */
	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
					       kvm_pfn_t pfn,
					       unsigned long size)
{
	void *va = page_address(pfn_to_page(pfn));

	kvm_flush_dcache_to_poc(va, size);

	if (icache_is_aliasing()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
		/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
		flush_icache_range((unsigned long)va,
				   (unsigned long)va + size);
	}
}
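
/*
 * Illustrative (assumed) call site: the stage-2 fault handling path
 * invokes this when mapping a new page into the guest, e.g.
 *
 *	__coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE);
 */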

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);
	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	struct page *page = pud_page(pud);
	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
}

#define kvm_virt_to_phys(x)		__pa_symbol(x)

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return __cpu_uses_extended_idmap();
}

static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start)
{
	int idmap_idx;

	/*
	 * Use the first entry to access the HYP mappings. It is
	 * guaranteed to be free, otherwise we wouldn't use an
	 * extended idmap.
	 */
	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
	merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);

	/*
	 * Create another extended level entry that points to the boot HYP map,
	 * which contains an ID mapping of the HYP init code. We essentially
	 * merge the boot and runtime HYP maps by doing so, but they don't
	 * overlap anyway, so this is fine.
	 */
	idmap_idx = hyp_idmap_start >> VA_BITS;
	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
	merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
}
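
/*
 * Worked example (illustrative): with VA_BITS == 39 and the idmap text
 * at physical address 0x8000000000 (just above the 512GB limit),
 * idmap_idx = 0x8000000000 >> 39 = 1, so the boot HYP tables hang off
 * the second entry of the merged pgd while entry 0 serves the runtime
 * HYP mappings.
 */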

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	/* ID_AA64MMFR1_EL1.VMIDBits: 0b0010 means 16-bit VMIDs, else 8-bit. */
	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}
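
/*
 * A minimal sketch (hypothetical caller) of sizing the VMID space from
 * the result:
 *
 *	u64 vmid_mask = GENMASK(kvm_get_vmid_bits() - 1, 0);
 */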

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */