/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address).
 *
 * ARMv8.1 (using VHE) does have a TTBR1_EL2, and doesn't use these
 * macros (the entire kernel runs at EL2).
 */
#define HYP_PAGE_OFFSET_SHIFT	VA_BITS
#define HYP_PAGE_OFFSET_MASK	((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
#define HYP_PAGE_OFFSET		(PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)
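
/*
 * Worked example (illustrative, assuming VA_BITS == 48): a kernel VA
 * such as 0xffff_8000_1234_5678 has its top 16 bits cleared by
 * HYP_PAGE_OFFSET_MASK ((1UL << 48) - 1), giving the HYP VA
 * 0x0000_8000_1234_5678, i.e. the same offset within a TTBR0-sized
 * range, so the HYP mappings mirror the kernel's layout.
 */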

/*
 * Our virtual mapping for the idmap-ed MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the last
 * possible page, where no kernel mapping will ever exist.
 */
#define TRAMPOLINE_VA		(HYP_PAGE_OFFSET_MASK & PAGE_MASK)
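
/*
 * Illustrative value (assuming VA_BITS == 48 and 4K pages):
 * TRAMPOLINE_VA = ((1UL << 48) - 1) & PAGE_MASK = 0x0000_ffff_ffff_f000,
 * the highest page in the HYP VA range.
 */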

#ifdef __ASSEMBLY__

#include <asm/alternative.h>
#include <asm/cpufeature.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 */
.macro kern_hyp_va	reg
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	and	\reg, \reg, #HYP_PAGE_OFFSET_MASK
alternative_else
	nop
alternative_endif
.endm
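
/*
 * Illustrative use from HYP assembly (a sketch, not code from this
 * header): convert a kernel pointer in x0 before dereferencing it
 * at EL2:
 *
 *	kern_hyp_va	x0
 *	ldr	x1, [x0]
 *
 * Under VHE the alternative patches the mask to a nop, since kernel
 * VAs are directly usable at EL2.
 */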

#else

#include <asm/pgalloc.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

#define KERN_TO_HYP(kva)	((unsigned long)(kva) - PAGE_OFFSET + HYP_PAGE_OFFSET)
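
/*
 * Illustrative expansion (assuming VA_BITS == 48): for a linear-map
 * kva of PAGE_OFFSET + 0x1000, KERN_TO_HYP() yields
 * HYP_PAGE_OFFSET + 0x1000, i.e. the same arithmetic as the
 * kern_hyp_va mask above, expressed for C code.
 */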

/*
 * We currently only support a 40-bit IPA, i.e. 1TB of guest physical
 * address space.
 */
#define KVM_PHYS_SHIFT	(40)
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)

#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
phys_addr_t kvm_get_idmap_start(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define	kvm_set_pte(ptep, pte)		set_pte(ptep, pte)
#define	kvm_set_pmd(pmdp, pmd)		set_pmd(pmdp, pmd)

static inline void kvm_clean_pgd(pgd_t *pgd) {}
static inline void kvm_clean_pmd(pmd_t *pmd) {}
static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
static inline void kvm_clean_pte(pte_t *pte) {}
static inline void kvm_clean_pte_entry(pte_t *pte) {}

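/*
 * Stage-2 permission helpers: return a copy of the descriptor with
 * the S2AP bits set to read/write; the caller writes the result back
 * with kvm_set_pte()/kvm_set_pmd().
 */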
static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= PTE_S2_RDWR;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= PMD_S2_RDWR;
	return pmd;
}

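/*
 * Clear the write permission of a live stage-2 descriptor. The update
 * uses an ldxr/stxr loop so it is atomic with respect to concurrent
 * updates of the same descriptor, rather than a plain (racy)
 * read-modify-write.
 */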
static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pteval_t pteval;
	unsigned long tmp;

	asm volatile("//	kvm_set_s2pte_readonly\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	and	%0, %0, %3		// clear PTE_S2_RDWR\n"
	"	orr	%0, %0, %4		// set PTE_S2_RDONLY\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*pte))
	: "L" (~PTE_S2_RDWR), "L" (PTE_S2_RDONLY));
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	kvm_set_s2pte_readonly((pte_t *)pmd);
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return kvm_s2pte_readonly((pte_t *)pmd);
}

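/*
 * A page-table page is considered empty once its refcount has dropped
 * back to 1: only the initial allocation reference remains, with no
 * per-entry references left.
 */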
static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define hyp_pmd_table_empty(pmdp) (0)
#else
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define hyp_pud_table_empty(pudp) (0)
#else
#define hyp_pud_table_empty(pudp) kvm_page_empty(pudp)
#endif

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

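/*
 * Caches are considered enabled for the guest when both SCTLR_EL1.M
 * (bit 0, MMU enable) and SCTLR_EL1.C (bit 2, data cache enable) are
 * set, hence the 0b101 mask below.
 */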
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
					       kvm_pfn_t pfn,
					       unsigned long size,
					       bool ipa_uncached)
{
	void *va = page_address(pfn_to_page(pfn));

	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
		kvm_flush_dcache_to_poc(va, size);

	if (!icache_is_aliasing()) {		/* PIPT */
		flush_icache_range((unsigned long)va,
				   (unsigned long)va + size);
	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}

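/*
 * Flush the data cache to the Point of Coherency for a mapping at
 * each possible stage-2 granule: a page (PTE), a PMD-level block or a
 * PUD-level block; the block sizes depend on the configured page size.
 */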
static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);
	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	struct page *page = pud_page(pud);
	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
}

#define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return __cpu_uses_extended_idmap();
}

static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start)
{
	int idmap_idx;

	/*
	 * Use the first entry to access the HYP mappings. It is
	 * guaranteed to be free, otherwise we wouldn't use an
	 * extended idmap.
	 */
	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
	merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);

	/*
	 * Create another extended level entry that points to the boot HYP map,
	 * which contains an ID mapping of the HYP init code. We essentially
	 * merge the boot and runtime HYP maps by doing so, but they don't
	 * overlap anyway, so this is fine.
	 */
	idmap_idx = hyp_idmap_start >> VA_BITS;
	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
	merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
}

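/*
 * ID_AA64MMFR1_EL1.VMIDBits reports 0b0010 when the CPU implements
 * 16-bit VMIDs (ARMv8.1 VMID16); otherwise we assume the architected
 * minimum of 8 bits.
 */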
static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_system_reg(SYS_ID_AA64MMFR1_EL1);

	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */