xref: /openbmc/linux/arch/arm64/include/asm/kvm_mmu.h (revision 97cbd2fc0257c6af7036a9a6415ca8ad43535d6b)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both the idmap
 * (the trampoline page used to initialize EL2) and our normal runtime
 * VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __pa_symbol(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  // idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * When using VHE, there are no separate hyp mappings and all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, so none of this applies in that case.
 */
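
/*
 * Worked example (illustrative, not from the original source): with
 * VA_BITS == 48 and the idmap'd trampoline page living below BIT(47),
 * EL2 must shadow the kernel in the top half of its 47-bit space:
 *
 *	T & BIT(47) == 0, so:
 *	HYP_VA_MIN = 1 << 47		= 0x0000800000000000
 *	HYP_VA_MAX = HYP_VA_MIN +
 *		     (1 << 47) - 1	= 0x0000ffffffffffff
 */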

#ifdef __ASSEMBLY__

#include <asm/alternative.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * The actual code generation takes place in kvm_update_va_mask, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_update_va_mask uses the
 * specific registers encoded in the instructions).
 */
.macro kern_hyp_va	reg
alternative_cb kvm_update_va_mask
	and     \reg, \reg, #1		/* mask with va_mask */
	ror	\reg, \reg, #1		/* rotate to the first tag bit */
	add	\reg, \reg, #0		/* insert the low 12 bits of the tag */
	add	\reg, \reg, #0, lsl 12	/* insert the top 12 bits of the tag */
	ror	\reg, \reg, #63		/* rotate back */
alternative_cb_end
.endm

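/*
 * Example (illustrative): convert the kernel VA held in x0 in place:
 *
 *	kern_hyp_va	x0
 *
 * At boot, kvm_update_va_mask rewrites the five instructions above so
 * that the mask and tag match the hyp VA layout that was chosen.
 */
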
/*
 * Convert a hypervisor VA to a PA
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 */
.macro hyp_pa reg, tmp
	ldr_l	\tmp, hyp_physvirt_offset
	add	\reg, \reg, \tmp
.endm
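
/*
 * Example (illustrative): rebase the hyp VA in x0 onto its PA, using
 * x1 as scratch (hyp_physvirt_offset holds PA minus hyp VA, so a
 * single add suffices):
 *
 *	hyp_pa	x0, x1
 */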

/*
 * Convert a hypervisor VA to a kernel image address
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 *
 * The actual code generation takes place in kvm_get_kimage_voffset, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_get_kimage_voffset uses the
 * specific registers encoded in the instructions).
 */
.macro hyp_kimg_va reg, tmp
	/* Convert hyp VA -> PA. */
	hyp_pa	\reg, \tmp

	/* Load kimage_voffset. */
alternative_cb kvm_get_kimage_voffset
	movz	\tmp, #0
	movk	\tmp, #0, lsl #16
	movk	\tmp, #0, lsl #32
	movk	\tmp, #0, lsl #48
alternative_cb_end

	/* Convert PA -> kimg VA. */
	add	\reg, \reg, \tmp
.endm
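
/*
 * Summarised (illustrative): the conversion performed above is
 *
 *	kimg VA = (hyp VA + hyp_physvirt_offset) + kimage_voffset
 *
 * where the movz/movk sequence is patched at boot to materialize
 * kimage_voffset (the offset of the kernel image mapping from its
 * physical base) in \tmp.
 */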

#else

#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void);
void kvm_apply_hyp_relocations(void);

static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
				    "ror %0, %0, #1\n"
				    "add %0, %0, #0\n"
				    "add %0, %0, #0, lsl 12\n"
				    "ror %0, %0, #63\n",
				    kvm_update_va_mask)
		     : "+r" (v));
	return v;
}

#define kern_hyp_va(v)	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
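
/*
 * Example (illustrative): the C form mirrors the assembly macro above;
 * __load_guest_stage2() below, for instance, dereferences a
 * hyp-visible alias of a kernel pointer via kern_hyp_va(mmu->kvm).
 */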

static __always_inline unsigned long __kimg_hyp_va(unsigned long v)
{
	unsigned long offset;

	asm volatile(ALTERNATIVE_CB("movz %0, #0\n"
				    "movk %0, #0, lsl #16\n"
				    "movk %0, #0, lsl #32\n"
				    "movk %0, #0, lsl #48\n",
				    kvm_update_kimg_phys_offset)
		     : "=r" (offset));

	return __kern_hyp_va((v - offset) | PAGE_OFFSET);
}

#define kimg_fn_hyp_va(v)	((typeof(*v))(__kimg_hyp_va((unsigned long)(v))))

#define kimg_fn_ptr(x)	(typeof(x) **)(x)
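
/*
 * Illustrative sketch (assumed usage, not defined in this header): a
 * kernel-image function address can be made callable at EL2 by
 * combining the two helpers, e.g.
 *
 *	fn = kimg_fn_hyp_va(kimg_fn_ptr(some_handler));
 *
 * where some_handler is a hypothetical kernel-image function; callers
 * typically store kimg_fn_ptr() entries in a table and convert each
 * entry with kimg_fn_hyp_va() before invoking it.
 */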

/*
 * We currently support using a VM-specified IPA size. For backward
 * compatibility, the default IPA size is fixed to 40 bits.
 */
#define KVM_PHYS_SHIFT	(40)

#define kvm_phys_shift(kvm)		VTCR_EL2_IPA(kvm->arch.vtcr)
#define kvm_phys_size(kvm)		(_AC(1, ULL) << kvm_phys_shift(kvm))
#define kvm_phys_mask(kvm)		(kvm_phys_size(kvm) - _AC(1, ULL))
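
/*
 * Example (illustrative): for a VM whose VTCR_EL2 encodes the default
 * 40-bit IPA space, kvm_phys_size(kvm) is 1ULL << 40 (1TB) and
 * kvm_phys_mask(kvm) is 0xffffffffff.
 */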

#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu);
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);

struct kvm;

#define kvm_flush_dcache_to_poc(a, l)	__flush_dcache_area((a), (l))

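/*
 * 0b101 covers SCTLR_EL1.M (bit 0, stage 1 MMU enable) and SCTLR_EL1.C
 * (bit 2, data cacheability): the vcpu's caches only count as enabled
 * when both are set.
 */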
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
	void *va = page_address(pfn_to_page(pfn));

	/*
	 * With FWB, we ensure that the guest always accesses memory using
	 * cacheable attributes, and we don't have to clean to PoC when
	 * faulting in pages. Furthermore, FWB implies IDC, so cleaning to
	 * PoU is not required either in this case.
	 */
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		return;

	kvm_flush_dcache_to_poc(va, size);
}

static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
						  unsigned long size)
{
	if (icache_is_aliasing()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
		/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
		void *va = page_address(pfn_to_page(pfn));

		invalidate_icache_range((unsigned long)va,
					(unsigned long)va + size);
	}
}

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return get_vmid_bits(reg);
}

/*
 * We are not in the kvm->srcu critical section most of the time, so we take
 * the SRCU read lock here. Since we copy the data from the user page, we
 * can immediately drop the lock again.
 */
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
				       const void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_write_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)

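/*
 * Compose the VTTBR_EL2 value for a stage 2 MMU: the pgd physical
 * address in the BADDR field, the VMID shifted into place, and the
 * CnP (Common not Private) bit when the system supports it.
 */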
static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
{
	struct kvm_vmid *vmid = &mmu->vmid;
	u64 vmid_field, baddr;
	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;

	baddr = mmu->pgd_phys;
	vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
}

/*
 * Must be called from hyp code running at EL2 with an updated VTTBR
 * and interrupts disabled.
 */
static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu)
{
	write_sysreg(kern_hyp_va(mmu->kvm)->arch.vtcr, vtcr_el2);
	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

	/*
	 * ARM errata 1165522 and 1530923 require the actual execution of the
	 * above before we can switch to the EL1/EL0 translation regime used by
	 * the guest.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */