xref: /openbmc/linux/arch/arm64/include/asm/kvm_mmu.h (revision bc1d2892e9aa6dcf6cd83adbd3616051cbd4c429)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __pa_symbol(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  // idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * When using VHE, there are no separate hyp mappings and all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, and none of this applies in that case.
 */
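/*
 * A worked example of the scheme above (illustrative numbers only,
 * assuming VA_BITS == 48 and an idmap page with bit 47 clear):
 *
 *	T = 0x40001000			// BIT(47) clear: idmap in bottom half
 *	HYP_VA_MIN = 1 << 47		// so EL2 shadows the top half
 *	HYP_VA_MAX = (1 << 48) - 1
 */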

#ifdef __ASSEMBLY__

#include <asm/alternative.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * The actual code generation takes place in kvm_update_va_mask, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_update_va_mask uses the
 * specific registers encoded in the instructions).
 */
.macro kern_hyp_va	reg
alternative_cb kvm_update_va_mask
	and     \reg, \reg, #1		/* mask with va_mask */
	ror	\reg, \reg, #1		/* rotate to the first tag bit */
	add	\reg, \reg, #0		/* insert the low 12 bits of the tag */
	add	\reg, \reg, #0, lsl 12	/* insert the top 12 bits of the tag */
	ror	\reg, \reg, #63		/* rotate back */
alternative_cb_end
.endm
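/*
 * Illustrative use only (the symbol below is hypothetical, not part of
 * this file):
 *
 *	adr_l	x0, some_kernel_symbol
 *	kern_hyp_va x0			// x0 now holds the HYP alias
 */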

/*
 * Convert a hypervisor VA to a PA
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 */
.macro hyp_pa reg, tmp
	ldr_l	\tmp, hyp_physvirt_offset
	add	\reg, \reg, \tmp
.endm
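/*
 * hyp_physvirt_offset holds the (PA - hyp VA) delta of the hyp linear
 * map, so the single add above converts the address in place.
 */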

/*
 * Convert a hypervisor VA to a kernel image address
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 *
 * The actual code generation takes place in kvm_get_kimage_voffset, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_get_kimage_voffset uses the
 * specific registers encoded in the instructions).
 */
.macro hyp_kimg_va reg, tmp
	/* Convert hyp VA -> PA. */
	hyp_pa	\reg, \tmp

	/* Load kimage_voffset. */
alternative_cb kvm_get_kimage_voffset
	movz	\tmp, #0
	movk	\tmp, #0, lsl #16
	movk	\tmp, #0, lsl #32
	movk	\tmp, #0, lsl #48
alternative_cb_end

	/* Convert PA -> kimg VA. */
	add	\reg, \reg, \tmp
.endm
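/*
 * Net effect of the macro above, as C-like pseudocode (both offsets
 * are runtime constants):
 *
 *	pa      = hyp_va + hyp_physvirt_offset;
 *	kimg_va = pa + kimage_voffset;
 */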

#else

#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void);
void kvm_apply_hyp_relocations(void);

static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
				    "ror %0, %0, #1\n"
				    "add %0, %0, #0\n"
				    "add %0, %0, #0, lsl 12\n"
				    "ror %0, %0, #63\n",
				    kvm_update_va_mask)
		     : "+r" (v));
	return v;
}

#define kern_hyp_va(v)	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
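/*
 * Example (sketch; 'vectors' stands for any kernel symbol that must be
 * dereferenced from EL2):
 *
 *	extern char vectors[];
 *	unsigned long hyp_va = (unsigned long)kern_hyp_va(vectors);
 */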

/*
 * We currently support using a VM-specified IPA size. For backward
 * compatibility, the default IPA size is fixed to 40 bits.
 */
#define KVM_PHYS_SHIFT	(40)

#define kvm_phys_shift(kvm)		VTCR_EL2_IPA(kvm->arch.vtcr)
#define kvm_phys_size(kvm)		(_AC(1, ULL) << kvm_phys_shift(kvm))
#define kvm_phys_mask(kvm)		(kvm_phys_size(kvm) - _AC(1, ULL))
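/*
 * For the default 40-bit IPA space these evaluate to:
 *
 *	kvm_phys_shift(kvm) == 40
 *	kvm_phys_size(kvm)  == 1ULL << 40	(1 TiB of IPA space)
 *	kvm_phys_mask(kvm)  == 0xffffffffffULL
 */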

#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr);
void free_hyp_pgds(void);
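/*
 * Typical usage (sketch, error handling elided; 'params' is a
 * hypothetical kernel object that EL2 needs read-only access to):
 *
 *	err = create_hyp_mappings(params, params + 1, PAGE_HYP_RO);
 */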

void stage2_unmap_vm(struct kvm *kvm);
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu);
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);

static inline void *__kvm_vector_slot2addr(void *base,
					   enum arm64_hyp_spectre_vector slot)
{
	/*
	 * Any slot other than HYP_VECTOR_DIRECT is shifted down by one:
	 * both *_DIRECT slots resolve to index 0 within their respective
	 * base, and the remaining slots follow at SZ_2K intervals.
	 */
	int idx = slot - (slot != HYP_VECTOR_DIRECT);

	return base + (idx * SZ_2K);
}
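/*
 * With the enum order from asm/spectre.h, HYP_VECTOR_SPECTRE_INDIRECT
 * (slot 3) resolves to idx 2, i.e. base + 4K, while both *_DIRECT
 * slots resolve to base itself.
 */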

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	/* Check SCTLR_EL1.M (bit 0) and SCTLR_EL1.C (bit 2) together. */
	return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
	void *va = page_address(pfn_to_page(pfn));

	/*
	 * With FWB, we ensure that the guest always accesses memory using
	 * cacheable attributes, and we don't have to clean to PoC when
	 * faulting in pages. Furthermore, FWB implies IDC, so cleaning to
	 * PoU is not required either in this case.
	 */
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		return;

	kvm_flush_dcache_to_poc(va, size);
}

static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
						  unsigned long size)
{
	if (icache_is_aliasing()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
		/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
		void *va = page_address(pfn_to_page(pfn));

		invalidate_icache_range((unsigned long)va,
					(unsigned long)va + size);
	}
}

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return get_vmid_bits(reg);
}
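/*
 * ID_AA64MMFR1_EL1.VMIDBits advertises either 8- or 16-bit VMIDs;
 * get_vmid_bits() decodes the sanitised field into that width.
 */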

/*
 * We are not in the kvm->srcu critical section most of the time, so we take
 * the SRCU read lock here. Since we copy the data from the user page, we
 * can immediately drop the lock again.
 */
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
				       const void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_write_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}
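/*
 * Callers running outside a vcpu context (e.g. the vgic ITS code) use
 * these wrappers instead of the bare kvm_read_guest()/kvm_write_guest(),
 * which expect the SRCU read lock to already be held.
 */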

#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)

static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
{
	struct kvm_vmid *vmid = &mmu->vmid;
	u64 vmid_field, baddr;
	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;

	baddr = mmu->pgd_phys;
	vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
}
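/*
 * Resulting VTTBR_EL2 layout (sketch): the VMID in the top bits (shifted
 * by VTTBR_VMID_SHIFT), the stage-2 pgd physical address in BADDR, and
 * the CnP hint in bit 0 when supported.
 */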

/*
 * Must be called from hyp code running at EL2 with an updated VTTBR
 * and interrupts disabled.
 */
static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu)
{
	write_sysreg(kern_hyp_va(mmu->kvm)->arch.vtcr, vtcr_el2);
	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

	/*
	 * ARM errata 1165522 and 1530923 require the actual execution of the
	 * above before we can switch to the EL1/EL0 translation regime used by
	 * the guest.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */