xref: /openbmc/linux/arch/arm64/include/asm/kvm_mmu.h (revision c4b9fd2ac035a55d1fd98322f4360c9d07530597)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __pa_symbol(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  // idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * When using VHE, there are no separate hyp mappings: all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, so none of this applies in that case.
 */

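/*
 * Worked example (illustrative only, assuming VA_BITS == 48 and an
 * idmap located in the bottom half of the VA_BITS range):
 *
 *	HYP_VA_MIN = 1 << 47 = 0x0000800000000000
 *	HYP_VA_MAX           = 0x0000ffffffffffff
 *
 * i.e. the HYP mappings then occupy the upper half of the 48-bit
 * space, shadowing the kernel's linear map at a fixed offset.
 */
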
#ifdef __ASSEMBLY__

#include <asm/alternative.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * The actual code generation takes place in kvm_update_va_mask, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_update_va_mask uses the
 * specific registers encoded in the instructions).
 */
.macro kern_hyp_va	reg
#ifndef __KVM_VHE_HYPERVISOR__
alternative_cb ARM64_ALWAYS_SYSTEM, kvm_update_va_mask
	and     \reg, \reg, #1		/* mask with va_mask */
	ror	\reg, \reg, #1		/* rotate to the first tag bit */
	add	\reg, \reg, #0		/* insert the low 12 bits of the tag */
	add	\reg, \reg, #0, lsl 12	/* insert the top 12 bits of the tag */
	ror	\reg, \reg, #63		/* rotate back */
alternative_cb_end
#endif
.endm

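/*
 * Illustrative use (hypothetical caller, not taken from this file):
 * convert a kernel pointer held in x0 before handing it to EL2:
 *
 *	adr_l	x0, some_kernel_object
 *	kern_hyp_va x0			// x0 now holds the HYP alias
 *
 * The five placeholder instructions above are rewritten at boot by
 * kvm_update_va_mask() with the actual mask and tag values.
 */
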
/*
 * Convert a hypervisor VA to a PA
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 */
.macro hyp_pa reg, tmp
	ldr_l	\tmp, hyp_physvirt_offset
	add	\reg, \reg, \tmp
.endm

/*
 * Convert a hypervisor VA to a kernel image address
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 *
 * The actual code generation takes place in kvm_get_kimage_voffset, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_get_kimage_voffset uses the
 * specific registers encoded in the instructions).
 */
.macro hyp_kimg_va reg, tmp
	/* Convert hyp VA -> PA. */
	hyp_pa	\reg, \tmp

	/* Load kimage_voffset. */
alternative_cb ARM64_ALWAYS_SYSTEM, kvm_get_kimage_voffset
	movz	\tmp, #0
	movk	\tmp, #0, lsl #16
	movk	\tmp, #0, lsl #32
	movk	\tmp, #0, lsl #48
alternative_cb_end

	/* Convert PA -> kimg VA. */
	add	\reg, \reg, \tmp
.endm

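/*
 * Illustrative address flow for hyp_kimg_va (summary, not part of the
 * original header):
 *
 *	hyp VA --(+ hyp_physvirt_offset)--> PA --(+ kimage_voffset)--> kimg VA
 *
 * Since kimage_voffset == (kernel image VA) - PA, the second addition
 * recovers the kernel image alias of the address.
 */
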
#else

#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>

void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void);
void kvm_apply_hyp_relocations(void);

#define __hyp_pa(x) (((phys_addr_t)(x)) + hyp_physvirt_offset)

static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
#ifndef __KVM_VHE_HYPERVISOR__
	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
				    "ror %0, %0, #1\n"
				    "add %0, %0, #0\n"
				    "add %0, %0, #0, lsl 12\n"
				    "ror %0, %0, #63\n",
				    ARM64_ALWAYS_SYSTEM,
				    kvm_update_va_mask)
		     : "+r" (v));
#endif
	return v;
}

#define kern_hyp_va(v)	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
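
/*
 * Illustrative use (hypothetical caller, not from this file):
 *
 *	struct kvm_vcpu *hyp_vcpu = kern_hyp_va(vcpu);
 *
 * i.e. the kernel pointer is rewritten into its EL2 alias before
 * being dereferenced by the nVHE hypervisor.
 */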

/*
 * We currently support using a VM-specified IPA size. For backward
 * compatibility, the default IPA size is fixed to 40 bits.
 */
#define KVM_PHYS_SHIFT	(40)

#define kvm_phys_shift(kvm)		VTCR_EL2_IPA(kvm->arch.vtcr)
#define kvm_phys_size(kvm)		(_AC(1, ULL) << kvm_phys_shift(kvm))
#define kvm_phys_mask(kvm)		(kvm_phys_size(kvm) - _AC(1, ULL))
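
/*
 * For example (illustrative), with the default 40-bit IPA size:
 *
 *	kvm_phys_size(kvm) == 1ULL << 40	(1 TiB of guest IPA space)
 *	kvm_phys_mask(kvm) == 0xffffffffffULL
 */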

#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

int kvm_share_hyp(void *from, void *to);
void kvm_unshare_hyp(void *from, void *to);
int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
int __create_hyp_mappings(unsigned long start, unsigned long size,
			  unsigned long phys, enum kvm_pgtable_prot prot);
int hyp_alloc_private_va_range(size_t size, unsigned long *haddr);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr);
void __init free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type);
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int __init kvm_mmu_init(u32 *hyp_va_bits);

static inline void *__kvm_vector_slot2addr(void *base,
					   enum arm64_hyp_spectre_vector slot)
{
	int idx = slot - (slot != HYP_VECTOR_DIRECT);

	return base + (idx * SZ_2K);
}
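
/*
 * Note (illustrative): each vector slot is SZ_2K bytes. With the
 * expression above, both HYP_VECTOR_DIRECT and HYP_VECTOR_SPECTRE_DIRECT
 * resolve to the first slot (idx 0), and the remaining slots follow at
 * 2K intervals.
 */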

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	\
	dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	u64 cache_bits = SCTLR_ELx_M | SCTLR_ELx_C;
	int reg;

	if (vcpu_is_el2(vcpu))
		reg = SCTLR_EL2;
	else
		reg = SCTLR_EL1;

	return (vcpu_read_sys_reg(vcpu, reg) & cache_bits) == cache_bits;
}

static inline void __clean_dcache_guest_page(void *va, size_t size)
{
	/*
	 * With FWB, we ensure that the guest always accesses memory using
	 * cacheable attributes, and we don't have to clean to PoC when
	 * faulting in pages. Furthermore, FWB implies IDC, so cleaning to
	 * PoU is not required either in this case.
	 */
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		return;

	kvm_flush_dcache_to_poc(va, size);
}

static inline void __invalidate_icache_guest_page(void *va, size_t size)
{
	if (icache_is_aliasing()) {
		/* any kind of VIPT cache */
		icache_inval_all_pou();
	} else if (read_sysreg(CurrentEL) != CurrentEL_EL1 ||
		   !icache_is_vpipt()) {
		/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
		icache_inval_pou((unsigned long)va, (unsigned long)va + size);
	}
}

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return get_vmid_bits(reg);
}

/*
 * We are not in the kvm->srcu critical section most of the time, so we take
 * the SRCU read lock here. Since we copy the data from the user page, we
 * can immediately drop the lock again.
 */
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
				       const void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_write_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)

/*
 * When this is (directly or indirectly) used on the TLB invalidation
 * path, we rely on a previously issued DSB so that page table updates
 * and VMID reads are correctly ordered.
 */
static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
{
	struct kvm_vmid *vmid = &mmu->vmid;
	u64 vmid_field, baddr;
	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;

	baddr = mmu->pgd_phys;
	vmid_field = atomic64_read(&vmid->id) << VTTBR_VMID_SHIFT;
	vmid_field &= VTTBR_VMID_MASK(kvm_arm_vmid_bits);
	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
}

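/*
 * Resulting VTTBR composition (illustrative summary): the VMID sits in
 * the upper bits starting at VTTBR_VMID_SHIFT, the stage-2 PGD physical
 * address fills the BADDR field, and bit 0 carries CnP when the system
 * supports it.
 */
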
/*
 * Must be called from hyp code running at EL2 with an updated VTTBR
 * and interrupts disabled.
 */
static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
					  struct kvm_arch *arch)
{
	write_sysreg(arch->vtcr, vtcr_el2);
	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

	/*
	 * ARM errata 1165522 and 1530923 require the actual execution of the
	 * above before we can switch to the EL1/EL0 translation regime used by
	 * the guest.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}

static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
{
	return container_of(mmu->arch, struct kvm, arch);
}
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */