/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __pa_symbol(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  //idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * When using VHE, there are no separate hyp mappings and all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, and none of this applies in that case.
 */
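
/*
 * Illustrative example of the arithmetic above: with VA_BITS == 48 and
 * the idmap text in the bottom half, HYP_VA_MIN = 1 << 47 =
 * 0x0000800000000000 and HYP_VA_MAX = 0x0000ffffffffffff.
 */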

#ifdef __ASSEMBLY__

#include <asm/alternative.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * The actual code generation takes place in kvm_update_va_mask, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_update_va_mask uses the
 * specific registers encoded in the instructions).
 */
.macro kern_hyp_va	reg
#ifndef __KVM_VHE_HYPERVISOR__
alternative_cb ARM64_ALWAYS_SYSTEM, kvm_update_va_mask
	and	\reg, \reg, #1		/* mask with va_mask */
	ror	\reg, \reg, #1		/* rotate to the first tag bit */
	add	\reg, \reg, #0		/* insert the low 12 bits of the tag */
	add	\reg, \reg, #0, lsl 12	/* insert the top 12 bits of the tag */
	ror	\reg, \reg, #63		/* rotate back */
alternative_cb_end
#endif
.endm

/*
 * Convert a hypervisor VA to a PA
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 */
.macro hyp_pa reg, tmp
	ldr_l	\tmp, hyp_physvirt_offset
	add	\reg, \reg, \tmp
.endm

/*
 * Convert a hypervisor VA to a kernel image address
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 *
 * The actual code generation takes place in kvm_get_kimage_voffset, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_get_kimage_voffset uses the
 * specific registers encoded in the instructions).
 */
.macro hyp_kimg_va reg, tmp
	/* Convert hyp VA -> PA. */
	hyp_pa	\reg, \tmp

	/* Load kimage_voffset. */
alternative_cb ARM64_ALWAYS_SYSTEM, kvm_get_kimage_voffset
	movz	\tmp, #0
	movk	\tmp, #0, lsl #16
	movk	\tmp, #0, lsl #32
	movk	\tmp, #0, lsl #48
alternative_cb_end

	/* Convert PA -> kimg VA. */
	add	\reg, \reg, \tmp
.endm

#else

#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>

void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void);
void kvm_apply_hyp_relocations(void);

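/* hyp VA -> PA: hyp_physvirt_offset holds the PA minus hyp VA delta. */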
#define __hyp_pa(x) (((phys_addr_t)(x)) + hyp_physvirt_offset)

static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
#ifndef __KVM_VHE_HYPERVISOR__
	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
				    "ror %0, %0, #1\n"
				    "add %0, %0, #0\n"
				    "add %0, %0, #0, lsl 12\n"
				    "ror %0, %0, #63\n",
				    ARM64_ALWAYS_SYSTEM,
				    kvm_update_va_mask)
		     : "+r" (v));
#endif
	return v;
}

#define kern_hyp_va(v)	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
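
/*
 * Illustrative use (names hypothetical): translate a kernel pointer
 * before handing it to the nVHE hypervisor:
 *
 *	hyp_ptr = kern_hyp_va(kernel_ptr);
 */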

/*
 * We currently support using a VM-specified IPA size. For backward
 * compatibility, the default IPA size is fixed to 40 bits.
 */
#define KVM_PHYS_SHIFT	(40)

#define kvm_phys_shift(kvm)		VTCR_EL2_IPA(kvm->arch.vtcr)
#define kvm_phys_size(kvm)		(_AC(1, ULL) << kvm_phys_shift(kvm))
#define kvm_phys_mask(kvm)		(kvm_phys_size(kvm) - _AC(1, ULL))
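
/*
 * For example, a VM using the default 40-bit IPA space has
 * kvm_phys_size() == 1TB and kvm_phys_mask() == 0xffffffffff.
 */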

#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

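/* Share/unshare a range of kernel memory [from, to) with the hypervisor. */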
int kvm_share_hyp(void *from, void *to);
void kvm_unshare_hyp(void *from, void *to);
int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
int __create_hyp_mappings(unsigned long start, unsigned long size,
			  unsigned long phys, enum kvm_pgtable_prot prot);
int hyp_alloc_private_va_range(size_t size, unsigned long *haddr);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr);
int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr);
void __init free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type);
void kvm_uninit_stage2_mmu(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int __init kvm_mmu_init(u32 *hyp_va_bits);

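/*
 * Map a vector slot to its address within the vectors pages. Each slot
 * is SZ_2K; HYP_VECTOR_DIRECT and HYP_VECTOR_SPECTRE_DIRECT resolve to
 * the same vectors (the spectre variant differs only in where it is
 * mapped), hence the index adjustment below.
 */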
static inline void *__kvm_vector_slot2addr(void *base,
					   enum arm64_hyp_spectre_vector slot)
{
	int idx = slot - (slot != HYP_VECTOR_DIRECT);

	return base + (idx * SZ_2K);
}

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	\
	dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))

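/*
 * The guest's caches count as enabled only when both the MMU enable
 * (SCTLR_ELx.M) and D-cache enable (SCTLR_ELx.C) bits are set in the
 * SCTLR of the vCPU's current translation regime (EL2 for a vCPU
 * running as a guest hypervisor, EL1 otherwise).
 */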
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	u64 cache_bits = SCTLR_ELx_M | SCTLR_ELx_C;
	int reg;

	if (vcpu_is_el2(vcpu))
		reg = SCTLR_EL2;
	else
		reg = SCTLR_EL1;

	return (vcpu_read_sys_reg(vcpu, reg) & cache_bits) == cache_bits;
}

static inline void __clean_dcache_guest_page(void *va, size_t size)
{
	/*
	 * With FWB, we ensure that the guest always accesses memory using
	 * cacheable attributes, and we don't have to clean to PoC when
	 * faulting in pages. Furthermore, FWB implies IDC, so cleaning to
	 * PoU is not required either in this case.
	 */
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		return;

	kvm_flush_dcache_to_poc(va, size);
}

static inline void __invalidate_icache_guest_page(void *va, size_t size)
{
	if (icache_is_aliasing()) {
		/* any kind of VIPT cache */
		icache_inval_all_pou();
	} else if (read_sysreg(CurrentEL) != CurrentEL_EL1 ||
		   !icache_is_vpipt()) {
		/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
		icache_inval_pou((unsigned long)va, (unsigned long)va + size);
	}
}

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return get_vmid_bits(reg);
}

/*
 * We are not in the kvm->srcu critical section most of the time, so we take
 * the SRCU read lock here. Since we copy the data from the user page, we
 * can immediately drop the lock again.
 */
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

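/* As kvm_read_guest_lock(), but for writing data into the guest. */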
static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
				       const void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_write_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)

/*
 * When this is (directly or indirectly) used on the TLB invalidation
 * path, we rely on a previously issued DSB so that page table updates
 * and VMID reads are correctly ordered.
 */
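/*
 * VTTBR_EL2 is composed of the stage-2 pgd base address, the VMID in
 * the top bits, and (where supported) the Common-not-Private bit.
 */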
static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
{
	struct kvm_vmid *vmid = &mmu->vmid;
	u64 vmid_field, baddr;
	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;

	baddr = mmu->pgd_phys;
	vmid_field = atomic64_read(&vmid->id) << VTTBR_VMID_SHIFT;
	vmid_field &= VTTBR_VMID_MASK(kvm_arm_vmid_bits);
	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
}

/*
 * Must be called from hyp code running at EL2 with an updated VTTBR
 * and interrupts disabled.
 */
static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
					  struct kvm_arch *arch)
{
	write_sysreg(arch->vtcr, vtcr_el2);
	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

	/*
	 * ARM errata 1165522 and 1530923 require the actual execution of the
	 * above before we can switch to the EL1/EL0 translation regime used by
	 * the guest.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}
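
/*
 * Illustrative call from nVHE hyp context, where the kernel VA of the
 * arch pointer must first be converted to a hyp VA:
 *
 *	__load_stage2(mmu, kern_hyp_va(mmu->arch));
 */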

static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
{
	return container_of(mmu->arch, struct kvm, arch);
}
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */