/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __pa_symbol(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  //idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * When using VHE, there are no separate hyp mappings and all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, and none of this applies in that case.
 */
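
/*
 * Worked example of the above (the addresses are made up purely to
 * illustrate the arithmetic): with VA_BITS == 48, BIT(VA_BITS - 1) is
 * bit 47. If __hyp_idmap_text_start sits at physical address
 * 0x40000000 (bit 47 clear, i.e. the bottom half), EL2 shadows the
 * kernel's linear map in the top half:
 *
 *	HYP_VA_MIN = 1 << 47 = 0x0000800000000000
 *	HYP_VA_MAX = 0x0000ffffffffffff
 */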

#ifdef __ASSEMBLY__

#include <asm/alternative.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * The actual code generation takes place in kvm_update_va_mask, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_update_va_mask uses the
 * specific registers encoded in the instructions).
 */
.macro kern_hyp_va	reg
alternative_cb ARM64_ALWAYS_SYSTEM, kvm_update_va_mask
	and	\reg, \reg, #1		/* mask with va_mask */
	ror	\reg, \reg, #1		/* rotate to the first tag bit */
	add	\reg, \reg, #0		/* insert the low 12 bits of the tag */
	add	\reg, \reg, #0, lsl 12	/* insert the top 12 bits of the tag */
	ror	\reg, \reg, #63		/* rotate back */
alternative_cb_end
.endm

/*
 * Convert a hypervisor VA to a PA
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 */
.macro hyp_pa reg, tmp
	ldr_l	\tmp, hyp_physvirt_offset
	add	\reg, \reg, \tmp
.endm

/*
 * Convert a hypervisor VA to a kernel image address
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 *
 * The actual code generation takes place in kvm_get_kimage_voffset, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_get_kimage_voffset uses the
 * specific registers encoded in the instructions).
 */
.macro hyp_kimg_va reg, tmp
	/* Convert hyp VA -> PA. */
	hyp_pa	\reg, \tmp

	/* Load kimage_voffset. */
alternative_cb ARM64_ALWAYS_SYSTEM, kvm_get_kimage_voffset
	movz	\tmp, #0
	movk	\tmp, #0, lsl #16
	movk	\tmp, #0, lsl #32
	movk	\tmp, #0, lsl #48
alternative_cb_end

	/* Convert PA -> kimg VA. */
	add	\reg, \reg, \tmp
.endm
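
/*
 * Taken together, hyp_kimg_va applies two constant offsets, both
 * patched in at boot: hyp VA -> PA (add hyp_physvirt_offset), then
 * PA -> kernel image VA (add kimage_voffset). As a sketch of the
 * equivalent arithmetic (illustrative only):
 *
 *	pa      = hyp_va + hyp_physvirt_offset
 *	kimg_va = pa + kimage_voffset
 */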

#else

#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/kvm_host.h>

void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void);
void kvm_apply_hyp_relocations(void);

#define __hyp_pa(x) (((phys_addr_t)(x)) + hyp_physvirt_offset)

static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
				    "ror %0, %0, #1\n"
				    "add %0, %0, #0\n"
				    "add %0, %0, #0, lsl 12\n"
				    "ror %0, %0, #63\n",
				    ARM64_ALWAYS_SYSTEM,
				    kvm_update_va_mask)
		     : "+r" (v));
	return v;
}

#define kern_hyp_va(v)	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
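
/*
 * Illustrative usage only (the pointer and type are made up):
 * kern_hyp_va() preserves the type of its argument, so a kernel
 * pointer can be converted before being handed to EL2, e.g.:
 *
 *	struct my_state *hyp_ptr = kern_hyp_va(kern_ptr);
 */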

/*
 * We currently support using a VM-specified IPA size. For backward
 * compatibility, the default IPA size is fixed to 40 bits.
 */
#define KVM_PHYS_SHIFT	(40)

#define kvm_phys_shift(kvm)		VTCR_EL2_IPA(kvm->arch.vtcr)
#define kvm_phys_size(kvm)		(_AC(1, ULL) << kvm_phys_shift(kvm))
#define kvm_phys_mask(kvm)		(kvm_phys_size(kvm) - _AC(1, ULL))

#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

int kvm_share_hyp(void *from, void *to);
void kvm_unshare_hyp(void *from, void *to);
int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
int __create_hyp_mappings(unsigned long start, unsigned long size,
			  unsigned long phys, enum kvm_pgtable_prot prot);
int hyp_alloc_private_va_range(size_t size, unsigned long *haddr);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr);
void __init free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type);
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int __init kvm_mmu_init(u32 *hyp_va_bits);

static inline void *__kvm_vector_slot2addr(void *base,
					   enum arm64_hyp_spectre_vector slot)
{
	int idx = slot - (slot != HYP_VECTOR_DIRECT);

	return base + (idx * SZ_2K);
}

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	\
	dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))
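
/*
 * 0b101 selects SCTLR_EL1.M (bit 0, MMU enable) and SCTLR_EL1.C
 * (bit 2, data cache enable): the guest is considered to run with
 * caches enabled only when both bits are set.
 */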
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __clean_dcache_guest_page(void *va, size_t size)
{
	/*
	 * With FWB, we ensure that the guest always accesses memory using
	 * cacheable attributes, and we don't have to clean to PoC when
	 * faulting in pages. Furthermore, FWB implies IDC, so cleaning to
	 * PoU is not required either in this case.
	 */
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		return;

	kvm_flush_dcache_to_poc(va, size);
}

static inline void __invalidate_icache_guest_page(void *va, size_t size)
{
	if (icache_is_aliasing()) {
		/* any kind of VIPT cache */
		icache_inval_all_pou();
	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
		/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
		icache_inval_pou((unsigned long)va, (unsigned long)va + size);
	}
}

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return get_vmid_bits(reg);
}

/*
 * We are not in the kvm->srcu critical section most of the time, so we take
 * the SRCU read lock here. Since we copy the data from the user page, we
 * can immediately drop the lock again.
 */
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
				       const void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_write_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)
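
/*
 * Field layout sketch for the VTTBR_EL2 value built below (assuming
 * 16-bit VMIDs; with 8-bit VMIDs the VMID field is narrower):
 *
 *	bit  0		CnP (Common-not-Private), if supported
 *	bits [47:1]	stage-2 pgd base address
 *	bits [63:48]	VMID
 */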

/*
 * When this is (directly or indirectly) used on the TLB invalidation
 * path, we rely on a previously issued DSB so that page table updates
 * and VMID reads are correctly ordered.
 */
static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
{
	struct kvm_vmid *vmid = &mmu->vmid;
	u64 vmid_field, baddr;
	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;

	baddr = mmu->pgd_phys;
	vmid_field = atomic64_read(&vmid->id) << VTTBR_VMID_SHIFT;
	vmid_field &= VTTBR_VMID_MASK(kvm_arm_vmid_bits);
	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
}

/*
 * Must be called from hyp code running at EL2 with an updated VTTBR
 * and interrupts disabled.
 */
static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
					  struct kvm_arch *arch)
{
	write_sysreg(arch->vtcr, vtcr_el2);
	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

	/*
	 * ARM errata 1165522 and 1530923 require the actual execution of the
	 * above before we can switch to the EL1/EL0 translation regime used by
	 * the guest.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}

static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
{
	return container_of(mmu->arch, struct kvm, arch);
}
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */