/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __pa_symbol(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  //idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * When using VHE, there are no separate hyp mappings and all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, and none of this applies in that case.
 */
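/*
 * Worked example (illustrative only; the address is made up): with
 * VA_BITS == 48, BIT(VA_BITS - 1) == BIT(47). If the idmap page sits
 * at T == 0x40001000, then T & BIT(47) == 0: the idmap lives in the
 * bottom half, so EL2 shadows the linear map using the top half,
 * giving HYP_VA_MIN == 1UL << 47 and HYP_VA_MAX == (1UL << 48) - 1.
 */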
#ifdef __ASSEMBLY__

#include <asm/alternative.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * The actual code generation takes place in kvm_update_va_mask, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_update_va_mask uses the
 * specific registers encoded in the instructions).
 */
.macro kern_hyp_va reg
alternative_cb kvm_update_va_mask
	and	\reg, \reg, #1		/* mask with va_mask */
	ror	\reg, \reg, #1		/* rotate to the first tag bit */
	add	\reg, \reg, #0		/* insert the low 12 bits of the tag */
	add	\reg, \reg, #0, lsl 12	/* insert the top 12 bits of the tag */
	ror	\reg, \reg, #63		/* rotate back */
alternative_cb_end
.endm

/*
 * Convert a kernel image address to a PA
 * reg: kernel address to be converted in place
 * tmp: temporary register
 *
 * The actual code generation takes place in kvm_get_kimage_voffset, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_get_kimage_voffset uses the
 * specific registers encoded in the instructions).
 */
.macro kimg_pa reg, tmp
alternative_cb kvm_get_kimage_voffset
	movz	\tmp, #0
	movk	\tmp, #0, lsl #16
	movk	\tmp, #0, lsl #32
	movk	\tmp, #0, lsl #48
alternative_cb_end

	/* reg = __pa(reg) */
	sub	\reg, \reg, \tmp
.endm
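/*
 * Illustrative sketch (not real generated code): at boot, the
 * kvm_get_kimage_voffset callback rewrites the four instructions
 * above so that they build the 64-bit kimage_voffset constant 16
 * bits at a time. If, hypothetically, kimage_voffset were
 * 0xffff800010000000, the patched sequence would read:
 *
 *	movz	\tmp, #0x0000
 *	movk	\tmp, #0x1000, lsl #16
 *	movk	\tmp, #0x8000, lsl #32
 *	movk	\tmp, #0xffff, lsl #48
 */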
/*
 * Convert a kernel image address to a hyp VA
 * reg: kernel address to be converted in place
 * tmp: temporary register
 *
 * The actual code generation takes place in kvm_update_kimg_phys_offset,
 * and the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_update_kimg_phys_offset uses the
 * specific registers encoded in the instructions).
 */
.macro kimg_hyp_va reg, tmp
alternative_cb kvm_update_kimg_phys_offset
	movz	\tmp, #0
	movk	\tmp, #0, lsl #16
	movk	\tmp, #0, lsl #32
	movk	\tmp, #0, lsl #48
alternative_cb_end

	sub	\reg, \reg, \tmp
	mov_q	\tmp, PAGE_OFFSET
	orr	\reg, \reg, \tmp
	kern_hyp_va \reg
.endm

#else

#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void);

static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
				    "ror %0, %0, #1\n"
				    "add %0, %0, #0\n"
				    "add %0, %0, #0, lsl 12\n"
				    "ror %0, %0, #63\n",
				    kvm_update_va_mask)
		     : "+r" (v));
	return v;
}

#define kern_hyp_va(v) 	((typeof(v))(__kern_hyp_va((unsigned long)(v))))

static __always_inline unsigned long __kimg_hyp_va(unsigned long v)
{
	unsigned long offset;

	asm volatile(ALTERNATIVE_CB("movz %0, #0\n"
				    "movk %0, #0, lsl #16\n"
				    "movk %0, #0, lsl #32\n"
				    "movk %0, #0, lsl #48\n",
				    kvm_update_kimg_phys_offset)
		     : "=r" (offset));

	return __kern_hyp_va((v - offset) | PAGE_OFFSET);
}

#define kimg_fn_hyp_va(v)	((typeof(*v))(__kimg_hyp_va((unsigned long)(v))))

#define kimg_fn_ptr(x)	(typeof(x) **)(x)
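/*
 * C-level sketch of what the patched kern_hyp_va sequence computes
 * (illustrative only; the rotates merely position the tag):
 *
 *	hyp_va = (kern_va & va_mask) | tag_val;
 *
 * i.e. the top bits of the kernel VA are masked off and a per-boot
 * random tag is inserted above them, yielding the fixed-offset HYP
 * alias described at the top of this file. __kimg_hyp_va() first
 * turns a kernel image address into a linear-map address and then
 * applies the same conversion.
 */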
/*
 * We currently support using a VM-specified IPA size. For backward
 * compatibility, the default IPA size is fixed to 40 bits.
 */
#define KVM_PHYS_SHIFT	(40)

#define kvm_phys_shift(kvm)		VTCR_EL2_IPA(kvm->arch.vtcr)
#define kvm_phys_size(kvm)		(_AC(1, ULL) << kvm_phys_shift(kvm))
#define kvm_phys_mask(kvm)		(kvm_phys_size(kvm) - _AC(1, ULL))
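/*
 * For example (illustrative only): a VM using the default 40-bit IPA
 * space has kvm_phys_size(kvm) == 1ULL << 40, i.e. 1TB of guest
 * physical address space, and kvm_phys_mask(kvm) == 0xffffffffff.
 */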
#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu);
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	/* Both SCTLR_EL1.M (bit 0) and SCTLR_EL1.C (bit 2) must be set */
	return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
	void *va = page_address(pfn_to_page(pfn));

	/*
	 * With FWB, we ensure that the guest always accesses memory using
	 * cacheable attributes, and we don't have to clean to PoC when
	 * faulting in pages. Furthermore, FWB implies IDC, so cleaning to
	 * PoU is not required either in this case.
	 */
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		return;

	kvm_flush_dcache_to_poc(va, size);
}

static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
						  unsigned long size)
{
	if (icache_is_aliasing()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
		/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
		void *va = page_address(pfn_to_page(pfn));

		invalidate_icache_range((unsigned long)va,
					(unsigned long)va + size);
	}
}

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return get_vmid_bits(reg);
}

/*
 * We are not in the kvm->srcu critical section most of the time, so we take
 * the SRCU read lock here. Since we copy the data from the user page, we
 * can immediately drop the lock again.
 */
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
				       const void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_write_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}
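/*
 * Usage sketch (hypothetical caller): code that is not already inside
 * a kvm->srcu read-side critical section, such as a device model
 * fetching a descriptor from guest memory, can simply do:
 *
 *	ret = kvm_read_guest_lock(kvm, gpa, &desc, sizeof(desc));
 */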
/*
 * EL2 vectors can be mapped and rerouted in a number of ways,
 * depending on the kernel configuration and CPU present:
 *
 * - If the CPU is affected by Spectre-v2, the hardening sequence is
 *   placed in one of the vector slots, which is executed before jumping
 *   to the real vectors.
 *
 * - If the CPU also has the ARM64_HARDEN_EL2_VECTORS cap, the slot
 *   containing the hardening sequence is mapped next to the idmap page,
 *   and executed before jumping to the real vectors.
 *
 * - If the CPU only has the ARM64_HARDEN_EL2_VECTORS cap, then an
 *   empty slot is selected, mapped next to the idmap page, and
 *   executed before jumping to the real vectors.
 *
 * Note that ARM64_HARDEN_EL2_VECTORS is somewhat incompatible with
 * VHE, as we don't have hypervisor-specific mappings. If the system
 * is VHE and yet selects this capability, it will be ignored.
 */
extern void *__kvm_bp_vect_base;
extern int __kvm_harden_el2_vector_slot;

static inline void *kvm_get_hyp_vector(void)
{
	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
	void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
	int slot = -1;

	if (cpus_have_const_cap(ARM64_SPECTRE_V2) && data->fn) {
		vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
		slot = data->hyp_vectors_slot;
	}

	if (this_cpu_has_cap(ARM64_HARDEN_EL2_VECTORS) && !has_vhe()) {
		vect = __kvm_bp_vect_base;
		if (slot == -1)
			slot = __kvm_harden_el2_vector_slot;
	}

	if (slot != -1)
		vect += slot * SZ_2K;

	return vect;
}

#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)

static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
{
	struct kvm_vmid *vmid = &mmu->vmid;
	u64 vmid_field, baddr;
	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;

	baddr = mmu->pgd_phys;
	vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
}
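/*
 * Sketch of the resulting VTTBR_EL2 value (illustrative values only,
 * assuming phys_to_ttbr() is an identity transform for this PA):
 *
 *	vttbr = pgd_phys | ((u64)vmid << VTTBR_VMID_SHIFT) | CnP;
 *
 * e.g. with pgd_phys == 0x40000000, VMID 5 and CnP support, the
 * register would hold 0x40000000 | (5ULL << 48) | BIT(0).
 */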
/*
 * Must be called from hyp code running at EL2 with an updated VTTBR
 * and interrupts disabled.
 */
static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu)
{
	write_sysreg(kern_hyp_va(mmu->kvm)->arch.vtcr, vtcr_el2);
	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

	/*
	 * ARM errata 1165522 and 1530923 require the actual execution of the
	 * above before we can switch to the EL1/EL0 translation regime used by
	 * the guest.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */