/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __pa_symbol(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  // idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * This of course assumes that the trampoline page exists within the
 * VA_BITS range. If it doesn't, then it means we're in the odd case
 * where the kernel idmap (as well as HYP) uses more levels than the
 * kernel runtime page tables (as seen when the kernel is configured
 * for 4k pages, 39bits VA, and yet memory lives just above that
 * limit, forcing the idmap to use 4 levels of page tables while the
 * kernel itself only uses 3). In this particular case, it doesn't
 * matter which side of VA_BITS we use, as we're guaranteed not to
 * conflict with anything.
 *
 * When using VHE, there are no separate hyp mappings and all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, and none of this applies in that case.
 */
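/*
 * A worked example of the computation above (illustrative numbers,
 * not tied to any particular platform): with 4k pages and
 * VA_BITS == 39, suppose the idmap page sits at physical address
 * 0x80000000. Then:
 *
 *	T = 0x80000000
 *	T & BIT(38) == 0		// idmap is in the bottom half
 *	HYP_VA_MIN = 1 << 38		// = 0x40_0000_0000
 *	HYP_VA_MAX = 0x7f_ffff_ffff
 *
 * i.e. HYP shadows the kernel's linear mapping in the top half of the
 * 39-bit range, leaving the bottom half free for the idmap.
 */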
#ifdef __ASSEMBLY__

#include <asm/alternative.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * The actual code generation takes place in kvm_update_va_mask, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_update_va_mask uses the
 * specific registers encoded in the instructions).
 */
.macro kern_hyp_va	reg
alternative_cb kvm_update_va_mask
	and	\reg, \reg, #1		/* mask with va_mask */
	ror	\reg, \reg, #1		/* rotate to the first tag bit */
	add	\reg, \reg, #0		/* insert the low 12 bits of the tag */
	add	\reg, \reg, #0, lsl 12	/* insert the top 12 bits of the tag */
	ror	\reg, \reg, #63		/* rotate back */
alternative_cb_end
.endm

#else

#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void);

static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
				    "ror %0, %0, #1\n"
				    "add %0, %0, #0\n"
				    "add %0, %0, #0, lsl 12\n"
				    "ror %0, %0, #63\n",
				    kvm_update_va_mask)
		     : "+r" (v));
	return v;
}

#define kern_hyp_va(v)	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
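/*
 * Illustrative use (see kvm_get_hyp_vector() later in this file): any
 * kernel pointer that will be dereferenced at EL2 must be converted
 * first, e.g.
 *
 *	void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
 *
 * The typeof() cast keeps the macro transparent to the pointer type
 * of its argument.
 */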
/*
 * We currently support using a VM-specified IPA size. For backward
 * compatibility, the default IPA size is fixed to 40bits.
 */
#define KVM_PHYS_SHIFT	(40)

#define kvm_phys_shift(kvm)		VTCR_EL2_IPA(kvm->arch.vtcr)
#define kvm_phys_size(kvm)		(_AC(1, ULL) << kvm_phys_shift(kvm))
#define kvm_phys_mask(kvm)		(kvm_phys_size(kvm) - _AC(1, ULL))

static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu);
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define kvm_mk_pmd(ptep)					\
	__pmd(__phys_to_pmd_val(__pa(ptep)) | PMD_TYPE_TABLE)
#define kvm_mk_pud(pmdp)					\
	__pud(__phys_to_pud_val(__pa(pmdp)) | PMD_TYPE_TABLE)
#define kvm_mk_p4d(pmdp)					\
	__p4d(__phys_to_p4d_val(__pa(pmdp)) | PUD_TYPE_TABLE)

#define kvm_set_pud(pudp, pud)		set_pud(pudp, pud)

#define kvm_pfn_pte(pfn, prot)		pfn_pte(pfn, prot)
#define kvm_pfn_pmd(pfn, prot)		pfn_pmd(pfn, prot)
#define kvm_pfn_pud(pfn, prot)		pfn_pud(pfn, prot)

#define kvm_pud_pfn(pud)		pud_pfn(pud)

#define kvm_pmd_mkhuge(pmd)		pmd_mkhuge(pmd)
#define kvm_pud_mkhuge(pud)		pud_mkhuge(pud)
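/*
 * Illustrative (hedged) sketch of how the stage-2 fault handler
 * composes these pfn/huge helpers with the permission helpers that
 * follow; see user_mem_abort() in arch/arm64/kvm/mmu.c for the real
 * code. 'pfn', 'mem_type' and 'writable' are stand-ins for its locals:
 *
 *	pmd_t new_pmd = kvm_pfn_pmd(pfn, mem_type);
 *
 *	new_pmd = kvm_pmd_mkhuge(new_pmd);
 *	if (writable)
 *		new_pmd = kvm_s2pmd_mkwrite(new_pmd);
 */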
static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= PTE_S2_RDWR;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= PMD_S2_RDWR;
	return pmd;
}

static inline pud_t kvm_s2pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= PUD_S2_RDWR;
	return pud;
}

static inline pte_t kvm_s2pte_mkexec(pte_t pte)
{
	pte_val(pte) &= ~PTE_S2_XN;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
{
	pmd_val(pmd) &= ~PMD_S2_XN;
	return pmd;
}

static inline pud_t kvm_s2pud_mkexec(pud_t pud)
{
	pud_val(pud) &= ~PUD_S2_XN;
	return pud;
}

static inline void kvm_set_s2pte_readonly(pte_t *ptep)
{
	pteval_t old_pteval, pteval;

	pteval = READ_ONCE(pte_val(*ptep));
	do {
		old_pteval = pteval;
		pteval &= ~PTE_S2_RDWR;
		pteval |= PTE_S2_RDONLY;
		pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
	} while (pteval != old_pteval);
}

static inline bool kvm_s2pte_readonly(pte_t *ptep)
{
	return (READ_ONCE(pte_val(*ptep)) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_exec(pte_t *ptep)
{
	return !(READ_ONCE(pte_val(*ptep)) & PTE_S2_XN);
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmdp)
{
	kvm_set_s2pte_readonly((pte_t *)pmdp);
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmdp)
{
	return kvm_s2pte_readonly((pte_t *)pmdp);
}

static inline bool kvm_s2pmd_exec(pmd_t *pmdp)
{
	return !(READ_ONCE(pmd_val(*pmdp)) & PMD_S2_XN);
}

static inline void kvm_set_s2pud_readonly(pud_t *pudp)
{
	kvm_set_s2pte_readonly((pte_t *)pudp);
}

static inline bool kvm_s2pud_readonly(pud_t *pudp)
{
	return kvm_s2pte_readonly((pte_t *)pudp);
}
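/*
 * Illustrative (hedged) example: dirty logging write-protects a range
 * by walking the stage-2 tables and downgrading live entries, along
 * the lines of stage2_wp_ptes() in arch/arm64/kvm/mmu.c:
 *
 *	if (!pte_none(*pte) && !kvm_s2pte_readonly(pte))
 *		kvm_set_s2pte_readonly(pte);
 *
 * The cmpxchg loop in kvm_set_s2pte_readonly() makes the downgrade
 * safe against concurrent hardware updates of the entry.
 */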
static inline bool kvm_s2pud_exec(pud_t *pudp)
{
	return !(READ_ONCE(pud_val(*pudp)) & PUD_S2_XN);
}

static inline pud_t kvm_s2pud_mkyoung(pud_t pud)
{
	return pud_mkyoung(pud);
}

static inline bool kvm_s2pud_young(pud_t pud)
{
	return pud_young(pud);
}

#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define hyp_pmd_table_empty(pmdp) (0)
#else
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define hyp_pud_table_empty(pudp) (0)
#else
#define hyp_pud_table_empty(pudp) kvm_page_empty(pudp)
#endif

#ifdef __PAGETABLE_P4D_FOLDED
#define hyp_p4d_table_empty(p4dp) (0)
#else
#define hyp_p4d_table_empty(p4dp) kvm_page_empty(p4dp)
#endif

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}
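/*
 * Note: the 0b101 mask above covers SCTLR_EL1.M (bit 0, stage-1 MMU
 * enable) and SCTLR_EL1.C (bit 2, data cache enable); the guest is
 * only considered to run with caches on when both are set.
 */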
static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
	void *va = page_address(pfn_to_page(pfn));

	/*
	 * With FWB, we ensure that the guest always accesses memory using
	 * cacheable attributes, and we don't have to clean to PoC when
	 * faulting in pages. Furthermore, FWB implies IDC, so cleaning to
	 * PoU is not required either in this case.
	 */
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		return;

	kvm_flush_dcache_to_poc(va, size);
}

static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
						  unsigned long size)
{
	if (icache_is_aliasing()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
		/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
		void *va = page_address(pfn_to_page(pfn));

		invalidate_icache_range((unsigned long)va,
					(unsigned long)va + size);
	}
}

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		struct page *page = pte_page(pte);
		kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
	}
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		struct page *page = pmd_page(pmd);
		kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
	}
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		struct page *page = pud_page(pud);
		kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
	}
}

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return __cpu_uses_extended_idmap_level();
}

static inline unsigned long __kvm_idmap_ptrs_per_pgd(void)
{
	return idmap_ptrs_per_pgd;
}
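/*
 * Illustrative example (numbers assumed, matching the scenario in the
 * comment at the top of this file): with 4k pages and VA_BITS == 39
 * the runtime tables use 3 levels, but if the idmap'd text lives above
 * the 39-bit limit the idmap needs a 4th level. In that case
 * __kvm_cpu_uses_extended_idmap() is true and __kvm_extend_hypmap()
 * below grafts the HYP tables into the extended top level.
 */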
/*
 * Can't use pgd_populate here, because the extended idmap adds an extra level
 * above CONFIG_PGTABLE_LEVELS (which is 2 or 3 if we're using the extended
 * idmap), and pgd_populate is only available if CONFIG_PGTABLE_LEVELS = 4.
 */
static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start)
{
	int idmap_idx;
	u64 pgd_addr;

	/*
	 * Use the first entry to access the HYP mappings. It is
	 * guaranteed to be free, otherwise we wouldn't use an
	 * extended idmap.
	 */
	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
	pgd_addr = __phys_to_pgd_val(__pa(hyp_pgd));
	merged_hyp_pgd[0] = __pgd(pgd_addr | PMD_TYPE_TABLE);

	/*
	 * Create another extended level entry that points to the boot HYP map,
	 * which contains an ID mapping of the HYP init code. We essentially
	 * merge the boot and runtime HYP maps by doing so, but they don't
	 * overlap anyway, so this is fine.
	 */
	idmap_idx = hyp_idmap_start >> VA_BITS;
	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
	pgd_addr = __phys_to_pgd_val(__pa(boot_hyp_pgd));
	merged_hyp_pgd[idmap_idx] = __pgd(pgd_addr | PMD_TYPE_TABLE);
}

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return get_vmid_bits(reg);
}
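/*
 * kvm_get_vmid_bits() returns 8 or 16 depending on the sanitised
 * ID_AA64MMFR1_EL1.VMIDBits field, e.g.:
 *
 *	vmid_bits = kvm_get_vmid_bits();  // 16 on ARMv8.1 VMID16 parts
 *
 * The result feeds VTCR_EL2.VS and the width of the VMID allocator.
 */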
/*
 * We are not in the kvm->srcu critical section most of the time, so we take
 * the SRCU read lock here. Since we copy the data from the user page, we
 * can immediately drop the lock again.
 */
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
				       const void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_write_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}
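/*
 * Illustrative (hedged) example: code that walks guest memory outside
 * of a vcpu context (the VGIC ITS table walker is one user) calls the
 * wrappers instead of open-coding the SRCU lock/unlock pair:
 *
 *	u64 entry;
 *
 *	ret = kvm_read_guest_lock(kvm, gpa, &entry, sizeof(entry));
 *	if (ret)
 *		return ret;
 */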
/*
 * EL2 vectors can be mapped and rerouted in a number of ways,
 * depending on the kernel configuration and CPU present:
 *
 * - If the CPU is affected by Spectre-v2, the hardening sequence is
 *   placed in one of the vector slots, which is executed before jumping
 *   to the real vectors.
 *
 * - If the CPU also has the ARM64_HARDEN_EL2_VECTORS cap, the slot
 *   containing the hardening sequence is mapped next to the idmap page,
 *   and executed before jumping to the real vectors.
 *
 * - If the CPU only has the ARM64_HARDEN_EL2_VECTORS cap, then an
 *   empty slot is selected, mapped next to the idmap page, and
 *   executed before jumping to the real vectors.
 *
 * Note that ARM64_HARDEN_EL2_VECTORS is somewhat incompatible with
 * VHE, as we don't have hypervisor-specific mappings. If the system
 * is VHE and yet selects this capability, it will be ignored.
 */
extern void *__kvm_bp_vect_base;
extern int __kvm_harden_el2_vector_slot;

static inline void *kvm_get_hyp_vector(void)
{
	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
	void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
	int slot = -1;

	if (cpus_have_const_cap(ARM64_SPECTRE_V2) && data->fn) {
		vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
		slot = data->hyp_vectors_slot;
	}

	if (this_cpu_has_cap(ARM64_HARDEN_EL2_VECTORS) && !has_vhe()) {
		vect = __kvm_bp_vect_base;
		if (slot == -1)
			slot = __kvm_harden_el2_vector_slot;
	}

	if (slot != -1)
		vect += slot * SZ_2K;

	return vect;
}

#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)

/*
 * Get the magic number 'x' for VTTBR:BADDR of this KVM instance.
 * With v8.2 LVA extensions, 'x' should be a minimum of 6 with
 * 52bit IPS.
 */
static inline int arm64_vttbr_x(u32 ipa_shift, u32 levels)
{
	int x = ARM64_VTTBR_X(ipa_shift, levels);

	return (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && x < 6) ? 6 : x;
}

static inline u64 vttbr_baddr_mask(u32 ipa_shift, u32 levels)
{
	unsigned int x = arm64_vttbr_x(ipa_shift, levels);

	return GENMASK_ULL(PHYS_MASK_SHIFT - 1, x);
}

static inline u64 kvm_vttbr_baddr_mask(struct kvm *kvm)
{
	return vttbr_baddr_mask(kvm_phys_shift(kvm), kvm_stage2_levels(kvm));
}

static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
{
	struct kvm_vmid *vmid = &mmu->vmid;
	u64 vmid_field, baddr;
	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;

	baddr = mmu->pgd_phys;
	vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
}
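/*
 * Resulting VTTBR_EL2 layout, for reference (VTTBR_VMID_SHIFT is 48):
 *
 *	bits [63:48]	VMID
 *	bits [47:x]	BADDR, the stage-2 pgd physical address
 *	bit  [0]	CnP, if the system supports common-not-private
 *
 * where 'x' is the per-VM alignment computed by arm64_vttbr_x() above.
 */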
/*
 * Must be called from hyp code running at EL2 with an updated VTTBR
 * and interrupts disabled.
 */
static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu)
{
	write_sysreg(kern_hyp_va(mmu->kvm)->arch.vtcr, vtcr_el2);
	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

	/*
	 * ARM errata 1165522 and 1530923 require the actual execution of the
	 * above before we can switch to the EL1/EL0 translation regime used by
	 * the guest.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */