137c43753SMarc Zyngier /* 237c43753SMarc Zyngier * Copyright (C) 2012,2013 - ARM Ltd 337c43753SMarc Zyngier * Author: Marc Zyngier <marc.zyngier@arm.com> 437c43753SMarc Zyngier * 537c43753SMarc Zyngier * This program is free software; you can redistribute it and/or modify 637c43753SMarc Zyngier * it under the terms of the GNU General Public License version 2 as 737c43753SMarc Zyngier * published by the Free Software Foundation. 837c43753SMarc Zyngier * 937c43753SMarc Zyngier * This program is distributed in the hope that it will be useful, 1037c43753SMarc Zyngier * but WITHOUT ANY WARRANTY; without even the implied warranty of 1137c43753SMarc Zyngier * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 1237c43753SMarc Zyngier * GNU General Public License for more details. 1337c43753SMarc Zyngier * 1437c43753SMarc Zyngier * You should have received a copy of the GNU General Public License 1537c43753SMarc Zyngier * along with this program. If not, see <http://www.gnu.org/licenses/>. 1637c43753SMarc Zyngier */ 1737c43753SMarc Zyngier 1837c43753SMarc Zyngier #ifndef __ARM64_KVM_MMU_H__ 1937c43753SMarc Zyngier #define __ARM64_KVM_MMU_H__ 2037c43753SMarc Zyngier 2137c43753SMarc Zyngier #include <asm/page.h> 2237c43753SMarc Zyngier #include <asm/memory.h> 2337c43753SMarc Zyngier 2437c43753SMarc Zyngier /* 2537c43753SMarc Zyngier * As we only have the TTBR0_EL2 register, we cannot express 2637c43753SMarc Zyngier * "negative" addresses. This makes it impossible to directly share 2737c43753SMarc Zyngier * mappings with the kernel. 2837c43753SMarc Zyngier * 2937c43753SMarc Zyngier * Instead, give the HYP mode its own VA region at a fixed offset from 3037c43753SMarc Zyngier * the kernel by just masking the top bits (which are all ones for a 3137c43753SMarc Zyngier * kernel address). 3237c43753SMarc Zyngier */ 3337c43753SMarc Zyngier #define HYP_PAGE_OFFSET_SHIFT VA_BITS 3437c43753SMarc Zyngier #define HYP_PAGE_OFFSET_MASK ((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1) 3537c43753SMarc Zyngier #define HYP_PAGE_OFFSET (PAGE_OFFSET & HYP_PAGE_OFFSET_MASK) 3637c43753SMarc Zyngier 3737c43753SMarc Zyngier /* 3837c43753SMarc Zyngier * Our virtual mapping for the idmap-ed MMU-enable code. Must be 3937c43753SMarc Zyngier * shared across all the page-tables. Conveniently, we use the last 4037c43753SMarc Zyngier * possible page, where no kernel mapping will ever exist. 4137c43753SMarc Zyngier */ 4237c43753SMarc Zyngier #define TRAMPOLINE_VA (HYP_PAGE_OFFSET_MASK & PAGE_MASK) 4337c43753SMarc Zyngier 4438f791a4SChristoffer Dall /* 4538f791a4SChristoffer Dall * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation 4638f791a4SChristoffer Dall * levels in addition to the PGD and potentially the PUD which are 4738f791a4SChristoffer Dall * pre-allocated (we pre-allocate the fake PGD and the PUD when the Stage-2 4838f791a4SChristoffer Dall * tables use one level of tables less than the kernel. 4938f791a4SChristoffer Dall */ 5038f791a4SChristoffer Dall #ifdef CONFIG_ARM64_64K_PAGES 5138f791a4SChristoffer Dall #define KVM_MMU_CACHE_MIN_PAGES 1 5238f791a4SChristoffer Dall #else 5338f791a4SChristoffer Dall #define KVM_MMU_CACHE_MIN_PAGES 2 5438f791a4SChristoffer Dall #endif 5538f791a4SChristoffer Dall 5637c43753SMarc Zyngier #ifdef __ASSEMBLY__ 5737c43753SMarc Zyngier 5837c43753SMarc Zyngier /* 5937c43753SMarc Zyngier * Convert a kernel VA into a HYP VA. 6037c43753SMarc Zyngier * reg: VA to be converted. 6137c43753SMarc Zyngier */ 6237c43753SMarc Zyngier .macro kern_hyp_va reg 6337c43753SMarc Zyngier and \reg, \reg, #HYP_PAGE_OFFSET_MASK 6437c43753SMarc Zyngier .endm 6537c43753SMarc Zyngier 6637c43753SMarc Zyngier #else 6737c43753SMarc Zyngier 6838f791a4SChristoffer Dall #include <asm/pgalloc.h> 6937c43753SMarc Zyngier #include <asm/cachetype.h> 7037c43753SMarc Zyngier #include <asm/cacheflush.h> 7137c43753SMarc Zyngier 7237c43753SMarc Zyngier #define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET) 7337c43753SMarc Zyngier 7437c43753SMarc Zyngier /* 75dbff124eSJoel Schopp * We currently only support a 40bit IPA. 7637c43753SMarc Zyngier */ 77dbff124eSJoel Schopp #define KVM_PHYS_SHIFT (40) 7837c43753SMarc Zyngier #define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT) 7937c43753SMarc Zyngier #define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL) 8037c43753SMarc Zyngier 8137c43753SMarc Zyngier int create_hyp_mappings(void *from, void *to); 8237c43753SMarc Zyngier int create_hyp_io_mappings(void *from, void *to, phys_addr_t); 8337c43753SMarc Zyngier void free_boot_hyp_pgd(void); 8437c43753SMarc Zyngier void free_hyp_pgds(void); 8537c43753SMarc Zyngier 86*957db105SChristoffer Dall void stage2_unmap_vm(struct kvm *kvm); 8737c43753SMarc Zyngier int kvm_alloc_stage2_pgd(struct kvm *kvm); 8837c43753SMarc Zyngier void kvm_free_stage2_pgd(struct kvm *kvm); 8937c43753SMarc Zyngier int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, 90c40f2f8fSArd Biesheuvel phys_addr_t pa, unsigned long size, bool writable); 9137c43753SMarc Zyngier 9237c43753SMarc Zyngier int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run); 9337c43753SMarc Zyngier 9437c43753SMarc Zyngier void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu); 9537c43753SMarc Zyngier 9637c43753SMarc Zyngier phys_addr_t kvm_mmu_get_httbr(void); 9737c43753SMarc Zyngier phys_addr_t kvm_mmu_get_boot_httbr(void); 9837c43753SMarc Zyngier phys_addr_t kvm_get_idmap_vector(void); 9937c43753SMarc Zyngier int kvm_mmu_init(void); 10037c43753SMarc Zyngier void kvm_clear_hyp_idmap(void); 10137c43753SMarc Zyngier 10237c43753SMarc Zyngier #define kvm_set_pte(ptep, pte) set_pte(ptep, pte) 103ad361f09SChristoffer Dall #define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd) 10437c43753SMarc Zyngier 10537c43753SMarc Zyngier static inline void kvm_clean_pgd(pgd_t *pgd) {} 10638f791a4SChristoffer Dall static inline void kvm_clean_pmd(pmd_t *pmd) {} 10737c43753SMarc Zyngier static inline void kvm_clean_pmd_entry(pmd_t *pmd) {} 10837c43753SMarc Zyngier static inline void kvm_clean_pte(pte_t *pte) {} 10937c43753SMarc Zyngier static inline void kvm_clean_pte_entry(pte_t *pte) {} 11037c43753SMarc Zyngier 11137c43753SMarc Zyngier static inline void kvm_set_s2pte_writable(pte_t *pte) 11237c43753SMarc Zyngier { 11337c43753SMarc Zyngier pte_val(*pte) |= PTE_S2_RDWR; 11437c43753SMarc Zyngier } 11537c43753SMarc Zyngier 116ad361f09SChristoffer Dall static inline void kvm_set_s2pmd_writable(pmd_t *pmd) 117ad361f09SChristoffer Dall { 118ad361f09SChristoffer Dall pmd_val(*pmd) |= PMD_S2_RDWR; 119ad361f09SChristoffer Dall } 120ad361f09SChristoffer Dall 121a3c8bd31SMarc Zyngier #define kvm_pgd_addr_end(addr, end) pgd_addr_end(addr, end) 122a3c8bd31SMarc Zyngier #define kvm_pud_addr_end(addr, end) pud_addr_end(addr, end) 123a3c8bd31SMarc Zyngier #define kvm_pmd_addr_end(addr, end) pmd_addr_end(addr, end) 124a3c8bd31SMarc Zyngier 12538f791a4SChristoffer Dall /* 12638f791a4SChristoffer Dall * In the case where PGDIR_SHIFT is larger than KVM_PHYS_SHIFT, we can address 12738f791a4SChristoffer Dall * the entire IPA input range with a single pgd entry, and we would only need 12838f791a4SChristoffer Dall * one pgd entry. Note that in this case, the pgd is actually not used by 12938f791a4SChristoffer Dall * the MMU for Stage-2 translations, but is merely a fake pgd used as a data 13038f791a4SChristoffer Dall * structure for the kernel pgtable macros to work. 13138f791a4SChristoffer Dall */ 13238f791a4SChristoffer Dall #if PGDIR_SHIFT > KVM_PHYS_SHIFT 13338f791a4SChristoffer Dall #define PTRS_PER_S2_PGD_SHIFT 0 13438f791a4SChristoffer Dall #else 13538f791a4SChristoffer Dall #define PTRS_PER_S2_PGD_SHIFT (KVM_PHYS_SHIFT - PGDIR_SHIFT) 13638f791a4SChristoffer Dall #endif 13738f791a4SChristoffer Dall #define PTRS_PER_S2_PGD (1 << PTRS_PER_S2_PGD_SHIFT) 13838f791a4SChristoffer Dall #define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) 13938f791a4SChristoffer Dall 14038f791a4SChristoffer Dall /* 14138f791a4SChristoffer Dall * If we are concatenating first level stage-2 page tables, we would have less 14238f791a4SChristoffer Dall * than or equal to 16 pointers in the fake PGD, because that's what the 14338f791a4SChristoffer Dall * architecture allows. In this case, (4 - CONFIG_ARM64_PGTABLE_LEVELS) 14438f791a4SChristoffer Dall * represents the first level for the host, and we add 1 to go to the next 14538f791a4SChristoffer Dall * level (which uses contatenation) for the stage-2 tables. 14638f791a4SChristoffer Dall */ 14738f791a4SChristoffer Dall #if PTRS_PER_S2_PGD <= 16 14838f791a4SChristoffer Dall #define KVM_PREALLOC_LEVEL (4 - CONFIG_ARM64_PGTABLE_LEVELS + 1) 14938f791a4SChristoffer Dall #else 15038f791a4SChristoffer Dall #define KVM_PREALLOC_LEVEL (0) 15138f791a4SChristoffer Dall #endif 15238f791a4SChristoffer Dall 15338f791a4SChristoffer Dall /** 15438f791a4SChristoffer Dall * kvm_prealloc_hwpgd - allocate inital table for VTTBR 15538f791a4SChristoffer Dall * @kvm: The KVM struct pointer for the VM. 15638f791a4SChristoffer Dall * @pgd: The kernel pseudo pgd 15738f791a4SChristoffer Dall * 15838f791a4SChristoffer Dall * When the kernel uses more levels of page tables than the guest, we allocate 15938f791a4SChristoffer Dall * a fake PGD and pre-populate it to point to the next-level page table, which 16038f791a4SChristoffer Dall * will be the real initial page table pointed to by the VTTBR. 16138f791a4SChristoffer Dall * 16238f791a4SChristoffer Dall * When KVM_PREALLOC_LEVEL==2, we allocate a single page for the PMD and 16338f791a4SChristoffer Dall * the kernel will use folded pud. When KVM_PREALLOC_LEVEL==1, we 16438f791a4SChristoffer Dall * allocate 2 consecutive PUD pages. 16538f791a4SChristoffer Dall */ 16638f791a4SChristoffer Dall static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd) 16738f791a4SChristoffer Dall { 16838f791a4SChristoffer Dall unsigned int i; 16938f791a4SChristoffer Dall unsigned long hwpgd; 17038f791a4SChristoffer Dall 17138f791a4SChristoffer Dall if (KVM_PREALLOC_LEVEL == 0) 17238f791a4SChristoffer Dall return 0; 17338f791a4SChristoffer Dall 17438f791a4SChristoffer Dall hwpgd = __get_free_pages(GFP_KERNEL | __GFP_ZERO, PTRS_PER_S2_PGD_SHIFT); 17538f791a4SChristoffer Dall if (!hwpgd) 17638f791a4SChristoffer Dall return -ENOMEM; 17738f791a4SChristoffer Dall 17838f791a4SChristoffer Dall for (i = 0; i < PTRS_PER_S2_PGD; i++) { 17938f791a4SChristoffer Dall if (KVM_PREALLOC_LEVEL == 1) 18038f791a4SChristoffer Dall pgd_populate(NULL, pgd + i, 18138f791a4SChristoffer Dall (pud_t *)hwpgd + i * PTRS_PER_PUD); 18238f791a4SChristoffer Dall else if (KVM_PREALLOC_LEVEL == 2) 18338f791a4SChristoffer Dall pud_populate(NULL, pud_offset(pgd, 0) + i, 18438f791a4SChristoffer Dall (pmd_t *)hwpgd + i * PTRS_PER_PMD); 18538f791a4SChristoffer Dall } 18638f791a4SChristoffer Dall 18738f791a4SChristoffer Dall return 0; 18838f791a4SChristoffer Dall } 18938f791a4SChristoffer Dall 19038f791a4SChristoffer Dall static inline void *kvm_get_hwpgd(struct kvm *kvm) 19138f791a4SChristoffer Dall { 19238f791a4SChristoffer Dall pgd_t *pgd = kvm->arch.pgd; 19338f791a4SChristoffer Dall pud_t *pud; 19438f791a4SChristoffer Dall 19538f791a4SChristoffer Dall if (KVM_PREALLOC_LEVEL == 0) 19638f791a4SChristoffer Dall return pgd; 19738f791a4SChristoffer Dall 19838f791a4SChristoffer Dall pud = pud_offset(pgd, 0); 19938f791a4SChristoffer Dall if (KVM_PREALLOC_LEVEL == 1) 20038f791a4SChristoffer Dall return pud; 20138f791a4SChristoffer Dall 20238f791a4SChristoffer Dall BUG_ON(KVM_PREALLOC_LEVEL != 2); 20338f791a4SChristoffer Dall return pmd_offset(pud, 0); 20438f791a4SChristoffer Dall } 20538f791a4SChristoffer Dall 20638f791a4SChristoffer Dall static inline void kvm_free_hwpgd(struct kvm *kvm) 20738f791a4SChristoffer Dall { 20838f791a4SChristoffer Dall if (KVM_PREALLOC_LEVEL > 0) { 20938f791a4SChristoffer Dall unsigned long hwpgd = (unsigned long)kvm_get_hwpgd(kvm); 21038f791a4SChristoffer Dall free_pages(hwpgd, PTRS_PER_S2_PGD_SHIFT); 21138f791a4SChristoffer Dall } 21238f791a4SChristoffer Dall } 21338f791a4SChristoffer Dall 2144f853a71SChristoffer Dall static inline bool kvm_page_empty(void *ptr) 2154f853a71SChristoffer Dall { 2164f853a71SChristoffer Dall struct page *ptr_page = virt_to_page(ptr); 2174f853a71SChristoffer Dall return page_count(ptr_page) == 1; 2184f853a71SChristoffer Dall } 2194f853a71SChristoffer Dall 22038f791a4SChristoffer Dall #define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep) 22138f791a4SChristoffer Dall 22238f791a4SChristoffer Dall #ifdef __PAGETABLE_PMD_FOLDED 22338f791a4SChristoffer Dall #define kvm_pmd_table_empty(kvm, pmdp) (0) 2244f853a71SChristoffer Dall #else 22538f791a4SChristoffer Dall #define kvm_pmd_table_empty(kvm, pmdp) \ 22638f791a4SChristoffer Dall (kvm_page_empty(pmdp) && (!(kvm) || KVM_PREALLOC_LEVEL < 2)) 2274f853a71SChristoffer Dall #endif 22838f791a4SChristoffer Dall 22938f791a4SChristoffer Dall #ifdef __PAGETABLE_PUD_FOLDED 23038f791a4SChristoffer Dall #define kvm_pud_table_empty(kvm, pudp) (0) 23138f791a4SChristoffer Dall #else 23238f791a4SChristoffer Dall #define kvm_pud_table_empty(kvm, pudp) \ 23338f791a4SChristoffer Dall (kvm_page_empty(pudp) && (!(kvm) || KVM_PREALLOC_LEVEL < 1)) 23438f791a4SChristoffer Dall #endif 2354f853a71SChristoffer Dall 2364f853a71SChristoffer Dall 23737c43753SMarc Zyngier struct kvm; 23837c43753SMarc Zyngier 2392d58b733SMarc Zyngier #define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l)) 2402d58b733SMarc Zyngier 2412d58b733SMarc Zyngier static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) 2422d58b733SMarc Zyngier { 2432d58b733SMarc Zyngier return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101; 2442d58b733SMarc Zyngier } 2452d58b733SMarc Zyngier 2462d58b733SMarc Zyngier static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva, 247840f4bfbSLaszlo Ersek unsigned long size, 248840f4bfbSLaszlo Ersek bool ipa_uncached) 24937c43753SMarc Zyngier { 250840f4bfbSLaszlo Ersek if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached) 2512d58b733SMarc Zyngier kvm_flush_dcache_to_poc((void *)hva, size); 2522d58b733SMarc Zyngier 25337c43753SMarc Zyngier if (!icache_is_aliasing()) { /* PIPT */ 254ad361f09SChristoffer Dall flush_icache_range(hva, hva + size); 25537c43753SMarc Zyngier } else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */ 25637c43753SMarc Zyngier /* any kind of VIPT cache */ 25737c43753SMarc Zyngier __flush_icache_all(); 25837c43753SMarc Zyngier } 25937c43753SMarc Zyngier } 26037c43753SMarc Zyngier 2614fda342cSSantosh Shilimkar #define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x)) 26237c43753SMarc Zyngier 2639d218a1fSMarc Zyngier void stage2_flush_vm(struct kvm *kvm); 2649d218a1fSMarc Zyngier 26537c43753SMarc Zyngier #endif /* __ASSEMBLY__ */ 26637c43753SMarc Zyngier #endif /* __ARM64_KVM_MMU_H__ */ 267