/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address).
 *
 * ARMv8.1 (using VHE) does have a TTBR1_EL2, and doesn't use these
 * macros (the entire kernel runs at EL2).
 */
#define HYP_PAGE_OFFSET_SHIFT	VA_BITS
#define HYP_PAGE_OFFSET_MASK	((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
#define HYP_PAGE_OFFSET		(PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)

/*
 * Our virtual mapping for the idmap-ed MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the last
 * possible page, where no kernel mapping will ever exist.
 */
#define TRAMPOLINE_VA		(HYP_PAGE_OFFSET_MASK & PAGE_MASK)
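
/*
 * Worked example (illustrative values only, assuming VA_BITS == 39 and
 * 4K pages): HYP_PAGE_OFFSET_MASK == (1UL << 39) - 1 == 0x7fffffffff,
 * so the kernel VA 0xffffffc000001000 masks down to the HYP VA
 * 0x0000004000001000, and TRAMPOLINE_VA is the last page below the
 * 39-bit limit, 0x0000007ffffff000.
 */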
/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation
 * levels in addition to the PGD and potentially the PUD which are
 * pre-allocated (we pre-allocate the fake PGD and the PUD when the Stage-2
 * tables use one level of tables less than the kernel).
 */
#ifdef CONFIG_ARM64_64K_PAGES
#define KVM_MMU_CACHE_MIN_PAGES	1
#else
#define KVM_MMU_CACHE_MIN_PAGES	2
#endif

#ifdef __ASSEMBLY__

#include <asm/alternative.h>
#include <asm/cpufeature.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 */
.macro kern_hyp_va	reg
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	and	\reg, \reg, #HYP_PAGE_OFFSET_MASK
alternative_else
	nop
alternative_endif
.endm

#else

#include <asm/pgalloc.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

#define KERN_TO_HYP(kva)	((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)

/*
 * We currently only support a 40-bit IPA.
 */
#define KVM_PHYS_SHIFT	(40)
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)

int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define	kvm_set_pte(ptep, pte)		set_pte(ptep, pte)
#define	kvm_set_pmd(pmdp, pmd)		set_pmd(pmdp, pmd)
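
/*
 * The kvm_clean_* helpers below are deliberately empty: on arm64 the
 * hardware page table walker is guaranteed to be coherent with the data
 * caches, so (unlike on 32-bit ARM, where the equivalent helpers clean
 * the tables to the PoU) no maintenance is needed here.
 */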
static inline void kvm_clean_pgd(pgd_t *pgd) {}
static inline void kvm_clean_pmd(pmd_t *pmd) {}
static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
static inline void kvm_clean_pte(pte_t *pte) {}
static inline void kvm_clean_pte_entry(pte_t *pte) {}

static inline void kvm_set_s2pte_writable(pte_t *pte)
{
	pte_val(*pte) |= PTE_S2_RDWR;
}

static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
{
	pmd_val(*pmd) |= PMD_S2_RDWR;
}

static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pte_val(*pte) = (pte_val(*pte) & ~PTE_S2_RDWR) | PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	pmd_val(*pmd) = (pmd_val(*pmd) & ~PMD_S2_RDWR) | PMD_S2_RDONLY;
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return (pmd_val(*pmd) & PMD_S2_RDWR) == PMD_S2_RDONLY;
}


#define kvm_pgd_addr_end(addr, end)	pgd_addr_end(addr, end)
#define kvm_pud_addr_end(addr, end)	pud_addr_end(addr, end)
#define kvm_pmd_addr_end(addr, end)	pmd_addr_end(addr, end)

/*
 * In the case where PGDIR_SHIFT is larger than KVM_PHYS_SHIFT, a single
 * pgd entry can address the entire IPA input range. Note that in this
 * case, the pgd is actually not used by the MMU for Stage-2 translations,
 * but is merely a fake pgd used as a data structure for the kernel
 * pgtable macros to work.
 */
#if PGDIR_SHIFT > KVM_PHYS_SHIFT
#define PTRS_PER_S2_PGD_SHIFT	0
#else
#define PTRS_PER_S2_PGD_SHIFT	(KVM_PHYS_SHIFT - PGDIR_SHIFT)
#endif
#define PTRS_PER_S2_PGD		(1 << PTRS_PER_S2_PGD_SHIFT)

#define kvm_pgd_index(addr)	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))

/*
 * If we are concatenating first level stage-2 page tables, we would have
 * 16 or fewer pointers in the fake PGD, because that's what the
 * architecture allows. In this case, (4 - CONFIG_PGTABLE_LEVELS)
 * represents the first level for the host, and we add 1 to go to the next
 * level (which uses concatenation) for the stage-2 tables.
 */
#if PTRS_PER_S2_PGD <= 16
#define KVM_PREALLOC_LEVEL	(4 - CONFIG_PGTABLE_LEVELS + 1)
#else
#define KVM_PREALLOC_LEVEL	(0)
#endif
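
/*
 * Worked example (illustrative, assuming 4K pages with
 * CONFIG_PGTABLE_LEVELS == 4, i.e. PGDIR_SHIFT == 39): PTRS_PER_S2_PGD
 * == 1 << (40 - 39) == 2, which is <= 16, so KVM_PREALLOC_LEVEL ==
 * 4 - 4 + 1 == 1. The fake PGD and the PUD level are pre-allocated, and
 * the two concatenated PUD-level pages form the table the hardware
 * actually walks for Stage-2.
 */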
static inline void *kvm_get_hwpgd(struct kvm *kvm)
{
	pgd_t *pgd = kvm->arch.pgd;
	pud_t *pud;

	if (KVM_PREALLOC_LEVEL == 0)
		return pgd;

	pud = pud_offset(pgd, 0);
	if (KVM_PREALLOC_LEVEL == 1)
		return pud;

	BUG_ON(KVM_PREALLOC_LEVEL != 2);
	return pmd_offset(pud, 0);
}

static inline unsigned int kvm_get_hwpgd_size(void)
{
	if (KVM_PREALLOC_LEVEL > 0)
		return PTRS_PER_S2_PGD * PAGE_SIZE;
	return PTRS_PER_S2_PGD * sizeof(pgd_t);
}

static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define kvm_pmd_table_empty(kvm, pmdp) (0)
#else
#define kvm_pmd_table_empty(kvm, pmdp) \
	(kvm_page_empty(pmdp) && (!(kvm) || KVM_PREALLOC_LEVEL < 2))
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define kvm_pud_table_empty(kvm, pudp) (0)
#else
#define kvm_pud_table_empty(kvm, pudp) \
	(kvm_page_empty(pudp) && (!(kvm) || KVM_PREALLOC_LEVEL < 1))
#endif


struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}
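
/*
 * For reference: the 0b101 mask above covers SCTLR_EL1.M (bit 0, MMU
 * enable) and SCTLR_EL1.C (bit 2, data cache enable); the guest's
 * caches only count as enabled when both bits are set.
 */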
static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
					       kvm_pfn_t pfn,
					       unsigned long size,
					       bool ipa_uncached)
{
	void *va = page_address(pfn_to_page(pfn));

	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
		kvm_flush_dcache_to_poc(va, size);

	if (!icache_is_aliasing()) {		/* PIPT */
		flush_icache_range((unsigned long)va,
				   (unsigned long)va + size);
	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);
	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	struct page *page = pud_page(pud);
	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
}

#define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return __cpu_uses_extended_idmap();
}
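
/*
 * Worked example for the helper below (hypothetical numbers, assuming
 * VA_BITS == 39): if the idmap'd HYP init code sits at physical address
 * 0x8080000000, which is not reachable with 39 bits, then idmap_idx ==
 * 0x8080000000 >> 39 == 1, so entry 1 of the merged pgd points to the
 * boot HYP tables while entry 0 covers the runtime HYP mappings.
 */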
static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start)
{
	int idmap_idx;

	/*
	 * Use the first entry to access the HYP mappings. It is
	 * guaranteed to be free, otherwise we wouldn't use an
	 * extended idmap.
	 */
	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
	merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);

	/*
	 * Create another extended level entry that points to the boot HYP map,
	 * which contains an ID mapping of the HYP init code. We essentially
	 * merge the boot and runtime HYP maps by doing so, but they don't
	 * overlap anyway, so this is fine.
	 */
	idmap_idx = hyp_idmap_start >> VA_BITS;
	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
	merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
}

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_system_reg(SYS_ID_AA64MMFR1_EL1);

	/* ID_AA64MMFR1_EL1.VMIDBits == 2 means the CPU implements 16-bit VMIDs */
	return (cpuid_feature_extract_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */