/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address).
 *
 * ARMv8.1 (using VHE) does have a TTBR1_EL2, and doesn't use these
 * macros (the entire kernel runs at EL2).
 */
#define HYP_PAGE_OFFSET_SHIFT	VA_BITS
#define HYP_PAGE_OFFSET_MASK	((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
#define HYP_PAGE_OFFSET		(PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)

/*
 * Our virtual mapping for the idmap-ed MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the last
 * possible page, where no kernel mapping will ever exist.
 */
#define TRAMPOLINE_VA		(HYP_PAGE_OFFSET_MASK & PAGE_MASK)

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation
 * levels in addition to the PGD and potentially the PUD which are
 * pre-allocated (we pre-allocate the fake PGD and the PUD when the Stage-2
 * tables use one level of tables less than the kernel).
 */
#ifdef CONFIG_ARM64_64K_PAGES
#define KVM_MMU_CACHE_MIN_PAGES	1
#else
#define KVM_MMU_CACHE_MIN_PAGES	2
#endif

#ifdef __ASSEMBLY__

#include <asm/alternative.h>
#include <asm/cpufeature.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 */
.macro kern_hyp_va	reg
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	and	\reg, \reg, #HYP_PAGE_OFFSET_MASK
alternative_else
	nop
alternative_endif
.endm

#else

#include <asm/pgalloc.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

#define KERN_TO_HYP(kva)	((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)

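/*
 * Worked example of the kernel-to-HYP conversion (illustrative values
 * only, assuming VA_BITS == 39, which gives a PAGE_OFFSET of
 * 0xffffffc000000000):
 *
 *   HYP_PAGE_OFFSET_MASK == (1UL << 39) - 1     == 0x0000007fffffffff
 *   HYP_PAGE_OFFSET      == PAGE_OFFSET & MASK  == 0x0000004000000000
 *
 * so the kernel VA 0xffffffc000123000 converts to the HYP VA
 * 0x0000004000123000, both via the kern_hyp_va masking above and via
 * KERN_TO_HYP() for linear-map addresses.
 */
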
/*
 * We currently only support a 40-bit IPA.
 */
#define KVM_PHYS_SHIFT	(40)
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)

int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define kvm_set_pte(ptep, pte)		set_pte(ptep, pte)
#define kvm_set_pmd(pmdp, pmd)		set_pmd(pmdp, pmd)

static inline void kvm_clean_pgd(pgd_t *pgd) {}
static inline void kvm_clean_pmd(pmd_t *pmd) {}
static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
static inline void kvm_clean_pte(pte_t *pte) {}
static inline void kvm_clean_pte_entry(pte_t *pte) {}

static inline void kvm_set_s2pte_writable(pte_t *pte)
{
	pte_val(*pte) |= PTE_S2_RDWR;
}

static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
{
	pmd_val(*pmd) |= PMD_S2_RDWR;
}

static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pte_val(*pte) = (pte_val(*pte) & ~PTE_S2_RDWR) | PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	pmd_val(*pmd) = (pmd_val(*pmd) & ~PMD_S2_RDWR) | PMD_S2_RDONLY;
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return (pmd_val(*pmd) & PMD_S2_RDWR) == PMD_S2_RDONLY;
}

#define kvm_pgd_addr_end(addr, end)	pgd_addr_end(addr, end)
#define kvm_pud_addr_end(addr, end)	pud_addr_end(addr, end)
#define kvm_pmd_addr_end(addr, end)	pmd_addr_end(addr, end)

/*
 * If PGDIR_SHIFT is larger than KVM_PHYS_SHIFT, a single pgd entry can
 * address the entire IPA input range, so only one entry is needed.
 * Note that in this case, the pgd is actually not used by the MMU for
 * Stage-2 translations, but is merely a fake pgd used as a data
 * structure for the kernel pgtable macros to work.
 */
#if PGDIR_SHIFT > KVM_PHYS_SHIFT
#define PTRS_PER_S2_PGD_SHIFT	0
#else
#define PTRS_PER_S2_PGD_SHIFT	(KVM_PHYS_SHIFT - PGDIR_SHIFT)
#endif
#define PTRS_PER_S2_PGD		(1 << PTRS_PER_S2_PGD_SHIFT)

#define kvm_pgd_index(addr)	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))

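/*
 * Illustrative values for the above (assuming KVM_PHYS_SHIFT == 40;
 * example configurations, not an exhaustive list):
 *
 *   4K pages, 39-bit VA, 3 levels:  PGDIR_SHIFT == 30, so
 *	PTRS_PER_S2_PGD == 1 << (40 - 30) == 1024
 *   4K pages, 48-bit VA, 4 levels:  PGDIR_SHIFT == 39, so
 *	PTRS_PER_S2_PGD == 1 << (40 - 39) == 2
 *   64K pages, 48-bit VA, 3 levels: PGDIR_SHIFT == 42 > 40, so
 *	PTRS_PER_S2_PGD == 1
 */
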
/*
 * If we are concatenating first level stage-2 page tables, we have at
 * most 16 pointers in the fake PGD, because that's what the
 * architecture allows.  In this case, (4 - CONFIG_PGTABLE_LEVELS)
 * represents the first level for the host, and we add 1 to go to the
 * next level (which uses concatenation) for the stage-2 tables.  For
 * example, with 64K pages and 3 levels PTRS_PER_S2_PGD == 1 and
 * KVM_PREALLOC_LEVEL == 2; with 4K pages and 4 levels
 * PTRS_PER_S2_PGD == 2 and KVM_PREALLOC_LEVEL == 1.
 */
#if PTRS_PER_S2_PGD <= 16
#define KVM_PREALLOC_LEVEL	(4 - CONFIG_PGTABLE_LEVELS + 1)
#else
#define KVM_PREALLOC_LEVEL	(0)
#endif

static inline void *kvm_get_hwpgd(struct kvm *kvm)
{
	pgd_t *pgd = kvm->arch.pgd;
	pud_t *pud;

	if (KVM_PREALLOC_LEVEL == 0)
		return pgd;

	pud = pud_offset(pgd, 0);
	if (KVM_PREALLOC_LEVEL == 1)
		return pud;

	BUG_ON(KVM_PREALLOC_LEVEL != 2);
	return pmd_offset(pud, 0);
}

static inline unsigned int kvm_get_hwpgd_size(void)
{
	if (KVM_PREALLOC_LEVEL > 0)
		return PTRS_PER_S2_PGD * PAGE_SIZE;
	return PTRS_PER_S2_PGD * sizeof(pgd_t);
}

static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define kvm_pmd_table_empty(kvm, pmdp) (0)
#else
#define kvm_pmd_table_empty(kvm, pmdp) \
	(kvm_page_empty(pmdp) && (!(kvm) || KVM_PREALLOC_LEVEL < 2))
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define kvm_pud_table_empty(kvm, pudp) (0)
#else
#define kvm_pud_table_empty(kvm, pudp) \
	(kvm_page_empty(pudp) && (!(kvm) || KVM_PREALLOC_LEVEL < 1))
#endif

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	/* Both the MMU (bit 0) and the D-cache (bit 2) must be enabled */
	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
					       kvm_pfn_t pfn,
					       unsigned long size,
					       bool ipa_uncached)
{
	void *va = page_address(pfn_to_page(pfn));

	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
		kvm_flush_dcache_to_poc(va, size);

	if (!icache_is_aliasing()) {		/* PIPT */
		flush_icache_range((unsigned long)va,
				   (unsigned long)va + size);
	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);
	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	struct page *page = pud_page(pud);
	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
}

#define kvm_virt_to_phys(x)	__virt_to_phys((unsigned long)(x))

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return __cpu_uses_extended_idmap();
}

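/*
 * Illustrative layout built by __kvm_extend_hypmap() below (example
 * values only, assuming VA_BITS == 39 and a HYP idmap located just
 * above the VA range, at physical address 0x8080000000):
 *
 *   merged_hyp_pgd[0] -> hyp_pgd		(runtime HYP mappings)
 *   merged_hyp_pgd[0x8080000000 >> 39]
 *	== merged_hyp_pgd[1] -> boot_hyp_pgd	(idmap of HYP init code)
 */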
static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start)
{
	int idmap_idx;

	/*
	 * Use the first entry to access the HYP mappings. It is
	 * guaranteed to be free, otherwise we wouldn't use an
	 * extended idmap.
	 */
	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
	merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);

	/*
	 * Create another extended level entry that points to the boot HYP
	 * map, which contains an ID mapping of the HYP init code. This
	 * effectively merges the boot and runtime HYP maps, but since
	 * they don't overlap anyway, this is fine.
	 */
	idmap_idx = hyp_idmap_start >> VA_BITS;
	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
	merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
}

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_system_reg(SYS_ID_AA64MMFR1_EL1);

	/* ID_AA64MMFR1_EL1.VMIDBits == 2 advertises 16-bit VMID support */
	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */