/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __virt_to_phys(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  //idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * This of course assumes that the trampoline page exists within the
 * VA_BITS range. If it doesn't, then it means we're in the odd case
 * where the kernel idmap (as well as HYP) uses more levels than the
 * kernel runtime page tables (as seen when the kernel is configured
 * for 4k pages, 39bits VA, and yet memory lives just above that
 * limit, forcing the idmap to use 4 levels of page tables while the
 * kernel itself only uses 3). In this particular case, it doesn't
 * matter which side of VA_BITS we use, as we're guaranteed not to
 * conflict with anything.
 *
 * When using VHE, there are no separate hyp mappings and all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, and none of this applies in that case.
 */

#define HYP_PAGE_OFFSET_HIGH_MASK	((UL(1) << VA_BITS) - 1)
#define HYP_PAGE_OFFSET_LOW_MASK	((UL(1) << (VA_BITS - 1)) - 1)

/* Temporary compat define */
#define HYP_PAGE_OFFSET_MASK		HYP_PAGE_OFFSET_HIGH_MASK
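
/*
 * Purely illustrative values (nothing below depends on them, and
 * VA_BITS is configuration dependent): with a 39-bit VA kernel,
 * HYP_PAGE_OFFSET_HIGH_MASK evaluates to (1UL << 39) - 1 =
 * 0x0000007fffffffff and HYP_PAGE_OFFSET_LOW_MASK to (1UL << 38) - 1 =
 * 0x0000003fffffffff.
 */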

/*
 * Our virtual mapping for the idmap-ed MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the last
 * possible page, where no kernel mapping will ever exist.
 */
#define TRAMPOLINE_VA		(HYP_PAGE_OFFSET_MASK & PAGE_MASK)

#ifdef __ASSEMBLY__

#include <asm/alternative.h>
#include <asm/cpufeature.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * This generates the following sequences:
 * - High mask:
 *		and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
 *		nop
 * - Low mask:
 *		and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
 *		and x0, x0, #HYP_PAGE_OFFSET_LOW_MASK
 * - VHE:
 *		nop
 *		nop
 *
 * The "low mask" version works because the mask is a strict subset of
 * the "high mask", hence performing the first mask for nothing.
 * Should be completely invisible on any viable CPU.
 */
.macro kern_hyp_va	reg
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	and	\reg, \reg, #HYP_PAGE_OFFSET_HIGH_MASK
alternative_else
	nop
alternative_endif
alternative_if_not ARM64_HYP_OFFSET_LOW
	nop
alternative_else
	and	\reg, \reg, #HYP_PAGE_OFFSET_LOW_MASK
alternative_endif
.endm
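
/*
 * Worked example for the sequences above (illustrative only, assuming
 * VA_BITS == 39, a non-VHE CPU, and an idmap page in the top half so
 * that the ARM64_HYP_OFFSET_LOW alternative is applied): a kernel
 * linear-map address of 0xffffffc000001000 in \reg becomes
 * 0x0000004000001000 after the high mask and 0x0000000000001000 after
 * the low mask, which is where the corresponding HYP mapping lives.
 */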

#else

#include <asm/pgalloc.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

static inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE("and %0, %0, %1",
				 "nop",
				 ARM64_HAS_VIRT_HOST_EXTN)
		     : "+r" (v)
		     : "i" (HYP_PAGE_OFFSET_HIGH_MASK));
	asm volatile(ALTERNATIVE("nop",
				 "and %0, %0, %1",
				 ARM64_HYP_OFFSET_LOW)
		     : "+r" (v)
		     : "i" (HYP_PAGE_OFFSET_LOW_MASK));
	return v;
}

#define kern_hyp_va(v)	(typeof(v))(__kern_hyp_va((unsigned long)(v)))
#define KERN_TO_HYP(v)	kern_hyp_va(v)

/*
 * We currently only support a 40bit IPA.
 */
#define KVM_PHYS_SHIFT	(40)
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)
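
/*
 * In other words, guests get a 1UL << 40 byte (1TB) intermediate
 * physical address space, and KVM_PHYS_MASK keeps the low 40 bits of
 * an address, i.e. its offset within that space.
 */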

#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
phys_addr_t kvm_get_idmap_start(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define	kvm_set_pte(ptep, pte)		set_pte(ptep, pte)
#define	kvm_set_pmd(pmdp, pmd)		set_pmd(pmdp, pmd)

static inline void kvm_clean_pgd(pgd_t *pgd) {}
static inline void kvm_clean_pmd(pmd_t *pmd) {}
static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
static inline void kvm_clean_pte(pte_t *pte) {}
static inline void kvm_clean_pte_entry(pte_t *pte) {}

static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= PTE_S2_RDWR;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= PMD_S2_RDWR;
	return pmd;
}
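
/*
 * The helpers below downgrade a stage-2 descriptor to read-only in
 * place. The update uses an exclusive load/store loop so that the
 * read-modify-write of the descriptor is atomic, presumably so a
 * concurrent update to other bits of the entry cannot be lost.
 */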

static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pteval_t pteval;
	unsigned long tmp;

	asm volatile("//	kvm_set_s2pte_readonly\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	and	%0, %0, %3		// clear PTE_S2_RDWR\n"
	"	orr	%0, %0, %4		// set PTE_S2_RDONLY\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*pte))
	: "L" (~PTE_S2_RDWR), "L" (PTE_S2_RDONLY));
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	kvm_set_s2pte_readonly((pte_t *)pmd);
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return kvm_s2pte_readonly((pte_t *)pmd);
}

static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define hyp_pmd_table_empty(pmdp) (0)
#else
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define hyp_pud_table_empty(pudp) (0)
#else
#define hyp_pud_table_empty(pudp) kvm_page_empty(pudp)
#endif

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
					       kvm_pfn_t pfn,
					       unsigned long size,
					       bool ipa_uncached)
{
	void *va = page_address(pfn_to_page(pfn));

	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
		kvm_flush_dcache_to_poc(va, size);

	if (!icache_is_aliasing()) {		/* PIPT */
		flush_icache_range((unsigned long)va,
				   (unsigned long)va + size);
	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}
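
/*
 * The three helpers below flush the data cache to the point of
 * coherency for the whole block mapped by a stage-2 PTE, PMD or PUD
 * entry (PAGE_SIZE, PMD_SIZE and PUD_SIZE respectively).
 */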

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);
	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	struct page *page = pud_page(pud);
	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
}

#define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return __cpu_uses_extended_idmap();
}

static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start)
{
	int idmap_idx;

	/*
	 * Use the first entry to access the HYP mappings. It is
	 * guaranteed to be free, otherwise we wouldn't use an
	 * extended idmap.
	 */
	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
	merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);

	/*
	 * Create another extended level entry that points to the boot HYP map,
	 * which contains an ID mapping of the HYP init code. We essentially
	 * merge the boot and runtime HYP maps by doing so, but they don't
	 * overlap anyway, so this is fine.
	 */
	idmap_idx = hyp_idmap_start >> VA_BITS;
	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
	merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
}

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_system_reg(SYS_ID_AA64MMFR1_EL1);

	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */