/*
 * Copyright (C) 2016 - ARM Ltd
 *
 * stage2 page table helpers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_S2_PGTABLE_H_
#define __ARM64_S2_PGTABLE_H_

#include <linux/hugetlb.h>
#include <asm/pgtable.h>

/*
 * The hardware supports concatenation of up to 16 tables at stage2 entry
 * level and we use the feature whenever possible.
 *
 * Now, the minimum number of bits resolved at any level is (PAGE_SHIFT - 3).
 * On arm64, the smallest PAGE_SIZE supported is 4K, which means
 *		(PAGE_SHIFT - 3) > 4 holds for all page sizes.
 * This implies that the total number of page table levels the hardware
 * expects at stage2 is the number of levels required for (KVM_PHYS_SHIFT - 4)
 * bits in a normal translation (e.g., stage1), since a level boundary cannot
 * fall within the range (KVM_PHYS_SHIFT - 4, KVM_PHYS_SHIFT].
 */
#define STAGE2_PGTABLE_LEVELS		ARM64_HW_PGTABLE_LEVELS(KVM_PHYS_SHIFT - 4)

/*
 * With all the supported VA_BITS and a 40bit guest IPA, the following
 * condition is always true:
 *
 *	STAGE2_PGTABLE_LEVELS <= CONFIG_PGTABLE_LEVELS
 *
 * We base our stage2 page table walker helpers on this assumption and
 * fall back to the host version of a helper wherever possible, i.e., if a
 * particular level (e.g., PUD) is not folded at stage2, we use the host
 * version, since it is guaranteed not to be folded at host.
 *
 * If the condition breaks in the future, we can rearrange the host level
 * definitions and reuse them for stage2. Till then...
 */
#if STAGE2_PGTABLE_LEVELS > CONFIG_PGTABLE_LEVELS
#error "Unsupported combination of guest IPA and host VA_BITS."
#endif

/* S2_PGDIR_SHIFT is the log2 of the size mapped by a top-level stage2 entry */
#define S2_PGDIR_SHIFT			ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - STAGE2_PGTABLE_LEVELS)
#define S2_PGDIR_SIZE			(1UL << S2_PGDIR_SHIFT)
#define S2_PGDIR_MASK			(~(S2_PGDIR_SIZE - 1))

/*
 * The number of pointers across all concatenated stage2 tables, given by
 * the number of bits resolved at the initial level.
 */
#define PTRS_PER_S2_PGD			(1 << (KVM_PHYS_SHIFT - S2_PGDIR_SHIFT))

/*
 * kvm_mmu_cache_min_pages() is the number of pages required to install
 * a stage-2 translation. We pre-allocate the entry level page table at
 * VM creation, so it is not included in this count.
 */
#define kvm_mmu_cache_min_pages(kvm)	(STAGE2_PGTABLE_LEVELS - 1)
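
/*
 * Worked example (illustrative only; assumes the ARM64_HW_PGTABLE_LEVELS()
 * and ARM64_HW_PGTABLE_LEVEL_SHIFT() definitions from asm/pgtable-hwdef.h
 * and the fixed 40bit KVM_PHYS_SHIFT):
 *
 * With 4K pages (PAGE_SHIFT == 12), each level resolves 9 bits, so:
 *
 *	STAGE2_PGTABLE_LEVELS	= ((40 - 4) - 4) / 9	= 3
 *	S2_PGDIR_SHIFT		= 9 * (4 - 1) + 3	= 30
 *	PTRS_PER_S2_PGD		= 1 << (40 - 30)	= 1024
 *
 * i.e., the entry level holds 1024 pointers, which is two concatenated 4K
 * tables (2 x 512 entries), and kvm_mmu_cache_min_pages() == 2 for the two
 * remaining levels below it.
 */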

/* Stage2 PUD definitions when the level is present */
#define STAGE2_PGTABLE_HAS_PUD		(STAGE2_PGTABLE_LEVELS > 3)
#define S2_PUD_SHIFT			ARM64_HW_PGTABLE_LEVEL_SHIFT(1)
#define S2_PUD_SIZE			(1UL << S2_PUD_SHIFT)
#define S2_PUD_MASK			(~(S2_PUD_SIZE - 1))

static inline bool stage2_pgd_none(struct kvm *kvm, pgd_t pgd)
{
	if (STAGE2_PGTABLE_HAS_PUD)
		return pgd_none(pgd);
	else
		return false;
}

static inline void stage2_pgd_clear(struct kvm *kvm, pgd_t *pgdp)
{
	if (STAGE2_PGTABLE_HAS_PUD)
		pgd_clear(pgdp);
}

static inline bool stage2_pgd_present(struct kvm *kvm, pgd_t pgd)
{
	if (STAGE2_PGTABLE_HAS_PUD)
		return pgd_present(pgd);
	else
		return true;
}

static inline void stage2_pgd_populate(struct kvm *kvm, pgd_t *pgd, pud_t *pud)
{
	if (STAGE2_PGTABLE_HAS_PUD)
		pgd_populate(NULL, pgd, pud);
}

static inline pud_t *stage2_pud_offset(struct kvm *kvm,
				       pgd_t *pgd, unsigned long address)
{
	if (STAGE2_PGTABLE_HAS_PUD)
		return pud_offset(pgd, address);
	else
		return (pud_t *)pgd;
}

static inline void stage2_pud_free(struct kvm *kvm, pud_t *pud)
{
	if (STAGE2_PGTABLE_HAS_PUD)
		pud_free(NULL, pud);
}

static inline bool stage2_pud_table_empty(struct kvm *kvm, pud_t *pudp)
{
	if (STAGE2_PGTABLE_HAS_PUD)
		return kvm_page_empty(pudp);
	else
		return false;
}

static inline phys_addr_t
stage2_pud_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
	if (STAGE2_PGTABLE_HAS_PUD) {
		phys_addr_t boundary = (addr + S2_PUD_SIZE) & S2_PUD_MASK;

		return (boundary - 1 < end - 1) ? boundary : end;
	} else {
		return end;
	}
}

/* Stage2 PMD definitions when the level is present */
#define STAGE2_PGTABLE_HAS_PMD		(STAGE2_PGTABLE_LEVELS > 2)
#define S2_PMD_SHIFT			ARM64_HW_PGTABLE_LEVEL_SHIFT(2)
#define S2_PMD_SIZE			(1UL << S2_PMD_SHIFT)
#define S2_PMD_MASK			(~(S2_PMD_SIZE - 1))

static inline bool stage2_pud_none(struct kvm *kvm, pud_t pud)
{
	if (STAGE2_PGTABLE_HAS_PMD)
		return pud_none(pud);
	else
		return false;
}

static inline void stage2_pud_clear(struct kvm *kvm, pud_t *pud)
{
	if (STAGE2_PGTABLE_HAS_PMD)
		pud_clear(pud);
}

static inline bool stage2_pud_present(struct kvm *kvm, pud_t pud)
{
	if (STAGE2_PGTABLE_HAS_PMD)
		return pud_present(pud);
	else
		return true;
}

static inline void stage2_pud_populate(struct kvm *kvm, pud_t *pud, pmd_t *pmd)
{
	if (STAGE2_PGTABLE_HAS_PMD)
		pud_populate(NULL, pud, pmd);
}

static inline pmd_t *stage2_pmd_offset(struct kvm *kvm,
				       pud_t *pud, unsigned long address)
{
	if (STAGE2_PGTABLE_HAS_PMD)
		return pmd_offset(pud, address);
	else
		return (pmd_t *)pud;
}

static inline void stage2_pmd_free(struct kvm *kvm, pmd_t *pmd)
{
	if (STAGE2_PGTABLE_HAS_PMD)
		pmd_free(NULL, pmd);
}

static inline bool stage2_pud_huge(struct kvm *kvm, pud_t pud)
{
	if (STAGE2_PGTABLE_HAS_PMD)
		return pud_huge(pud);
	else
		return false;
}

static inline bool stage2_pmd_table_empty(struct kvm *kvm, pmd_t *pmdp)
{
	if (STAGE2_PGTABLE_HAS_PMD)
		return kvm_page_empty(pmdp);
	else
		return false;
}

static inline phys_addr_t
stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
	if (STAGE2_PGTABLE_HAS_PMD) {
		phys_addr_t boundary = (addr + S2_PMD_SIZE) & S2_PMD_MASK;

		return (boundary - 1 < end - 1) ? boundary : end;
	} else {
		return end;
	}
}
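
/*
 * Illustrative sketch (not part of this header's API; visit_pmd_entry() is
 * a hypothetical callback): the *_addr_end() helpers above exist for range
 * walkers, clamping each step to the next block boundary or to the end of
 * the range, whichever comes first. A PMD-level walk of [addr, end) would
 * look like:
 *
 *	pmd_t *pmd = stage2_pmd_offset(kvm, pud, addr);
 *	do {
 *		next = stage2_pmd_addr_end(kvm, addr, end);
 *		if (!pmd_none(*pmd))
 *			visit_pmd_entry(kvm, pmd, addr, next);
 *	} while (pmd++, addr = next, addr != end);
 *
 * The "boundary - 1 < end - 1" comparison (instead of "boundary < end")
 * keeps the helpers correct if the boundary wraps to 0 at the top of the
 * address space: 0 - 1 is the largest phys_addr_t, so "end" is returned.
 */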

static inline bool stage2_pte_table_empty(struct kvm *kvm, pte_t *ptep)
{
	return kvm_page_empty(ptep);
}

#define stage2_pgd_size(kvm)	(PTRS_PER_S2_PGD * sizeof(pgd_t))

static inline unsigned long stage2_pgd_index(struct kvm *kvm, phys_addr_t addr)
{
	return (addr >> S2_PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1);
}

static inline phys_addr_t
stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t boundary = (addr + S2_PGDIR_SIZE) & S2_PGDIR_MASK;

	return (boundary - 1 < end - 1) ? boundary : end;
}

#endif	/* __ARM64_S2_PGTABLE_H_ */