/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_HYP_MM_H
#define __KVM_HYP_MM_H

#include <asm/kvm_pgtable.h>
#include <asm/spectre.h>
#include <linux/memblock.h>
#include <linux/types.h>

#include <nvhe/memory.h>
#include <nvhe/spinlock.h>

#define HYP_MEMBLOCK_REGIONS 128
extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);
extern struct kvm_pgtable pkvm_pgtable;
extern hyp_spinlock_t pkvm_pgd_lock;
extern struct hyp_pool hpool;
extern u64 __io_map_base;

int hyp_create_idmap(u32 hyp_va_bits);
int hyp_map_vectors(void);
int hyp_back_vmemmap(phys_addr_t phys, unsigned long size, phys_addr_t back);
int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot);
int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
int __pkvm_create_mappings(unsigned long start, unsigned long size,
			   unsigned long phys, enum kvm_pgtable_prot prot);
unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
					    enum kvm_pgtable_prot prot);

static inline void hyp_vmemmap_range(phys_addr_t phys, unsigned long size,
				     unsigned long *start, unsigned long *end)
{
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct hyp_page *p = hyp_phys_to_page(phys);

	*start = (unsigned long)p;
	*end = *start + nr_pages * sizeof(struct hyp_page);
	*start = ALIGN_DOWN(*start, PAGE_SIZE);
	*end = ALIGN(*end, PAGE_SIZE);
}

static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
{
	unsigned long total = 0, i;

	/* Provision the worst case scenario */
	for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) {
		nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
		total += nr_pages;
	}

	return total;
}

static inline unsigned long __hyp_pgtable_total_pages(void)
{
	unsigned long res = 0, i;

	/* Cover all of memory with page-granularity */
	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];
		res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
	}

	return res;
}

static inline unsigned long hyp_s1_pgtable_pages(void)
{
	unsigned long res;

	res = __hyp_pgtable_total_pages();

	/* Allow 1 GiB for private mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}

static inline unsigned long host_s2_mem_pgtable_pages(void)
{
	/*
	 * Include an extra 16 pages to safely upper-bound the worst case of
	 * concatenated pgds.
	 */
	return __hyp_pgtable_total_pages() + 16;
}

static inline unsigned long host_s2_dev_pgtable_pages(void)
{
	/* Allow 1 GiB for MMIO mappings */
	return __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
}

#endif /* __KVM_HYP_MM_H */
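
/*
 * Worked example for __hyp_pgtable_max_pages() (illustrative only: the
 * numbers below assume 4KiB pages, i.e. PTRS_PER_PTE == 512, and
 * KVM_PGTABLE_MAX_LEVELS == 4; both depend on the configuration).
 * To map 1GiB at page granularity, nr_pages = SZ_1G >> PAGE_SHIFT = 262144:
 *
 *	iteration 0: DIV_ROUND_UP(262144, 512) = 512	(lowest-level tables)
 *	iteration 1: DIV_ROUND_UP(512, 512)    =   1
 *	iteration 2: DIV_ROUND_UP(1, 512)      =   1
 *	iteration 3: DIV_ROUND_UP(1, 512)      =   1
 *
 * i.e. a worst-case provision of 515 table pages. This is the bound behind
 * the 1 GiB allowances in hyp_s1_pgtable_pages() and
 * host_s2_dev_pgtable_pages().
 */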
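
/*
 * Sizing example for hyp_vmemmap_range() (illustrative: assumes 4KiB pages
 * and a 32-byte struct hyp_page; the real size depends on the struct
 * layout). For 1GiB of memory, nr_pages = 262144, so the vmemmap span is
 * 262144 * 32 = 8MiB of hyp_page structs. *start and *end come back
 * page-aligned so that the caller can back exactly that window, e.g. via
 * hyp_back_vmemmap().
 */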
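
/*
 * Usage sketch (hypothetical caller, not part of this header): the sizing
 * helpers above let the early boot path bound its memory carveout before
 * any hypervisor page-tables exist.
 *
 *	unsigned long nr_pages = 0;
 *
 *	nr_pages += hyp_s1_pgtable_pages();	    // hyp stage-1 tables
 *	nr_pages += host_s2_mem_pgtable_pages();    // host stage-2 (memory)
 *	nr_pages += host_s2_dev_pgtable_pages();    // host stage-2 (MMIO)
 *
 * Each helper is a worst-case upper bound by construction, so a reserve of
 * nr_pages pages cannot be exhausted by page-table allocations alone.
 */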