// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 - Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */
#ifndef __ARM64_KVM_PKVM_H__
#define __ARM64_KVM_PKVM_H__

#include <linux/arm_ffa.h>
#include <linux/memblock.h>
#include <linux/scatterlist.h>
#include <asm/kvm_pgtable.h>

/* Maximum number of VMs that can co-exist under pKVM. */
#define KVM_MAX_PVMS 255

#define HYP_MEMBLOCK_REGIONS 128

int pkvm_init_host_vm(struct kvm *kvm);
int pkvm_create_hyp_vm(struct kvm *kvm);
void pkvm_destroy_hyp_vm(struct kvm *kvm);

extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);

static inline unsigned long
hyp_vmemmap_memblock_size(struct memblock_region *reg, size_t vmemmap_entry_size)
{
	unsigned long nr_pages = reg->size >> PAGE_SHIFT;
	unsigned long start, end;

	start = (reg->base >> PAGE_SHIFT) * vmemmap_entry_size;
	end = start + nr_pages * vmemmap_entry_size;
	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = ALIGN(end, PAGE_SIZE);

	return end - start;
}

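/*
 * Total number of pages needed for the hypervisor's vmemmap across all of
 * hyp_memory. As an illustrative example (entry size not taken from the
 * kernel): with 4 KiB pages and a 32-byte entry, a 1 GiB region has
 * 262144 pages and needs 8 MiB of vmemmap, i.e. 2048 pages.
 */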
static inline unsigned long hyp_vmemmap_pages(size_t vmemmap_entry_size)
{
	unsigned long res = 0, i;

	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		res += hyp_vmemmap_memblock_size(&kvm_nvhe_sym(hyp_memory)[i],
						 vmemmap_entry_size);
	}

	return res >> PAGE_SHIFT;
}

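/*
 * Pages backing the table of VM handles: 255 pointers of 8 bytes each on
 * arm64 is 2040 bytes, so this currently fits in a single page.
 */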
static inline unsigned long hyp_vm_table_pages(void)
{
	return PAGE_ALIGN(KVM_MAX_PVMS * sizeof(void *)) >> PAGE_SHIFT;
}

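/*
 * Worst-case number of page-table pages needed to map nr_pages at page
 * granularity: one walk level at a time, each level needing one entry per
 * page (or table) below it. E.g., assuming 4 KiB pages (PTRS_PER_PTE ==
 * 512) and four levels, mapping 2^20 pages costs 2048 + 4 + 1 + 1 = 2054
 * table pages.
 */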
static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
{
	unsigned long total = 0, i;

	/* Provision the worst case scenario */
	for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) {
		nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
		total += nr_pages;
	}

	return total;
}

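/*
 * Sum the worst-case page-table overhead of every hyp memblock region,
 * i.e. enough to map all of memory at page granularity.
 */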
static inline unsigned long __hyp_pgtable_total_pages(void)
{
	unsigned long res = 0, i;

	/* Cover all of memory with page-granularity */
	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];
		res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
	}

	return res;
}

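/* Pages to reserve for the hypervisor's stage-1 page-table. */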
static inline unsigned long hyp_s1_pgtable_pages(void)
{
	unsigned long res;

	res = __hyp_pgtable_total_pages();

	/* Allow 1 GiB for private mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}

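/* Pages to reserve for the host's stage-2 page-table. */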
static inline unsigned long host_s2_pgtable_pages(void)
{
	unsigned long res;

	/*
	 * Include an extra 16 pages to safely upper-bound the worst case of
	 * concatenated pgds.
	 */
	res = __hyp_pgtable_total_pages() + 16;

	/* Allow 1 GiB for MMIO mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}

#define KVM_FFA_MBOX_NR_PAGES	1

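/*
 * Pages to reserve for the hypervisor's FF-A proxy: the largest memory
 * transaction descriptor it may need to buffer (bounded by SG_MAX_SEGMENTS
 * address ranges), plus its RX and TX mailboxes.
 */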
static inline unsigned long hyp_ffa_proxy_pages(void)
{
	size_t desc_max;

	/*
	 * The hypervisor FFA proxy needs enough memory to buffer a fragmented
	 * descriptor returned from EL3 in response to a RETRIEVE_REQ call.
	 */
	desc_max = sizeof(struct ffa_mem_region) +
		   sizeof(struct ffa_mem_region_attributes) +
		   sizeof(struct ffa_composite_mem_region) +
		   SG_MAX_SEGMENTS * sizeof(struct ffa_mem_region_addr_range);

	/* Plus a page each for the hypervisor's RX and TX mailboxes. */
	return (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE);
}

#endif	/* __ARM64_KVM_PKVM_H__ */