// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <tabba@google.com>
 */

#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <nvhe/fixed_config.h>
#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

/* Used by icache_is_vpipt(). */
unsigned long __icache_flags;

/* Used by kvm_get_vttbr(). */
unsigned int kvm_arm_vmid_bits;
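
/*
 * Each pvm_init_traps_*() helper below reads the protected VM's restricted
 * view of an ID register via pvm_read_id_reg() and accumulates bits to set
 * and clear in the vCPU's HCR_EL2, MDCR_EL2 and CPTR_EL2 trap configuration.
 */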
/*
 * Set trap register values based on features in ID_AA64PFR0.
 */
static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
	u64 hcr_set = HCR_RW;
	u64 hcr_clear = 0;
	u64 cptr_set = 0;
	u64 cptr_clear = 0;

	/* Protected KVM does not support AArch32 guests. */
	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);

	/*
	 * Linux guests assume support for floating-point and Advanced SIMD. Do
	 * not change the trapping behavior for these from the KVM default.
	 */
	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP),
				PVM_ID_AA64PFR0_ALLOW));
	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD),
				PVM_ID_AA64PFR0_ALLOW));

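	/* hVHE: HCR_EL2.E2H must remain set whenever this vCPU runs. */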
	if (has_hvhe())
		hcr_set |= HCR_E2H;

	/* Trap RAS unless all current versions are supported */
	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), feature_ids) <
	    ID_AA64PFR0_EL1_RAS_V1P1) {
		hcr_set |= HCR_TERR | HCR_TEA;
		hcr_clear |= HCR_FIEN;
	}

	/* Trap AMU */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
		hcr_clear |= HCR_AMVOFFEN;
		cptr_set |= CPTR_EL2_TAM;
	}

	/* Trap SVE */
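	/*
	 * With hVHE, CPTR_EL2 takes the CPACR_EL1 layout, so SVE is trapped
	 * by clearing the ZEN enable bits rather than by setting TZ.
	 */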
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
		if (has_hvhe())
			cptr_clear |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
		else
			cptr_set |= CPTR_EL2_TZ;
	}

	vcpu->arch.hcr_el2 |= hcr_set;
	vcpu->arch.hcr_el2 &= ~hcr_clear;
	vcpu->arch.cptr_el2 |= cptr_set;
	vcpu->arch.cptr_el2 &= ~cptr_clear;
}

/*
 * Set trap register values based on features in ID_AA64PFR1.
 */
static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR1_EL1);
	u64 hcr_set = 0;
	u64 hcr_clear = 0;

	/* Memory Tagging: Trap and Treat as Untagged if not supported. */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), feature_ids)) {
		hcr_set |= HCR_TID5;
		hcr_clear |= HCR_DCT | HCR_ATA;
	}

	vcpu->arch.hcr_el2 |= hcr_set;
	vcpu->arch.hcr_el2 &= ~hcr_clear;
}

/*
 * Set trap register values based on features in ID_AA64DFR0.
 */
static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
	u64 mdcr_set = 0;
	u64 mdcr_clear = 0;
	u64 cptr_set = 0;

	/* Trap/constrain PMU */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
		mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
		mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME |
			      MDCR_EL2_HPMN_MASK;
	}

	/* Trap Debug */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), feature_ids))
		mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE;

	/* Trap OS Double Lock */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DoubleLock), feature_ids))
		mdcr_set |= MDCR_EL2_TDOSA;

	/* Trap SPE */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), feature_ids)) {
		mdcr_set |= MDCR_EL2_TPMS;
		mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
	}

	/* Trap Trace Filter */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
		mdcr_set |= MDCR_EL2_TTRF;

	/* Trap Trace */
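	/* CPACR_EL1_TTA and CPTR_EL2_TTA are the same trap in the two layouts. */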
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) {
		if (has_hvhe())
			cptr_set |= CPACR_EL1_TTA;
		else
			cptr_set |= CPTR_EL2_TTA;
	}

	vcpu->arch.mdcr_el2 |= mdcr_set;
	vcpu->arch.mdcr_el2 &= ~mdcr_clear;
	vcpu->arch.cptr_el2 |= cptr_set;
}

/*
 * Set trap register values based on features in ID_AA64MMFR0.
 */
static void pvm_init_traps_aa64mmfr0(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR0_EL1);
	u64 mdcr_set = 0;

	/* Trap Debug Communications Channel registers */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_FGT), feature_ids))
		mdcr_set |= MDCR_EL2_TDCC;

	vcpu->arch.mdcr_el2 |= mdcr_set;
}

/*
 * Set trap register values based on features in ID_AA64MMFR1.
 */
static void pvm_init_traps_aa64mmfr1(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR1_EL1);
	u64 hcr_set = 0;

	/* Trap LOR */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_LO), feature_ids))
		hcr_set |= HCR_TLOR;

	vcpu->arch.hcr_el2 |= hcr_set;
}

/*
 * Set baseline trap register values.
 */
static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
{
	const u64 hcr_trap_feat_regs = HCR_TID3;
	const u64 hcr_trap_impdef = HCR_TACR | HCR_TIDCP | HCR_TID1;

	/*
	 * Always trap:
	 * - Feature id registers: to control features exposed to guests
	 * - Implementation-defined features
	 */
	vcpu->arch.hcr_el2 |= hcr_trap_feat_regs | hcr_trap_impdef;

	/* Clear res0 and set res1 bits to trap potential new features. */
	vcpu->arch.hcr_el2 &= ~(HCR_RES0);
	vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
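	/*
	 * The RES0/RES1 masks below only apply to the nVHE layout of
	 * CPTR_EL2; with hVHE the register has the CPACR_EL1 format instead.
	 */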
	if (!has_hvhe()) {
		vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
		vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
	}
}

/*
 * Initialize trap register values for protected VMs.
 */
void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
{
	pvm_init_trap_regs(vcpu);
	pvm_init_traps_aa64pfr0(vcpu);
	pvm_init_traps_aa64pfr1(vcpu);
	pvm_init_traps_aa64dfr0(vcpu);
	pvm_init_traps_aa64mmfr0(vcpu);
	pvm_init_traps_aa64mmfr1(vcpu);
}

/*
 * Start VM table handles at the offset defined below instead of at 0,
 * mainly for sanity checking and debugging.
 */
#define HANDLE_OFFSET 0x1000

static unsigned int vm_handle_to_idx(pkvm_handle_t handle)
{
	return handle - HANDLE_OFFSET;
}

static pkvm_handle_t idx_to_vm_handle(unsigned int idx)
{
	return idx + HANDLE_OFFSET;
}
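
/*
 * For illustration: idx 0 maps to handle 0x1000, idx 1 to 0x1001, and so
 * on. A handle below HANDLE_OFFSET wraps to a huge unsigned index and is
 * therefore rejected by the range check in get_vm_by_handle().
 */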

/*
 * Spinlock for protecting state related to the VM table. Protects writes
 * to 'vm_table' and 'nr_table_entries' as well as reads and writes to
 * 'last_hyp_vcpu_lookup'.
 */
static DEFINE_HYP_SPINLOCK(vm_table_lock);

/*
 * The table of VM entries for protected VMs in hyp.
 * Allocated at hyp initialization and setup.
 */
static struct pkvm_hyp_vm **vm_table;

void pkvm_hyp_vm_table_init(void *tbl)
{
	WARN_ON(vm_table);
	vm_table = tbl;
}

/*
 * Return the hyp vm structure corresponding to the handle.
 */
static struct pkvm_hyp_vm *get_vm_by_handle(pkvm_handle_t handle)
{
	unsigned int idx = vm_handle_to_idx(handle);

	if (unlikely(idx >= KVM_MAX_PVMS))
		return NULL;

	return vm_table[idx];
}

struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
					 unsigned int vcpu_idx)
{
	struct pkvm_hyp_vcpu *hyp_vcpu = NULL;
	struct pkvm_hyp_vm *hyp_vm;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx)
		goto unlock;

	hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
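	/*
	 * Take a reference on the VM page: __pkvm_teardown_vm() refuses to
	 * tear down a VM whose refcount is elevated (it returns -EBUSY).
	 */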
	hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
unlock:
	hyp_spin_unlock(&vm_table_lock);
	return hyp_vcpu;
}

void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);

	hyp_spin_lock(&vm_table_lock);
	hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
	hyp_spin_unlock(&vm_table_lock);
}

static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
{
	if (host_vcpu)
		hyp_unpin_shared_mem(host_vcpu, host_vcpu + 1);
}

static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[],
			     unsigned int nr_vcpus)
{
	int i;

	for (i = 0; i < nr_vcpus; i++)
		unpin_host_vcpu(hyp_vcpus[i]->host_vcpu);
}

static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
			     unsigned int nr_vcpus)
{
	hyp_vm->host_kvm = host_kvm;
	hyp_vm->kvm.created_vcpus = nr_vcpus;
	hyp_vm->kvm.arch.vtcr = host_mmu.arch.vtcr;
}

static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
			      struct pkvm_hyp_vm *hyp_vm,
			      struct kvm_vcpu *host_vcpu,
			      unsigned int vcpu_idx)
{
	int ret = 0;

	if (hyp_pin_shared_mem(host_vcpu, host_vcpu + 1))
		return -EBUSY;

	if (host_vcpu->vcpu_idx != vcpu_idx) {
		ret = -EINVAL;
		goto done;
	}

	hyp_vcpu->host_vcpu = host_vcpu;

	hyp_vcpu->vcpu.kvm = &hyp_vm->kvm;
	hyp_vcpu->vcpu.vcpu_id = READ_ONCE(host_vcpu->vcpu_id);
	hyp_vcpu->vcpu.vcpu_idx = vcpu_idx;

	hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu;
	hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
done:
	if (ret)
		unpin_host_vcpu(host_vcpu);
	return ret;
}

static int find_free_vm_table_entry(struct kvm *host_kvm)
{
	int i;

	for (i = 0; i < KVM_MAX_PVMS; ++i) {
		if (!vm_table[i])
			return i;
	}

	return -ENOMEM;
}

/*
 * Allocate a VM table entry and insert a pointer to the new vm.
 *
 * Return a unique handle to the protected VM on success,
 * negative error code on failure.
 */
static pkvm_handle_t insert_vm_table_entry(struct kvm *host_kvm,
					   struct pkvm_hyp_vm *hyp_vm)
{
	struct kvm_s2_mmu *mmu = &hyp_vm->kvm.arch.mmu;
	int idx;

	hyp_assert_lock_held(&vm_table_lock);

	/*
	 * Initializing protected state might have failed, yet a malicious
	 * host could trigger this function. Thus, ensure that 'vm_table'
	 * exists.
	 */
	if (unlikely(!vm_table))
		return -EINVAL;

	idx = find_free_vm_table_entry(host_kvm);
	if (idx < 0)
		return idx;

	hyp_vm->kvm.arch.pkvm.handle = idx_to_vm_handle(idx);

	/* VMID 0 is reserved for the host */
	atomic64_set(&mmu->vmid.id, idx + 1);

	mmu->arch = &hyp_vm->kvm.arch;
	mmu->pgt = &hyp_vm->pgt;

	vm_table[idx] = hyp_vm;
	return hyp_vm->kvm.arch.pkvm.handle;
}

/*
 * Deallocate and remove the VM table entry corresponding to the handle.
 */
static void remove_vm_table_entry(pkvm_handle_t handle)
{
	hyp_assert_lock_held(&vm_table_lock);
	vm_table[vm_handle_to_idx(handle)] = NULL;
}

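/*
 * Size of the donated VM state area: the fixed-size struct followed by one
 * 'struct pkvm_hyp_vcpu *' slot per vCPU (the trailing 'vcpus' array).
 */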
static size_t pkvm_get_hyp_vm_size(unsigned int nr_vcpus)
{
	return size_add(sizeof(struct pkvm_hyp_vm),
		size_mul(sizeof(struct pkvm_hyp_vcpu *), nr_vcpus));
}

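/*
 * Map memory donated by the host into hyp. On success, ownership of the
 * pages has been transferred to hyp and they are unmapped from the host at
 * stage 2; the contents are left untouched ("noclear").
 */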
static void *map_donated_memory_noclear(unsigned long host_va, size_t size)
{
	void *va = (void *)kern_hyp_va(host_va);

	if (!PAGE_ALIGNED(va))
		return NULL;

	if (__pkvm_host_donate_hyp(hyp_virt_to_pfn(va),
				   PAGE_ALIGN(size) >> PAGE_SHIFT))
		return NULL;

	return va;
}

static void *map_donated_memory(unsigned long host_va, size_t size)
{
	void *va = map_donated_memory_noclear(host_va, size);

	if (va)
		memset(va, 0, size);

	return va;
}

static void __unmap_donated_memory(void *va, size_t size)
{
	WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(va),
				       PAGE_ALIGN(size) >> PAGE_SHIFT));
}

static void unmap_donated_memory(void *va, size_t size)
{
	if (!va)
		return;

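	/* Scrub hyp state from the pages before handing them back to the host. */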
	memset(va, 0, size);
	__unmap_donated_memory(va, size);
}

static void unmap_donated_memory_noclear(void *va, size_t size)
{
	if (!va)
		return;

	__unmap_donated_memory(va, size);
}

/*
 * Initialize the hypervisor copy of the protected VM state using the
 * memory donated by the host.
 *
 * Unmaps the donated memory from the host at stage 2.
 *
 * host_kvm: A pointer to the host's struct kvm.
 * vm_hva: The host va of the area being donated for the VM state.
 *	   Must be page aligned.
 * pgd_hva: The host va of the area being donated for the stage-2 PGD for
 *	    the VM. Must be page aligned. Its size is implied by the VM's
 *	    VTCR.
 *
 * Return a unique handle to the protected VM on success,
 * negative error code on failure.
 */
int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
		   unsigned long pgd_hva)
{
	struct pkvm_hyp_vm *hyp_vm = NULL;
	size_t vm_size, pgd_size;
	unsigned int nr_vcpus;
	void *pgd = NULL;
	int ret;

	ret = hyp_pin_shared_mem(host_kvm, host_kvm + 1);
	if (ret)
		return ret;

	nr_vcpus = READ_ONCE(host_kvm->created_vcpus);
	if (nr_vcpus < 1) {
		ret = -EINVAL;
		goto err_unpin_kvm;
	}

	vm_size = pkvm_get_hyp_vm_size(nr_vcpus);
	pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.vtcr);

	ret = -ENOMEM;

	hyp_vm = map_donated_memory(vm_hva, vm_size);
	if (!hyp_vm)
		goto err_remove_mappings;

	pgd = map_donated_memory_noclear(pgd_hva, pgd_size);
	if (!pgd)
		goto err_remove_mappings;

	init_pkvm_hyp_vm(host_kvm, hyp_vm, nr_vcpus);

	hyp_spin_lock(&vm_table_lock);
	ret = insert_vm_table_entry(host_kvm, hyp_vm);
	if (ret < 0)
		goto err_unlock;

	ret = kvm_guest_prepare_stage2(hyp_vm, pgd);
	if (ret)
		goto err_remove_vm_table_entry;
	hyp_spin_unlock(&vm_table_lock);

	return hyp_vm->kvm.arch.pkvm.handle;

err_remove_vm_table_entry:
	remove_vm_table_entry(hyp_vm->kvm.arch.pkvm.handle);
err_unlock:
	hyp_spin_unlock(&vm_table_lock);
err_remove_mappings:
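	/* unmap_donated_memory() is a no-op on NULL, so this path is safe early on. */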
	unmap_donated_memory(hyp_vm, vm_size);
	unmap_donated_memory(pgd, pgd_size);
err_unpin_kvm:
	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
	return ret;
}

/*
 * Initialize the hypervisor copy of the protected vCPU state using the
 * memory donated by the host.
 *
 * handle: The handle for the protected vm.
 * host_vcpu: A pointer to the corresponding host vcpu.
 * vcpu_hva: The host va of the area being donated for the vcpu state.
 *	     Must be page aligned. The size of the area must be equal to
 *	     the page-aligned size of 'struct pkvm_hyp_vcpu'.
 * Return 0 on success, negative error code on failure.
 */
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
		     unsigned long vcpu_hva)
{
	struct pkvm_hyp_vcpu *hyp_vcpu;
	struct pkvm_hyp_vm *hyp_vm;
	unsigned int idx;
	int ret;

	hyp_vcpu = map_donated_memory(vcpu_hva, sizeof(*hyp_vcpu));
	if (!hyp_vcpu)
		return -ENOMEM;

	hyp_spin_lock(&vm_table_lock);

	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		ret = -ENOENT;
		goto unlock;
	}

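	/*
	 * vCPUs must be donated in index order: init_pkvm_hyp_vcpu() rejects
	 * a host vcpu whose vcpu_idx doesn't match the next free slot.
	 */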
	idx = hyp_vm->nr_vcpus;
	if (idx >= hyp_vm->kvm.created_vcpus) {
		ret = -EINVAL;
		goto unlock;
	}

	ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu, idx);
	if (ret)
		goto unlock;

	hyp_vm->vcpus[idx] = hyp_vcpu;
	hyp_vm->nr_vcpus++;
unlock:
	hyp_spin_unlock(&vm_table_lock);

	if (ret)
		unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));

	return ret;
}

static void
teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size)
{
	size = PAGE_ALIGN(size);
	memset(addr, 0, size);

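	/*
	 * Queue every page of the area on the teardown memcache so the host
	 * can reclaim it once the donation is reversed below.
	 */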
	for (void *start = addr; start < addr + size; start += PAGE_SIZE)
		push_hyp_memcache(mc, start, hyp_virt_to_phys);

	unmap_donated_memory_noclear(addr, size);
}

int __pkvm_teardown_vm(pkvm_handle_t handle)
{
	struct kvm_hyp_memcache *mc;
	struct pkvm_hyp_vm *hyp_vm;
	struct kvm *host_kvm;
	unsigned int idx;
	size_t vm_size;
	int err;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		err = -ENOENT;
		goto err_unlock;
	}

	if (WARN_ON(hyp_page_count(hyp_vm))) {
		err = -EBUSY;
		goto err_unlock;
	}

	host_kvm = hyp_vm->host_kvm;

	/* Ensure the VMID is clean before it can be reallocated */
	__kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
	remove_vm_table_entry(handle);
	hyp_spin_unlock(&vm_table_lock);

	/* Reclaim guest pages (including page-table pages) */
	mc = &host_kvm->arch.pkvm.teardown_mc;
	reclaim_guest_pages(hyp_vm, mc);
	unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->nr_vcpus);

	/* Push the metadata pages to the teardown memcache */
	for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
		struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];

		teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
	}

	vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);
	teardown_donated_memory(mc, hyp_vm, vm_size);
	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
	return 0;

err_unlock:
	hyp_spin_unlock(&vm_table_lock);
	return err;
}