// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <tabba@google.com>
 */

#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <nvhe/fixed_config.h>
#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

/* Used by icache_is_vpipt(). */
unsigned long __icache_flags;

/* Used by kvm_get_vttbr(). */
unsigned int kvm_arm_vmid_bits;

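/* The host's maximum supported SVE vector length. */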
unsigned int kvm_host_sve_max_vl;

/*
 * Set trap register values based on features in ID_AA64PFR0.
 */
static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
	u64 hcr_set = HCR_RW;
	u64 hcr_clear = 0;

	/* Protected KVM does not support AArch32 guests. */
	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);

	/*
	 * Linux guests assume support for floating-point and Advanced SIMD. Do
	 * not change the trapping behavior for these from the KVM default.
	 */
	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP),
				PVM_ID_AA64PFR0_ALLOW));
	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD),
				PVM_ID_AA64PFR0_ALLOW));

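	/* With hVHE, HCR_EL2.E2H must remain set. */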
	if (has_hvhe())
		hcr_set |= HCR_E2H;

	/* Trap RAS unless all current versions are supported */
	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), feature_ids) <
	    ID_AA64PFR0_EL1_RAS_V1P1) {
		hcr_set |= HCR_TERR | HCR_TEA;
		hcr_clear |= HCR_FIEN;
	}

	/* Trap AMU */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids))
		hcr_clear |= HCR_AMVOFFEN;

	vcpu->arch.hcr_el2 |= hcr_set;
	vcpu->arch.hcr_el2 &= ~hcr_clear;
}

/*
 * Set trap register values based on features in ID_AA64PFR1.
 */
static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR1_EL1);
	u64 hcr_set = 0;
	u64 hcr_clear = 0;

	/* Memory Tagging: Trap and Treat as Untagged if not supported. */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), feature_ids)) {
		hcr_set |= HCR_TID5;
		hcr_clear |= HCR_DCT | HCR_ATA;
	}

	vcpu->arch.hcr_el2 |= hcr_set;
	vcpu->arch.hcr_el2 &= ~hcr_clear;
}

/*
 * Set trap register values based on features in ID_AA64DFR0.
 */
static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
	u64 mdcr_set = 0;
	u64 mdcr_clear = 0;

	/* Trap/constrain PMU */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
		mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
		mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME |
			      MDCR_EL2_HPMN_MASK;
	}

	/* Trap Debug */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), feature_ids))
		mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE;

	/* Trap OS Double Lock */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DoubleLock), feature_ids))
		mdcr_set |= MDCR_EL2_TDOSA;

	/* Trap SPE */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), feature_ids)) {
		mdcr_set |= MDCR_EL2_TPMS;
		mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
	}

	/* Trap Trace Filter */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
		mdcr_set |= MDCR_EL2_TTRF;

	vcpu->arch.mdcr_el2 |= mdcr_set;
	vcpu->arch.mdcr_el2 &= ~mdcr_clear;
}

/*
 * Set trap register values based on features in ID_AA64MMFR0.
 */
static void pvm_init_traps_aa64mmfr0(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR0_EL1);
	u64 mdcr_set = 0;

	/* Trap Debug Communications Channel registers */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_FGT), feature_ids))
		mdcr_set |= MDCR_EL2_TDCC;

	vcpu->arch.mdcr_el2 |= mdcr_set;
}

/*
 * Set trap register values based on features in ID_AA64MMFR1.
 */
static void pvm_init_traps_aa64mmfr1(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR1_EL1);
	u64 hcr_set = 0;

	/* Trap LOR */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_LO), feature_ids))
		hcr_set |= HCR_TLOR;

	vcpu->arch.hcr_el2 |= hcr_set;
}

/*
 * Set baseline trap register values.
 */
static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
{
	const u64 hcr_trap_feat_regs = HCR_TID3;
	const u64 hcr_trap_impdef = HCR_TACR | HCR_TIDCP | HCR_TID1;

	/*
	 * Always trap:
	 * - Feature id registers: to control features exposed to guests
	 * - Implementation-defined features
	 */
	vcpu->arch.hcr_el2 |= hcr_trap_feat_regs | hcr_trap_impdef;

	/* Clear res0 bits to trap potential new features. */
	vcpu->arch.hcr_el2 &= ~(HCR_RES0);
	vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
}

/*
 * Initialize trap register values for protected VMs.
 */
void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
{
	pvm_init_trap_regs(vcpu);
	pvm_init_traps_aa64pfr0(vcpu);
	pvm_init_traps_aa64pfr1(vcpu);
	pvm_init_traps_aa64dfr0(vcpu);
	pvm_init_traps_aa64mmfr0(vcpu);
	pvm_init_traps_aa64mmfr1(vcpu);
}

/*
 * Start the VM table handle at the offset defined instead of at 0.
 * Mainly for sanity checking and debugging.
 */
#define HANDLE_OFFSET 0x1000

static unsigned int vm_handle_to_idx(pkvm_handle_t handle)
{
	return handle - HANDLE_OFFSET;
}

static pkvm_handle_t idx_to_vm_handle(unsigned int idx)
{
	return idx + HANDLE_OFFSET;
}

/*
 * Spinlock protecting state related to the VM table: writes to 'vm_table'
 * and the lookups performed through it.
 */
static DEFINE_HYP_SPINLOCK(vm_table_lock);

/*
 * The table of VM entries for protected VMs in hyp.
 * Allocated at hyp initialization and setup.
 */
static struct pkvm_hyp_vm **vm_table;

void pkvm_hyp_vm_table_init(void *tbl)
{
	WARN_ON(vm_table);
	vm_table = tbl;
}

/*
 * Return the hyp vm structure corresponding to the handle.
 */
static struct pkvm_hyp_vm *get_vm_by_handle(pkvm_handle_t handle)
{
	unsigned int idx = vm_handle_to_idx(handle);

	if (unlikely(idx >= KVM_MAX_PVMS))
		return NULL;

	return vm_table[idx];
}

struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
					 unsigned int vcpu_idx)
{
	struct pkvm_hyp_vcpu *hyp_vcpu = NULL;
	struct pkvm_hyp_vm *hyp_vm;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx)
		goto unlock;

	hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
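	/*
	 * Take a reference on the VM so it cannot be torn down while this
	 * vCPU is loaded (see the hyp_page_count() check in
	 * __pkvm_teardown_vm()).
	 */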
	hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
unlock:
	hyp_spin_unlock(&vm_table_lock);
	return hyp_vcpu;
}

void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);

	hyp_spin_lock(&vm_table_lock);
	hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
	hyp_spin_unlock(&vm_table_lock);
}

static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
{
	if (host_vcpu)
		hyp_unpin_shared_mem(host_vcpu, host_vcpu + 1);
}

static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[],
			     unsigned int nr_vcpus)
{
	int i;

	for (i = 0; i < nr_vcpus; i++)
		unpin_host_vcpu(hyp_vcpus[i]->host_vcpu);
}

static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
			     unsigned int nr_vcpus)
{
	hyp_vm->host_kvm = host_kvm;
	hyp_vm->kvm.created_vcpus = nr_vcpus;
	hyp_vm->kvm.arch.vtcr = host_mmu.arch.vtcr;
}

static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
			      struct pkvm_hyp_vm *hyp_vm,
			      struct kvm_vcpu *host_vcpu,
			      unsigned int vcpu_idx)
{
	int ret = 0;

	if (hyp_pin_shared_mem(host_vcpu, host_vcpu + 1))
		return -EBUSY;

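	/* The host's and hyp's views of the vCPU index must agree. */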
	if (host_vcpu->vcpu_idx != vcpu_idx) {
		ret = -EINVAL;
		goto done;
	}

	hyp_vcpu->host_vcpu = host_vcpu;

	hyp_vcpu->vcpu.kvm = &hyp_vm->kvm;
	hyp_vcpu->vcpu.vcpu_id = READ_ONCE(host_vcpu->vcpu_id);
	hyp_vcpu->vcpu.vcpu_idx = vcpu_idx;

	hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu;
	hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
done:
	if (ret)
		unpin_host_vcpu(host_vcpu);
	return ret;
}

static int find_free_vm_table_entry(struct kvm *host_kvm)
{
	int i;

	for (i = 0; i < KVM_MAX_PVMS; ++i) {
		if (!vm_table[i])
			return i;
	}

	return -ENOMEM;
}

/*
 * Allocate a VM table entry and insert a pointer to the new vm.
 *
 * Return a unique handle to the protected VM on success,
 * negative error code on failure.
 */
static pkvm_handle_t insert_vm_table_entry(struct kvm *host_kvm,
					   struct pkvm_hyp_vm *hyp_vm)
{
	struct kvm_s2_mmu *mmu = &hyp_vm->kvm.arch.mmu;
	int idx;

	hyp_assert_lock_held(&vm_table_lock);

	/*
	 * Initializing protected state might have failed, yet a malicious
	 * host could trigger this function. Thus, ensure that 'vm_table'
	 * exists.
	 */
	if (unlikely(!vm_table))
		return -EINVAL;

	idx = find_free_vm_table_entry(host_kvm);
	if (idx < 0)
		return idx;

	hyp_vm->kvm.arch.pkvm.handle = idx_to_vm_handle(idx);

	/* VMID 0 is reserved for the host */
	atomic64_set(&mmu->vmid.id, idx + 1);

	mmu->arch = &hyp_vm->kvm.arch;
	mmu->pgt = &hyp_vm->pgt;

	vm_table[idx] = hyp_vm;
	return hyp_vm->kvm.arch.pkvm.handle;
}

/*
 * Deallocate and remove the VM table entry corresponding to the handle.
 */
static void remove_vm_table_entry(pkvm_handle_t handle)
{
	hyp_assert_lock_held(&vm_table_lock);
	vm_table[vm_handle_to_idx(handle)] = NULL;
}

static size_t pkvm_get_hyp_vm_size(unsigned int nr_vcpus)
{
	return size_add(sizeof(struct pkvm_hyp_vm),
		size_mul(sizeof(struct pkvm_hyp_vcpu *), nr_vcpus));
}

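/*
 * Donated memory changes ownership from the host to the hypervisor; on
 * success it is also unmapped from the host's stage-2 page-tables.
 */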
static void *map_donated_memory_noclear(unsigned long host_va, size_t size)
{
	void *va = (void *)kern_hyp_va(host_va);

	if (!PAGE_ALIGNED(va))
		return NULL;

	if (__pkvm_host_donate_hyp(hyp_virt_to_pfn(va),
				   PAGE_ALIGN(size) >> PAGE_SHIFT))
		return NULL;

	return va;
}

static void *map_donated_memory(unsigned long host_va, size_t size)
{
	void *va = map_donated_memory_noclear(host_va, size);

	if (va)
		memset(va, 0, size);

	return va;
}

static void __unmap_donated_memory(void *va, size_t size)
{
	WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(va),
				       PAGE_ALIGN(size) >> PAGE_SHIFT));
}

static void unmap_donated_memory(void *va, size_t size)
{
	if (!va)
		return;

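	/* Scrub hyp state from the memory before handing it back to the host. */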
	memset(va, 0, size);
	__unmap_donated_memory(va, size);
}

static void unmap_donated_memory_noclear(void *va, size_t size)
{
	if (!va)
		return;

	__unmap_donated_memory(va, size);
}

/*
 * Initialize the hypervisor copy of the protected VM state using the
 * memory donated by the host.
 *
 * Unmaps the donated memory from the host at stage 2.
 *
 * host_kvm: A pointer to the host's struct kvm.
 * vm_hva: The host va of the area being donated for the VM state.
 *	   Must be page aligned.
 * pgd_hva: The host va of the area being donated for the stage-2 PGD for
 *	    the VM. Must be page aligned. Its size is implied by the VM's
 *	    VTCR.
 *
 * Return a unique handle to the protected VM on success,
 * negative error code on failure.
 */
int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
		   unsigned long pgd_hva)
{
	struct pkvm_hyp_vm *hyp_vm = NULL;
	size_t vm_size, pgd_size;
	unsigned int nr_vcpus;
	void *pgd = NULL;
	int ret;

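	/* Pin the host's 'struct kvm' so it cannot be unshared while in use. */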
	ret = hyp_pin_shared_mem(host_kvm, host_kvm + 1);
	if (ret)
		return ret;

	nr_vcpus = READ_ONCE(host_kvm->created_vcpus);
	if (nr_vcpus < 1) {
		ret = -EINVAL;
		goto err_unpin_kvm;
	}

	vm_size = pkvm_get_hyp_vm_size(nr_vcpus);
	pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.vtcr);

	ret = -ENOMEM;

	hyp_vm = map_donated_memory(vm_hva, vm_size);
	if (!hyp_vm)
		goto err_remove_mappings;

	pgd = map_donated_memory_noclear(pgd_hva, pgd_size);
	if (!pgd)
		goto err_remove_mappings;

	init_pkvm_hyp_vm(host_kvm, hyp_vm, nr_vcpus);

	hyp_spin_lock(&vm_table_lock);
	ret = insert_vm_table_entry(host_kvm, hyp_vm);
	if (ret < 0)
		goto err_unlock;

	ret = kvm_guest_prepare_stage2(hyp_vm, pgd);
	if (ret)
		goto err_remove_vm_table_entry;
	hyp_spin_unlock(&vm_table_lock);

	return hyp_vm->kvm.arch.pkvm.handle;

err_remove_vm_table_entry:
	remove_vm_table_entry(hyp_vm->kvm.arch.pkvm.handle);
err_unlock:
	hyp_spin_unlock(&vm_table_lock);
err_remove_mappings:
	unmap_donated_memory(hyp_vm, vm_size);
	unmap_donated_memory(pgd, pgd_size);
err_unpin_kvm:
	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
	return ret;
}

/*
 * Initialize the hypervisor copy of the protected vCPU state using the
 * memory donated by the host.
 *
 * handle: The handle for the protected vm.
 * host_vcpu: A pointer to the corresponding host vcpu.
 * vcpu_hva: The host va of the area being donated for the vcpu state.
 *	     Must be page aligned. The size of the area must be equal to
 *	     the page-aligned size of 'struct pkvm_hyp_vcpu'.
 * Return 0 on success, negative error code on failure.
 */
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
		     unsigned long vcpu_hva)
{
	struct pkvm_hyp_vcpu *hyp_vcpu;
	struct pkvm_hyp_vm *hyp_vm;
	unsigned int idx;
	int ret;

	hyp_vcpu = map_donated_memory(vcpu_hva, sizeof(*hyp_vcpu));
	if (!hyp_vcpu)
		return -ENOMEM;

	hyp_spin_lock(&vm_table_lock);

	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		ret = -ENOENT;
		goto unlock;
	}

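	/* vCPUs are added in order, up to the count declared at VM creation. */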
	idx = hyp_vm->nr_vcpus;
	if (idx >= hyp_vm->kvm.created_vcpus) {
		ret = -EINVAL;
		goto unlock;
	}

	ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu, idx);
	if (ret)
		goto unlock;

	hyp_vm->vcpus[idx] = hyp_vcpu;
	hyp_vm->nr_vcpus++;
unlock:
	hyp_spin_unlock(&vm_table_lock);

	if (ret)
		unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));

	return ret;
}

static void
teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size)
{
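	/* Scrub the pages: they held hyp-private VM state. */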
	size = PAGE_ALIGN(size);
	memset(addr, 0, size);

	for (void *start = addr; start < addr + size; start += PAGE_SIZE)
		push_hyp_memcache(mc, start, hyp_virt_to_phys);

	unmap_donated_memory_noclear(addr, size);
}

int __pkvm_teardown_vm(pkvm_handle_t handle)
{
	struct kvm_hyp_memcache *mc;
	struct pkvm_hyp_vm *hyp_vm;
	struct kvm *host_kvm;
	unsigned int idx;
	size_t vm_size;
	int err;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		err = -ENOENT;
		goto err_unlock;
	}

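	/* A non-zero refcount means a vCPU is still loaded; refuse teardown. */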
	if (WARN_ON(hyp_page_count(hyp_vm))) {
		err = -EBUSY;
		goto err_unlock;
	}

	host_kvm = hyp_vm->host_kvm;

	/* Ensure the VMID is clean before it can be reallocated */
	__kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
	remove_vm_table_entry(handle);
	hyp_spin_unlock(&vm_table_lock);

	/* Reclaim guest pages (including page-table pages) */
	mc = &host_kvm->arch.pkvm.teardown_mc;
	reclaim_guest_pages(hyp_vm, mc);
	unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->nr_vcpus);

	/* Push the metadata pages to the teardown memcache */
	for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
		struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];

		teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
	}

	vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);
	teardown_donated_memory(mc, hyp_vm, vm_size);
	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
	return 0;

err_unlock:
	hyp_spin_unlock(&vm_table_lock);
	return err;
}