Lines matching "kvm" in arch/s390/kvm/pv.c (s390 KVM protected-virtualization support)

// SPDX-License-Identifier: GPL-2.0
/* ... */
#include <linux/kvm.h>
/* ... */
#include "kvm-s390.h"
bool kvm_s390_pv_is_protected(struct kvm *kvm)
{
	lockdep_assert_held(&kvm->lock);
	return !!kvm_s390_pv_get_handle(kvm);
}

bool kvm_s390_pv_cpu_is_protected(struct kvm_vcpu *vcpu)
{
	lockdep_assert_held(&vcpu->mutex);
	return !!kvm_s390_pv_cpu_get_handle(vcpu);
}
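A hypothetical call site, only to illustrate the locking contract that the two lockdep assertions above encode (a sketch, not a line from this file):

/* Illustrative sketch: callers must hold kvm->lock (resp. vcpu->mutex). */
static bool example_query_protection(struct kvm *kvm)
{
	bool prot;

	mutex_lock(&kvm->lock);
	prot = kvm_s390_pv_is_protected(kvm);
	mutex_unlock(&kvm->lock);
	return prot;
}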
/**
 * struct pv_vm_to_be_destroyed - Represents a protected VM that needs to
 * be destroyed. Such a VM is still registered with the Ultravisor,
 * but which does not correspond any longer to an active KVM VM. It should
 * be torn down at some later point, either asynchronously or when the
 * process exits.
 */
struct pv_vm_to_be_destroyed {
	/* ... list, handle, old_gmap_table, stor_var, stor_base ... */
};
static void kvm_s390_clear_pv_state(struct kvm *kvm)
{
	kvm->arch.pv.handle = 0;
	kvm->arch.pv.guest_len = 0;
	kvm->arch.pv.stor_base = 0;
	kvm->arch.pv.stor_var = NULL;
}
int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
	/* ... issue the Destroy Secure CPU UVC ... */
	KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT DESTROY VCPU %d: rc %x rrc %x",
		     vcpu->vcpu_id, *rc, *rrc);

	/* ... on success, free the donated base storage ... */
	free_pages(vcpu->arch.pv.stor_base,
		   get_order(uv_info.guest_cpu_stor_len));

	free_page((unsigned long)sida_addr(vcpu->arch.sie_block));
	vcpu->arch.sie_block->pv_handle_cpu = 0;
	vcpu->arch.sie_block->pv_handle_config = 0;
	memset(&vcpu->arch.pv, 0, sizeof(vcpu->arch.pv));
	vcpu->arch.sie_block->sdf = 0;
	/* Reset gbea so the kernel address of the just-freed SIDA does not leak. */
	vcpu->arch.sie_block->gbea = 1;
	/* ... */
}
int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
	struct uv_cb_csc uvcb = { /* ... UVC_CMD_CREATE_SEC_CPU ... */ };
	void *sida_addr;
	int cc;

	if (kvm_s390_pv_cpu_get_handle(vcpu))
		return -EINVAL;
	vcpu->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT,
						   get_order(uv_info.guest_cpu_stor_len));
	if (!vcpu->arch.pv.stor_base)
		return -ENOMEM;

	/* Input */
	uvcb.guest_handle = kvm_s390_pv_get_handle(vcpu->kvm);
	uvcb.num = vcpu->arch.sie_block->icpua;
	uvcb.state_origin = virt_to_phys(vcpu->arch.sie_block);
	uvcb.stor_origin = virt_to_phys((void *)vcpu->arch.pv.stor_base);

	/* Allocate the Secure Instruction Data Area (SIDA) */
	sida_addr = (void *)__get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!sida_addr) {
		free_pages(vcpu->arch.pv.stor_base,
			   get_order(uv_info.guest_cpu_stor_len));
		return -ENOMEM;
	}
	vcpu->arch.sie_block->sidad = virt_to_phys(sida_addr);

	cc = uv_call(0, (u64)&uvcb);
	KVM_UV_EVENT(vcpu->kvm, 3,
		     "PROTVIRT CREATE VCPU: cpu %d handle %llx rc %x rrc %x",
		     vcpu->vcpu_id, uvcb.cpu_handle, uvcb.header.rc,
		     uvcb.header.rrc);
	if (cc) {
		/* ... undo the allocations ... */
		return -EIO;
	}

	/* Output */
	vcpu->arch.pv.handle = uvcb.cpu_handle;
	vcpu->arch.sie_block->pv_handle_cpu = uvcb.cpu_handle;
	vcpu->arch.sie_block->pv_handle_config = kvm_s390_pv_get_handle(vcpu->kvm);
	vcpu->arch.sie_block->sdf = 2;
	return 0;
}
static void kvm_s390_pv_dealloc_vm(struct kvm *kvm)
{
	vfree(kvm->arch.pv.stor_var);
	free_pages(kvm->arch.pv.stor_base,
		   get_order(uv_info.guest_base_stor_len));
	kvm_s390_clear_pv_state(kvm);
}
static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
{
	unsigned long base = uv_info.guest_base_stor_len;
	unsigned long npages = 0, vlen = 0;

	kvm->arch.pv.stor_var = NULL;
	kvm->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT, get_order(base));
	if (!kvm->arch.pv.stor_base)
		return -ENOMEM;

	/*
	 * Calculate the current guest storage size, as the basis for the
	 * allocation of the UV shadow memory.
	 */
	mutex_lock(&kvm->slots_lock);
	npages = kvm_s390_get_gfn_end(kvm_memslots(kvm));
	mutex_unlock(&kvm->slots_lock);

	kvm->arch.pv.guest_len = npages * PAGE_SIZE;

	/* ... compute vlen from the guest size (see the sketch below) ... */
	kvm->arch.pv.stor_var = vzalloc(vlen);
	if (!kvm->arch.pv.stor_var)
		goto out_err;
	return 0;

out_err:
	kvm_s390_pv_dealloc_vm(kvm);
	return -ENOMEM;
}
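The elided lines between guest_len and the vzalloc() compute vlen. A kernel-style sketch of that sizing rule, under the assumption that the variable UV shadow storage scales per 1 MB of guest memory (HPAGE_SIZE on s390) plus a fixed base amount reported by the Ultravisor:

/* Sketch (assumption): variable UV shadow storage needed for guest_len bytes. */
static unsigned long pv_stor_var_len_sketch(unsigned long guest_len)
{
	unsigned long vlen;

	/* One chunk per 1 MB of guest memory, rounded up to a full page... */
	vlen = ALIGN(uv_info.guest_virt_var_stor_len * (guest_len / HPAGE_SIZE),
		     PAGE_SIZE);
	/* ...plus a fixed base amount. */
	return vlen + uv_info.guest_virt_base_stor_len;
}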
/**
 * kvm_s390_pv_dispose_one_leftover - Clean up one leftover protected VM.
 * @kvm: the KVM that was associated with this leftover protected VM
 *
 * On success, kvm->mm->context.protected_count will be decremented atomically
 * and all other resources used by the VM will be freed.
 *
 * Return: 0 in case of success, otherwise 1
 */
static int kvm_s390_pv_dispose_one_leftover(struct kvm *kvm,
					    struct pv_vm_to_be_destroyed *leftover,
					    u16 *rc, u16 *rrc)
{
	int cc;

	/* It used the destroy-fast UVC, nothing left to do here */
	if (!leftover->handle)
		goto done_fast;
	cc = uv_cmd_nodata(leftover->handle, UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY LEFTOVER VM: rc %x rrc %x", *rc, *rrc);
	if (cc)
		return cc;
	/*
	 * Intentionally leak the memory if the UVC failed.
	 * This can only happen in case of a serious KVM or hardware bug; it
	 * is not expected to happen in normal operation.
	 */
	free_pages(leftover->stor_base, get_order(uv_info.guest_base_stor_len));
	free_pages(leftover->old_gmap_table, CRST_ALLOC_ORDER);
	vfree(leftover->stor_var);
done_fast:
	atomic_dec(&kvm->mm->context.protected_count);
	return 0;
}
/**
 * kvm_s390_destroy_lower_2g - Destroy the first 2GB of protected guest memory.
 * @kvm: the VM whose memory is to be cleared.
 *
 * Destroy the first 2GB of guest memory, to avoid prefix issues after reboot.
 * The CPUs of the protected VM need to be destroyed beforehand.
 */
static void kvm_s390_destroy_lower_2g(struct kvm *kvm)
{
	const unsigned long pages_2g = SZ_2G / PAGE_SIZE;
	struct kvm_memory_slot *slot;
	unsigned long len;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&kvm->srcu);

	/* Clear all memslots, or parts thereof, that lie below 2GB */
	slot = gfn_to_memslot(kvm, 0);
	while (slot && slot->base_gfn < pages_2g) {
		len = min_t(u64, slot->npages, pages_2g - slot->base_gfn) * PAGE_SIZE;
		s390_uv_destroy_range(kvm->mm, slot->userspace_addr, slot->userspace_addr + len);
		slot = gfn_to_memslot(kvm, slot->base_gfn + slot->npages);
	}

	srcu_read_unlock(&kvm->srcu, srcu_idx);
}
static int kvm_s390_pv_deinit_vm_fast(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct uv_cb_destroy_fast uvcb = {
		.header.cmd = UVC_CMD_DESTROY_SEC_CONF_FAST,
		.header.len = sizeof(uvcb),
		.handle = kvm_s390_pv_get_handle(kvm),
	};
	int cc;

	cc = uv_call_sched(0, (u64)&uvcb);
	/* ... copy rc/rrc out of the UVCB ... */
	WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM FAST: rc %x rrc %x",
		     uvcb.header.rc, uvcb.header.rrc);
	WARN_ONCE(cc, "protvirt destroy vm fast failed handle %llx rc %x rrc %x",
		  kvm_s390_pv_get_handle(kvm), uvcb.header.rc, uvcb.header.rrc);
	/* Intended memory leak on "impossible" error */
	if (!cc)
		kvm_s390_pv_dealloc_vm(kvm);
	return cc ? -EIO : 0;
}
/**
 * kvm_s390_pv_set_aside - Set aside a protected VM for later teardown.
 * @kvm: the VM
 *
 * Set aside the protected VM for a subsequent teardown. The VM will be able
 * to continue immediately as a non-secure VM, and the information needed to
 * properly tear down the protected VM is set aside. If another protected VM
 * was already set aside without starting its teardown, the function fails.
 *
 * Context: kvm->lock needs to be held
 *
 * Return: 0 in case of success, -EINVAL if another protected VM was already set
 * aside, -ENOMEM if the system ran out of memory.
 */
int kvm_s390_pv_set_aside(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct pv_vm_to_be_destroyed *priv;
	int res = 0;

	lockdep_assert_held(&kvm->lock);
	/* If another protected VM was already prepared for teardown, refuse. */
	if (kvm->arch.pv.set_aside)
		return -EINVAL;

	/* Guest with segment type ASCE, refuse to destroy asynchronously */
	if ((kvm->arch.gmap->asce & _ASCE_TYPE_MASK) == _ASCE_TYPE_SEGMENT)
		return -EINVAL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (is_destroy_fast_available()) {
		res = kvm_s390_pv_deinit_vm_fast(kvm, rc, rrc);
	} else {
		priv->stor_var = kvm->arch.pv.stor_var;
		priv->stor_base = kvm->arch.pv.stor_base;
		priv->handle = kvm_s390_pv_get_handle(kvm);
		priv->old_gmap_table = (unsigned long)kvm->arch.gmap->table;
		WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
		if (s390_replace_asce(kvm->arch.gmap))
			res = -ENOMEM;
	}

	if (res) {
		kfree(priv);
		return res;
	}

	kvm_s390_destroy_lower_2g(kvm);
	kvm_s390_clear_pv_state(kvm);
	kvm->arch.pv.set_aside = priv;
	/* ... */
	return 0;
}
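Userspace reaches this function through the KVM_S390_PV_COMMAND ioctl with the KVM_PV_ASYNC_CLEANUP_PREPARE command. A minimal sketch of that call, assuming vm_fd was obtained from KVM_CREATE_VM (error handling elided):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Userspace sketch: set the protected VM aside for asynchronous teardown. */
static int pv_async_cleanup_prepare(int vm_fd)
{
	struct kvm_pv_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = KVM_PV_ASYNC_CLEANUP_PREPARE;
	return ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd); /* rc/rrc in cmd.rc/cmd.rrc */
}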
/**
 * kvm_s390_pv_deinit_vm - Deinitialize the current protected VM
 * @kvm: the KVM whose protected VM needs to be deinitialized
 *
 * Destroy and clean up the current protected VM, without cleaning up the
 * guest memory; the caller has to take care of that separately.
 *
 * Context: kvm->lock needs to be held
 *
 * Return: 0 in case of success, otherwise -EIO
 */
int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	int cc;

	cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
			   UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
	WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
	if (!cc) {
		atomic_dec(&kvm->mm->context.protected_count);
		kvm_s390_pv_dealloc_vm(kvm);
	} else {
		/* Intended memory leak on "impossible" error */
		s390_replace_asce(kvm->arch.gmap);
	}
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM: rc %x rrc %x", *rc, *rrc);

	return cc ? -EIO : 0;
}
/**
 * kvm_s390_pv_deinit_cleanup_all - Clean up all protected VMs associated
 * with a specific KVM.
 * @kvm: the KVM to be cleaned up
 *
 * This function will clean up all protected VMs associated with a KVM:
 * the currently active one, any VM set aside for asynchronous teardown,
 * and any leftovers still pending in the need_cleanup list.
 *
 * Context: kvm->lock needs to be held unless being called from
 * kvm_arch_destroy_vm.
 *
 * Return: 0 if all VMs are successfully cleaned up, otherwise -EIO
 */
int kvm_s390_pv_deinit_cleanup_all(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct pv_vm_to_be_destroyed *cur;
	bool need_zap = false;
	u16 _rc, _rrc;
	int cc = 0;

	/* Keep the counter from reaching 0 before s390_uv_destroy_range(). */
	if (!atomic_inc_not_zero(&kvm->mm->context.protected_count))
		return 0;

	/* If the current VM is protected, destroy it */
	if (kvm_s390_pv_get_handle(kvm)) {
		cc = kvm_s390_pv_deinit_vm(kvm, rc, rrc);
		need_zap = true;
	}

	/* If a previous protected VM was set aside, queue it for cleanup */
	if (kvm->arch.pv.set_aside) {
		list_add(kvm->arch.pv.set_aside, &kvm->arch.pv.need_cleanup);
		kvm->arch.pv.set_aside = NULL;
	}

	/* Clean up all leftover protected VMs in the need_cleanup list */
	while (!list_empty(&kvm->arch.pv.need_cleanup)) {
		cur = list_first_entry(&kvm->arch.pv.need_cleanup, typeof(*cur), list);
		need_zap = true;
		if (kvm_s390_pv_dispose_one_leftover(kvm, cur, &_rc, &_rrc)) {
			cc = 1;
			/* ... only the first failure's rc/rrc is reported ... */
		}
		list_del(&cur->list);
		kfree(cur);
	}

	/* If the mm still has users, mark all its pages accessible again. */
	if (need_zap && mmget_not_zero(kvm->mm)) {
		s390_uv_destroy_range(kvm->mm, 0, TASK_SIZE);
		mmput(kvm->mm);
	}

	/* Now the counter can safely reach 0 */
	atomic_dec(&kvm->mm->context.protected_count);
	return cc ? -EIO : 0;
}
/**
 * kvm_s390_pv_deinit_aside_vm - Teardown a previously set aside protected VM.
 * @kvm: the VM previously associated with the protected VM
 *
 * Tear down the protected VM that had previously been set aside with
 * kvm_s390_pv_set_aside(). Ideally called by userspace from a separate
 * thread.
 *
 * Context: kvm->lock must not be held.
 *
 * Return: 0 in case of success, -EINVAL if no protected VM had been
 * prepared for asynchronous teardown, -EIO in case of other errors.
 */
int kvm_s390_pv_deinit_aside_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct pv_vm_to_be_destroyed *p;
	int ret = 0;

	lockdep_assert_not_held(&kvm->lock);
	mutex_lock(&kvm->lock);
	p = kvm->arch.pv.set_aside;
	kvm->arch.pv.set_aside = NULL;
	mutex_unlock(&kvm->lock);
	if (!p)
		return -EINVAL;

	/* When a fatal signal is received, stop immediately */
	if (s390_uv_destroy_range_interruptible(kvm->mm, 0, TASK_SIZE_MAX))
		goto done;
	if (kvm_s390_pv_dispose_one_leftover(kvm, p, rc, rrc))
		ret = -EIO;
	kfree(p);
	p = NULL;
done:
	/* If interrupted by a fatal signal, queue the leftover for later. */
	if (p) {
		mutex_lock(&kvm->lock);
		list_add(&p->list, &kvm->arch.pv.need_cleanup);
		mutex_unlock(&kvm->lock);
	}
	return ret;
}
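The matching userspace step is KVM_PV_ASYNC_CLEANUP_PERFORM; per the kernel-doc above it should run on a separate thread so a guest reboot does not block on the teardown. A sketch using pthreads and the same includes as the PREPARE sketch above:

/* Userspace sketch: run the actual teardown off the main thread. */
static void *pv_async_cleanup_perform(void *arg)
{
	int vm_fd = *(int *)arg;
	struct kvm_pv_cmd cmd = { .cmd = KVM_PV_ASYNC_CLEANUP_PERFORM };

	ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd);
	return NULL;
}

/* Usage: pthread_create(&tid, NULL, pv_async_cleanup_perform, &vm_fd); */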
static void kvm_s390_pv_mmu_notifier_release(struct mmu_notifier *subscription,
					     struct mm_struct *mm)
{
	struct kvm *kvm = container_of(subscription, struct kvm, arch.pv.mmu_notifier);
	u16 dummy;
	int r;

	/*
	 * When the struct kvm gets deinitialized, this notifier is also
	 * unregistered. This means that if this notifier runs, then the
	 * struct kvm is still valid.
	 */
	r = kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
	if (!r && is_destroy_fast_available() && kvm_s390_pv_get_handle(kvm))
		kvm_s390_pv_deinit_vm_fast(kvm, &dummy, &dummy);
}

static const struct mmu_notifier_ops kvm_s390_pv_mmu_notifier_ops = {
	.release = kvm_s390_pv_mmu_notifier_release,
};
int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct uv_cb_cgc uvcb = {
		.header.cmd = UVC_CMD_CREATE_SEC_CONF,
		.header.len = sizeof(uvcb)
	};
	int cc, ret;
	u16 dummy;

	ret = kvm_s390_pv_alloc_vm(kvm);
	if (ret)
		return ret;

	/* Inputs */
	uvcb.guest_stor_origin = 0; /* MSO is 0 for KVM */
	uvcb.guest_stor_len = kvm->arch.pv.guest_len;
	uvcb.guest_asce = kvm->arch.gmap->asce;
	uvcb.guest_sca = virt_to_phys(kvm->arch.sca);
	uvcb.conf_base_stor_origin =
		virt_to_phys((void *)kvm->arch.pv.stor_base);
	uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var;
	uvcb.flags.ap_allow_instr = kvm->arch.model.uv_feat_guest.ap;
	uvcb.flags.ap_instr_intr = kvm->arch.model.uv_feat_guest.ap_intr;

	cc = uv_call_sched(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT CREATE VM: handle %llx len %llx rc %x rrc %x flags %04x",
		     uvcb.guest_handle, uvcb.guest_stor_len, *rc, *rrc, uvcb.flags.raw);

	/* Outputs */
	kvm->arch.pv.handle = uvcb.guest_handle;

	atomic_inc(&kvm->mm->context.protected_count);
	if (cc) {
		if (uvcb.header.rc & UVC_RC_NEED_DESTROY) {
			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
		} else {
			atomic_dec(&kvm->mm->context.protected_count);
			kvm_s390_pv_dealloc_vm(kvm);
		}
		return -EIO;
	}
	kvm->arch.gmap->guest_handle = uvcb.guest_handle;
	/* Add the notifier only once. No races because we hold kvm->lock */
	if (kvm->arch.pv.mmu_notifier.ops != &kvm_s390_pv_mmu_notifier_ops) {
		kvm->arch.pv.mmu_notifier.ops = &kvm_s390_pv_mmu_notifier_ops;
		mmu_notifier_register(&kvm->arch.pv.mmu_notifier, kvm->mm);
	}
	return 0;
}
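kvm_s390_pv_init_vm() is driven from userspace via the KVM_PV_ENABLE command. A minimal sketch of the transition into secure mode (same includes as above; error handling left to the caller):

/* Userspace sketch: transition the VM into protected (secure) mode. */
static int pv_enable(int vm_fd)
{
	struct kvm_pv_cmd cmd = { .cmd = KVM_PV_ENABLE };

	return ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd);
}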
int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
			      u16 *rrc)
{
	struct uv_cb_ssc uvcb = {
		/* ... UVC_CMD_SET_SEC_CONF_PARAMS, hdr, length ... */
		.guest_handle = kvm_s390_pv_get_handle(kvm),
	};
	int cc = uv_call(0, (u64)&uvcb);

	/* ... */
	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
		     *rc, *rrc);
	return cc ? -EINVAL : 0;
}
static int unpack_one(struct kvm *kvm, unsigned long addr, u64 tweak,
		      u64 offset, u16 *rc, u16 *rrc)
{
	struct uv_cb_unp uvcb = {
		/* ... UVC_CMD_UNPACK_IMG, gaddr = addr, tweaks ... */
		.guest_handle = kvm_s390_pv_get_handle(kvm),
	};
	int ret = gmap_make_secure(kvm->arch.gmap, addr, &uvcb);

	/* ... */
	if (ret && ret != -EAGAIN)
		KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: failed addr %llx with rc %x rrc %x",
			     uvcb.gaddr, *rc, *rrc);
	return ret;
}

int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
		       unsigned long tweak, u16 *rc, u16 *rrc)
{
	u64 offset = 0;
	int ret = 0;

	if (addr & ~PAGE_MASK || !size || size & ~PAGE_MASK)
		return -EINVAL;

	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: start addr %lx size %lx",
		     addr, size);

	while (offset < size) {
		ret = unpack_one(kvm, addr, tweak, offset, rc, rrc);
		if (ret == -EAGAIN) {
			/* ... reschedule, stop on fatal signal, else retry ... */
			continue;
		}
		if (ret)
			break;
		addr += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
	if (!ret)
		KVM_UV_EVENT(kvm, 3, "%s", "PROTVIRT VM UNPACK: successful");
	return ret;
}
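In the boot flow, userspace first passes the SE header with KVM_PV_SET_SEC_PARMS and then unpacks each image component with KVM_PV_UNPACK, which lands in kvm_s390_pv_unpack() above. A sketch, assuming hdr_addr/hdr_len describe the SE header in guest memory terms expected by the UAPI and addr/size/tweak describe one component:

/* Userspace sketch: feed the SE header, then unpack one image component. */
static int pv_set_parms_and_unpack(int vm_fd, __u64 hdr_addr, __u64 hdr_len,
				   __u64 addr, __u64 size, __u64 tweak)
{
	struct kvm_s390_pv_sec_parm parm = { .origin = hdr_addr, .length = hdr_len };
	struct kvm_s390_pv_unp unp = { .addr = addr, .size = size, .tweak = tweak };
	struct kvm_pv_cmd cmd = {
		.cmd = KVM_PV_SET_SEC_PARMS,
		.data = (__u64)(unsigned long)&parm,
	};
	int ret;

	ret = ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd);
	if (ret)
		return ret;
	cmd.cmd = KVM_PV_UNPACK;
	cmd.data = (__u64)(unsigned long)&unp;
	return ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd);
}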
int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state)
{
	/* ... issue the CPU Set State UVC ... */
	KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT SET CPU %d STATE %d rc %x rrc %x",
		     vcpu->vcpu_id, state, uvcb.header.rc, uvcb.header.rrc);
	if (cc)
		return -EINVAL;
	return 0;
}

int kvm_s390_pv_dump_cpu(struct kvm_vcpu *vcpu, void *buff, u16 *rc, u16 *rrc)
{
	struct uv_cb_dump_cpu uvcb = {
		/* ... UVC_CMD_DUMP_CPU, dump_area_origin = buff ... */
		.cpu_handle = vcpu->arch.pv.handle,
	};
	/* ... */
}
/**
 * kvm_s390_pv_dump_stor_state
 *
 * @kvm: pointer to the guest's KVM struct
 * @buff_user: userspace pointer to which the results are written
 * @gaddr: starting absolute guest address, updated as data is written
 * @buff_user_len: length of the buff_user buffer
 *
 * Stores buff_user_len bytes of tweak component values to buff_user,
 * starting with the 1MB block at the absolute guest address gaddr.
 * buff_user might be written to even if an error rc is returned.
 *
 * Context: kvm->lock needs to be held
 *
 * Return:
 *  0 on success
 *  -ENOMEM if allocating the cache fails
 *  -EINVAL if gaddr is not aligned to 1MB
 *  -EINVAL if buff_user_len is not aligned to uv_info.conf_dump_storage_state_len
 *  -EINVAL if the UV call fails, rc and rrc will be set in this case
 *  -EFAULT if copying the result to buff_user failed
 */
int kvm_s390_pv_dump_stor_state(struct kvm *kvm, void __user *buff_user,
				u64 *gaddr, u64 buff_user_len, u16 *rc, u16 *rrc)
{
	struct uv_cb_dump_stor_state uvcb = {
		/* ... UVC_CMD_DUMP_CONF_STOR_STATE, gaddr ... */
		.config_handle = kvm->arch.pv.handle,
	};
	int ret = 0;

	/* Reject a misaligned gaddr or buff_user_len. */
	if (*gaddr & ~HPAGE_MASK || buff_user_len % uv_info.conf_dump_storage_state_len)
		ret = -EINVAL;
	/* ... allocate a kernel bounce buffer (-ENOMEM on failure) ... */

	while (buff_user_len) {
		/* ... issue the UVC for the next 1MB block ... */
		if (cc) {
			ret = -EINVAL;
			break;
		}
		buff_user_len -= increment_len;

		/* KVM buffer full, time to copy to the process */
		/* ... copy_to_user(); sets ret = -EFAULT on failure ... */
	}

	KVM_UV_EVENT(kvm, 3,
		     "PROTVIRT DUMP STORAGE STATE: addr %llx ret %d, uvcb rc %x rrc %x",
		     uvcb.gaddr, ret, uvcb.header.rc, uvcb.header.rrc);
	/* ... */
	return ret;
}
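Userspace drives this through KVM_PV_DUMP with the KVM_PV_DUMP_CONFIG_STOR_STATE subcommand; the dump is assumed to have been started beforehand with KVM_PV_DUMP_INIT. A sketch using the UAPI struct kvm_s390_pv_dmp (field names per my reading of linux/kvm.h):

/* Userspace sketch: fetch storage-state tweaks for guest memory from gaddr on. */
static int pv_dump_stor_state(int vm_fd, void *buf, __u64 buf_len, __u64 gaddr)
{
	struct kvm_s390_pv_dmp dmp = {
		.subcmd = KVM_PV_DUMP_CONFIG_STOR_STATE,
		.buff_addr = (__u64)(unsigned long)buf,
		.buff_len = buf_len,
		.gaddr = gaddr,
	};
	struct kvm_pv_cmd cmd = {
		.cmd = KVM_PV_DUMP,
		.data = (__u64)(unsigned long)&dmp,
	};

	return ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd);
}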
/**
 * kvm_s390_pv_dump_complete
 *
 * @kvm: pointer to the guest's KVM struct
 * @buff_user: userspace pointer to which the completion data is written
 *
 * Completes the dump process and writes the completion data to user space.
 *
 * Context: kvm->lock needs to be held
 *
 * Return:
 *  0 on success
 *  -ENOMEM if allocating the completion buffer fails
 *  -EINVAL if the UV call fails, rc and rrc will be set in this case
 *  -EFAULT if copying the result to buff_user failed
 */
int kvm_s390_pv_dump_complete(struct kvm *kvm, void __user *buff_user,
			      u16 *rc, u16 *rrc)
{
	struct uv_cb_dump_complete complete = {
		/* ... UVC_CMD_DUMP_COMPLETE ... */
		.config_handle = kvm_s390_pv_get_handle(kvm),
	};
	u64 *compl_data;
	int ret;

	/* Allocate the dump completion area */
	compl_data = vzalloc(uv_info.conf_dump_finalize_len);
	if (!compl_data)
		return -ENOMEM;
	complete.dump_area_origin = (u64)compl_data;

	ret = uv_call_sched(0, (u64)&complete);
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP COMPLETE: rc %x rrc %x",
		     complete.header.rc, complete.header.rrc);

	if (!ret) {
		/* Dumping is finished; let the vCPUs run again. */
		kvm->arch.pv.dumping = false;
		kvm_s390_vcpu_unblock_all(kvm);
		if (copy_to_user(buff_user, compl_data, uv_info.conf_dump_finalize_len))
			ret = -EFAULT;
	}
	vfree(compl_data);
	/* If the UVC returned an error, translate it to -EINVAL */
	if (ret > 0)
		ret = -EINVAL;
	return ret;
}
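The completion data is retrieved from userspace with the KVM_PV_DUMP_COMPLETE subcommand; the buffer length has to match uv_info.conf_dump_finalize_len, which userspace can query through the KVM_PV_INFO interface (assumption). A sketch mirroring the storage-state helper above:

/* Userspace sketch: finish the dump and read back the completion data. */
static int pv_dump_complete(int vm_fd, void *buf, __u64 buf_len)
{
	struct kvm_s390_pv_dmp dmp = {
		.subcmd = KVM_PV_DUMP_COMPLETE,
		.buff_addr = (__u64)(unsigned long)buf,
		.buff_len = buf_len,
	};
	struct kvm_pv_cmd cmd = {
		.cmd = KVM_PV_DUMP,
		.data = (__u64)(unsigned long)&dmp,
	};

	return ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd);
}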