// SPDX-License-Identifier: GPL-2.0
/*
 * Hosting Protected Virtual Machines
 *
 * Copyright IBM Corp. 2019, 2020
 *    Author(s): Janosch Frank <frankja@linux.ibm.com>
 */
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/minmax.h>
#include <linux/pagemap.h>
#include <linux/sched/signal.h>
#include <asm/gmap.h>
#include <asm/uv.h>
#include <asm/mman.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>
#include "kvm-s390.h"

/**
 * struct pv_vm_to_be_destroyed - Represents a protected VM that needs to
 * be destroyed
 *
 * @list: list head for the list of leftover VMs
 * @old_gmap_table: the gmap table of the leftover protected VM
 * @handle: the handle of the leftover protected VM
 * @stor_var: pointer to the variable storage of the leftover protected VM
 * @stor_base: address of the base storage of the leftover protected VM
 *
 * Represents a protected VM that is still registered with the Ultravisor,
 * but which does not correspond any longer to an active KVM VM. It should
 * be destroyed at some point later, either asynchronously or when the
 * process terminates.
 */
struct pv_vm_to_be_destroyed {
	struct list_head list;
	unsigned long old_gmap_table;
	u64 handle;
	void *stor_var;
	unsigned long stor_base;
};

static void kvm_s390_clear_pv_state(struct kvm *kvm)
{
	kvm->arch.pv.handle = 0;
	kvm->arch.pv.guest_len = 0;
	kvm->arch.pv.stor_base = 0;
	kvm->arch.pv.stor_var = NULL;
}

int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
	int cc;

	if (!kvm_s390_pv_cpu_get_handle(vcpu))
		return 0;

	cc = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu), UVC_CMD_DESTROY_SEC_CPU, rc, rrc);

	KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT DESTROY VCPU %d: rc %x rrc %x",
		     vcpu->vcpu_id, *rc, *rrc);
	WARN_ONCE(cc, "protvirt destroy cpu failed rc %x rrc %x", *rc, *rrc);

	/* Intended memory leak for something that should never happen. */
	if (!cc)
		free_pages(vcpu->arch.pv.stor_base,
			   get_order(uv_info.guest_cpu_stor_len));

	free_page((unsigned long)sida_addr(vcpu->arch.sie_block));
	vcpu->arch.sie_block->pv_handle_cpu = 0;
	vcpu->arch.sie_block->pv_handle_config = 0;
	memset(&vcpu->arch.pv, 0, sizeof(vcpu->arch.pv));
	vcpu->arch.sie_block->sdf = 0;
	/*
	 * The sidad field (for sdf == 2) is now the gbea field (for sdf == 0).
	 * Use the reset value of gbea to avoid leaking the kernel pointer of
	 * the just freed sida.
	 */
	vcpu->arch.sie_block->gbea = 1;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);

	return cc ? EIO : 0;
}
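/*
 * A minimal usage sketch (illustrative only, not taken from this file):
 * the secure CPU of a vCPU is created after the secure configuration of
 * the VM exists, and is destroyed again before the configuration goes
 * away. rc/rrc receive the Ultravisor return and reason codes.
 *
 *	u16 rc, rrc;
 *	int ret;
 *
 *	ret = kvm_s390_pv_create_cpu(vcpu, &rc, &rrc);
 *	...
 *	ret = kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
 */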
int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
	struct uv_cb_csc uvcb = {
		.header.cmd = UVC_CMD_CREATE_SEC_CPU,
		.header.len = sizeof(uvcb),
	};
	void *sida_addr;
	int cc;

	if (kvm_s390_pv_cpu_get_handle(vcpu))
		return -EINVAL;

	vcpu->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT,
						   get_order(uv_info.guest_cpu_stor_len));
	if (!vcpu->arch.pv.stor_base)
		return -ENOMEM;

	/* Input */
	uvcb.guest_handle = kvm_s390_pv_get_handle(vcpu->kvm);
	uvcb.num = vcpu->arch.sie_block->icpua;
	uvcb.state_origin = virt_to_phys(vcpu->arch.sie_block);
	uvcb.stor_origin = virt_to_phys((void *)vcpu->arch.pv.stor_base);

	/* Alloc Secure Instruction Data Area Designation */
	sida_addr = (void *)__get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!sida_addr) {
		free_pages(vcpu->arch.pv.stor_base,
			   get_order(uv_info.guest_cpu_stor_len));
		return -ENOMEM;
	}
	vcpu->arch.sie_block->sidad = virt_to_phys(sida_addr);

	cc = uv_call(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(vcpu->kvm, 3,
		     "PROTVIRT CREATE VCPU: cpu %d handle %llx rc %x rrc %x",
		     vcpu->vcpu_id, uvcb.cpu_handle, uvcb.header.rc,
		     uvcb.header.rrc);

	if (cc) {
		u16 dummy;

		kvm_s390_pv_destroy_cpu(vcpu, &dummy, &dummy);
		return -EIO;
	}

	/* Output */
	vcpu->arch.pv.handle = uvcb.cpu_handle;
	vcpu->arch.sie_block->pv_handle_cpu = uvcb.cpu_handle;
	vcpu->arch.sie_block->pv_handle_config = kvm_s390_pv_get_handle(vcpu->kvm);
	vcpu->arch.sie_block->sdf = 2;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

/* only free resources when the destroy was successful */
static void kvm_s390_pv_dealloc_vm(struct kvm *kvm)
{
	vfree(kvm->arch.pv.stor_var);
	free_pages(kvm->arch.pv.stor_base,
		   get_order(uv_info.guest_base_stor_len));
	kvm_s390_clear_pv_state(kvm);
}

static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
{
	unsigned long base = uv_info.guest_base_stor_len;
	unsigned long virt = uv_info.guest_virt_var_stor_len;
	unsigned long npages = 0, vlen = 0;

	kvm->arch.pv.stor_var = NULL;
	kvm->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT, get_order(base));
	if (!kvm->arch.pv.stor_base)
		return -ENOMEM;

	/*
	 * Calculate current guest storage for allocation of the
	 * variable storage, which is based on the length in MB.
	 *
	 * Slots are sorted by GFN
	 */
	mutex_lock(&kvm->slots_lock);
	npages = kvm_s390_get_gfn_end(kvm_memslots(kvm));
	mutex_unlock(&kvm->slots_lock);

	kvm->arch.pv.guest_len = npages * PAGE_SIZE;

	/* Allocate variable storage */
	vlen = ALIGN(virt * ((npages * PAGE_SIZE) / HPAGE_SIZE), PAGE_SIZE);
	vlen += uv_info.guest_virt_base_stor_len;
	kvm->arch.pv.stor_var = vzalloc(vlen);
	if (!kvm->arch.pv.stor_var)
		goto out_err;
	return 0;

out_err:
	kvm_s390_pv_dealloc_vm(kvm);
	return -ENOMEM;
}
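/*
 * Worked example for the sizing above (illustrative numbers; the real
 * lengths are reported by the Ultravisor): for a 4 GB guest there are
 * 4096 HPAGE_SIZE (1 MB) increments, so with a guest_virt_var_stor_len
 * of 512 bytes per increment the variable storage comes to
 * ALIGN(512 * 4096, PAGE_SIZE) + guest_virt_base_stor_len bytes,
 * allocated with vzalloc() and donated to the Ultravisor.
 */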
/**
 * kvm_s390_pv_dispose_one_leftover - Clean up one leftover protected VM.
 * @kvm: the KVM that was associated with this leftover protected VM
 * @leftover: details about the leftover protected VM that needs a clean up
 * @rc: the RC code of the Destroy Secure Configuration UVC
 * @rrc: the RRC code of the Destroy Secure Configuration UVC
 *
 * Destroy one leftover protected VM.
 * On success, kvm->mm->context.protected_count will be decremented atomically
 * and all other resources used by the VM will be freed.
 *
 * Return: 0 in case of success, otherwise 1
 */
static int kvm_s390_pv_dispose_one_leftover(struct kvm *kvm,
					    struct pv_vm_to_be_destroyed *leftover,
					    u16 *rc, u16 *rrc)
{
	int cc;

	cc = uv_cmd_nodata(leftover->handle, UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY LEFTOVER VM: rc %x rrc %x", *rc, *rrc);
	WARN_ONCE(cc, "protvirt destroy leftover vm failed rc %x rrc %x", *rc, *rrc);
	if (cc)
		return cc;
	/*
	 * Intentionally leak unusable memory. If the UVC fails, the memory
	 * used for the VM and its metadata is permanently unusable.
	 * This can only happen in case of a serious KVM or hardware bug; it
	 * is not expected to happen in normal operation.
	 */
	free_pages(leftover->stor_base, get_order(uv_info.guest_base_stor_len));
	free_pages(leftover->old_gmap_table, CRST_ALLOC_ORDER);
	vfree(leftover->stor_var);
	atomic_dec(&kvm->mm->context.protected_count);
	return 0;
}

/**
 * kvm_s390_destroy_lower_2g - Destroy the first 2GB of protected guest memory.
 * @kvm: the VM whose memory is to be cleared.
 *
 * Destroy the first 2GB of guest memory, to avoid prefix issues after reboot.
 * The CPUs of the protected VM need to be destroyed beforehand.
 */
static void kvm_s390_destroy_lower_2g(struct kvm *kvm)
{
	const unsigned long pages_2g = SZ_2G / PAGE_SIZE;
	struct kvm_memory_slot *slot;
	unsigned long len;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&kvm->srcu);

	/* Take the memslot containing guest absolute address 0 */
	slot = gfn_to_memslot(kvm, 0);
	/* Clear all slots or parts thereof that are below 2GB */
	while (slot && slot->base_gfn < pages_2g) {
		len = min_t(u64, slot->npages, pages_2g - slot->base_gfn) * PAGE_SIZE;
		s390_uv_destroy_range(kvm->mm, slot->userspace_addr, slot->userspace_addr + len);
		/* Take the next memslot */
		slot = gfn_to_memslot(kvm, slot->base_gfn + slot->npages);
	}

	srcu_read_unlock(&kvm->srcu, srcu_idx);
}
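/*
 * Example walk (illustrative): with memslots [0, 1.5G) and [1.5G, 4G),
 * the loop above destroys all 1.5 GB of the first slot, then only the
 * first 512 MB of the second slot (pages_2g - base_gfn pages), and
 * terminates because the following lookup at 4 GB finds no memslot
 * below the 2 GB boundary.
 */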
/**
 * kvm_s390_pv_set_aside - Set aside a protected VM for later teardown.
 * @kvm: the VM
 * @rc: return value for the RC field of the UVCB
 * @rrc: return value for the RRC field of the UVCB
 *
 * Set aside the protected VM for a subsequent teardown. The VM will be able
 * to continue immediately as a non-secure VM, and the information needed to
 * properly tear down the protected VM is set aside. If another protected VM
 * was already set aside without starting its teardown, this function will
 * fail.
 * The CPUs of the protected VM need to be destroyed beforehand.
 *
 * Context: kvm->lock needs to be held
 *
 * Return: 0 in case of success, -EINVAL if another protected VM was already set
 * aside, -ENOMEM if the system ran out of memory.
 */
int kvm_s390_pv_set_aside(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct pv_vm_to_be_destroyed *priv;

	lockdep_assert_held(&kvm->lock);
	/*
	 * If another protected VM was already prepared for teardown, refuse.
	 * A normal deinitialization has to be performed instead.
	 */
	if (kvm->arch.pv.set_aside)
		return -EINVAL;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->stor_var = kvm->arch.pv.stor_var;
	priv->stor_base = kvm->arch.pv.stor_base;
	priv->handle = kvm_s390_pv_get_handle(kvm);
	priv->old_gmap_table = (unsigned long)kvm->arch.gmap->table;
	WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
	if (s390_replace_asce(kvm->arch.gmap)) {
		kfree(priv);
		return -ENOMEM;
	}

	kvm_s390_destroy_lower_2g(kvm);
	kvm_s390_clear_pv_state(kvm);
	kvm->arch.pv.set_aside = priv;

	*rc = UVC_RC_EXECUTED;
	*rrc = 42;
	return 0;
}

/**
 * kvm_s390_pv_deinit_vm - Deinitialize the current protected VM
 * @kvm: the KVM whose protected VM needs to be deinitialized
 * @rc: the RC code of the UVC
 * @rrc: the RRC code of the UVC
 *
 * Deinitialize the current protected VM. This function will destroy and
 * cleanup the current protected VM, but it will not cleanup the guest
 * memory. This function should only be called when the protected VM has
 * just been created and therefore does not have any guest memory, or when
 * the caller cleans up the guest memory separately.
 *
 * This function should not fail, but if it does, the donated memory must
 * not be freed.
 *
 * Context: kvm->lock needs to be held
 *
 * Return: 0 in case of success, otherwise -EIO
 */
int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	int cc;

	cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
			   UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
	WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
	if (!cc) {
		atomic_dec(&kvm->mm->context.protected_count);
		kvm_s390_pv_dealloc_vm(kvm);
	} else {
		/* Intended memory leak on "impossible" error */
		s390_replace_asce(kvm->arch.gmap);
	}
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM: rc %x rrc %x", *rc, *rrc);
	WARN_ONCE(cc, "protvirt destroy vm failed rc %x rrc %x", *rc, *rrc);

	return cc ? -EIO : 0;
}
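/*
 * Sketch of the intended fast-reboot flow (illustrative; in practice
 * userspace drives this via the KVM_PV_ASYNC_CLEANUP_PREPARE and
 * KVM_PV_ASYNC_CLEANUP_PERFORM ioctls, with the vCPUs already
 * destroyed). The set-aside VM is torn down later, without kvm->lock,
 * typically from a separate thread:
 *
 *	u16 rc, rrc;
 *	int ret;
 *
 *	mutex_lock(&kvm->lock);
 *	ret = kvm_s390_pv_set_aside(kvm, &rc, &rrc);
 *	mutex_unlock(&kvm->lock);
 *	...
 *	if (!ret)
 *		ret = kvm_s390_pv_deinit_aside_vm(kvm, &rc, &rrc);
 */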
/**
 * kvm_s390_pv_deinit_cleanup_all - Clean up all protected VMs associated
 * with a specific KVM.
 * @kvm: the KVM to be cleaned up
 * @rc: the RC code of the first failing UVC
 * @rrc: the RRC code of the first failing UVC
 *
 * This function will clean up all protected VMs associated with a KVM.
 * This includes the active one, the one prepared for deinitialization with
 * kvm_s390_pv_set_aside, and any still pending in the need_cleanup list.
 *
 * Context: kvm->lock needs to be held unless being called from
 * kvm_arch_destroy_vm.
 *
 * Return: 0 if all VMs are successfully cleaned up, otherwise -EIO
 */
int kvm_s390_pv_deinit_cleanup_all(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct pv_vm_to_be_destroyed *cur;
	bool need_zap = false;
	u16 _rc, _rrc;
	int cc = 0;

	/* Make sure the counter does not reach 0 before calling s390_uv_destroy_range */
	atomic_inc(&kvm->mm->context.protected_count);

	*rc = 1;
	/* If the current VM is protected, destroy it */
	if (kvm_s390_pv_get_handle(kvm)) {
		cc = kvm_s390_pv_deinit_vm(kvm, rc, rrc);
		need_zap = true;
	}

	/* If a previous protected VM was set aside, put it in the need_cleanup list */
	if (kvm->arch.pv.set_aside) {
		list_add(kvm->arch.pv.set_aside, &kvm->arch.pv.need_cleanup);
		kvm->arch.pv.set_aside = NULL;
	}

	/* Cleanup all protected VMs in the need_cleanup list */
	while (!list_empty(&kvm->arch.pv.need_cleanup)) {
		cur = list_first_entry(&kvm->arch.pv.need_cleanup, typeof(*cur), list);
		need_zap = true;
		if (kvm_s390_pv_dispose_one_leftover(kvm, cur, &_rc, &_rrc)) {
			cc = 1;
			/*
			 * Only return the first error rc and rrc, so make
			 * sure it is not overwritten. All destroys will
			 * additionally be reported via KVM_UV_EVENT().
			 */
			if (*rc == UVC_RC_EXECUTED) {
				*rc = _rc;
				*rrc = _rrc;
			}
		}
		list_del(&cur->list);
		kfree(cur);
	}

	/*
	 * If the mm still has a mapping, try to mark all its pages as
	 * accessible. The counter should not reach zero before this
	 * cleanup has been performed.
	 */
	if (need_zap && mmget_not_zero(kvm->mm)) {
		s390_uv_destroy_range(kvm->mm, 0, TASK_SIZE);
		mmput(kvm->mm);
	}

	/* Now the counter can safely reach 0 */
	atomic_dec(&kvm->mm->context.protected_count);
	return cc ? -EIO : 0;
}
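/*
 * Note: the function above is the synchronous "tear everything down"
 * path used when the VM is destroyed or protection is disabled, while
 * kvm_s390_pv_set_aside() and kvm_s390_pv_deinit_aside_vm() below split
 * the same work in two so that a rebooting guest does not have to wait
 * for the teardown of its old secure configuration.
 */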
/**
 * kvm_s390_pv_deinit_aside_vm - Teardown a previously set aside protected VM.
 * @kvm: the VM previously associated with the protected VM
 * @rc: return value for the RC field of the UVCB
 * @rrc: return value for the RRC field of the UVCB
 *
 * Tear down the protected VM that had been previously prepared for teardown
 * using kvm_s390_pv_set_aside(). Ideally this should be called by
 * userspace asynchronously from a separate thread.
 *
 * Context: kvm->lock must not be held.
 *
 * Return: 0 in case of success, -EINVAL if no protected VM had been
 * prepared for asynchronous teardown, -EIO in case of other errors.
 */
int kvm_s390_pv_deinit_aside_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct pv_vm_to_be_destroyed *p;
	int ret = 0;

	lockdep_assert_not_held(&kvm->lock);
	mutex_lock(&kvm->lock);
	p = kvm->arch.pv.set_aside;
	kvm->arch.pv.set_aside = NULL;
	mutex_unlock(&kvm->lock);
	if (!p)
		return -EINVAL;

	/* When a fatal signal is received, stop immediately */
	if (s390_uv_destroy_range_interruptible(kvm->mm, 0, TASK_SIZE_MAX))
		goto done;
	if (kvm_s390_pv_dispose_one_leftover(kvm, p, rc, rrc))
		ret = -EIO;
	kfree(p);
	p = NULL;
done:
	/*
	 * p is not NULL if we aborted because of a fatal signal, in which
	 * case queue the leftover for later cleanup.
	 */
	if (p) {
		mutex_lock(&kvm->lock);
		list_add(&p->list, &kvm->arch.pv.need_cleanup);
		mutex_unlock(&kvm->lock);
		/* Did not finish, but pretend things went well */
		*rc = UVC_RC_EXECUTED;
		*rrc = 42;
	}
	return ret;
}

static void kvm_s390_pv_mmu_notifier_release(struct mmu_notifier *subscription,
					     struct mm_struct *mm)
{
	struct kvm *kvm = container_of(subscription, struct kvm, arch.pv.mmu_notifier);
	u16 dummy;

	/*
	 * No locking is needed since this is the last thread of the last user of this
	 * struct mm.
	 * When the struct kvm gets deinitialized, this notifier is also
	 * unregistered. This means that if this notifier runs, then the
	 * struct kvm is still valid.
	 */
	kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
}

static const struct mmu_notifier_ops kvm_s390_pv_mmu_notifier_ops = {
	.release = kvm_s390_pv_mmu_notifier_release,
};

int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct uv_cb_cgc uvcb = {
		.header.cmd = UVC_CMD_CREATE_SEC_CONF,
		.header.len = sizeof(uvcb)
	};
	int cc, ret;
	u16 dummy;

	ret = kvm_s390_pv_alloc_vm(kvm);
	if (ret)
		return ret;

	/* Inputs */
	uvcb.guest_stor_origin = 0; /* MSO is 0 for KVM */
	uvcb.guest_stor_len = kvm->arch.pv.guest_len;
	uvcb.guest_asce = kvm->arch.gmap->asce;
	uvcb.guest_sca = virt_to_phys(kvm->arch.sca);
	uvcb.conf_base_stor_origin =
		virt_to_phys((void *)kvm->arch.pv.stor_base);
	uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var;

	cc = uv_call_sched(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT CREATE VM: handle %llx len %llx rc %x rrc %x",
		     uvcb.guest_handle, uvcb.guest_stor_len, *rc, *rrc);

	/* Outputs */
	kvm->arch.pv.handle = uvcb.guest_handle;

	atomic_inc(&kvm->mm->context.protected_count);
	if (cc) {
		if (uvcb.header.rc & UVC_RC_NEED_DESTROY) {
			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
		} else {
			atomic_dec(&kvm->mm->context.protected_count);
			kvm_s390_pv_dealloc_vm(kvm);
		}
		return -EIO;
	}
	kvm->arch.gmap->guest_handle = uvcb.guest_handle;
	/* Add the notifier only once. No races because we hold kvm->lock */
	if (kvm->arch.pv.mmu_notifier.ops != &kvm_s390_pv_mmu_notifier_ops) {
		kvm->arch.pv.mmu_notifier.ops = &kvm_s390_pv_mmu_notifier_ops;
		mmu_notifier_register(&kvm->arch.pv.mmu_notifier, kvm->mm);
	}
	return 0;
}

int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
			      u16 *rrc)
{
	struct uv_cb_ssc uvcb = {
		.header.cmd = UVC_CMD_SET_SEC_CONF_PARAMS,
		.header.len = sizeof(uvcb),
		.sec_header_origin = (u64)hdr,
		.sec_header_len = length,
		.guest_handle = kvm_s390_pv_get_handle(kvm),
	};
	int cc = uv_call(0, (u64)&uvcb);

	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
		     *rc, *rrc);
	return cc ? -EINVAL : 0;
}
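/*
 * Rough order in which the pieces above and below are used when a guest
 * transitions into secure mode (illustrative; userspace drives the
 * sequence through the KVM_S390_PV_COMMAND ioctl):
 *
 *	kvm_s390_pv_init_vm()		create the secure configuration
 *	kvm_s390_pv_create_cpu()	once per vCPU
 *	kvm_s390_pv_set_sec_parms()	hand the SE header to the UV
 *	kvm_s390_pv_unpack()		decrypt the guest image
 *	UVC_CMD_VERIFY_IMG		verify the image before starting
 */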
static int unpack_one(struct kvm *kvm, unsigned long addr, u64 tweak,
		      u64 offset, u16 *rc, u16 *rrc)
{
	struct uv_cb_unp uvcb = {
		.header.cmd = UVC_CMD_UNPACK_IMG,
		.header.len = sizeof(uvcb),
		.guest_handle = kvm_s390_pv_get_handle(kvm),
		.gaddr = addr,
		.tweak[0] = tweak,
		.tweak[1] = offset,
	};
	int ret = gmap_make_secure(kvm->arch.gmap, addr, &uvcb);

	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;

	if (ret && ret != -EAGAIN)
		KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: failed addr %llx with rc %x rrc %x",
			     uvcb.gaddr, *rc, *rrc);
	return ret;
}

int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
		       unsigned long tweak, u16 *rc, u16 *rrc)
{
	u64 offset = 0;
	int ret = 0;

	if (addr & ~PAGE_MASK || !size || size & ~PAGE_MASK)
		return -EINVAL;

	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: start addr %lx size %lx",
		     addr, size);

	while (offset < size) {
		ret = unpack_one(kvm, addr, tweak, offset, rc, rrc);
		if (ret == -EAGAIN) {
			cond_resched();
			if (fatal_signal_pending(current))
				break;
			continue;
		}
		if (ret)
			break;
		addr += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
	if (!ret)
		KVM_UV_EVENT(kvm, 3, "%s", "PROTVIRT VM UNPACK: successful");
	return ret;
}
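/*
 * Note on the tweak handling above: every page is unpacked with the
 * caller-provided tweak in tweak[0] and the byte offset of the page
 * within the image in tweak[1], i.e. page n of the image is unpacked
 * with the pair (tweak, n * PAGE_SIZE).
 */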
int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state)
{
	struct uv_cb_cpu_set_state uvcb = {
		.header.cmd = UVC_CMD_CPU_SET_STATE,
		.header.len = sizeof(uvcb),
		.cpu_handle = kvm_s390_pv_cpu_get_handle(vcpu),
		.state = state,
	};
	int cc;

	cc = uv_call(0, (u64)&uvcb);
	KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT SET CPU %d STATE %d rc %x rrc %x",
		     vcpu->vcpu_id, state, uvcb.header.rc, uvcb.header.rrc);
	if (cc)
		return -EINVAL;
	return 0;
}

int kvm_s390_pv_dump_cpu(struct kvm_vcpu *vcpu, void *buff, u16 *rc, u16 *rrc)
{
	struct uv_cb_dump_cpu uvcb = {
		.header.cmd = UVC_CMD_DUMP_CPU,
		.header.len = sizeof(uvcb),
		.cpu_handle = vcpu->arch.pv.handle,
		.dump_area_origin = (u64)buff,
	};
	int cc;

	cc = uv_call_sched(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	return cc;
}

/* Size of the cache for the storage state dump data. 1MB for now */
#define DUMP_BUFF_LEN HPAGE_SIZE

/**
 * kvm_s390_pv_dump_stor_state - Dump the guest's storage state data.
 *
 * @kvm: pointer to the guest's KVM struct
 * @buff_user: Userspace pointer where we will write the results to
 * @gaddr: Starting absolute guest address for which the storage state
 *	   is requested.
 * @buff_user_len: Length of the buff_user buffer
 * @rc: Pointer to where the uvcb return code is stored
 * @rrc: Pointer to where the uvcb return reason code is stored
 *
 * Stores buff_user_len bytes of tweak component values to buff_user
 * starting with the 1MB block specified by the absolute guest address
 * (gaddr). The gaddr pointer will be updated with the last address
 * for which data was written when returning to userspace. buff_user
 * might be written to even if an error rc is returned. For instance
 * if we encounter a fault after writing the first page of data.
 *
 * Context: kvm->lock needs to be held
 *
 * Return:
 *  0 on success
 *  -ENOMEM if allocating the cache fails
 *  -EINVAL if gaddr is not aligned to 1MB
 *  -EINVAL if buff_user_len is not aligned to uv_info.conf_dump_storage_state_len
 *  -EINVAL if the UV call fails, rc and rrc will be set in this case
 *  -EFAULT if copying the result to buff_user failed
 */
int kvm_s390_pv_dump_stor_state(struct kvm *kvm, void __user *buff_user,
				u64 *gaddr, u64 buff_user_len, u16 *rc, u16 *rrc)
{
	struct uv_cb_dump_stor_state uvcb = {
		.header.cmd = UVC_CMD_DUMP_CONF_STOR_STATE,
		.header.len = sizeof(uvcb),
		.config_handle = kvm->arch.pv.handle,
		.gaddr = *gaddr,
		.dump_area_origin = 0,
	};
	const u64 increment_len = uv_info.conf_dump_storage_state_len;
	size_t buff_kvm_size;
	size_t size_done = 0;
	u8 *buff_kvm = NULL;
	int cc, ret;

	ret = -EINVAL;
	/* UV call processes 1MB guest storage chunks at a time */
	if (!IS_ALIGNED(*gaddr, HPAGE_SIZE))
		goto out;

	/*
	 * We provide the storage state for 1MB chunks of guest
	 * storage. The buffer will need to be aligned to
	 * conf_dump_storage_state_len so we don't end on a partial
	 * chunk.
	 */
	if (!buff_user_len ||
	    !IS_ALIGNED(buff_user_len, increment_len))
		goto out;

	/*
	 * Allocate a buffer from which we will later copy to the user
	 * process. We don't want userspace to dictate our buffer size
	 * so we limit it to DUMP_BUFF_LEN.
	 */
	ret = -ENOMEM;
	buff_kvm_size = min_t(u64, buff_user_len, DUMP_BUFF_LEN);
	buff_kvm = vzalloc(buff_kvm_size);
	if (!buff_kvm)
		goto out;

	ret = 0;
	uvcb.dump_area_origin = (u64)buff_kvm;
	/* We will loop until the user buffer is filled or an error occurs */
	do {
		/* Get 1MB worth of guest storage state data */
		cc = uv_call_sched(0, (u64)&uvcb);

		/* All or nothing */
		if (cc) {
			ret = -EINVAL;
			break;
		}

		size_done += increment_len;
		uvcb.dump_area_origin += increment_len;
		buff_user_len -= increment_len;
		uvcb.gaddr += HPAGE_SIZE;

		/* KVM Buffer full, time to copy to the process */
		if (!buff_user_len || size_done == DUMP_BUFF_LEN) {
			if (copy_to_user(buff_user, buff_kvm, size_done)) {
				ret = -EFAULT;
				break;
			}

			buff_user += size_done;
			size_done = 0;
			uvcb.dump_area_origin = (u64)buff_kvm;
		}
	} while (buff_user_len);

	/* Report back where we ended dumping */
	*gaddr = uvcb.gaddr;

	/* Let's only log errors; we don't want to spam */
out:
	if (ret)
		KVM_UV_EVENT(kvm, 3,
			     "PROTVIRT DUMP STORAGE STATE: addr %llx ret %d, uvcb rc %x rrc %x",
			     uvcb.gaddr, ret, uvcb.header.rc, uvcb.header.rrc);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	vfree(buff_kvm);

	return ret;
}
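/*
 * A minimal usage sketch (illustrative; buff and the length below are
 * hypothetical and must satisfy the alignment rules documented above):
 *
 *	u64 gaddr = 0;
 *	u64 len = 16 * uv_info.conf_dump_storage_state_len;
 *	u16 rc, rrc;
 *	int ret;
 *
 *	ret = kvm_s390_pv_dump_stor_state(kvm, buff, &gaddr, len, &rc, &rrc);
 *
 * On success, gaddr has been advanced past the last 1MB block whose
 * tweak data was written to buff.
 */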
/**
 * kvm_s390_pv_dump_complete - Complete the guest dump.
 *
 * @kvm: pointer to the guest's KVM struct
 * @buff_user: Userspace pointer where we will write the results to
 * @rc: Pointer to where the uvcb return code is stored
 * @rrc: Pointer to where the uvcb return reason code is stored
 *
 * Completes the dumping operation and writes the completion data to
 * user space.
 *
 * Context: kvm->lock needs to be held
 *
 * Return:
 *  0 on success
 *  -ENOMEM if allocating the completion buffer fails
 *  -EINVAL if the UV call fails, rc and rrc will be set in this case
 *  -EFAULT if copying the result to buff_user failed
 */
int kvm_s390_pv_dump_complete(struct kvm *kvm, void __user *buff_user,
			      u16 *rc, u16 *rrc)
{
	struct uv_cb_dump_complete complete = {
		.header.len = sizeof(complete),
		.header.cmd = UVC_CMD_DUMP_COMPLETE,
		.config_handle = kvm_s390_pv_get_handle(kvm),
	};
	u64 *compl_data;
	int ret;

	/* Allocate dump area */
	compl_data = vzalloc(uv_info.conf_dump_finalize_len);
	if (!compl_data)
		return -ENOMEM;
	complete.dump_area_origin = (u64)compl_data;

	ret = uv_call_sched(0, (u64)&complete);
	*rc = complete.header.rc;
	*rrc = complete.header.rrc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP COMPLETE: rc %x rrc %x",
		     complete.header.rc, complete.header.rrc);

	if (!ret) {
		/*
		 * kvm_s390_pv_dealloc_vm() will also (mem)set
		 * this to false on a reboot or other destroy
		 * operation for this vm.
		 */
		kvm->arch.pv.dumping = false;
		kvm_s390_vcpu_unblock_all(kvm);
		ret = copy_to_user(buff_user, compl_data, uv_info.conf_dump_finalize_len);
		if (ret)
			ret = -EFAULT;
	}
	vfree(compl_data);
	/* If the UVC returned an error, translate it to -EINVAL */
	if (ret > 0)
		ret = -EINVAL;
	return ret;
}
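/*
 * Rough order of a complete configuration dump (illustrative; driven by
 * userspace via KVM_PV_DUMP in practice): after the dump is initialized
 * (KVM_PV_DUMP_INIT), each vCPU is dumped with kvm_s390_pv_dump_cpu(),
 * the guest storage state with kvm_s390_pv_dump_stor_state(), and the
 * dump is finalized with kvm_s390_pv_dump_complete() above, which also
 * clears kvm->arch.pv.dumping and unblocks the vCPUs.
 */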