1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Kernel-based Virtual Machine driver for Linux 4 * 5 * AMD SVM-SEV support 6 * 7 * Copyright 2010 Red Hat, Inc. and/or its affiliates. 8 */ 9 10 #include <linux/kvm_types.h> 11 #include <linux/kvm_host.h> 12 #include <linux/kernel.h> 13 #include <linux/highmem.h> 14 #include <linux/psp-sev.h> 15 #include <linux/pagemap.h> 16 #include <linux/swap.h> 17 #include <linux/misc_cgroup.h> 18 #include <linux/processor.h> 19 #include <linux/trace_events.h> 20 #include <asm/fpu/internal.h> 21 22 #include <asm/trapnr.h> 23 24 #include "x86.h" 25 #include "svm.h" 26 #include "svm_ops.h" 27 #include "cpuid.h" 28 #include "trace.h" 29 30 #define __ex(x) __kvm_handle_fault_on_reboot(x) 31 32 #ifndef CONFIG_KVM_AMD_SEV 33 /* 34 * When this config is not defined, SEV feature is not supported and APIs in 35 * this file are not used but this file still gets compiled into the KVM AMD 36 * module. 37 * 38 * We will not have MISC_CG_RES_SEV and MISC_CG_RES_SEV_ES entries in the enum 39 * misc_res_type {} defined in linux/misc_cgroup.h. 40 * 41 * Below macros allow compilation to succeed. 42 */ 43 #define MISC_CG_RES_SEV MISC_CG_RES_TYPES 44 #define MISC_CG_RES_SEV_ES MISC_CG_RES_TYPES 45 #endif 46 47 #ifdef CONFIG_KVM_AMD_SEV 48 /* enable/disable SEV support */ 49 static bool sev_enabled = true; 50 module_param_named(sev, sev_enabled, bool, 0444); 51 52 /* enable/disable SEV-ES support */ 53 static bool sev_es_enabled = true; 54 module_param_named(sev_es, sev_es_enabled, bool, 0444); 55 #else 56 #define sev_enabled false 57 #define sev_es_enabled false 58 #endif /* CONFIG_KVM_AMD_SEV */ 59 60 static u8 sev_enc_bit; 61 static DECLARE_RWSEM(sev_deactivate_lock); 62 static DEFINE_MUTEX(sev_bitmap_lock); 63 unsigned int max_sev_asid; 64 static unsigned int min_sev_asid; 65 static unsigned long sev_me_mask; 66 static unsigned long *sev_asid_bitmap; 67 static unsigned long *sev_reclaim_asid_bitmap; 68 69 struct enc_region { 70 struct list_head list; 71 unsigned long npages; 72 struct page **pages; 73 unsigned long uaddr; 74 unsigned long size; 75 }; 76 77 /* Called with the sev_bitmap_lock held, or on shutdown */ 78 static int sev_flush_asids(int min_asid, int max_asid) 79 { 80 int ret, pos, error = 0; 81 82 /* Check if there are any ASIDs to reclaim before performing a flush */ 83 pos = find_next_bit(sev_reclaim_asid_bitmap, max_asid, min_asid); 84 if (pos >= max_asid) 85 return -EBUSY; 86 87 /* 88 * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail, 89 * so it must be guarded. 90 */ 91 down_write(&sev_deactivate_lock); 92 93 wbinvd_on_all_cpus(); 94 ret = sev_guest_df_flush(&error); 95 96 up_write(&sev_deactivate_lock); 97 98 if (ret) 99 pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error); 100 101 return ret; 102 } 103 104 static inline bool is_mirroring_enc_context(struct kvm *kvm) 105 { 106 return !!to_kvm_svm(kvm)->sev_info.enc_context_owner; 107 } 108 109 /* Must be called with the sev_bitmap_lock held */ 110 static bool __sev_recycle_asids(int min_asid, int max_asid) 111 { 112 if (sev_flush_asids(min_asid, max_asid)) 113 return false; 114 115 /* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */ 116 bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap, 117 max_sev_asid); 118 bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid); 119 120 return true; 121 } 122 123 static int sev_asid_new(struct kvm_sev_info *sev) 124 { 125 int pos, min_asid, max_asid, ret; 126 bool retry = true; 127 enum misc_res_type type; 128 129 type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV; 130 WARN_ON(sev->misc_cg); 131 sev->misc_cg = get_current_misc_cg(); 132 ret = misc_cg_try_charge(type, sev->misc_cg, 1); 133 if (ret) { 134 put_misc_cg(sev->misc_cg); 135 sev->misc_cg = NULL; 136 return ret; 137 } 138 139 mutex_lock(&sev_bitmap_lock); 140 141 /* 142 * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid. 143 * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1. 144 */ 145 min_asid = sev->es_active ? 0 : min_sev_asid - 1; 146 max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid; 147 again: 148 pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_asid); 149 if (pos >= max_asid) { 150 if (retry && __sev_recycle_asids(min_asid, max_asid)) { 151 retry = false; 152 goto again; 153 } 154 mutex_unlock(&sev_bitmap_lock); 155 ret = -EBUSY; 156 goto e_uncharge; 157 } 158 159 __set_bit(pos, sev_asid_bitmap); 160 161 mutex_unlock(&sev_bitmap_lock); 162 163 return pos + 1; 164 e_uncharge: 165 misc_cg_uncharge(type, sev->misc_cg, 1); 166 put_misc_cg(sev->misc_cg); 167 sev->misc_cg = NULL; 168 return ret; 169 } 170 171 static int sev_get_asid(struct kvm *kvm) 172 { 173 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; 174 175 return sev->asid; 176 } 177 178 static void sev_asid_free(struct kvm_sev_info *sev) 179 { 180 struct svm_cpu_data *sd; 181 int cpu, pos; 182 enum misc_res_type type; 183 184 mutex_lock(&sev_bitmap_lock); 185 186 pos = sev->asid - 1; 187 __set_bit(pos, sev_reclaim_asid_bitmap); 188 189 for_each_possible_cpu(cpu) { 190 sd = per_cpu(svm_data, cpu); 191 sd->sev_vmcbs[pos] = NULL; 192 } 193 194 mutex_unlock(&sev_bitmap_lock); 195 196 type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV; 197 misc_cg_uncharge(type, sev->misc_cg, 1); 198 put_misc_cg(sev->misc_cg); 199 sev->misc_cg = NULL; 200 } 201 202 static void sev_unbind_asid(struct kvm *kvm, unsigned int handle) 203 { 204 struct sev_data_decommission decommission; 205 struct sev_data_deactivate deactivate; 206 207 if (!handle) 208 return; 209 210 deactivate.handle = handle; 211 212 /* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */ 213 down_read(&sev_deactivate_lock); 214 sev_guest_deactivate(&deactivate, NULL); 215 up_read(&sev_deactivate_lock); 216 217 /* decommission handle */ 218 decommission.handle = handle; 219 sev_guest_decommission(&decommission, NULL); 220 } 221 222 static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) 223 { 224 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; 225 bool es_active = argp->id == KVM_SEV_ES_INIT; 226 int asid, ret; 227 228 if (kvm->created_vcpus) 229 return -EINVAL; 230 231 ret = -EBUSY; 232 if (unlikely(sev->active)) 233 return ret; 234 235 sev->es_active = es_active; 236 asid = sev_asid_new(sev); 237 if (asid < 0) 238 goto e_no_asid; 239 sev->asid = asid; 240 241 ret = sev_platform_init(&argp->error); 242 if (ret) 243 goto e_free; 244 245 sev->active = true; 246 sev->asid = asid; 247 INIT_LIST_HEAD(&sev->regions_list); 248 249 return 0; 250 251 e_free: 252 sev_asid_free(sev); 253 sev->asid = 0; 254 e_no_asid: 255 sev->es_active = false; 256 return ret; 257 } 258 259 static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error) 260 { 261 struct sev_data_activate activate; 262 int asid = sev_get_asid(kvm); 263 int ret; 264 265 /* activate ASID on the given handle */ 266 activate.handle = handle; 267 activate.asid = asid; 268 ret = sev_guest_activate(&activate, error); 269 270 return ret; 271 } 272 273 static int __sev_issue_cmd(int fd, int id, void *data, int *error) 274 { 275 struct fd f; 276 int ret; 277 278 f = fdget(fd); 279 if (!f.file) 280 return -EBADF; 281 282 ret = sev_issue_cmd_external_user(f.file, id, data, error); 283 284 fdput(f); 285 return ret; 286 } 287 288 static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error) 289 { 290 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; 291 292 return __sev_issue_cmd(sev->fd, id, data, error); 293 } 294 295 static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp) 296 { 297 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; 298 struct sev_data_launch_start start; 299 struct kvm_sev_launch_start params; 300 void *dh_blob, *session_blob; 301 int *error = &argp->error; 302 int ret; 303 304 if (!sev_guest(kvm)) 305 return -ENOTTY; 306 307 if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, sizeof(params))) 308 return -EFAULT; 309 310 memset(&start, 0, sizeof(start)); 311 312 dh_blob = NULL; 313 if (params.dh_uaddr) { 314 dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len); 315 if (IS_ERR(dh_blob)) 316 return PTR_ERR(dh_blob); 317 318 start.dh_cert_address = __sme_set(__pa(dh_blob)); 319 start.dh_cert_len = params.dh_len; 320 } 321 322 session_blob = NULL; 323 if (params.session_uaddr) { 324 session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len); 325 if (IS_ERR(session_blob)) { 326 ret = PTR_ERR(session_blob); 327 goto e_free_dh; 328 } 329 330 start.session_address = __sme_set(__pa(session_blob)); 331 start.session_len = params.session_len; 332 } 333 334 start.handle = params.handle; 335 start.policy = params.policy; 336 337 /* create memory encryption context */ 338 ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, &start, error); 339 if (ret) 340 goto e_free_session; 341 342 /* Bind ASID to this guest */ 343 ret = sev_bind_asid(kvm, start.handle, error); 344 if (ret) 345 goto e_free_session; 346 347 /* return handle to userspace */ 348 params.handle = start.handle; 349 if (copy_to_user((void __user *)(uintptr_t)argp->data, ¶ms, sizeof(params))) { 350 sev_unbind_asid(kvm, start.handle); 351 ret = -EFAULT; 352 goto e_free_session; 353 } 354 355 sev->handle = start.handle; 356 sev->fd = argp->sev_fd; 357 358 e_free_session: 359 kfree(session_blob); 360 e_free_dh: 361 kfree(dh_blob); 362 return ret; 363 } 364 365 static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, 366 unsigned long ulen, unsigned long *n, 367 int write) 368 { 369 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; 370 unsigned long npages, size; 371 int npinned; 372 unsigned long locked, lock_limit; 373 struct page **pages; 374 unsigned long first, last; 375 int ret; 376 377 lockdep_assert_held(&kvm->lock); 378 379 if (ulen == 0 || uaddr + ulen < uaddr) 380 return ERR_PTR(-EINVAL); 381 382 /* Calculate number of pages. */ 383 first = (uaddr & PAGE_MASK) >> PAGE_SHIFT; 384 last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT; 385 npages = (last - first + 1); 386 387 locked = sev->pages_locked + npages; 388 lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; 389 if (locked > lock_limit && !capable(CAP_IPC_LOCK)) { 390 pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit); 391 return ERR_PTR(-ENOMEM); 392 } 393 394 if (WARN_ON_ONCE(npages > INT_MAX)) 395 return ERR_PTR(-EINVAL); 396 397 /* Avoid using vmalloc for smaller buffers. */ 398 size = npages * sizeof(struct page *); 399 if (size > PAGE_SIZE) 400 pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO); 401 else 402 pages = kmalloc(size, GFP_KERNEL_ACCOUNT); 403 404 if (!pages) 405 return ERR_PTR(-ENOMEM); 406 407 /* Pin the user virtual address. */ 408 npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages); 409 if (npinned != npages) { 410 pr_err("SEV: Failure locking %lu pages.\n", npages); 411 ret = -ENOMEM; 412 goto err; 413 } 414 415 *n = npages; 416 sev->pages_locked = locked; 417 418 return pages; 419 420 err: 421 if (npinned > 0) 422 unpin_user_pages(pages, npinned); 423 424 kvfree(pages); 425 return ERR_PTR(ret); 426 } 427 428 static void sev_unpin_memory(struct kvm *kvm, struct page **pages, 429 unsigned long npages) 430 { 431 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; 432 433 unpin_user_pages(pages, npages); 434 kvfree(pages); 435 sev->pages_locked -= npages; 436 } 437 438 static void sev_clflush_pages(struct page *pages[], unsigned long npages) 439 { 440 uint8_t *page_virtual; 441 unsigned long i; 442 443 if (this_cpu_has(X86_FEATURE_SME_COHERENT) || npages == 0 || 444 pages == NULL) 445 return; 446 447 for (i = 0; i < npages; i++) { 448 page_virtual = kmap_atomic(pages[i]); 449 clflush_cache_range(page_virtual, PAGE_SIZE); 450 kunmap_atomic(page_virtual); 451 } 452 } 453 454 static unsigned long get_num_contig_pages(unsigned long idx, 455 struct page **inpages, unsigned long npages) 456 { 457 unsigned long paddr, next_paddr; 458 unsigned long i = idx + 1, pages = 1; 459 460 /* find the number of contiguous pages starting from idx */ 461 paddr = __sme_page_pa(inpages[idx]); 462 while (i < npages) { 463 next_paddr = __sme_page_pa(inpages[i++]); 464 if ((paddr + PAGE_SIZE) == next_paddr) { 465 pages++; 466 paddr = next_paddr; 467 continue; 468 } 469 break; 470 } 471 472 return pages; 473 } 474 475 static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) 476 { 477 unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i; 478 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; 479 struct kvm_sev_launch_update_data params; 480 struct sev_data_launch_update_data data; 481 struct page **inpages; 482 int ret; 483 484 if (!sev_guest(kvm)) 485 return -ENOTTY; 486 487 if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, sizeof(params))) 488 return -EFAULT; 489 490 vaddr = params.uaddr; 491 size = params.len; 492 vaddr_end = vaddr + size; 493 494 /* Lock the user memory. */ 495 inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1); 496 if (IS_ERR(inpages)) 497 return PTR_ERR(inpages); 498 499 /* 500 * Flush (on non-coherent CPUs) before LAUNCH_UPDATE encrypts pages in 501 * place; the cache may contain the data that was written unencrypted. 502 */ 503 sev_clflush_pages(inpages, npages); 504 505 data.reserved = 0; 506 data.handle = sev->handle; 507 508 for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) { 509 int offset, len; 510 511 /* 512 * If the user buffer is not page-aligned, calculate the offset 513 * within the page. 514 */ 515 offset = vaddr & (PAGE_SIZE - 1); 516 517 /* Calculate the number of pages that can be encrypted in one go. */ 518 pages = get_num_contig_pages(i, inpages, npages); 519 520 len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size); 521 522 data.len = len; 523 data.address = __sme_page_pa(inpages[i]) + offset; 524 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, &data, &argp->error); 525 if (ret) 526 goto e_unpin; 527 528 size -= len; 529 next_vaddr = vaddr + len; 530 } 531 532 e_unpin: 533 /* content of memory is updated, mark pages dirty */ 534 for (i = 0; i < npages; i++) { 535 set_page_dirty_lock(inpages[i]); 536 mark_page_accessed(inpages[i]); 537 } 538 /* unlock the user pages */ 539 sev_unpin_memory(kvm, inpages, npages); 540 return ret; 541 } 542 543 static int sev_es_sync_vmsa(struct vcpu_svm *svm) 544 { 545 struct vmcb_save_area *save = &svm->vmcb->save; 546 547 /* Check some debug related fields before encrypting the VMSA */ 548 if (svm->vcpu.guest_debug || (save->dr7 & ~DR7_FIXED_1)) 549 return -EINVAL; 550 551 /* Sync registgers */ 552 save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX]; 553 save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX]; 554 save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX]; 555 save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX]; 556 save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP]; 557 save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP]; 558 save->rsi = svm->vcpu.arch.regs[VCPU_REGS_RSI]; 559 save->rdi = svm->vcpu.arch.regs[VCPU_REGS_RDI]; 560 #ifdef CONFIG_X86_64 561 save->r8 = svm->vcpu.arch.regs[VCPU_REGS_R8]; 562 save->r9 = svm->vcpu.arch.regs[VCPU_REGS_R9]; 563 save->r10 = svm->vcpu.arch.regs[VCPU_REGS_R10]; 564 save->r11 = svm->vcpu.arch.regs[VCPU_REGS_R11]; 565 save->r12 = svm->vcpu.arch.regs[VCPU_REGS_R12]; 566 save->r13 = svm->vcpu.arch.regs[VCPU_REGS_R13]; 567 save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14]; 568 save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15]; 569 #endif 570 save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP]; 571 572 /* Sync some non-GPR registers before encrypting */ 573 save->xcr0 = svm->vcpu.arch.xcr0; 574 save->pkru = svm->vcpu.arch.pkru; 575 save->xss = svm->vcpu.arch.ia32_xss; 576 577 /* 578 * SEV-ES will use a VMSA that is pointed to by the VMCB, not 579 * the traditional VMSA that is part of the VMCB. Copy the 580 * traditional VMSA as it has been built so far (in prep 581 * for LAUNCH_UPDATE_VMSA) to be the initial SEV-ES state. 582 */ 583 memcpy(svm->vmsa, save, sizeof(*save)); 584 585 return 0; 586 } 587 588 static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) 589 { 590 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; 591 struct sev_data_launch_update_vmsa vmsa; 592 struct kvm_vcpu *vcpu; 593 int i, ret; 594 595 if (!sev_es_guest(kvm)) 596 return -ENOTTY; 597 598 vmsa.reserved = 0; 599 600 kvm_for_each_vcpu(i, vcpu, kvm) { 601 struct vcpu_svm *svm = to_svm(vcpu); 602 603 /* Perform some pre-encryption checks against the VMSA */ 604 ret = sev_es_sync_vmsa(svm); 605 if (ret) 606 return ret; 607 608 /* 609 * The LAUNCH_UPDATE_VMSA command will perform in-place 610 * encryption of the VMSA memory content (i.e it will write 611 * the same memory region with the guest's key), so invalidate 612 * it first. 613 */ 614 clflush_cache_range(svm->vmsa, PAGE_SIZE); 615 616 vmsa.handle = sev->handle; 617 vmsa.address = __sme_pa(svm->vmsa); 618 vmsa.len = PAGE_SIZE; 619 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, 620 &argp->error); 621 if (ret) 622 return ret; 623 624 svm->vcpu.arch.guest_state_protected = true; 625 } 626 627 return 0; 628 } 629 630 static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp) 631 { 632 void __user *measure = (void __user *)(uintptr_t)argp->data; 633 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; 634 struct sev_data_launch_measure data; 635 struct kvm_sev_launch_measure params; 636 void __user *p = NULL; 637 void *blob = NULL; 638 int ret; 639 640 if (!sev_guest(kvm)) 641 return -ENOTTY; 642 643 if (copy_from_user(¶ms, measure, sizeof(params))) 644 return -EFAULT; 645 646 memset(&data, 0, sizeof(data)); 647 648 /* User wants to query the blob length */ 649 if (!params.len) 650 goto cmd; 651 652 p = (void __user *)(uintptr_t)params.uaddr; 653 if (p) { 654 if (params.len > SEV_FW_BLOB_MAX_SIZE) 655 return -EINVAL; 656 657 blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT); 658 if (!blob) 659 return -ENOMEM; 660 661 data.address = __psp_pa(blob); 662 data.len = params.len; 663 } 664 665 cmd: 666 data.handle = sev->handle; 667 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, &data, &argp->error); 668 669 /* 670 * If we query the session length, FW responded with expected data. 671 */ 672 if (!params.len) 673 goto done; 674 675 if (ret) 676 goto e_free_blob; 677 678 if (blob) { 679 if (copy_to_user(p, blob, params.len)) 680 ret = -EFAULT; 681 } 682 683 done: 684 params.len = data.len; 685 if (copy_to_user(measure, ¶ms, sizeof(params))) 686 ret = -EFAULT; 687 e_free_blob: 688 kfree(blob); 689 return ret; 690 } 691 692 static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp) 693 { 694 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; 695 struct sev_data_launch_finish data; 696 697 if (!sev_guest(kvm)) 698 return -ENOTTY; 699 700 data.handle = sev->handle; 701 return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, &data, &argp->error); 702 } 703 704 static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp) 705 { 706 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; 707 struct kvm_sev_guest_status params; 708 struct sev_data_guest_status data; 709 int ret; 710 711 if (!sev_guest(kvm)) 712 return -ENOTTY; 713 714 memset(&data, 0, sizeof(data)); 715 716 data.handle = sev->handle; 717 ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, &data, &argp->error); 718 if (ret) 719 return ret; 720 721 params.policy = data.policy; 722 params.state = data.state; 723 params.handle = data.handle; 724 725 if (copy_to_user((void __user *)(uintptr_t)argp->data, ¶ms, sizeof(params))) 726 ret = -EFAULT; 727 728 return ret; 729 } 730 731 static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src, 732 unsigned long dst, int size, 733 int *error, bool enc) 734 { 735 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; 736 struct sev_data_dbg data; 737 738 data.reserved = 0; 739 data.handle = sev->handle; 740 data.dst_addr = dst; 741 data.src_addr = src; 742 data.len = size; 743 744 return sev_issue_cmd(kvm, 745 enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT, 746 &data, error); 747 } 748 749 static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr, 750 unsigned long dst_paddr, int sz, int *err) 751 { 752 int offset; 753 754 /* 755 * Its safe to read more than we are asked, caller should ensure that 756 * destination has enough space. 757 */ 758 offset = src_paddr & 15; 759 src_paddr = round_down(src_paddr, 16); 760 sz = round_up(sz + offset, 16); 761 762 return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false); 763 } 764 765 static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr, 766 void __user *dst_uaddr, 767 unsigned long dst_paddr, 768 int size, int *err) 769 { 770 struct page *tpage = NULL; 771 int ret, offset; 772 773 /* if inputs are not 16-byte then use intermediate buffer */ 774 if (!IS_ALIGNED(dst_paddr, 16) || 775 !IS_ALIGNED(paddr, 16) || 776 !IS_ALIGNED(size, 16)) { 777 tpage = (void *)alloc_page(GFP_KERNEL); 778 if (!tpage) 779 return -ENOMEM; 780 781 dst_paddr = __sme_page_pa(tpage); 782 } 783 784 ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err); 785 if (ret) 786 goto e_free; 787 788 if (tpage) { 789 offset = paddr & 15; 790 if (copy_to_user(dst_uaddr, page_address(tpage) + offset, size)) 791 ret = -EFAULT; 792 } 793 794 e_free: 795 if (tpage) 796 __free_page(tpage); 797 798 return ret; 799 } 800 801 static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr, 802 void __user *vaddr, 803 unsigned long dst_paddr, 804 void __user *dst_vaddr, 805 int size, int *error) 806 { 807 struct page *src_tpage = NULL; 808 struct page *dst_tpage = NULL; 809 int ret, len = size; 810 811 /* If source buffer is not aligned then use an intermediate buffer */ 812 if (!IS_ALIGNED((unsigned long)vaddr, 16)) { 813 src_tpage = alloc_page(GFP_KERNEL); 814 if (!src_tpage) 815 return -ENOMEM; 816 817 if (copy_from_user(page_address(src_tpage), vaddr, size)) { 818 __free_page(src_tpage); 819 return -EFAULT; 820 } 821 822 paddr = __sme_page_pa(src_tpage); 823 } 824 825 /* 826 * If destination buffer or length is not aligned then do read-modify-write: 827 * - decrypt destination in an intermediate buffer 828 * - copy the source buffer in an intermediate buffer 829 * - use the intermediate buffer as source buffer 830 */ 831 if (!IS_ALIGNED((unsigned long)dst_vaddr, 16) || !IS_ALIGNED(size, 16)) { 832 int dst_offset; 833 834 dst_tpage = alloc_page(GFP_KERNEL); 835 if (!dst_tpage) { 836 ret = -ENOMEM; 837 goto e_free; 838 } 839 840 ret = __sev_dbg_decrypt(kvm, dst_paddr, 841 __sme_page_pa(dst_tpage), size, error); 842 if (ret) 843 goto e_free; 844 845 /* 846 * If source is kernel buffer then use memcpy() otherwise 847 * copy_from_user(). 848 */ 849 dst_offset = dst_paddr & 15; 850 851 if (src_tpage) 852 memcpy(page_address(dst_tpage) + dst_offset, 853 page_address(src_tpage), size); 854 else { 855 if (copy_from_user(page_address(dst_tpage) + dst_offset, 856 vaddr, size)) { 857 ret = -EFAULT; 858 goto e_free; 859 } 860 } 861 862 paddr = __sme_page_pa(dst_tpage); 863 dst_paddr = round_down(dst_paddr, 16); 864 len = round_up(size, 16); 865 } 866 867 ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true); 868 869 e_free: 870 if (src_tpage) 871 __free_page(src_tpage); 872 if (dst_tpage) 873 __free_page(dst_tpage); 874 return ret; 875 } 876 877 static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec) 878 { 879 unsigned long vaddr, vaddr_end, next_vaddr; 880 unsigned long dst_vaddr; 881 struct page **src_p, **dst_p; 882 struct kvm_sev_dbg debug; 883 unsigned long n; 884 unsigned int size; 885 int ret; 886 887 if (!sev_guest(kvm)) 888 return -ENOTTY; 889 890 if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug))) 891 return -EFAULT; 892 893 if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr) 894 return -EINVAL; 895 if (!debug.dst_uaddr) 896 return -EINVAL; 897 898 vaddr = debug.src_uaddr; 899 size = debug.len; 900 vaddr_end = vaddr + size; 901 dst_vaddr = debug.dst_uaddr; 902 903 for (; vaddr < vaddr_end; vaddr = next_vaddr) { 904 int len, s_off, d_off; 905 906 /* lock userspace source and destination page */ 907 src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0); 908 if (IS_ERR(src_p)) 909 return PTR_ERR(src_p); 910 911 dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1); 912 if (IS_ERR(dst_p)) { 913 sev_unpin_memory(kvm, src_p, n); 914 return PTR_ERR(dst_p); 915 } 916 917 /* 918 * Flush (on non-coherent CPUs) before DBG_{DE,EN}CRYPT read or modify 919 * the pages; flush the destination too so that future accesses do not 920 * see stale data. 921 */ 922 sev_clflush_pages(src_p, 1); 923 sev_clflush_pages(dst_p, 1); 924 925 /* 926 * Since user buffer may not be page aligned, calculate the 927 * offset within the page. 928 */ 929 s_off = vaddr & ~PAGE_MASK; 930 d_off = dst_vaddr & ~PAGE_MASK; 931 len = min_t(size_t, (PAGE_SIZE - s_off), size); 932 933 if (dec) 934 ret = __sev_dbg_decrypt_user(kvm, 935 __sme_page_pa(src_p[0]) + s_off, 936 (void __user *)dst_vaddr, 937 __sme_page_pa(dst_p[0]) + d_off, 938 len, &argp->error); 939 else 940 ret = __sev_dbg_encrypt_user(kvm, 941 __sme_page_pa(src_p[0]) + s_off, 942 (void __user *)vaddr, 943 __sme_page_pa(dst_p[0]) + d_off, 944 (void __user *)dst_vaddr, 945 len, &argp->error); 946 947 sev_unpin_memory(kvm, src_p, n); 948 sev_unpin_memory(kvm, dst_p, n); 949 950 if (ret) 951 goto err; 952 953 next_vaddr = vaddr + len; 954 dst_vaddr = dst_vaddr + len; 955 size -= len; 956 } 957 err: 958 return ret; 959 } 960 961 static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp) 962 { 963 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; 964 struct sev_data_launch_secret data; 965 struct kvm_sev_launch_secret params; 966 struct page **pages; 967 void *blob, *hdr; 968 unsigned long n, i; 969 int ret, offset; 970 971 if (!sev_guest(kvm)) 972 return -ENOTTY; 973 974 if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, sizeof(params))) 975 return -EFAULT; 976 977 pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1); 978 if (IS_ERR(pages)) 979 return PTR_ERR(pages); 980 981 /* 982 * Flush (on non-coherent CPUs) before LAUNCH_SECRET encrypts pages in 983 * place; the cache may contain the data that was written unencrypted. 984 */ 985 sev_clflush_pages(pages, n); 986 987 /* 988 * The secret must be copied into contiguous memory region, lets verify 989 * that userspace memory pages are contiguous before we issue command. 990 */ 991 if (get_num_contig_pages(0, pages, n) != n) { 992 ret = -EINVAL; 993 goto e_unpin_memory; 994 } 995 996 memset(&data, 0, sizeof(data)); 997 998 offset = params.guest_uaddr & (PAGE_SIZE - 1); 999 data.guest_address = __sme_page_pa(pages[0]) + offset; 1000 data.guest_len = params.guest_len; 1001 1002 blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len); 1003 if (IS_ERR(blob)) { 1004 ret = PTR_ERR(blob); 1005 goto e_unpin_memory; 1006 } 1007 1008 data.trans_address = __psp_pa(blob); 1009 data.trans_len = params.trans_len; 1010 1011 hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len); 1012 if (IS_ERR(hdr)) { 1013 ret = PTR_ERR(hdr); 1014 goto e_free_blob; 1015 } 1016 data.hdr_address = __psp_pa(hdr); 1017 data.hdr_len = params.hdr_len; 1018 1019 data.handle = sev->handle; 1020 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, &data, &argp->error); 1021 1022 kfree(hdr); 1023 1024 e_free_blob: 1025 kfree(blob); 1026 e_unpin_memory: 1027 /* content of memory is updated, mark pages dirty */ 1028 for (i = 0; i < n; i++) { 1029 set_page_dirty_lock(pages[i]); 1030 mark_page_accessed(pages[i]); 1031 } 1032 sev_unpin_memory(kvm, pages, n); 1033 return ret; 1034 } 1035 1036 static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp) 1037 { 1038 void __user *report = (void __user *)(uintptr_t)argp->data; 1039 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; 1040 struct sev_data_attestation_report data; 1041 struct kvm_sev_attestation_report params; 1042 void __user *p; 1043 void *blob = NULL; 1044 int ret; 1045 1046 if (!sev_guest(kvm)) 1047 return -ENOTTY; 1048 1049 if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, sizeof(params))) 1050 return -EFAULT; 1051 1052 memset(&data, 0, sizeof(data)); 1053 1054 /* User wants to query the blob length */ 1055 if (!params.len) 1056 goto cmd; 1057 1058 p = (void __user *)(uintptr_t)params.uaddr; 1059 if (p) { 1060 if (params.len > SEV_FW_BLOB_MAX_SIZE) 1061 return -EINVAL; 1062 1063 blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT); 1064 if (!blob) 1065 return -ENOMEM; 1066 1067 data.address = __psp_pa(blob); 1068 data.len = params.len; 1069 memcpy(data.mnonce, params.mnonce, sizeof(params.mnonce)); 1070 } 1071 cmd: 1072 data.handle = sev->handle; 1073 ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, &data, &argp->error); 1074 /* 1075 * If we query the session length, FW responded with expected data. 1076 */ 1077 if (!params.len) 1078 goto done; 1079 1080 if (ret) 1081 goto e_free_blob; 1082 1083 if (blob) { 1084 if (copy_to_user(p, blob, params.len)) 1085 ret = -EFAULT; 1086 } 1087 1088 done: 1089 params.len = data.len; 1090 if (copy_to_user(report, ¶ms, sizeof(params))) 1091 ret = -EFAULT; 1092 e_free_blob: 1093 kfree(blob); 1094 return ret; 1095 } 1096 1097 /* Userspace wants to query session length. */ 1098 static int 1099 __sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp, 1100 struct kvm_sev_send_start *params) 1101 { 1102 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; 1103 struct sev_data_send_start data; 1104 int ret; 1105 1106 data.handle = sev->handle; 1107 ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error); 1108 if (ret < 0) 1109 return ret; 1110 1111 params->session_len = data.session_len; 1112 if (copy_to_user((void __user *)(uintptr_t)argp->data, params, 1113 sizeof(struct kvm_sev_send_start))) 1114 ret = -EFAULT; 1115 1116 return ret; 1117 } 1118 1119 static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp) 1120 { 1121 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; 1122 struct sev_data_send_start data; 1123 struct kvm_sev_send_start params; 1124 void *amd_certs, *session_data; 1125 void *pdh_cert, *plat_certs; 1126 int ret; 1127 1128 if (!sev_guest(kvm)) 1129 return -ENOTTY; 1130 1131 if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, 1132 sizeof(struct kvm_sev_send_start))) 1133 return -EFAULT; 1134 1135 /* if session_len is zero, userspace wants to query the session length */ 1136 if (!params.session_len) 1137 return __sev_send_start_query_session_length(kvm, argp, 1138 ¶ms); 1139 1140 /* some sanity checks */ 1141 if (!params.pdh_cert_uaddr || !params.pdh_cert_len || 1142 !params.session_uaddr || params.session_len > SEV_FW_BLOB_MAX_SIZE) 1143 return -EINVAL; 1144 1145 /* allocate the memory to hold the session data blob */ 1146 session_data = kmalloc(params.session_len, GFP_KERNEL_ACCOUNT); 1147 if (!session_data) 1148 return -ENOMEM; 1149 1150 /* copy the certificate blobs from userspace */ 1151 pdh_cert = psp_copy_user_blob(params.pdh_cert_uaddr, 1152 params.pdh_cert_len); 1153 if (IS_ERR(pdh_cert)) { 1154 ret = PTR_ERR(pdh_cert); 1155 goto e_free_session; 1156 } 1157 1158 plat_certs = psp_copy_user_blob(params.plat_certs_uaddr, 1159 params.plat_certs_len); 1160 if (IS_ERR(plat_certs)) { 1161 ret = PTR_ERR(plat_certs); 1162 goto e_free_pdh; 1163 } 1164 1165 amd_certs = psp_copy_user_blob(params.amd_certs_uaddr, 1166 params.amd_certs_len); 1167 if (IS_ERR(amd_certs)) { 1168 ret = PTR_ERR(amd_certs); 1169 goto e_free_plat_cert; 1170 } 1171 1172 /* populate the FW SEND_START field with system physical address */ 1173 memset(&data, 0, sizeof(data)); 1174 data.pdh_cert_address = __psp_pa(pdh_cert); 1175 data.pdh_cert_len = params.pdh_cert_len; 1176 data.plat_certs_address = __psp_pa(plat_certs); 1177 data.plat_certs_len = params.plat_certs_len; 1178 data.amd_certs_address = __psp_pa(amd_certs); 1179 data.amd_certs_len = params.amd_certs_len; 1180 data.session_address = __psp_pa(session_data); 1181 data.session_len = params.session_len; 1182 data.handle = sev->handle; 1183 1184 ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error); 1185 1186 if (!ret && copy_to_user((void __user *)(uintptr_t)params.session_uaddr, 1187 session_data, params.session_len)) { 1188 ret = -EFAULT; 1189 goto e_free_amd_cert; 1190 } 1191 1192 params.policy = data.policy; 1193 params.session_len = data.session_len; 1194 if (copy_to_user((void __user *)(uintptr_t)argp->data, ¶ms, 1195 sizeof(struct kvm_sev_send_start))) 1196 ret = -EFAULT; 1197 1198 e_free_amd_cert: 1199 kfree(amd_certs); 1200 e_free_plat_cert: 1201 kfree(plat_certs); 1202 e_free_pdh: 1203 kfree(pdh_cert); 1204 e_free_session: 1205 kfree(session_data); 1206 return ret; 1207 } 1208 1209 /* Userspace wants to query either header or trans length. */ 1210 static int 1211 __sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp, 1212 struct kvm_sev_send_update_data *params) 1213 { 1214 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; 1215 struct sev_data_send_update_data data; 1216 int ret; 1217 1218 data.handle = sev->handle; 1219 ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error); 1220 if (ret < 0) 1221 return ret; 1222 1223 params->hdr_len = data.hdr_len; 1224 params->trans_len = data.trans_len; 1225 1226 if (copy_to_user((void __user *)(uintptr_t)argp->data, params, 1227 sizeof(struct kvm_sev_send_update_data))) 1228 ret = -EFAULT; 1229 1230 return ret; 1231 } 1232 1233 static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) 1234 { 1235 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; 1236 struct sev_data_send_update_data data; 1237 struct kvm_sev_send_update_data params; 1238 void *hdr, *trans_data; 1239 struct page **guest_page; 1240 unsigned long n; 1241 int ret, offset; 1242 1243 if (!sev_guest(kvm)) 1244 return -ENOTTY; 1245 1246 if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, 1247 sizeof(struct kvm_sev_send_update_data))) 1248 return -EFAULT; 1249 1250 /* userspace wants to query either header or trans length */ 1251 if (!params.trans_len || !params.hdr_len) 1252 return __sev_send_update_data_query_lengths(kvm, argp, ¶ms); 1253 1254 if (!params.trans_uaddr || !params.guest_uaddr || 1255 !params.guest_len || !params.hdr_uaddr) 1256 return -EINVAL; 1257 1258 /* Check if we are crossing the page boundary */ 1259 offset = params.guest_uaddr & (PAGE_SIZE - 1); 1260 if ((params.guest_len + offset > PAGE_SIZE)) 1261 return -EINVAL; 1262 1263 /* Pin guest memory */ 1264 guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, 1265 PAGE_SIZE, &n, 0); 1266 if (!guest_page) 1267 return -EFAULT; 1268 1269 /* allocate memory for header and transport buffer */ 1270 ret = -ENOMEM; 1271 hdr = kmalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); 1272 if (!hdr) 1273 goto e_unpin; 1274 1275 trans_data = kmalloc(params.trans_len, GFP_KERNEL_ACCOUNT); 1276 if (!trans_data) 1277 goto e_free_hdr; 1278 1279 memset(&data, 0, sizeof(data)); 1280 data.hdr_address = __psp_pa(hdr); 1281 data.hdr_len = params.hdr_len; 1282 data.trans_address = __psp_pa(trans_data); 1283 data.trans_len = params.trans_len; 1284 1285 /* The SEND_UPDATE_DATA command requires C-bit to be always set. */ 1286 data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset; 1287 data.guest_address |= sev_me_mask; 1288 data.guest_len = params.guest_len; 1289 data.handle = sev->handle; 1290 1291 ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error); 1292 1293 if (ret) 1294 goto e_free_trans_data; 1295 1296 /* copy transport buffer to user space */ 1297 if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr, 1298 trans_data, params.trans_len)) { 1299 ret = -EFAULT; 1300 goto e_free_trans_data; 1301 } 1302 1303 /* Copy packet header to userspace. */ 1304 ret = copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, 1305 params.hdr_len); 1306 1307 e_free_trans_data: 1308 kfree(trans_data); 1309 e_free_hdr: 1310 kfree(hdr); 1311 e_unpin: 1312 sev_unpin_memory(kvm, guest_page, n); 1313 1314 return ret; 1315 } 1316 1317 static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp) 1318 { 1319 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; 1320 struct sev_data_send_finish data; 1321 1322 if (!sev_guest(kvm)) 1323 return -ENOTTY; 1324 1325 data.handle = sev->handle; 1326 return sev_issue_cmd(kvm, SEV_CMD_SEND_FINISH, &data, &argp->error); 1327 } 1328 1329 static int sev_send_cancel(struct kvm *kvm, struct kvm_sev_cmd *argp) 1330 { 1331 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; 1332 struct sev_data_send_cancel data; 1333 1334 if (!sev_guest(kvm)) 1335 return -ENOTTY; 1336 1337 data.handle = sev->handle; 1338 return sev_issue_cmd(kvm, SEV_CMD_SEND_CANCEL, &data, &argp->error); 1339 } 1340 1341 static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp) 1342 { 1343 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; 1344 struct sev_data_receive_start start; 1345 struct kvm_sev_receive_start params; 1346 int *error = &argp->error; 1347 void *session_data; 1348 void *pdh_data; 1349 int ret; 1350 1351 if (!sev_guest(kvm)) 1352 return -ENOTTY; 1353 1354 /* Get parameter from the userspace */ 1355 if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, 1356 sizeof(struct kvm_sev_receive_start))) 1357 return -EFAULT; 1358 1359 /* some sanity checks */ 1360 if (!params.pdh_uaddr || !params.pdh_len || 1361 !params.session_uaddr || !params.session_len) 1362 return -EINVAL; 1363 1364 pdh_data = psp_copy_user_blob(params.pdh_uaddr, params.pdh_len); 1365 if (IS_ERR(pdh_data)) 1366 return PTR_ERR(pdh_data); 1367 1368 session_data = psp_copy_user_blob(params.session_uaddr, 1369 params.session_len); 1370 if (IS_ERR(session_data)) { 1371 ret = PTR_ERR(session_data); 1372 goto e_free_pdh; 1373 } 1374 1375 memset(&start, 0, sizeof(start)); 1376 start.handle = params.handle; 1377 start.policy = params.policy; 1378 start.pdh_cert_address = __psp_pa(pdh_data); 1379 start.pdh_cert_len = params.pdh_len; 1380 start.session_address = __psp_pa(session_data); 1381 start.session_len = params.session_len; 1382 1383 /* create memory encryption context */ 1384 ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_RECEIVE_START, &start, 1385 error); 1386 if (ret) 1387 goto e_free_session; 1388 1389 /* Bind ASID to this guest */ 1390 ret = sev_bind_asid(kvm, start.handle, error); 1391 if (ret) 1392 goto e_free_session; 1393 1394 params.handle = start.handle; 1395 if (copy_to_user((void __user *)(uintptr_t)argp->data, 1396 ¶ms, sizeof(struct kvm_sev_receive_start))) { 1397 ret = -EFAULT; 1398 sev_unbind_asid(kvm, start.handle); 1399 goto e_free_session; 1400 } 1401 1402 sev->handle = start.handle; 1403 sev->fd = argp->sev_fd; 1404 1405 e_free_session: 1406 kfree(session_data); 1407 e_free_pdh: 1408 kfree(pdh_data); 1409 1410 return ret; 1411 } 1412 1413 static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) 1414 { 1415 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; 1416 struct kvm_sev_receive_update_data params; 1417 struct sev_data_receive_update_data data; 1418 void *hdr = NULL, *trans = NULL; 1419 struct page **guest_page; 1420 unsigned long n; 1421 int ret, offset; 1422 1423 if (!sev_guest(kvm)) 1424 return -EINVAL; 1425 1426 if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, 1427 sizeof(struct kvm_sev_receive_update_data))) 1428 return -EFAULT; 1429 1430 if (!params.hdr_uaddr || !params.hdr_len || 1431 !params.guest_uaddr || !params.guest_len || 1432 !params.trans_uaddr || !params.trans_len) 1433 return -EINVAL; 1434 1435 /* Check if we are crossing the page boundary */ 1436 offset = params.guest_uaddr & (PAGE_SIZE - 1); 1437 if ((params.guest_len + offset > PAGE_SIZE)) 1438 return -EINVAL; 1439 1440 hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len); 1441 if (IS_ERR(hdr)) 1442 return PTR_ERR(hdr); 1443 1444 trans = psp_copy_user_blob(params.trans_uaddr, params.trans_len); 1445 if (IS_ERR(trans)) { 1446 ret = PTR_ERR(trans); 1447 goto e_free_hdr; 1448 } 1449 1450 memset(&data, 0, sizeof(data)); 1451 data.hdr_address = __psp_pa(hdr); 1452 data.hdr_len = params.hdr_len; 1453 data.trans_address = __psp_pa(trans); 1454 data.trans_len = params.trans_len; 1455 1456 /* Pin guest memory */ 1457 ret = -EFAULT; 1458 guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, 1459 PAGE_SIZE, &n, 0); 1460 if (!guest_page) 1461 goto e_free_trans; 1462 1463 /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */ 1464 data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset; 1465 data.guest_address |= sev_me_mask; 1466 data.guest_len = params.guest_len; 1467 data.handle = sev->handle; 1468 1469 ret = sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_DATA, &data, 1470 &argp->error); 1471 1472 sev_unpin_memory(kvm, guest_page, n); 1473 1474 e_free_trans: 1475 kfree(trans); 1476 e_free_hdr: 1477 kfree(hdr); 1478 1479 return ret; 1480 } 1481 1482 static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp) 1483 { 1484 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; 1485 struct sev_data_receive_finish data; 1486 1487 if (!sev_guest(kvm)) 1488 return -ENOTTY; 1489 1490 data.handle = sev->handle; 1491 return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error); 1492 } 1493 1494 int svm_mem_enc_op(struct kvm *kvm, void __user *argp) 1495 { 1496 struct kvm_sev_cmd sev_cmd; 1497 int r; 1498 1499 if (!sev_enabled) 1500 return -ENOTTY; 1501 1502 if (!argp) 1503 return 0; 1504 1505 if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd))) 1506 return -EFAULT; 1507 1508 mutex_lock(&kvm->lock); 1509 1510 /* enc_context_owner handles all memory enc operations */ 1511 if (is_mirroring_enc_context(kvm)) { 1512 r = -EINVAL; 1513 goto out; 1514 } 1515 1516 switch (sev_cmd.id) { 1517 case KVM_SEV_ES_INIT: 1518 if (!sev_es_enabled) { 1519 r = -ENOTTY; 1520 goto out; 1521 } 1522 fallthrough; 1523 case KVM_SEV_INIT: 1524 r = sev_guest_init(kvm, &sev_cmd); 1525 break; 1526 case KVM_SEV_LAUNCH_START: 1527 r = sev_launch_start(kvm, &sev_cmd); 1528 break; 1529 case KVM_SEV_LAUNCH_UPDATE_DATA: 1530 r = sev_launch_update_data(kvm, &sev_cmd); 1531 break; 1532 case KVM_SEV_LAUNCH_UPDATE_VMSA: 1533 r = sev_launch_update_vmsa(kvm, &sev_cmd); 1534 break; 1535 case KVM_SEV_LAUNCH_MEASURE: 1536 r = sev_launch_measure(kvm, &sev_cmd); 1537 break; 1538 case KVM_SEV_LAUNCH_FINISH: 1539 r = sev_launch_finish(kvm, &sev_cmd); 1540 break; 1541 case KVM_SEV_GUEST_STATUS: 1542 r = sev_guest_status(kvm, &sev_cmd); 1543 break; 1544 case KVM_SEV_DBG_DECRYPT: 1545 r = sev_dbg_crypt(kvm, &sev_cmd, true); 1546 break; 1547 case KVM_SEV_DBG_ENCRYPT: 1548 r = sev_dbg_crypt(kvm, &sev_cmd, false); 1549 break; 1550 case KVM_SEV_LAUNCH_SECRET: 1551 r = sev_launch_secret(kvm, &sev_cmd); 1552 break; 1553 case KVM_SEV_GET_ATTESTATION_REPORT: 1554 r = sev_get_attestation_report(kvm, &sev_cmd); 1555 break; 1556 case KVM_SEV_SEND_START: 1557 r = sev_send_start(kvm, &sev_cmd); 1558 break; 1559 case KVM_SEV_SEND_UPDATE_DATA: 1560 r = sev_send_update_data(kvm, &sev_cmd); 1561 break; 1562 case KVM_SEV_SEND_FINISH: 1563 r = sev_send_finish(kvm, &sev_cmd); 1564 break; 1565 case KVM_SEV_SEND_CANCEL: 1566 r = sev_send_cancel(kvm, &sev_cmd); 1567 break; 1568 case KVM_SEV_RECEIVE_START: 1569 r = sev_receive_start(kvm, &sev_cmd); 1570 break; 1571 case KVM_SEV_RECEIVE_UPDATE_DATA: 1572 r = sev_receive_update_data(kvm, &sev_cmd); 1573 break; 1574 case KVM_SEV_RECEIVE_FINISH: 1575 r = sev_receive_finish(kvm, &sev_cmd); 1576 break; 1577 default: 1578 r = -EINVAL; 1579 goto out; 1580 } 1581 1582 if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd))) 1583 r = -EFAULT; 1584 1585 out: 1586 mutex_unlock(&kvm->lock); 1587 return r; 1588 } 1589 1590 int svm_register_enc_region(struct kvm *kvm, 1591 struct kvm_enc_region *range) 1592 { 1593 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; 1594 struct enc_region *region; 1595 int ret = 0; 1596 1597 if (!sev_guest(kvm)) 1598 return -ENOTTY; 1599 1600 /* If kvm is mirroring encryption context it isn't responsible for it */ 1601 if (is_mirroring_enc_context(kvm)) 1602 return -EINVAL; 1603 1604 if (range->addr > ULONG_MAX || range->size > ULONG_MAX) 1605 return -EINVAL; 1606 1607 region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT); 1608 if (!region) 1609 return -ENOMEM; 1610 1611 mutex_lock(&kvm->lock); 1612 region->pages = sev_pin_memory(kvm, range->addr, range->size, ®ion->npages, 1); 1613 if (IS_ERR(region->pages)) { 1614 ret = PTR_ERR(region->pages); 1615 mutex_unlock(&kvm->lock); 1616 goto e_free; 1617 } 1618 1619 region->uaddr = range->addr; 1620 region->size = range->size; 1621 1622 list_add_tail(®ion->list, &sev->regions_list); 1623 mutex_unlock(&kvm->lock); 1624 1625 /* 1626 * The guest may change the memory encryption attribute from C=0 -> C=1 1627 * or vice versa for this memory range. Lets make sure caches are 1628 * flushed to ensure that guest data gets written into memory with 1629 * correct C-bit. 1630 */ 1631 sev_clflush_pages(region->pages, region->npages); 1632 1633 return ret; 1634 1635 e_free: 1636 kfree(region); 1637 return ret; 1638 } 1639 1640 static struct enc_region * 1641 find_enc_region(struct kvm *kvm, struct kvm_enc_region *range) 1642 { 1643 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; 1644 struct list_head *head = &sev->regions_list; 1645 struct enc_region *i; 1646 1647 list_for_each_entry(i, head, list) { 1648 if (i->uaddr == range->addr && 1649 i->size == range->size) 1650 return i; 1651 } 1652 1653 return NULL; 1654 } 1655 1656 static void __unregister_enc_region_locked(struct kvm *kvm, 1657 struct enc_region *region) 1658 { 1659 sev_unpin_memory(kvm, region->pages, region->npages); 1660 list_del(®ion->list); 1661 kfree(region); 1662 } 1663 1664 int svm_unregister_enc_region(struct kvm *kvm, 1665 struct kvm_enc_region *range) 1666 { 1667 struct enc_region *region; 1668 int ret; 1669 1670 /* If kvm is mirroring encryption context it isn't responsible for it */ 1671 if (is_mirroring_enc_context(kvm)) 1672 return -EINVAL; 1673 1674 mutex_lock(&kvm->lock); 1675 1676 if (!sev_guest(kvm)) { 1677 ret = -ENOTTY; 1678 goto failed; 1679 } 1680 1681 region = find_enc_region(kvm, range); 1682 if (!region) { 1683 ret = -EINVAL; 1684 goto failed; 1685 } 1686 1687 /* 1688 * Ensure that all guest tagged cache entries are flushed before 1689 * releasing the pages back to the system for use. CLFLUSH will 1690 * not do this, so issue a WBINVD. 1691 */ 1692 wbinvd_on_all_cpus(); 1693 1694 __unregister_enc_region_locked(kvm, region); 1695 1696 mutex_unlock(&kvm->lock); 1697 return 0; 1698 1699 failed: 1700 mutex_unlock(&kvm->lock); 1701 return ret; 1702 } 1703 1704 int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd) 1705 { 1706 struct file *source_kvm_file; 1707 struct kvm *source_kvm; 1708 struct kvm_sev_info *mirror_sev; 1709 unsigned int asid; 1710 int ret; 1711 1712 source_kvm_file = fget(source_fd); 1713 if (!file_is_kvm(source_kvm_file)) { 1714 ret = -EBADF; 1715 goto e_source_put; 1716 } 1717 1718 source_kvm = source_kvm_file->private_data; 1719 mutex_lock(&source_kvm->lock); 1720 1721 if (!sev_guest(source_kvm)) { 1722 ret = -EINVAL; 1723 goto e_source_unlock; 1724 } 1725 1726 /* Mirrors of mirrors should work, but let's not get silly */ 1727 if (is_mirroring_enc_context(source_kvm) || source_kvm == kvm) { 1728 ret = -EINVAL; 1729 goto e_source_unlock; 1730 } 1731 1732 asid = to_kvm_svm(source_kvm)->sev_info.asid; 1733 1734 /* 1735 * The mirror kvm holds an enc_context_owner ref so its asid can't 1736 * disappear until we're done with it 1737 */ 1738 kvm_get_kvm(source_kvm); 1739 1740 fput(source_kvm_file); 1741 mutex_unlock(&source_kvm->lock); 1742 mutex_lock(&kvm->lock); 1743 1744 if (sev_guest(kvm)) { 1745 ret = -EINVAL; 1746 goto e_mirror_unlock; 1747 } 1748 1749 /* Set enc_context_owner and copy its encryption context over */ 1750 mirror_sev = &to_kvm_svm(kvm)->sev_info; 1751 mirror_sev->enc_context_owner = source_kvm; 1752 mirror_sev->asid = asid; 1753 mirror_sev->active = true; 1754 1755 mutex_unlock(&kvm->lock); 1756 return 0; 1757 1758 e_mirror_unlock: 1759 mutex_unlock(&kvm->lock); 1760 kvm_put_kvm(source_kvm); 1761 return ret; 1762 e_source_unlock: 1763 mutex_unlock(&source_kvm->lock); 1764 e_source_put: 1765 if (source_kvm_file) 1766 fput(source_kvm_file); 1767 return ret; 1768 } 1769 1770 void sev_vm_destroy(struct kvm *kvm) 1771 { 1772 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; 1773 struct list_head *head = &sev->regions_list; 1774 struct list_head *pos, *q; 1775 1776 if (!sev_guest(kvm)) 1777 return; 1778 1779 /* If this is a mirror_kvm release the enc_context_owner and skip sev cleanup */ 1780 if (is_mirroring_enc_context(kvm)) { 1781 kvm_put_kvm(sev->enc_context_owner); 1782 return; 1783 } 1784 1785 mutex_lock(&kvm->lock); 1786 1787 /* 1788 * Ensure that all guest tagged cache entries are flushed before 1789 * releasing the pages back to the system for use. CLFLUSH will 1790 * not do this, so issue a WBINVD. 1791 */ 1792 wbinvd_on_all_cpus(); 1793 1794 /* 1795 * if userspace was terminated before unregistering the memory regions 1796 * then lets unpin all the registered memory. 1797 */ 1798 if (!list_empty(head)) { 1799 list_for_each_safe(pos, q, head) { 1800 __unregister_enc_region_locked(kvm, 1801 list_entry(pos, struct enc_region, list)); 1802 cond_resched(); 1803 } 1804 } 1805 1806 mutex_unlock(&kvm->lock); 1807 1808 sev_unbind_asid(kvm, sev->handle); 1809 sev_asid_free(sev); 1810 } 1811 1812 void __init sev_set_cpu_caps(void) 1813 { 1814 if (!sev_enabled) 1815 kvm_cpu_cap_clear(X86_FEATURE_SEV); 1816 if (!sev_es_enabled) 1817 kvm_cpu_cap_clear(X86_FEATURE_SEV_ES); 1818 } 1819 1820 void __init sev_hardware_setup(void) 1821 { 1822 #ifdef CONFIG_KVM_AMD_SEV 1823 unsigned int eax, ebx, ecx, edx, sev_asid_count, sev_es_asid_count; 1824 bool sev_es_supported = false; 1825 bool sev_supported = false; 1826 1827 if (!sev_enabled || !npt_enabled) 1828 goto out; 1829 1830 /* Does the CPU support SEV? */ 1831 if (!boot_cpu_has(X86_FEATURE_SEV)) 1832 goto out; 1833 1834 /* Retrieve SEV CPUID information */ 1835 cpuid(0x8000001f, &eax, &ebx, &ecx, &edx); 1836 1837 /* Set encryption bit location for SEV-ES guests */ 1838 sev_enc_bit = ebx & 0x3f; 1839 1840 /* Maximum number of encrypted guests supported simultaneously */ 1841 max_sev_asid = ecx; 1842 if (!max_sev_asid) 1843 goto out; 1844 1845 /* Minimum ASID value that should be used for SEV guest */ 1846 min_sev_asid = edx; 1847 sev_me_mask = 1UL << (ebx & 0x3f); 1848 1849 /* Initialize SEV ASID bitmaps */ 1850 sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL); 1851 if (!sev_asid_bitmap) 1852 goto out; 1853 1854 sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL); 1855 if (!sev_reclaim_asid_bitmap) { 1856 bitmap_free(sev_asid_bitmap); 1857 sev_asid_bitmap = NULL; 1858 goto out; 1859 } 1860 1861 sev_asid_count = max_sev_asid - min_sev_asid + 1; 1862 if (misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count)) 1863 goto out; 1864 1865 pr_info("SEV supported: %u ASIDs\n", sev_asid_count); 1866 sev_supported = true; 1867 1868 /* SEV-ES support requested? */ 1869 if (!sev_es_enabled) 1870 goto out; 1871 1872 /* Does the CPU support SEV-ES? */ 1873 if (!boot_cpu_has(X86_FEATURE_SEV_ES)) 1874 goto out; 1875 1876 /* Has the system been allocated ASIDs for SEV-ES? */ 1877 if (min_sev_asid == 1) 1878 goto out; 1879 1880 sev_es_asid_count = min_sev_asid - 1; 1881 if (misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count)) 1882 goto out; 1883 1884 pr_info("SEV-ES supported: %u ASIDs\n", sev_es_asid_count); 1885 sev_es_supported = true; 1886 1887 out: 1888 sev_enabled = sev_supported; 1889 sev_es_enabled = sev_es_supported; 1890 #endif 1891 } 1892 1893 void sev_hardware_teardown(void) 1894 { 1895 if (!sev_enabled) 1896 return; 1897 1898 /* No need to take sev_bitmap_lock, all VMs have been destroyed. */ 1899 sev_flush_asids(0, max_sev_asid); 1900 1901 bitmap_free(sev_asid_bitmap); 1902 bitmap_free(sev_reclaim_asid_bitmap); 1903 1904 misc_cg_set_capacity(MISC_CG_RES_SEV, 0); 1905 misc_cg_set_capacity(MISC_CG_RES_SEV_ES, 0); 1906 } 1907 1908 int sev_cpu_init(struct svm_cpu_data *sd) 1909 { 1910 if (!sev_enabled) 1911 return 0; 1912 1913 sd->sev_vmcbs = kcalloc(max_sev_asid + 1, sizeof(void *), GFP_KERNEL); 1914 if (!sd->sev_vmcbs) 1915 return -ENOMEM; 1916 1917 return 0; 1918 } 1919 1920 /* 1921 * Pages used by hardware to hold guest encrypted state must be flushed before 1922 * returning them to the system. 1923 */ 1924 static void sev_flush_guest_memory(struct vcpu_svm *svm, void *va, 1925 unsigned long len) 1926 { 1927 /* 1928 * If hardware enforced cache coherency for encrypted mappings of the 1929 * same physical page is supported, nothing to do. 1930 */ 1931 if (boot_cpu_has(X86_FEATURE_SME_COHERENT)) 1932 return; 1933 1934 /* 1935 * If the VM Page Flush MSR is supported, use it to flush the page 1936 * (using the page virtual address and the guest ASID). 1937 */ 1938 if (boot_cpu_has(X86_FEATURE_VM_PAGE_FLUSH)) { 1939 struct kvm_sev_info *sev; 1940 unsigned long va_start; 1941 u64 start, stop; 1942 1943 /* Align start and stop to page boundaries. */ 1944 va_start = (unsigned long)va; 1945 start = (u64)va_start & PAGE_MASK; 1946 stop = PAGE_ALIGN((u64)va_start + len); 1947 1948 if (start < stop) { 1949 sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info; 1950 1951 while (start < stop) { 1952 wrmsrl(MSR_AMD64_VM_PAGE_FLUSH, 1953 start | sev->asid); 1954 1955 start += PAGE_SIZE; 1956 } 1957 1958 return; 1959 } 1960 1961 WARN(1, "Address overflow, using WBINVD\n"); 1962 } 1963 1964 /* 1965 * Hardware should always have one of the above features, 1966 * but if not, use WBINVD and issue a warning. 1967 */ 1968 WARN_ONCE(1, "Using WBINVD to flush guest memory\n"); 1969 wbinvd_on_all_cpus(); 1970 } 1971 1972 void sev_free_vcpu(struct kvm_vcpu *vcpu) 1973 { 1974 struct vcpu_svm *svm; 1975 1976 if (!sev_es_guest(vcpu->kvm)) 1977 return; 1978 1979 svm = to_svm(vcpu); 1980 1981 if (vcpu->arch.guest_state_protected) 1982 sev_flush_guest_memory(svm, svm->vmsa, PAGE_SIZE); 1983 __free_page(virt_to_page(svm->vmsa)); 1984 1985 if (svm->ghcb_sa_free) 1986 kfree(svm->ghcb_sa); 1987 } 1988 1989 static void dump_ghcb(struct vcpu_svm *svm) 1990 { 1991 struct ghcb *ghcb = svm->ghcb; 1992 unsigned int nbits; 1993 1994 /* Re-use the dump_invalid_vmcb module parameter */ 1995 if (!dump_invalid_vmcb) { 1996 pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n"); 1997 return; 1998 } 1999 2000 nbits = sizeof(ghcb->save.valid_bitmap) * 8; 2001 2002 pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa); 2003 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code", 2004 ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb)); 2005 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1", 2006 ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb)); 2007 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2", 2008 ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb)); 2009 pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch", 2010 ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb)); 2011 pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap); 2012 } 2013 2014 static void sev_es_sync_to_ghcb(struct vcpu_svm *svm) 2015 { 2016 struct kvm_vcpu *vcpu = &svm->vcpu; 2017 struct ghcb *ghcb = svm->ghcb; 2018 2019 /* 2020 * The GHCB protocol so far allows for the following data 2021 * to be returned: 2022 * GPRs RAX, RBX, RCX, RDX 2023 * 2024 * Copy their values, even if they may not have been written during the 2025 * VM-Exit. It's the guest's responsibility to not consume random data. 2026 */ 2027 ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]); 2028 ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]); 2029 ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]); 2030 ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]); 2031 } 2032 2033 static void sev_es_sync_from_ghcb(struct vcpu_svm *svm) 2034 { 2035 struct vmcb_control_area *control = &svm->vmcb->control; 2036 struct kvm_vcpu *vcpu = &svm->vcpu; 2037 struct ghcb *ghcb = svm->ghcb; 2038 u64 exit_code; 2039 2040 /* 2041 * The GHCB protocol so far allows for the following data 2042 * to be supplied: 2043 * GPRs RAX, RBX, RCX, RDX 2044 * XCR0 2045 * CPL 2046 * 2047 * VMMCALL allows the guest to provide extra registers. KVM also 2048 * expects RSI for hypercalls, so include that, too. 2049 * 2050 * Copy their values to the appropriate location if supplied. 2051 */ 2052 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); 2053 2054 vcpu->arch.regs[VCPU_REGS_RAX] = ghcb_get_rax_if_valid(ghcb); 2055 vcpu->arch.regs[VCPU_REGS_RBX] = ghcb_get_rbx_if_valid(ghcb); 2056 vcpu->arch.regs[VCPU_REGS_RCX] = ghcb_get_rcx_if_valid(ghcb); 2057 vcpu->arch.regs[VCPU_REGS_RDX] = ghcb_get_rdx_if_valid(ghcb); 2058 vcpu->arch.regs[VCPU_REGS_RSI] = ghcb_get_rsi_if_valid(ghcb); 2059 2060 svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb); 2061 2062 if (ghcb_xcr0_is_valid(ghcb)) { 2063 vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb); 2064 kvm_update_cpuid_runtime(vcpu); 2065 } 2066 2067 /* Copy the GHCB exit information into the VMCB fields */ 2068 exit_code = ghcb_get_sw_exit_code(ghcb); 2069 control->exit_code = lower_32_bits(exit_code); 2070 control->exit_code_hi = upper_32_bits(exit_code); 2071 control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb); 2072 control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb); 2073 2074 /* Clear the valid entries fields */ 2075 memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap)); 2076 } 2077 2078 static int sev_es_validate_vmgexit(struct vcpu_svm *svm) 2079 { 2080 struct kvm_vcpu *vcpu; 2081 struct ghcb *ghcb; 2082 u64 exit_code = 0; 2083 2084 ghcb = svm->ghcb; 2085 2086 /* Only GHCB Usage code 0 is supported */ 2087 if (ghcb->ghcb_usage) 2088 goto vmgexit_err; 2089 2090 /* 2091 * Retrieve the exit code now even though is may not be marked valid 2092 * as it could help with debugging. 2093 */ 2094 exit_code = ghcb_get_sw_exit_code(ghcb); 2095 2096 if (!ghcb_sw_exit_code_is_valid(ghcb) || 2097 !ghcb_sw_exit_info_1_is_valid(ghcb) || 2098 !ghcb_sw_exit_info_2_is_valid(ghcb)) 2099 goto vmgexit_err; 2100 2101 switch (ghcb_get_sw_exit_code(ghcb)) { 2102 case SVM_EXIT_READ_DR7: 2103 break; 2104 case SVM_EXIT_WRITE_DR7: 2105 if (!ghcb_rax_is_valid(ghcb)) 2106 goto vmgexit_err; 2107 break; 2108 case SVM_EXIT_RDTSC: 2109 break; 2110 case SVM_EXIT_RDPMC: 2111 if (!ghcb_rcx_is_valid(ghcb)) 2112 goto vmgexit_err; 2113 break; 2114 case SVM_EXIT_CPUID: 2115 if (!ghcb_rax_is_valid(ghcb) || 2116 !ghcb_rcx_is_valid(ghcb)) 2117 goto vmgexit_err; 2118 if (ghcb_get_rax(ghcb) == 0xd) 2119 if (!ghcb_xcr0_is_valid(ghcb)) 2120 goto vmgexit_err; 2121 break; 2122 case SVM_EXIT_INVD: 2123 break; 2124 case SVM_EXIT_IOIO: 2125 if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) { 2126 if (!ghcb_sw_scratch_is_valid(ghcb)) 2127 goto vmgexit_err; 2128 } else { 2129 if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK)) 2130 if (!ghcb_rax_is_valid(ghcb)) 2131 goto vmgexit_err; 2132 } 2133 break; 2134 case SVM_EXIT_MSR: 2135 if (!ghcb_rcx_is_valid(ghcb)) 2136 goto vmgexit_err; 2137 if (ghcb_get_sw_exit_info_1(ghcb)) { 2138 if (!ghcb_rax_is_valid(ghcb) || 2139 !ghcb_rdx_is_valid(ghcb)) 2140 goto vmgexit_err; 2141 } 2142 break; 2143 case SVM_EXIT_VMMCALL: 2144 if (!ghcb_rax_is_valid(ghcb) || 2145 !ghcb_cpl_is_valid(ghcb)) 2146 goto vmgexit_err; 2147 break; 2148 case SVM_EXIT_RDTSCP: 2149 break; 2150 case SVM_EXIT_WBINVD: 2151 break; 2152 case SVM_EXIT_MONITOR: 2153 if (!ghcb_rax_is_valid(ghcb) || 2154 !ghcb_rcx_is_valid(ghcb) || 2155 !ghcb_rdx_is_valid(ghcb)) 2156 goto vmgexit_err; 2157 break; 2158 case SVM_EXIT_MWAIT: 2159 if (!ghcb_rax_is_valid(ghcb) || 2160 !ghcb_rcx_is_valid(ghcb)) 2161 goto vmgexit_err; 2162 break; 2163 case SVM_VMGEXIT_MMIO_READ: 2164 case SVM_VMGEXIT_MMIO_WRITE: 2165 if (!ghcb_sw_scratch_is_valid(ghcb)) 2166 goto vmgexit_err; 2167 break; 2168 case SVM_VMGEXIT_NMI_COMPLETE: 2169 case SVM_VMGEXIT_AP_HLT_LOOP: 2170 case SVM_VMGEXIT_AP_JUMP_TABLE: 2171 case SVM_VMGEXIT_UNSUPPORTED_EVENT: 2172 break; 2173 default: 2174 goto vmgexit_err; 2175 } 2176 2177 return 0; 2178 2179 vmgexit_err: 2180 vcpu = &svm->vcpu; 2181 2182 if (ghcb->ghcb_usage) { 2183 vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n", 2184 ghcb->ghcb_usage); 2185 } else { 2186 vcpu_unimpl(vcpu, "vmgexit: exit reason %#llx is not valid\n", 2187 exit_code); 2188 dump_ghcb(svm); 2189 } 2190 2191 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 2192 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON; 2193 vcpu->run->internal.ndata = 2; 2194 vcpu->run->internal.data[0] = exit_code; 2195 vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu; 2196 2197 return -EINVAL; 2198 } 2199 2200 void sev_es_unmap_ghcb(struct vcpu_svm *svm) 2201 { 2202 if (!svm->ghcb) 2203 return; 2204 2205 if (svm->ghcb_sa_free) { 2206 /* 2207 * The scratch area lives outside the GHCB, so there is a 2208 * buffer that, depending on the operation performed, may 2209 * need to be synced, then freed. 2210 */ 2211 if (svm->ghcb_sa_sync) { 2212 kvm_write_guest(svm->vcpu.kvm, 2213 ghcb_get_sw_scratch(svm->ghcb), 2214 svm->ghcb_sa, svm->ghcb_sa_len); 2215 svm->ghcb_sa_sync = false; 2216 } 2217 2218 kfree(svm->ghcb_sa); 2219 svm->ghcb_sa = NULL; 2220 svm->ghcb_sa_free = false; 2221 } 2222 2223 trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->ghcb); 2224 2225 sev_es_sync_to_ghcb(svm); 2226 2227 kvm_vcpu_unmap(&svm->vcpu, &svm->ghcb_map, true); 2228 svm->ghcb = NULL; 2229 } 2230 2231 void pre_sev_run(struct vcpu_svm *svm, int cpu) 2232 { 2233 struct svm_cpu_data *sd = per_cpu(svm_data, cpu); 2234 int asid = sev_get_asid(svm->vcpu.kvm); 2235 2236 /* Assign the asid allocated with this SEV guest */ 2237 svm->asid = asid; 2238 2239 /* 2240 * Flush guest TLB: 2241 * 2242 * 1) when different VMCB for the same ASID is to be run on the same host CPU. 2243 * 2) or this VMCB was executed on different host CPU in previous VMRUNs. 2244 */ 2245 if (sd->sev_vmcbs[asid] == svm->vmcb && 2246 svm->vcpu.arch.last_vmentry_cpu == cpu) 2247 return; 2248 2249 sd->sev_vmcbs[asid] = svm->vmcb; 2250 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; 2251 vmcb_mark_dirty(svm->vmcb, VMCB_ASID); 2252 } 2253 2254 #define GHCB_SCRATCH_AREA_LIMIT (16ULL * PAGE_SIZE) 2255 static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len) 2256 { 2257 struct vmcb_control_area *control = &svm->vmcb->control; 2258 struct ghcb *ghcb = svm->ghcb; 2259 u64 ghcb_scratch_beg, ghcb_scratch_end; 2260 u64 scratch_gpa_beg, scratch_gpa_end; 2261 void *scratch_va; 2262 2263 scratch_gpa_beg = ghcb_get_sw_scratch(ghcb); 2264 if (!scratch_gpa_beg) { 2265 pr_err("vmgexit: scratch gpa not provided\n"); 2266 return false; 2267 } 2268 2269 scratch_gpa_end = scratch_gpa_beg + len; 2270 if (scratch_gpa_end < scratch_gpa_beg) { 2271 pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n", 2272 len, scratch_gpa_beg); 2273 return false; 2274 } 2275 2276 if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) { 2277 /* Scratch area begins within GHCB */ 2278 ghcb_scratch_beg = control->ghcb_gpa + 2279 offsetof(struct ghcb, shared_buffer); 2280 ghcb_scratch_end = control->ghcb_gpa + 2281 offsetof(struct ghcb, reserved_1); 2282 2283 /* 2284 * If the scratch area begins within the GHCB, it must be 2285 * completely contained in the GHCB shared buffer area. 2286 */ 2287 if (scratch_gpa_beg < ghcb_scratch_beg || 2288 scratch_gpa_end > ghcb_scratch_end) { 2289 pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n", 2290 scratch_gpa_beg, scratch_gpa_end); 2291 return false; 2292 } 2293 2294 scratch_va = (void *)svm->ghcb; 2295 scratch_va += (scratch_gpa_beg - control->ghcb_gpa); 2296 } else { 2297 /* 2298 * The guest memory must be read into a kernel buffer, so 2299 * limit the size 2300 */ 2301 if (len > GHCB_SCRATCH_AREA_LIMIT) { 2302 pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n", 2303 len, GHCB_SCRATCH_AREA_LIMIT); 2304 return false; 2305 } 2306 scratch_va = kzalloc(len, GFP_KERNEL_ACCOUNT); 2307 if (!scratch_va) 2308 return false; 2309 2310 if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) { 2311 /* Unable to copy scratch area from guest */ 2312 pr_err("vmgexit: kvm_read_guest for scratch area failed\n"); 2313 2314 kfree(scratch_va); 2315 return false; 2316 } 2317 2318 /* 2319 * The scratch area is outside the GHCB. The operation will 2320 * dictate whether the buffer needs to be synced before running 2321 * the vCPU next time (i.e. a read was requested so the data 2322 * must be written back to the guest memory). 2323 */ 2324 svm->ghcb_sa_sync = sync; 2325 svm->ghcb_sa_free = true; 2326 } 2327 2328 svm->ghcb_sa = scratch_va; 2329 svm->ghcb_sa_len = len; 2330 2331 return true; 2332 } 2333 2334 static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask, 2335 unsigned int pos) 2336 { 2337 svm->vmcb->control.ghcb_gpa &= ~(mask << pos); 2338 svm->vmcb->control.ghcb_gpa |= (value & mask) << pos; 2339 } 2340 2341 static u64 get_ghcb_msr_bits(struct vcpu_svm *svm, u64 mask, unsigned int pos) 2342 { 2343 return (svm->vmcb->control.ghcb_gpa >> pos) & mask; 2344 } 2345 2346 static void set_ghcb_msr(struct vcpu_svm *svm, u64 value) 2347 { 2348 svm->vmcb->control.ghcb_gpa = value; 2349 } 2350 2351 static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm) 2352 { 2353 struct vmcb_control_area *control = &svm->vmcb->control; 2354 struct kvm_vcpu *vcpu = &svm->vcpu; 2355 u64 ghcb_info; 2356 int ret = 1; 2357 2358 ghcb_info = control->ghcb_gpa & GHCB_MSR_INFO_MASK; 2359 2360 trace_kvm_vmgexit_msr_protocol_enter(svm->vcpu.vcpu_id, 2361 control->ghcb_gpa); 2362 2363 switch (ghcb_info) { 2364 case GHCB_MSR_SEV_INFO_REQ: 2365 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX, 2366 GHCB_VERSION_MIN, 2367 sev_enc_bit)); 2368 break; 2369 case GHCB_MSR_CPUID_REQ: { 2370 u64 cpuid_fn, cpuid_reg, cpuid_value; 2371 2372 cpuid_fn = get_ghcb_msr_bits(svm, 2373 GHCB_MSR_CPUID_FUNC_MASK, 2374 GHCB_MSR_CPUID_FUNC_POS); 2375 2376 /* Initialize the registers needed by the CPUID intercept */ 2377 vcpu->arch.regs[VCPU_REGS_RAX] = cpuid_fn; 2378 vcpu->arch.regs[VCPU_REGS_RCX] = 0; 2379 2380 ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID); 2381 if (!ret) { 2382 ret = -EINVAL; 2383 break; 2384 } 2385 2386 cpuid_reg = get_ghcb_msr_bits(svm, 2387 GHCB_MSR_CPUID_REG_MASK, 2388 GHCB_MSR_CPUID_REG_POS); 2389 if (cpuid_reg == 0) 2390 cpuid_value = vcpu->arch.regs[VCPU_REGS_RAX]; 2391 else if (cpuid_reg == 1) 2392 cpuid_value = vcpu->arch.regs[VCPU_REGS_RBX]; 2393 else if (cpuid_reg == 2) 2394 cpuid_value = vcpu->arch.regs[VCPU_REGS_RCX]; 2395 else 2396 cpuid_value = vcpu->arch.regs[VCPU_REGS_RDX]; 2397 2398 set_ghcb_msr_bits(svm, cpuid_value, 2399 GHCB_MSR_CPUID_VALUE_MASK, 2400 GHCB_MSR_CPUID_VALUE_POS); 2401 2402 set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP, 2403 GHCB_MSR_INFO_MASK, 2404 GHCB_MSR_INFO_POS); 2405 break; 2406 } 2407 case GHCB_MSR_TERM_REQ: { 2408 u64 reason_set, reason_code; 2409 2410 reason_set = get_ghcb_msr_bits(svm, 2411 GHCB_MSR_TERM_REASON_SET_MASK, 2412 GHCB_MSR_TERM_REASON_SET_POS); 2413 reason_code = get_ghcb_msr_bits(svm, 2414 GHCB_MSR_TERM_REASON_MASK, 2415 GHCB_MSR_TERM_REASON_POS); 2416 pr_info("SEV-ES guest requested termination: %#llx:%#llx\n", 2417 reason_set, reason_code); 2418 fallthrough; 2419 } 2420 default: 2421 ret = -EINVAL; 2422 } 2423 2424 trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id, 2425 control->ghcb_gpa, ret); 2426 2427 return ret; 2428 } 2429 2430 int sev_handle_vmgexit(struct kvm_vcpu *vcpu) 2431 { 2432 struct vcpu_svm *svm = to_svm(vcpu); 2433 struct vmcb_control_area *control = &svm->vmcb->control; 2434 u64 ghcb_gpa, exit_code; 2435 struct ghcb *ghcb; 2436 int ret; 2437 2438 /* Validate the GHCB */ 2439 ghcb_gpa = control->ghcb_gpa; 2440 if (ghcb_gpa & GHCB_MSR_INFO_MASK) 2441 return sev_handle_vmgexit_msr_protocol(svm); 2442 2443 if (!ghcb_gpa) { 2444 vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n"); 2445 return -EINVAL; 2446 } 2447 2448 if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->ghcb_map)) { 2449 /* Unable to map GHCB from guest */ 2450 vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n", 2451 ghcb_gpa); 2452 return -EINVAL; 2453 } 2454 2455 svm->ghcb = svm->ghcb_map.hva; 2456 ghcb = svm->ghcb_map.hva; 2457 2458 trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb); 2459 2460 exit_code = ghcb_get_sw_exit_code(ghcb); 2461 2462 ret = sev_es_validate_vmgexit(svm); 2463 if (ret) 2464 return ret; 2465 2466 sev_es_sync_from_ghcb(svm); 2467 ghcb_set_sw_exit_info_1(ghcb, 0); 2468 ghcb_set_sw_exit_info_2(ghcb, 0); 2469 2470 ret = -EINVAL; 2471 switch (exit_code) { 2472 case SVM_VMGEXIT_MMIO_READ: 2473 if (!setup_vmgexit_scratch(svm, true, control->exit_info_2)) 2474 break; 2475 2476 ret = kvm_sev_es_mmio_read(vcpu, 2477 control->exit_info_1, 2478 control->exit_info_2, 2479 svm->ghcb_sa); 2480 break; 2481 case SVM_VMGEXIT_MMIO_WRITE: 2482 if (!setup_vmgexit_scratch(svm, false, control->exit_info_2)) 2483 break; 2484 2485 ret = kvm_sev_es_mmio_write(vcpu, 2486 control->exit_info_1, 2487 control->exit_info_2, 2488 svm->ghcb_sa); 2489 break; 2490 case SVM_VMGEXIT_NMI_COMPLETE: 2491 ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET); 2492 break; 2493 case SVM_VMGEXIT_AP_HLT_LOOP: 2494 ret = kvm_emulate_ap_reset_hold(vcpu); 2495 break; 2496 case SVM_VMGEXIT_AP_JUMP_TABLE: { 2497 struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info; 2498 2499 switch (control->exit_info_1) { 2500 case 0: 2501 /* Set AP jump table address */ 2502 sev->ap_jump_table = control->exit_info_2; 2503 break; 2504 case 1: 2505 /* Get AP jump table address */ 2506 ghcb_set_sw_exit_info_2(ghcb, sev->ap_jump_table); 2507 break; 2508 default: 2509 pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n", 2510 control->exit_info_1); 2511 ghcb_set_sw_exit_info_1(ghcb, 1); 2512 ghcb_set_sw_exit_info_2(ghcb, 2513 X86_TRAP_UD | 2514 SVM_EVTINJ_TYPE_EXEPT | 2515 SVM_EVTINJ_VALID); 2516 } 2517 2518 ret = 1; 2519 break; 2520 } 2521 case SVM_VMGEXIT_UNSUPPORTED_EVENT: 2522 vcpu_unimpl(vcpu, 2523 "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n", 2524 control->exit_info_1, control->exit_info_2); 2525 break; 2526 default: 2527 ret = svm_invoke_exit_handler(vcpu, exit_code); 2528 } 2529 2530 return ret; 2531 } 2532 2533 int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in) 2534 { 2535 if (!setup_vmgexit_scratch(svm, in, svm->vmcb->control.exit_info_2)) 2536 return -EINVAL; 2537 2538 return kvm_sev_es_string_io(&svm->vcpu, size, port, 2539 svm->ghcb_sa, svm->ghcb_sa_len, in); 2540 } 2541 2542 void sev_es_init_vmcb(struct vcpu_svm *svm) 2543 { 2544 struct kvm_vcpu *vcpu = &svm->vcpu; 2545 2546 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE; 2547 svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK; 2548 2549 /* 2550 * An SEV-ES guest requires a VMSA area that is a separate from the 2551 * VMCB page. Do not include the encryption mask on the VMSA physical 2552 * address since hardware will access it using the guest key. 2553 */ 2554 svm->vmcb->control.vmsa_pa = __pa(svm->vmsa); 2555 2556 /* Can't intercept CR register access, HV can't modify CR registers */ 2557 svm_clr_intercept(svm, INTERCEPT_CR0_READ); 2558 svm_clr_intercept(svm, INTERCEPT_CR4_READ); 2559 svm_clr_intercept(svm, INTERCEPT_CR8_READ); 2560 svm_clr_intercept(svm, INTERCEPT_CR0_WRITE); 2561 svm_clr_intercept(svm, INTERCEPT_CR4_WRITE); 2562 svm_clr_intercept(svm, INTERCEPT_CR8_WRITE); 2563 2564 svm_clr_intercept(svm, INTERCEPT_SELECTIVE_CR0); 2565 2566 /* Track EFER/CR register changes */ 2567 svm_set_intercept(svm, TRAP_EFER_WRITE); 2568 svm_set_intercept(svm, TRAP_CR0_WRITE); 2569 svm_set_intercept(svm, TRAP_CR4_WRITE); 2570 svm_set_intercept(svm, TRAP_CR8_WRITE); 2571 2572 /* No support for enable_vmware_backdoor */ 2573 clr_exception_intercept(svm, GP_VECTOR); 2574 2575 /* Can't intercept XSETBV, HV can't modify XCR0 directly */ 2576 svm_clr_intercept(svm, INTERCEPT_XSETBV); 2577 2578 /* Clear intercepts on selected MSRs */ 2579 set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1); 2580 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1); 2581 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1); 2582 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1); 2583 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1); 2584 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1); 2585 } 2586 2587 void sev_es_create_vcpu(struct vcpu_svm *svm) 2588 { 2589 /* 2590 * Set the GHCB MSR value as per the GHCB specification when creating 2591 * a vCPU for an SEV-ES guest. 2592 */ 2593 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX, 2594 GHCB_VERSION_MIN, 2595 sev_enc_bit)); 2596 } 2597 2598 void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu) 2599 { 2600 struct svm_cpu_data *sd = per_cpu(svm_data, cpu); 2601 struct vmcb_save_area *hostsa; 2602 2603 /* 2604 * As an SEV-ES guest, hardware will restore the host state on VMEXIT, 2605 * of which one step is to perform a VMLOAD. Since hardware does not 2606 * perform a VMSAVE on VMRUN, the host savearea must be updated. 2607 */ 2608 vmsave(__sme_page_pa(sd->save_area)); 2609 2610 /* XCR0 is restored on VMEXIT, save the current host value */ 2611 hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400); 2612 hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); 2613 2614 /* PKRU is restored on VMEXIT, save the current host value */ 2615 hostsa->pkru = read_pkru(); 2616 2617 /* MSR_IA32_XSS is restored on VMEXIT, save the currnet host value */ 2618 hostsa->xss = host_xss; 2619 } 2620 2621 void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) 2622 { 2623 struct vcpu_svm *svm = to_svm(vcpu); 2624 2625 /* First SIPI: Use the values as initially set by the VMM */ 2626 if (!svm->received_first_sipi) { 2627 svm->received_first_sipi = true; 2628 return; 2629 } 2630 2631 /* 2632 * Subsequent SIPI: Return from an AP Reset Hold VMGEXIT, where 2633 * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a 2634 * non-zero value. 2635 */ 2636 if (!svm->ghcb) 2637 return; 2638 2639 ghcb_set_sw_exit_info_2(svm->ghcb, 1); 2640 } 2641