/*
 * QEMU SEV support
 *
 * Copyright Advanced Micro Devices 2016-2018
 *
 * Author:
 *      Brijesh Singh <brijesh.singh@amd.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/psp-sev.h>

#include <sys/ioctl.h>

#include "qapi/error.h"
#include "qom/object_interfaces.h"
#include "qemu/base64.h"
#include "qemu/module.h"
#include "qemu/uuid.h"
#include "qemu/error-report.h"
#include "crypto/hash.h"
#include "sysemu/kvm.h"
#include "kvm/kvm_i386.h"
#include "sev.h"
#include "sysemu/sysemu.h"
#include "sysemu/runstate.h"
#include "trace.h"
#include "migration/blocker.h"
#include "qom/object.h"
#include "monitor/monitor.h"
#include "monitor/hmp-target.h"
#include "qapi/qapi-commands-misc-target.h"
#include "confidential-guest.h"
#include "hw/i386/pc.h"
#include "exec/address-spaces.h"
#include "qemu/queue.h"

OBJECT_DECLARE_TYPE(SevCommonState, SevCommonStateClass, SEV_COMMON)
OBJECT_DECLARE_TYPE(SevGuestState, SevCommonStateClass, SEV_GUEST)
OBJECT_DECLARE_TYPE(SevSnpGuestState, SevCommonStateClass, SEV_SNP_GUEST)

/* hard code sha256 digest size */
#define HASH_SIZE 32

typedef struct QEMU_PACKED SevHashTableEntry {
    QemuUUID guid;
    uint16_t len;
    uint8_t hash[HASH_SIZE];
} SevHashTableEntry;

typedef struct QEMU_PACKED SevHashTable {
    QemuUUID guid;
    uint16_t len;
    SevHashTableEntry cmdline;
    SevHashTableEntry initrd;
    SevHashTableEntry kernel;
} SevHashTable;

/*
 * Data encrypted by sev_encrypt_flash() must be padded to a multiple of
 * 16 bytes.
 */
typedef struct QEMU_PACKED PaddedSevHashTable {
    SevHashTable ht;
    uint8_t padding[ROUND_UP(sizeof(SevHashTable), 16) - sizeof(SevHashTable)];
} PaddedSevHashTable;

QEMU_BUILD_BUG_ON(sizeof(PaddedSevHashTable) % 16 != 0);

#define SEV_INFO_BLOCK_GUID "00f771de-1a7e-4fcb-890e-68c77e2fb44e"
typedef struct __attribute__((__packed__)) SevInfoBlock {
    /* SEV-ES Reset Vector Address */
    uint32_t reset_addr;
} SevInfoBlock;

#define SEV_HASH_TABLE_RV_GUID "7255371f-3a3b-4b04-927b-1da6efa8d454"
typedef struct QEMU_PACKED SevHashTableDescriptor {
    /* SEV hash table area guest address */
    uint32_t base;
    /* SEV hash table area size (in bytes) */
    uint32_t size;
} SevHashTableDescriptor;

struct SevCommonState {
    X86ConfidentialGuest parent_obj;

    int kvm_type;

    /* configuration parameters */
    char *sev_device;
    uint32_t cbitpos;
    uint32_t reduced_phys_bits;
    bool kernel_hashes;

    /* runtime state */
    uint8_t api_major;
    uint8_t api_minor;
    uint8_t build_id;
    int sev_fd;
    SevState state;

    uint32_t reset_cs;
    uint32_t reset_ip;
    bool reset_data_valid;
};

struct SevCommonStateClass {
    X86ConfidentialGuestClass parent_class;

    /* public */
    bool (*build_kernel_loader_hashes)(SevCommonState *sev_common,
                                       SevHashTableDescriptor *area,
                                       SevKernelLoaderContext *ctx,
                                       Error **errp);
    int (*launch_start)(SevCommonState *sev_common);
    void (*launch_finish)(SevCommonState *sev_common);
    int (*launch_update_data)(SevCommonState *sev_common, hwaddr gpa,
                              uint8_t *ptr, size_t len);
    int (*kvm_init)(ConfidentialGuestSupport *cgs, Error **errp);
};

/**
 * SevGuestState:
 *
 * The SevGuestState object is used for creating and managing a SEV
 * guest.
 *
 * # $QEMU \
 *         -object sev-guest,id=sev0 \
 *         -machine ...,memory-encryption=sev0
 */
struct SevGuestState {
    SevCommonState parent_obj;
    gchar *measurement;

    /* configuration parameters */
    uint32_t handle;
    uint32_t policy;
    char *dh_cert_file;
    char *session_file;
    OnOffAuto legacy_vm_type;
};

struct SevSnpGuestState {
    SevCommonState parent_obj;

    /* configuration parameters */
    char *guest_visible_workarounds;
    char *id_block_base64;
    uint8_t *id_block;
    char *id_auth_base64;
    uint8_t *id_auth;
    char *host_data;

    struct kvm_sev_snp_launch_start kvm_start_conf;
    struct kvm_sev_snp_launch_finish kvm_finish_conf;

    uint32_t kernel_hashes_offset;
    PaddedSevHashTable *kernel_hashes_data;
};

#define DEFAULT_GUEST_POLICY    0x1 /* disable debug */
#define DEFAULT_SEV_DEVICE      "/dev/sev"
#define DEFAULT_SEV_SNP_POLICY  0x30000
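/*
 * Illustrative SEV-SNP launch configuration (example only; cbitpos and
 * reduced-phys-bits are host-dependent and the full set of sev-snp-guest
 * properties is defined elsewhere):
 *
 * # $QEMU \
 *         -object sev-snp-guest,id=snp0,cbitpos=51,reduced-phys-bits=1 \
 *         -machine ...,confidential-guest-support=snp0
 */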
typedef struct SevLaunchUpdateData {
    QTAILQ_ENTRY(SevLaunchUpdateData) next;
    hwaddr gpa;
    void *hva;
    size_t len;
    int type;
} SevLaunchUpdateData;

static QTAILQ_HEAD(, SevLaunchUpdateData) launch_update;

static Error *sev_mig_blocker;

static const char *const sev_fw_errlist[] = {
    [SEV_RET_SUCCESS]                = "",
    [SEV_RET_INVALID_PLATFORM_STATE] = "Platform state is invalid",
    [SEV_RET_INVALID_GUEST_STATE]    = "Guest state is invalid",
    [SEV_RET_INAVLID_CONFIG]         = "Platform configuration is invalid",
    [SEV_RET_INVALID_LEN]            = "Buffer too small",
    [SEV_RET_ALREADY_OWNED]          = "Platform is already owned",
    [SEV_RET_INVALID_CERTIFICATE]    = "Certificate is invalid",
    [SEV_RET_POLICY_FAILURE]         = "Policy is not allowed",
    [SEV_RET_INACTIVE]               = "Guest is not active",
    [SEV_RET_INVALID_ADDRESS]        = "Invalid address",
    [SEV_RET_BAD_SIGNATURE]          = "Bad signature",
    [SEV_RET_BAD_MEASUREMENT]        = "Bad measurement",
    [SEV_RET_ASID_OWNED]             = "ASID is already owned",
    [SEV_RET_INVALID_ASID]           = "Invalid ASID",
    [SEV_RET_WBINVD_REQUIRED]        = "WBINVD is required",
    [SEV_RET_DFFLUSH_REQUIRED]       = "DF_FLUSH is required",
    [SEV_RET_INVALID_GUEST]          = "Guest handle is invalid",
    [SEV_RET_INVALID_COMMAND]        = "Invalid command",
    [SEV_RET_ACTIVE]                 = "Guest is active",
    [SEV_RET_HWSEV_RET_PLATFORM]     = "Hardware error",
    [SEV_RET_HWSEV_RET_UNSAFE]       = "Hardware unsafe",
    [SEV_RET_UNSUPPORTED]            = "Feature not supported",
    [SEV_RET_INVALID_PARAM]          = "Invalid parameter",
    [SEV_RET_RESOURCE_LIMIT]         = "Required firmware resource depleted",
    [SEV_RET_SECURE_DATA_INVALID]    = "Part-specific integrity check failure",
};

#define SEV_FW_MAX_ERROR      ARRAY_SIZE(sev_fw_errlist)

/* <linux/kvm.h> doesn't expose this, so re-use the max from kvm.c */
#define KVM_MAX_CPUID_ENTRIES 100

typedef struct KvmCpuidInfo {
    struct kvm_cpuid2 cpuid;
    struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
} KvmCpuidInfo;

#define SNP_CPUID_FUNCTION_MAXCOUNT 64
#define SNP_CPUID_FUNCTION_UNKNOWN 0xFFFFFFFF

typedef struct {
    uint32_t eax_in;
    uint32_t ecx_in;
    uint64_t xcr0_in;
    uint64_t xss_in;
    uint32_t eax;
    uint32_t ebx;
    uint32_t ecx;
    uint32_t edx;
    uint64_t reserved;
} __attribute__((packed)) SnpCpuidFunc;

typedef struct {
    uint32_t count;
    uint32_t reserved1;
    uint64_t reserved2;
    SnpCpuidFunc entries[SNP_CPUID_FUNCTION_MAXCOUNT];
} __attribute__((packed)) SnpCpuidInfo;

static int
sev_ioctl(int fd, int cmd, void *data, int *error)
{
    int r;
    struct kvm_sev_cmd input;

    memset(&input, 0x0, sizeof(input));

    input.id = cmd;
    input.sev_fd = fd;
    input.data = (uintptr_t)data;

    r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_OP, &input);

    if (error) {
        *error = input.error;
    }

    return r;
}

static int
sev_platform_ioctl(int fd, int cmd, void *data, int *error)
{
    int r;
    struct sev_issue_cmd arg;

    arg.cmd = cmd;
    arg.data = (unsigned long)data;
    r = ioctl(fd, SEV_ISSUE_CMD, &arg);
    if (error) {
        *error = arg.error;
    }

    return r;
}

static const char *
fw_error_to_str(int code)
{
    if (code < 0 || code >= SEV_FW_MAX_ERROR) {
        return "unknown error";
    }

    return sev_fw_errlist[code];
}

static bool
sev_check_state(const SevCommonState *sev_common, SevState state)
{
    assert(sev_common);
    return sev_common->state == state ? true : false;
}

static void
sev_set_guest_state(SevCommonState *sev_common, SevState new_state)
{
    assert(new_state < SEV_STATE__MAX);
    assert(sev_common);

    trace_kvm_sev_change_state(SevState_str(sev_common->state),
                               SevState_str(new_state));
    sev_common->state = new_state;
}

static void
sev_ram_block_added(RAMBlockNotifier *n, void *host, size_t size,
                    size_t max_size)
{
    int r;
    struct kvm_enc_region range;
    ram_addr_t offset;
    MemoryRegion *mr;

    /*
     * The RAM device presents a memory region that should be treated
     * as IO region and should not be pinned.
     */
    mr = memory_region_from_host(host, &offset);
    if (mr && memory_region_is_ram_device(mr)) {
        return;
    }

    range.addr = (uintptr_t)host;
    range.size = max_size;

    trace_kvm_memcrypt_register_region(host, max_size);
    r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_REG_REGION, &range);
    if (r) {
        error_report("%s: failed to register region (%p+%#zx) error '%s'",
                     __func__, host, max_size, strerror(errno));
        exit(1);
    }
}

static void
sev_ram_block_removed(RAMBlockNotifier *n, void *host, size_t size,
                      size_t max_size)
{
    int r;
    struct kvm_enc_region range;
    ram_addr_t offset;
    MemoryRegion *mr;

    /*
     * The RAM device presents a memory region that should be treated
     * as IO region and should not have been pinned.
     */
    mr = memory_region_from_host(host, &offset);
    if (mr && memory_region_is_ram_device(mr)) {
        return;
    }

    range.addr = (uintptr_t)host;
    range.size = max_size;

    trace_kvm_memcrypt_unregister_region(host, max_size);
    r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_UNREG_REGION, &range);
    if (r) {
        error_report("%s: failed to unregister region (%p+%#zx)",
                     __func__, host, max_size);
    }
}

static struct RAMBlockNotifier sev_ram_notifier = {
    .ram_block_added = sev_ram_block_added,
    .ram_block_removed = sev_ram_block_removed,
};

bool
sev_enabled(void)
{
    ConfidentialGuestSupport *cgs = MACHINE(qdev_get_machine())->cgs;

    return !!object_dynamic_cast(OBJECT(cgs), TYPE_SEV_COMMON);
}

bool
sev_snp_enabled(void)
{
    ConfidentialGuestSupport *cgs = MACHINE(qdev_get_machine())->cgs;

    return !!object_dynamic_cast(OBJECT(cgs), TYPE_SEV_SNP_GUEST);
}

bool
sev_es_enabled(void)
{
    ConfidentialGuestSupport *cgs = MACHINE(qdev_get_machine())->cgs;

    return sev_snp_enabled() ||
            (sev_enabled() && SEV_GUEST(cgs)->policy & SEV_POLICY_ES);
}

uint32_t
sev_get_cbit_position(void)
{
    SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);

    return sev_common ? sev_common->cbitpos : 0;
}

uint32_t
sev_get_reduced_phys_bits(void)
{
    SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);

    return sev_common ? sev_common->reduced_phys_bits : 0;
}

static SevInfo *sev_get_info(void)
{
    SevInfo *info;
    SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);

    info = g_new0(SevInfo, 1);
    info->enabled = sev_enabled();

    if (info->enabled) {
        info->api_major = sev_common->api_major;
        info->api_minor = sev_common->api_minor;
        info->build_id = sev_common->build_id;
        info->state = sev_common->state;

        if (sev_snp_enabled()) {
            info->sev_type = SEV_GUEST_TYPE_SEV_SNP;
            info->u.sev_snp.snp_policy =
                object_property_get_uint(OBJECT(sev_common), "policy", NULL);
        } else {
            info->sev_type = SEV_GUEST_TYPE_SEV;
            info->u.sev.handle = SEV_GUEST(sev_common)->handle;
            info->u.sev.policy =
                (uint32_t)object_property_get_uint(OBJECT(sev_common),
                                                   "policy", NULL);
        }
    }

    return info;
}

SevInfo *qmp_query_sev(Error **errp)
{
    SevInfo *info;

    info = sev_get_info();
    if (!info) {
        error_setg(errp, "SEV feature is not available");
        return NULL;
    }

    return info;
}

void hmp_info_sev(Monitor *mon, const QDict *qdict)
{
    SevInfo *info = sev_get_info();

    if (!info || !info->enabled) {
        monitor_printf(mon, "SEV is not enabled\n");
        goto out;
    }

    monitor_printf(mon, "SEV type: %s\n", SevGuestType_str(info->sev_type));
    monitor_printf(mon, "state: %s\n", SevState_str(info->state));
    monitor_printf(mon, "build: %d\n", info->build_id);
    monitor_printf(mon, "api version: %d.%d\n", info->api_major,
                   info->api_minor);

    if (sev_snp_enabled()) {
        monitor_printf(mon, "debug: %s\n",
                       info->u.sev_snp.snp_policy & SEV_SNP_POLICY_DBG ? "on"
                                                                       : "off");
        monitor_printf(mon, "SMT allowed: %s\n",
                       info->u.sev_snp.snp_policy & SEV_SNP_POLICY_SMT ? "on"
                                                                       : "off");
    } else {
        monitor_printf(mon, "handle: %d\n", info->u.sev.handle);
        monitor_printf(mon, "debug: %s\n",
                       info->u.sev.policy & SEV_POLICY_NODBG ? "off" : "on");
        monitor_printf(mon, "key-sharing: %s\n",
                       info->u.sev.policy & SEV_POLICY_NOKS ? "off" : "on");
    }

out:
    qapi_free_SevInfo(info);
}
static int
sev_get_pdh_info(int fd, guchar **pdh, size_t *pdh_len, guchar **cert_chain,
                 size_t *cert_chain_len, Error **errp)
{
    guchar *pdh_data = NULL;
    guchar *cert_chain_data = NULL;
    struct sev_user_data_pdh_cert_export export = {};
    int err, r;

    /* query the certificate length */
    r = sev_platform_ioctl(fd, SEV_PDH_CERT_EXPORT, &export, &err);
    if (r < 0) {
        if (err != SEV_RET_INVALID_LEN) {
            error_setg(errp, "SEV: Failed to export PDH cert"
                             " ret=%d fw_err=%d (%s)",
                       r, err, fw_error_to_str(err));
            return 1;
        }
    }

    pdh_data = g_new(guchar, export.pdh_cert_len);
    cert_chain_data = g_new(guchar, export.cert_chain_len);
    export.pdh_cert_address = (unsigned long)pdh_data;
    export.cert_chain_address = (unsigned long)cert_chain_data;

    r = sev_platform_ioctl(fd, SEV_PDH_CERT_EXPORT, &export, &err);
    if (r < 0) {
        error_setg(errp, "SEV: Failed to export PDH cert ret=%d fw_err=%d (%s)",
                   r, err, fw_error_to_str(err));
        goto e_free;
    }

    *pdh = pdh_data;
    *pdh_len = export.pdh_cert_len;
    *cert_chain = cert_chain_data;
    *cert_chain_len = export.cert_chain_len;
    return 0;

e_free:
    g_free(pdh_data);
    g_free(cert_chain_data);
    return 1;
}

static int sev_get_cpu0_id(int fd, guchar **id, size_t *id_len, Error **errp)
{
    guchar *id_data;
    struct sev_user_data_get_id2 get_id2 = {};
    int err, r;

    /* query the ID length */
    r = sev_platform_ioctl(fd, SEV_GET_ID2, &get_id2, &err);
    if (r < 0 && err != SEV_RET_INVALID_LEN) {
        error_setg(errp, "SEV: Failed to get ID ret=%d fw_err=%d (%s)",
                   r, err, fw_error_to_str(err));
        return 1;
    }

    id_data = g_new(guchar, get_id2.length);
    get_id2.address = (unsigned long)id_data;

    r = sev_platform_ioctl(fd, SEV_GET_ID2, &get_id2, &err);
    if (r < 0) {
        error_setg(errp, "SEV: Failed to get ID ret=%d fw_err=%d (%s)",
                   r, err, fw_error_to_str(err));
        goto err;
    }

    *id = id_data;
    *id_len = get_id2.length;
    return 0;

err:
    g_free(id_data);
    return 1;
}

static SevCapability *sev_get_capabilities(Error **errp)
{
    SevCapability *cap = NULL;
    guchar *pdh_data = NULL;
    guchar *cert_chain_data = NULL;
    guchar *cpu0_id_data = NULL;
    size_t pdh_len = 0, cert_chain_len = 0, cpu0_id_len = 0;
    uint32_t ebx;
    int fd;
    SevCommonState *sev_common;
    char *sev_device;

    if (!kvm_enabled()) {
        error_setg(errp, "KVM not enabled");
        return NULL;
    }
    if (kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_OP, NULL) < 0) {
        error_setg(errp, "SEV is not enabled in KVM");
        return NULL;
    }

    sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
    if (sev_common) {
        sev_device = object_property_get_str(OBJECT(sev_common), "sev-device",
                                             &error_abort);
    } else {
        sev_device = g_strdup(DEFAULT_SEV_DEVICE);
    }

    fd = open(sev_device, O_RDWR);
    if (fd < 0) {
        error_setg_errno(errp, errno, "SEV: Failed to open %s",
                         sev_device);
        g_free(sev_device);
        return NULL;
    }
    g_free(sev_device);

    if (sev_get_pdh_info(fd, &pdh_data, &pdh_len,
                         &cert_chain_data, &cert_chain_len, errp)) {
        goto out;
    }

    if (sev_get_cpu0_id(fd, &cpu0_id_data, &cpu0_id_len, errp)) {
        goto out;
    }

    cap = g_new0(SevCapability, 1);
    cap->pdh = g_base64_encode(pdh_data, pdh_len);
    cap->cert_chain = g_base64_encode(cert_chain_data, cert_chain_len);
    cap->cpu0_id = g_base64_encode(cpu0_id_data, cpu0_id_len);

    host_cpuid(0x8000001F, 0, NULL, &ebx, NULL, NULL);
    cap->cbitpos = ebx & 0x3f;

    /*
     * When SEV feature is enabled, we lose one bit in guest physical
     * addressing.
     */
    cap->reduced_phys_bits = 1;

out:
    g_free(cpu0_id_data);
    g_free(pdh_data);
    g_free(cert_chain_data);
    close(fd);
    return cap;
}

SevCapability *qmp_query_sev_capabilities(Error **errp)
{
    return sev_get_capabilities(errp);
}
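/*
 * Example QMP exchange for query-sev-capabilities (illustrative; the base64
 * fields are placeholders):
 *
 *  -> { "execute": "query-sev-capabilities" }
 *  <- { "return": { "pdh": "<base64>", "cert-chain": "<base64>",
 *                   "cpu0-id": "<base64>", "cbitpos": 51,
 *                   "reduced-phys-bits": 1 } }
 */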
static OvmfSevMetadata *ovmf_sev_metadata_table;

#define OVMF_SEV_META_DATA_GUID "dc886566-984a-4798-A75e-5585a7bf67cc"
typedef struct __attribute__((__packed__)) OvmfSevMetadataOffset {
    uint32_t offset;
} OvmfSevMetadataOffset;

OvmfSevMetadata *pc_system_get_ovmf_sev_metadata_ptr(void)
{
    return ovmf_sev_metadata_table;
}

void pc_system_parse_sev_metadata(uint8_t *flash_ptr, size_t flash_size)
{
    OvmfSevMetadata *metadata;
    OvmfSevMetadataOffset *data;

    if (!pc_system_ovmf_table_find(OVMF_SEV_META_DATA_GUID, (uint8_t **)&data,
                                   NULL)) {
        return;
    }

    metadata = (OvmfSevMetadata *)(flash_ptr + flash_size - data->offset);
    if (memcmp(metadata->signature, "ASEV", 4) != 0 ||
        metadata->len < sizeof(OvmfSevMetadata) ||
        metadata->len > flash_size - data->offset) {
        return;
    }

    ovmf_sev_metadata_table = g_memdup2(metadata, metadata->len);
}

static SevAttestationReport *sev_get_attestation_report(const char *mnonce,
                                                        Error **errp)
{
    struct kvm_sev_attestation_report input = {};
    SevAttestationReport *report = NULL;
    SevCommonState *sev_common;
    g_autofree guchar *data = NULL;
    g_autofree guchar *buf = NULL;
    gsize len;
    int err = 0, ret;

    if (!sev_enabled()) {
        error_setg(errp, "SEV is not enabled");
        return NULL;
    }

    /* let's decode the mnonce string */
    buf = g_base64_decode(mnonce, &len);
    if (!buf) {
        error_setg(errp, "SEV: failed to decode mnonce input");
        return NULL;
    }

    /* verify the input mnonce length */
    if (len != sizeof(input.mnonce)) {
        error_setg(errp, "SEV: mnonce must be %zu bytes (got %" G_GSIZE_FORMAT ")",
                   sizeof(input.mnonce), len);
        return NULL;
    }

    sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);

    /* Query the report length */
    ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_GET_ATTESTATION_REPORT,
                    &input, &err);
    if (ret < 0) {
        if (err != SEV_RET_INVALID_LEN) {
            error_setg(errp, "SEV: Failed to query the attestation report"
                             " length ret=%d fw_err=%d (%s)",
                       ret, err, fw_error_to_str(err));
            return NULL;
        }
    }

    data = g_malloc(input.len);
    input.uaddr = (unsigned long)data;
    memcpy(input.mnonce, buf, sizeof(input.mnonce));

    /* Query the report */
    ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_GET_ATTESTATION_REPORT,
                    &input, &err);
    if (ret) {
        error_setg_errno(errp, errno, "SEV: Failed to get attestation report"
                         " ret=%d fw_err=%d (%s)", ret, err, fw_error_to_str(err));
        return NULL;
    }

    report = g_new0(SevAttestationReport, 1);
    report->data = g_base64_encode(data, input.len);

    trace_kvm_sev_attestation_report(mnonce, report->data);

    return report;
}

SevAttestationReport *qmp_query_sev_attestation_report(const char *mnonce,
                                                       Error **errp)
{
    return sev_get_attestation_report(mnonce, errp);
}
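/*
 * Example QMP exchange for query-sev-attestation-report (illustrative). The
 * mnonce argument must decode to exactly sizeof(input.mnonce) bytes, as
 * checked above; the returned report is base64-encoded:
 *
 *  -> { "execute": "query-sev-attestation-report",
 *       "arguments": { "mnonce": "<base64 nonce>" } }
 *  <- { "return": { "data": "<base64 attestation report>" } }
 */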
static int
sev_read_file_base64(const char *filename, guchar **data, gsize *len)
{
    gsize sz;
    g_autofree gchar *base64 = NULL;
    GError *error = NULL;

    if (!g_file_get_contents(filename, &base64, &sz, &error)) {
        error_report("SEV: Failed to read '%s' (%s)", filename, error->message);
        g_error_free(error);
        return -1;
    }

    *data = g_base64_decode(base64, len);
    return 0;
}

static int
sev_snp_launch_start(SevCommonState *sev_common)
{
    int fw_error, rc;
    SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(sev_common);
    struct kvm_sev_snp_launch_start *start = &sev_snp_guest->kvm_start_conf;

    trace_kvm_sev_snp_launch_start(start->policy,
                                   sev_snp_guest->guest_visible_workarounds);

    if (!kvm_enable_hypercall(BIT_ULL(KVM_HC_MAP_GPA_RANGE))) {
        return 1;
    }

    rc = sev_ioctl(sev_common->sev_fd, KVM_SEV_SNP_LAUNCH_START,
                   start, &fw_error);
    if (rc < 0) {
        error_report("%s: SNP_LAUNCH_START ret=%d fw_error=%d '%s'",
                     __func__, rc, fw_error, fw_error_to_str(fw_error));
        return 1;
    }

    QTAILQ_INIT(&launch_update);

    sev_set_guest_state(sev_common, SEV_STATE_LAUNCH_UPDATE);

    return 0;
}

static int
sev_launch_start(SevCommonState *sev_common)
{
    gsize sz;
    int ret = 1;
    int fw_error, rc;
    SevGuestState *sev_guest = SEV_GUEST(sev_common);
    struct kvm_sev_launch_start start = {
        .handle = sev_guest->handle, .policy = sev_guest->policy
    };
    guchar *session = NULL, *dh_cert = NULL;

    if (sev_guest->session_file) {
        if (sev_read_file_base64(sev_guest->session_file, &session, &sz) < 0) {
            goto out;
        }
        start.session_uaddr = (unsigned long)session;
        start.session_len = sz;
    }

    if (sev_guest->dh_cert_file) {
        if (sev_read_file_base64(sev_guest->dh_cert_file, &dh_cert, &sz) < 0) {
            goto out;
        }
        start.dh_uaddr = (unsigned long)dh_cert;
        start.dh_len = sz;
    }

    trace_kvm_sev_launch_start(start.policy, session, dh_cert);
    rc = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_START, &start, &fw_error);
    if (rc < 0) {
        error_report("%s: LAUNCH_START ret=%d fw_error=%d '%s'",
                     __func__, rc, fw_error, fw_error_to_str(fw_error));
        goto out;
    }

    sev_set_guest_state(sev_common, SEV_STATE_LAUNCH_UPDATE);
    sev_guest->handle = start.handle;
    ret = 0;

out:
    g_free(session);
    g_free(dh_cert);
    return ret;
}

static void
sev_snp_cpuid_report_mismatches(SnpCpuidInfo *old,
                                SnpCpuidInfo *new)
{
    size_t i;

    if (old->count != new->count) {
        error_report("SEV-SNP: CPUID validation failed due to count mismatch, "
                     "provided: %d, expected: %d", old->count, new->count);
        return;
    }

    for (i = 0; i < old->count; i++) {
        SnpCpuidFunc *old_func, *new_func;

        old_func = &old->entries[i];
        new_func = &new->entries[i];

        if (memcmp(old_func, new_func, sizeof(SnpCpuidFunc))) {
            error_report("SEV-SNP: CPUID validation failed for function 0x%x, index: 0x%x, "
                         "provided: eax:0x%08x, ebx: 0x%08x, ecx: 0x%08x, edx: 0x%08x, "
                         "expected: eax:0x%08x, ebx: 0x%08x, ecx: 0x%08x, edx: 0x%08x",
                         old_func->eax_in, old_func->ecx_in,
                         old_func->eax, old_func->ebx, old_func->ecx, old_func->edx,
                         new_func->eax, new_func->ebx, new_func->ecx, new_func->edx);
        }
    }
}

static const char *
snp_page_type_to_str(int type)
{
    switch (type) {
    case KVM_SEV_SNP_PAGE_TYPE_NORMAL: return "Normal";
    case KVM_SEV_SNP_PAGE_TYPE_ZERO: return "Zero";
    case KVM_SEV_SNP_PAGE_TYPE_UNMEASURED: return "Unmeasured";
    case KVM_SEV_SNP_PAGE_TYPE_SECRETS: return "Secrets";
    case KVM_SEV_SNP_PAGE_TYPE_CPUID: return "Cpuid";
    default: return "unknown";
    }
}

static int
sev_snp_launch_update(SevSnpGuestState *sev_snp_guest,
                      SevLaunchUpdateData *data)
{
    int ret, fw_error;
    SnpCpuidInfo snp_cpuid_info;
    struct kvm_sev_snp_launch_update update = {0};

    if (!data->hva || !data->len) {
        error_report("SNP_LAUNCH_UPDATE called with invalid address/length: "
                     "%p / %zx", data->hva, data->len);
        return 1;
    }

    if (data->type == KVM_SEV_SNP_PAGE_TYPE_CPUID) {
        /* Save a copy for comparison in case the LAUNCH_UPDATE fails */
        memcpy(&snp_cpuid_info, data->hva, sizeof(snp_cpuid_info));
    }

    update.uaddr = (__u64)(unsigned long)data->hva;
    update.gfn_start = data->gpa >> TARGET_PAGE_BITS;
    update.len = data->len;
    update.type = data->type;

    /*
     * KVM_SEV_SNP_LAUNCH_UPDATE requires that GPA ranges have the private
     * memory attribute set in advance.
     */
    ret = kvm_set_memory_attributes_private(data->gpa, data->len);
    if (ret) {
        error_report("SEV-SNP: failed to configure initial "
                     "private guest memory");
        goto out;
    }

    while (update.len || ret == -EAGAIN) {
        trace_kvm_sev_snp_launch_update(update.uaddr, update.gfn_start <<
                                        TARGET_PAGE_BITS, update.len,
                                        snp_page_type_to_str(update.type));

        ret = sev_ioctl(SEV_COMMON(sev_snp_guest)->sev_fd,
                        KVM_SEV_SNP_LAUNCH_UPDATE,
                        &update, &fw_error);
        if (ret && ret != -EAGAIN) {
            error_report("SNP_LAUNCH_UPDATE ret=%d fw_error=%d '%s'",
                         ret, fw_error, fw_error_to_str(fw_error));

            if (data->type == KVM_SEV_SNP_PAGE_TYPE_CPUID) {
                sev_snp_cpuid_report_mismatches(&snp_cpuid_info, data->hva);
                error_report("SEV-SNP: failed to update CPUID page");
            }
            break;
        }
    }

out:
    if (!ret && update.gfn_start << TARGET_PAGE_BITS != data->gpa + data->len) {
        error_report("SEV-SNP: expected update of GPA range %"
                     HWADDR_PRIx "-%" HWADDR_PRIx ", "
                     "got GPA range %" HWADDR_PRIx "-%llx",
                     data->gpa, data->gpa + data->len, data->gpa,
                     update.gfn_start << TARGET_PAGE_BITS);
        ret = -EIO;
    }

    return ret;
}

static uint32_t
sev_snp_mask_cpuid_features(X86ConfidentialGuest *cg, uint32_t feature, uint32_t index,
                            int reg, uint32_t value)
{
    switch (feature) {
    case 1:
        if (reg == R_ECX) {
            return value & ~CPUID_EXT_TSC_DEADLINE_TIMER;
        }
        break;
    case 7:
        if (index == 0 && reg == R_EBX) {
            return value & ~CPUID_7_0_EBX_TSC_ADJUST;
        }
        if (index == 0 && reg == R_EDX) {
            return value & ~(CPUID_7_0_EDX_SPEC_CTRL |
                             CPUID_7_0_EDX_STIBP |
                             CPUID_7_0_EDX_FLUSH_L1D |
                             CPUID_7_0_EDX_ARCH_CAPABILITIES |
                             CPUID_7_0_EDX_CORE_CAPABILITY |
                             CPUID_7_0_EDX_SPEC_CTRL_SSBD);
        }
        break;
    case 0x80000008:
        if (reg == R_EBX) {
            return value & ~CPUID_8000_0008_EBX_VIRT_SSBD;
        }
        break;
    }
    return value;
}
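/*
 * For plain SEV/SEV-ES, LAUNCH_UPDATE_DATA below encrypts the given host
 * memory in place and folds it into the launch measurement; it is reached
 * through the launch_update_data class hook, e.g. from sev_encrypt_flash().
 */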
static int
sev_launch_update_data(SevCommonState *sev_common, hwaddr gpa,
                       uint8_t *addr, size_t len)
{
    int ret, fw_error;
    struct kvm_sev_launch_update_data update;

    if (!addr || !len) {
        return 1;
    }

    update.uaddr = (uintptr_t)addr;
    update.len = len;
    trace_kvm_sev_launch_update_data(addr, len);
    ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_UPDATE_DATA,
                    &update, &fw_error);
    if (ret) {
        error_report("%s: LAUNCH_UPDATE ret=%d fw_error=%d '%s'",
                     __func__, ret, fw_error, fw_error_to_str(fw_error));
    }

    return ret;
}

static int
sev_launch_update_vmsa(SevGuestState *sev_guest)
{
    int ret, fw_error;

    ret = sev_ioctl(SEV_COMMON(sev_guest)->sev_fd, KVM_SEV_LAUNCH_UPDATE_VMSA,
                    NULL, &fw_error);
    if (ret) {
        error_report("%s: LAUNCH_UPDATE_VMSA ret=%d fw_error=%d '%s'",
                     __func__, ret, fw_error, fw_error_to_str(fw_error));
    }

    return ret;
}

static void
sev_launch_get_measure(Notifier *notifier, void *unused)
{
    SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
    SevGuestState *sev_guest = SEV_GUEST(sev_common);
    int ret, error;
    g_autofree guchar *data = NULL;
    struct kvm_sev_launch_measure measurement = {};

    if (!sev_check_state(sev_common, SEV_STATE_LAUNCH_UPDATE)) {
        return;
    }

    if (sev_es_enabled()) {
        /* measure all the VM save areas before getting launch_measure */
        ret = sev_launch_update_vmsa(sev_guest);
        if (ret) {
            exit(1);
        }
        kvm_mark_guest_state_protected();
    }

    /* query the measurement blob length */
    ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_MEASURE,
                    &measurement, &error);
    if (!measurement.len) {
        error_report("%s: LAUNCH_MEASURE ret=%d fw_error=%d '%s'",
                     __func__, ret, error, fw_error_to_str(error));
        return;
    }

    data = g_new0(guchar, measurement.len);
    measurement.uaddr = (unsigned long)data;

    /* get the measurement blob */
    ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_MEASURE,
                    &measurement, &error);
    if (ret) {
        error_report("%s: LAUNCH_MEASURE ret=%d fw_error=%d '%s'",
                     __func__, ret, error, fw_error_to_str(error));
        return;
    }

    sev_set_guest_state(sev_common, SEV_STATE_LAUNCH_SECRET);

    /* encode the measurement value and emit the event */
    sev_guest->measurement = g_base64_encode(data, measurement.len);
    trace_kvm_sev_launch_measurement(sev_guest->measurement);
}

static char *sev_get_launch_measurement(void)
{
    ConfidentialGuestSupport *cgs = MACHINE(qdev_get_machine())->cgs;
    SevGuestState *sev_guest =
        (SevGuestState *)object_dynamic_cast(OBJECT(cgs), TYPE_SEV_GUEST);

    if (sev_guest &&
        SEV_COMMON(sev_guest)->state >= SEV_STATE_LAUNCH_SECRET) {
        return g_strdup(sev_guest->measurement);
    }

    return NULL;
}

SevLaunchMeasureInfo *qmp_query_sev_launch_measure(Error **errp)
{
    char *data;
    SevLaunchMeasureInfo *info;

    data = sev_get_launch_measurement();
    if (!data) {
        error_setg(errp, "SEV launch measurement is not available");
        return NULL;
    }

    info = g_malloc0(sizeof(*info));
    info->data = data;

    return info;
}
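/*
 * Example QMP exchange for query-sev-launch-measure (illustrative); the
 * returned value is the base64-encoded launch measurement produced by
 * sev_launch_get_measure() above:
 *
 *  -> { "execute": "query-sev-launch-measure" }
 *  <- { "return": { "data": "<base64 measurement>" } }
 */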
static Notifier sev_machine_done_notify = {
    .notify = sev_launch_get_measure,
};

static void
sev_launch_finish(SevCommonState *sev_common)
{
    int ret, error;

    trace_kvm_sev_launch_finish();
    ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_FINISH, 0,
                    &error);
    if (ret) {
        error_report("%s: LAUNCH_FINISH ret=%d fw_error=%d '%s'",
                     __func__, ret, error, fw_error_to_str(error));
        exit(1);
    }

    sev_set_guest_state(sev_common, SEV_STATE_RUNNING);

    /* add migration blocker */
    error_setg(&sev_mig_blocker,
               "SEV: Migration is not implemented");
    migrate_add_blocker(&sev_mig_blocker, &error_fatal);
}

static int
snp_launch_update_data(uint64_t gpa, void *hva, size_t len, int type)
{
    SevLaunchUpdateData *data;

    data = g_new0(SevLaunchUpdateData, 1);
    data->gpa = gpa;
    data->hva = hva;
    data->len = len;
    data->type = type;

    QTAILQ_INSERT_TAIL(&launch_update, data, next);

    return 0;
}

static int
sev_snp_launch_update_data(SevCommonState *sev_common, hwaddr gpa,
                           uint8_t *ptr, size_t len)
{
    int ret = snp_launch_update_data(gpa, ptr, len,
                                     KVM_SEV_SNP_PAGE_TYPE_NORMAL);
    return ret;
}

static int
sev_snp_cpuid_info_fill(SnpCpuidInfo *snp_cpuid_info,
                        const KvmCpuidInfo *kvm_cpuid_info)
{
    size_t i;

    if (kvm_cpuid_info->cpuid.nent > SNP_CPUID_FUNCTION_MAXCOUNT) {
        error_report("SEV-SNP: CPUID entry count (%d) exceeds max (%d)",
                     kvm_cpuid_info->cpuid.nent, SNP_CPUID_FUNCTION_MAXCOUNT);
        return -1;
    }

    memset(snp_cpuid_info, 0, sizeof(*snp_cpuid_info));

    for (i = 0; i < kvm_cpuid_info->cpuid.nent; i++) {
        const struct kvm_cpuid_entry2 *kvm_cpuid_entry;
        SnpCpuidFunc *snp_cpuid_entry;

        kvm_cpuid_entry = &kvm_cpuid_info->entries[i];
        snp_cpuid_entry = &snp_cpuid_info->entries[i];

        snp_cpuid_entry->eax_in = kvm_cpuid_entry->function;
        if (kvm_cpuid_entry->flags == KVM_CPUID_FLAG_SIGNIFCANT_INDEX) {
            snp_cpuid_entry->ecx_in = kvm_cpuid_entry->index;
        }
        snp_cpuid_entry->eax = kvm_cpuid_entry->eax;
        snp_cpuid_entry->ebx = kvm_cpuid_entry->ebx;
        snp_cpuid_entry->ecx = kvm_cpuid_entry->ecx;
        snp_cpuid_entry->edx = kvm_cpuid_entry->edx;

        /*
         * Guest kernels will calculate EBX themselves using the 0xD
         * subfunctions corresponding to the individual XSAVE areas, so only
         * encode the base XSAVE size in the initial leaves, corresponding
         * to the initial XCR0=1 state.
         */
        if (snp_cpuid_entry->eax_in == 0xD &&
            (snp_cpuid_entry->ecx_in == 0x0 || snp_cpuid_entry->ecx_in == 0x1)) {
            snp_cpuid_entry->ebx = 0x240;
            snp_cpuid_entry->xcr0_in = 1;
            snp_cpuid_entry->xss_in = 0;
        }
    }

    snp_cpuid_info->count = i;

    return 0;
}

static int
snp_launch_update_cpuid(uint32_t cpuid_addr, void *hva, size_t cpuid_len)
{
    KvmCpuidInfo kvm_cpuid_info = {0};
    SnpCpuidInfo snp_cpuid_info;
    CPUState *cs = first_cpu;
    int ret;
    uint32_t i = 0;

    assert(sizeof(snp_cpuid_info) <= cpuid_len);

    /* get the cpuid list from KVM */
    do {
        kvm_cpuid_info.cpuid.nent = ++i;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_CPUID2, &kvm_cpuid_info);
    } while (ret == -E2BIG);

    if (ret) {
        error_report("SEV-SNP: unable to query CPUID values for CPU: '%s'",
                     strerror(-ret));
        return 1;
    }

    ret = sev_snp_cpuid_info_fill(&snp_cpuid_info, &kvm_cpuid_info);
    if (ret) {
        error_report("SEV-SNP: failed to generate CPUID table information");
        return 1;
    }

    memcpy(hva, &snp_cpuid_info, sizeof(snp_cpuid_info));

    return snp_launch_update_data(cpuid_addr, hva, cpuid_len,
                                  KVM_SEV_SNP_PAGE_TYPE_CPUID);
}

static int
snp_launch_update_kernel_hashes(SevSnpGuestState *sev_snp, uint32_t addr,
                                void *hva, uint32_t len)
{
    int type = KVM_SEV_SNP_PAGE_TYPE_ZERO;
    if (sev_snp->parent_obj.kernel_hashes) {
        assert(sev_snp->kernel_hashes_data);
        assert((sev_snp->kernel_hashes_offset +
                sizeof(*sev_snp->kernel_hashes_data)) <= len);
        memset(hva, 0, len);
        memcpy(hva + sev_snp->kernel_hashes_offset, sev_snp->kernel_hashes_data,
               sizeof(*sev_snp->kernel_hashes_data));
        type = KVM_SEV_SNP_PAGE_TYPE_NORMAL;
    }
    return snp_launch_update_data(addr, hva, len, type);
}

static int
snp_metadata_desc_to_page_type(int desc_type)
{
    switch (desc_type) {
    /* Add the unmeasured prevalidated pages as a zero page */
    case SEV_DESC_TYPE_SNP_SEC_MEM: return KVM_SEV_SNP_PAGE_TYPE_ZERO;
    case SEV_DESC_TYPE_SNP_SECRETS: return KVM_SEV_SNP_PAGE_TYPE_SECRETS;
    case SEV_DESC_TYPE_CPUID: return KVM_SEV_SNP_PAGE_TYPE_CPUID;
    default:
        return KVM_SEV_SNP_PAGE_TYPE_ZERO;
    }
}

static void
snp_populate_metadata_pages(SevSnpGuestState *sev_snp,
                            OvmfSevMetadata *metadata)
{
    OvmfSevMetadataDesc *desc;
    int type, ret, i;
    void *hva;
    MemoryRegion *mr = NULL;

    for (i = 0; i < metadata->num_desc; i++) {
        desc = &metadata->descs[i];

        type = snp_metadata_desc_to_page_type(desc->type);

        hva = gpa2hva(&mr, desc->base, desc->len, NULL);
        if (!hva) {
            error_report("%s: Failed to get HVA for GPA 0x%x sz 0x%x",
                         __func__, desc->base, desc->len);
            exit(1);
        }

        if (type == KVM_SEV_SNP_PAGE_TYPE_CPUID) {
            ret = snp_launch_update_cpuid(desc->base, hva, desc->len);
        } else if (desc->type == SEV_DESC_TYPE_SNP_KERNEL_HASHES) {
            ret = snp_launch_update_kernel_hashes(sev_snp, desc->base, hva,
                                                  desc->len);
        } else {
            ret = snp_launch_update_data(desc->base, hva, desc->len, type);
        }

        if (ret) {
            error_report("%s: Failed to add metadata page gpa 0x%x+%x type %d",
                         __func__, desc->base, desc->len, desc->type);
            exit(1);
        }
    }
}
static void
sev_snp_launch_finish(SevCommonState *sev_common)
{
    int ret, error;
    Error *local_err = NULL;
    OvmfSevMetadata *metadata;
    SevLaunchUpdateData *data;
    SevSnpGuestState *sev_snp = SEV_SNP_GUEST(sev_common);
    struct kvm_sev_snp_launch_finish *finish = &sev_snp->kvm_finish_conf;

    /*
     * To boot the SNP guest, the hypervisor is required to populate the CPUID
     * and Secrets page before finalizing the launch flow. The location of
     * the secrets and CPUID page is available through the OVMF metadata GUID.
     */
    metadata = pc_system_get_ovmf_sev_metadata_ptr();
    if (metadata == NULL) {
        error_report("%s: Failed to locate SEV metadata header", __func__);
        exit(1);
    }

    /* Populate all the metadata pages */
    snp_populate_metadata_pages(sev_snp, metadata);

    QTAILQ_FOREACH(data, &launch_update, next) {
        ret = sev_snp_launch_update(sev_snp, data);
        if (ret) {
            exit(1);
        }
    }

    trace_kvm_sev_snp_launch_finish(sev_snp->id_block_base64, sev_snp->id_auth_base64,
                                    sev_snp->host_data);
    ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_SNP_LAUNCH_FINISH,
                    finish, &error);
    if (ret) {
        error_report("SNP_LAUNCH_FINISH ret=%d fw_error=%d '%s'",
                     ret, error, fw_error_to_str(error));
        exit(1);
    }

    kvm_mark_guest_state_protected();
    sev_set_guest_state(sev_common, SEV_STATE_RUNNING);

    /* add migration blocker */
    error_setg(&sev_mig_blocker,
               "SEV-SNP: Migration is not implemented");
    ret = migrate_add_blocker(&sev_mig_blocker, &local_err);
    if (local_err) {
        error_report_err(local_err);
        error_free(sev_mig_blocker);
        exit(1);
    }
}


static void
sev_vm_state_change(void *opaque, bool running, RunState state)
{
    SevCommonState *sev_common = opaque;
    SevCommonStateClass *klass = SEV_COMMON_GET_CLASS(opaque);

    if (running) {
        if (!sev_check_state(sev_common, SEV_STATE_RUNNING)) {
            klass->launch_finish(sev_common);
        }
    }
}

/*
 * This helper is to examine sev-guest properties and determine if any options
 * have been set which rely on the newer KVM_SEV_INIT2 interface and associated
 * KVM VM types.
 */
static bool sev_init2_required(SevGuestState *sev_guest)
{
    /* Currently no KVM_SEV_INIT2-specific options are exposed via QEMU */
    return false;
}
static int sev_kvm_type(X86ConfidentialGuest *cg)
{
    SevCommonState *sev_common = SEV_COMMON(cg);
    SevGuestState *sev_guest = SEV_GUEST(sev_common);
    int kvm_type;

    if (sev_common->kvm_type != -1) {
        goto out;
    }

    /* These are the only cases where legacy VM types can be used. */
    if (sev_guest->legacy_vm_type == ON_OFF_AUTO_ON ||
        (sev_guest->legacy_vm_type == ON_OFF_AUTO_AUTO &&
         !sev_init2_required(sev_guest))) {
        sev_common->kvm_type = KVM_X86_DEFAULT_VM;
        goto out;
    }

    /*
     * Newer VM types are required, either explicitly via legacy-vm-type=on, or
     * implicitly via legacy-vm-type=auto along with additional sev-guest
     * properties that require the newer VM types.
     */
    kvm_type = (sev_guest->policy & SEV_POLICY_ES) ?
                KVM_X86_SEV_ES_VM : KVM_X86_SEV_VM;
    if (!kvm_is_vm_type_supported(kvm_type)) {
        if (sev_guest->legacy_vm_type == ON_OFF_AUTO_AUTO) {
            error_report("SEV: host kernel does not support requested %s VM type, which is required "
                         "for the set of options specified. To allow use of the legacy "
                         "KVM_X86_DEFAULT_VM VM type, please disable any options that are not "
                         "compatible with the legacy VM type, or upgrade your kernel.",
                         kvm_type == KVM_X86_SEV_VM ? "KVM_X86_SEV_VM" : "KVM_X86_SEV_ES_VM");
        } else {
            error_report("SEV: host kernel does not support requested %s VM type. To allow use of "
                         "the legacy KVM_X86_DEFAULT_VM VM type, the 'legacy-vm-type' argument "
                         "must be set to 'on' or 'auto' for the sev-guest object.",
                         kvm_type == KVM_X86_SEV_VM ? "KVM_X86_SEV_VM" : "KVM_X86_SEV_ES_VM");
        }

        return -1;
    }

    sev_common->kvm_type = kvm_type;
out:
    return sev_common->kvm_type;
}

static int sev_snp_kvm_type(X86ConfidentialGuest *cg)
{
    return KVM_X86_SNP_VM;
}

static int sev_common_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
{
    char *devname;
    int ret, fw_error, cmd;
    uint32_t ebx;
    uint32_t host_cbitpos;
    struct sev_user_data_status status = {};
    SevCommonState *sev_common = SEV_COMMON(cgs);
    SevCommonStateClass *klass = SEV_COMMON_GET_CLASS(cgs);
    X86ConfidentialGuestClass *x86_klass =
        X86_CONFIDENTIAL_GUEST_GET_CLASS(cgs);

    sev_common->state = SEV_STATE_UNINIT;

    host_cpuid(0x8000001F, 0, NULL, &ebx, NULL, NULL);
    host_cbitpos = ebx & 0x3f;

    /*
     * The cbitpos value will be placed in bit positions 5:0 of the EBX
     * register of CPUID 0x8000001F. No need to verify the range as the
     * comparison against the host value accomplishes that.
     */
    if (host_cbitpos != sev_common->cbitpos) {
        error_setg(errp, "%s: cbitpos check failed, host '%d' requested '%d'",
                   __func__, host_cbitpos, sev_common->cbitpos);
        return -1;
    }

    /*
     * The reduced-phys-bits value will be placed in bit positions 11:6 of
     * the EBX register of CPUID 0x8000001F, so verify the supplied value
     * is in the range of 1 to 63.
     */
    if (sev_common->reduced_phys_bits < 1 ||
        sev_common->reduced_phys_bits > 63) {
        error_setg(errp, "%s: reduced_phys_bits check failed,"
                   " it should be in the range of 1 to 63, requested '%d'",
                   __func__, sev_common->reduced_phys_bits);
        return -1;
    }

    devname = object_property_get_str(OBJECT(sev_common), "sev-device", NULL);
    sev_common->sev_fd = open(devname, O_RDWR);
    if (sev_common->sev_fd < 0) {
        error_setg(errp, "%s: Failed to open %s '%s'", __func__,
                   devname, strerror(errno));
        g_free(devname);
        return -1;
    }
    g_free(devname);

    ret = sev_platform_ioctl(sev_common->sev_fd, SEV_PLATFORM_STATUS, &status,
                             &fw_error);
    if (ret) {
        error_setg(errp, "%s: failed to get platform status ret=%d "
                   "fw_error='%d: %s'", __func__, ret, fw_error,
                   fw_error_to_str(fw_error));
        return -1;
    }
    sev_common->build_id = status.build;
    sev_common->api_major = status.api_major;
    sev_common->api_minor = status.api_minor;

    if (sev_es_enabled()) {
        if (!kvm_kernel_irqchip_allowed()) {
            error_setg(errp, "%s: SEV-ES guests require in-kernel irqchip "
                       "support", __func__);
            return -1;
        }
    }

    if (sev_es_enabled() && !sev_snp_enabled()) {
        if (!(status.flags & SEV_STATUS_FLAGS_CONFIG_ES)) {
            error_setg(errp, "%s: guest policy requires SEV-ES, but "
                       "host SEV-ES support unavailable",
                       __func__);
            return -1;
        }
    }

    trace_kvm_sev_init();
    switch (x86_klass->kvm_type(X86_CONFIDENTIAL_GUEST(sev_common))) {
    case KVM_X86_DEFAULT_VM:
        cmd = sev_es_enabled() ? KVM_SEV_ES_INIT : KVM_SEV_INIT;

        ret = sev_ioctl(sev_common->sev_fd, cmd, NULL, &fw_error);
        break;
    case KVM_X86_SEV_VM:
    case KVM_X86_SEV_ES_VM:
    case KVM_X86_SNP_VM: {
        struct kvm_sev_init args = { 0 };

        ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_INIT2, &args, &fw_error);
        break;
    }
    default:
        error_setg(errp, "%s: host kernel does not support the requested SEV configuration.",
                   __func__);
        return -1;
    }

    if (ret) {
        error_setg(errp, "%s: failed to initialize ret=%d fw_error=%d '%s'",
                   __func__, ret, fw_error, fw_error_to_str(fw_error));
        return -1;
    }

    ret = klass->launch_start(sev_common);

    if (ret) {
        error_setg(errp, "%s: failed to create encryption context", __func__);
        return -1;
    }

    if (klass->kvm_init && klass->kvm_init(cgs, errp)) {
        return -1;
    }

    qemu_add_vm_change_state_handler(sev_vm_state_change, sev_common);

    cgs->ready = true;

    return 0;
}

static int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
{
    int ret;

    /*
     * SEV/SEV-ES rely on pinned memory to back guest RAM so discarding
     * isn't actually possible. With SNP, only guest_memfd pages are used
     * for private guest memory, so discarding of shared memory is still
     * possible.
     */
    ret = ram_block_discard_disable(true);
    if (ret) {
        error_setg(errp, "%s: cannot disable RAM discard", __func__);
        return -1;
    }

    /*
     * SEV uses these notifiers to register/pin pages prior to guest use,
     * but SNP relies on guest_memfd for private pages, which has its
     * own internal mechanisms for registering/pinning private memory.
     */
    ram_block_notifier_add(&sev_ram_notifier);

    /*
     * The machine done notify event is used for SEV guests to get the
     * measurement of the encrypted images. When SEV-SNP is enabled, the
     * measurement is part of the guest attestation process where it can
     * be collected without any reliance on the VMM. So skip registering
     * the notifier for SNP in favor of using guest attestation instead.
     */
    qemu_add_machine_init_done_notifier(&sev_machine_done_notify);

    return 0;
}

static int sev_snp_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    X86MachineState *x86ms = X86_MACHINE(ms);

    if (x86ms->smm == ON_OFF_AUTO_AUTO) {
        x86ms->smm = ON_OFF_AUTO_OFF;
    } else if (x86ms->smm == ON_OFF_AUTO_ON) {
        error_setg(errp, "SEV-SNP does not support SMM.");
        return -1;
    }

    return 0;
}

int
sev_encrypt_flash(hwaddr gpa, uint8_t *ptr, uint64_t len, Error **errp)
{
    SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
    SevCommonStateClass *klass;

    if (!sev_common) {
        return 0;
    }
    klass = SEV_COMMON_GET_CLASS(sev_common);

    /* if SEV is in update state then encrypt the data else do nothing */
    if (sev_check_state(sev_common, SEV_STATE_LAUNCH_UPDATE)) {
        int ret;

        ret = klass->launch_update_data(sev_common, gpa, ptr, len);
        if (ret < 0) {
            error_setg(errp, "SEV: Failed to encrypt pflash rom");
            return ret;
        }
    }

    return 0;
}
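/*
 * The secret injection path below is also exposed via QMP as
 * sev-inject-launch-secret. Example exchange (illustrative; gpa may be
 * omitted when OVMF publishes a secret area, see
 * qmp_sev_inject_launch_secret()):
 *
 *  -> { "execute": "sev-inject-launch-secret",
 *       "arguments": { "packet-header": "<base64>", "secret": "<base64>",
 *                      "gpa": 8388608 } }
 *  <- { "return": {} }
 */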
int sev_inject_launch_secret(const char *packet_hdr, const char *secret,
                             uint64_t gpa, Error **errp)
{
    ERRP_GUARD();
    struct kvm_sev_launch_secret input;
    g_autofree guchar *data = NULL, *hdr = NULL;
    int error, ret = 1;
    void *hva;
    gsize hdr_sz = 0, data_sz = 0;
    MemoryRegion *mr = NULL;
    SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);

    if (!sev_common) {
        error_setg(errp, "SEV not enabled for guest");
        return 1;
    }

    /* secret can be injected only in this state */
    if (!sev_check_state(sev_common, SEV_STATE_LAUNCH_SECRET)) {
        error_setg(errp, "SEV: Not in correct state. (LSECRET) %x",
                   sev_common->state);
        return 1;
    }

    hdr = g_base64_decode(packet_hdr, &hdr_sz);
    if (!hdr || !hdr_sz) {
        error_setg(errp, "SEV: Failed to decode sequence header");
        return 1;
    }

    data = g_base64_decode(secret, &data_sz);
    if (!data || !data_sz) {
        error_setg(errp, "SEV: Failed to decode data");
        return 1;
    }

    hva = gpa2hva(&mr, gpa, data_sz, errp);
    if (!hva) {
        error_prepend(errp, "SEV: Failed to calculate guest address: ");
        return 1;
    }

    input.hdr_uaddr = (uint64_t)(unsigned long)hdr;
    input.hdr_len = hdr_sz;

    input.trans_uaddr = (uint64_t)(unsigned long)data;
    input.trans_len = data_sz;

    input.guest_uaddr = (uint64_t)(unsigned long)hva;
    input.guest_len = data_sz;

    trace_kvm_sev_launch_secret(gpa, input.guest_uaddr,
                                input.trans_uaddr, input.trans_len);

    ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_SECRET,
                    &input, &error);
    if (ret) {
        error_setg(errp, "SEV: failed to inject secret ret=%d fw_error=%d '%s'",
                   ret, error, fw_error_to_str(error));
        return ret;
    }

    return 0;
}

#define SEV_SECRET_GUID "4c2eb361-7d9b-4cc3-8081-127c90d3d294"
struct sev_secret_area {
    uint32_t base;
    uint32_t size;
};

void qmp_sev_inject_launch_secret(const char *packet_hdr,
                                  const char *secret,
                                  bool has_gpa, uint64_t gpa,
                                  Error **errp)
{
    if (!sev_enabled()) {
        error_setg(errp, "SEV not enabled for guest");
        return;
    }
    if (!has_gpa) {
        uint8_t *data;
        struct sev_secret_area *area;

        if (!pc_system_ovmf_table_find(SEV_SECRET_GUID, &data, NULL)) {
            error_setg(errp, "SEV: no secret area found in OVMF,"
                       " gpa must be specified.");
            return;
        }
        area = (struct sev_secret_area *)data;
        gpa = area->base;
    }

    sev_inject_launch_secret(packet_hdr, secret, gpa, errp);
}

static int
sev_es_parse_reset_block(SevInfoBlock *info, uint32_t *addr)
{
    if (!info->reset_addr) {
        error_report("SEV-ES reset address is zero");
        return 1;
    }

    *addr = info->reset_addr;

    return 0;
}

static int
sev_es_find_reset_vector(void *flash_ptr, uint64_t flash_size,
                         uint32_t *addr)
{
    QemuUUID info_guid, *guid;
    SevInfoBlock *info;
    uint8_t *data;
    uint16_t *len;

    /*
     * Initialize the address to zero. An address of zero with a successful
     * return code indicates that SEV-ES is not active.
     */
    *addr = 0;

    /*
     * Extract the AP reset vector for SEV-ES guests by locating the SEV GUID.
     * The SEV GUID is located on its own (original implementation) or within
     * the Firmware GUID Table (new implementation), either of which are
     * located 32 bytes from the end of the flash.
     *
     * Check the Firmware GUID Table first.
     */
    if (pc_system_ovmf_table_find(SEV_INFO_BLOCK_GUID, &data, NULL)) {
        return sev_es_parse_reset_block((SevInfoBlock *)data, addr);
    }

    /*
     * SEV info block not found in the Firmware GUID Table (or there isn't
     * a Firmware GUID Table), fall back to the original implementation.
     */
    data = flash_ptr + flash_size - 0x20;

    qemu_uuid_parse(SEV_INFO_BLOCK_GUID, &info_guid);
    info_guid = qemu_uuid_bswap(info_guid); /* GUIDs are LE */

    guid = (QemuUUID *)(data - sizeof(info_guid));
    if (!qemu_uuid_is_equal(guid, &info_guid)) {
        error_report("SEV information block/Firmware GUID Table block not found in pflash rom");
        return 1;
    }

    len = (uint16_t *)((uint8_t *)guid - sizeof(*len));
    info = (SevInfoBlock *)(data - le16_to_cpu(*len));

    return sev_es_parse_reset_block(info, addr);
}

void sev_es_set_reset_vector(CPUState *cpu)
{
    X86CPU *x86;
    CPUX86State *env;
    ConfidentialGuestSupport *cgs = MACHINE(qdev_get_machine())->cgs;
    SevCommonState *sev_common = SEV_COMMON(
        object_dynamic_cast(OBJECT(cgs), TYPE_SEV_COMMON));

    /* Only update if we have valid reset information */
    if (!sev_common || !sev_common->reset_data_valid) {
        return;
    }

    /* Do not update the BSP reset state */
    if (cpu->cpu_index == 0) {
        return;
    }

    x86 = X86_CPU(cpu);
    env = &x86->env;

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, sev_common->reset_cs, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);

    env->eip = sev_common->reset_ip;
}

int sev_es_save_reset_vector(void *flash_ptr, uint64_t flash_size)
{
    CPUState *cpu;
    uint32_t addr;
    int ret;
    SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);

    if (!sev_es_enabled()) {
        return 0;
    }

    addr = 0;
    ret = sev_es_find_reset_vector(flash_ptr, flash_size,
                                   &addr);
    if (ret) {
        return ret;
    }

    if (addr) {
        sev_common->reset_cs = addr & 0xffff0000;
        sev_common->reset_ip = addr & 0x0000ffff;
        sev_common->reset_data_valid = true;

        CPU_FOREACH(cpu) {
            sev_es_set_reset_vector(cpu);
        }
    }

    return 0;
}
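/*
 * The GUIDs below identify the header and per-item entries of the SEV hashes
 * table filled in by build_kernel_loader_hashes(); guest firmware locates the
 * table area via SEV_HASH_TABLE_RV_GUID defined earlier in this file.
 */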
If the user doesn't supply an initrd via 1894 * -initrd, an empty buffer will be used (ctx->initrd_size == 0). 1895 */ 1896 hashp = initrd_hash; 1897 if (qcrypto_hash_bytes(QCRYPTO_HASH_ALG_SHA256, ctx->initrd_data, 1898 ctx->initrd_size, &hashp, &hash_len, errp) < 0) { 1899 return false; 1900 } 1901 assert(hash_len == HASH_SIZE); 1902 1903 /* Calculate hash of the kernel */ 1904 hashp = kernel_hash; 1905 struct iovec iov[2] = { 1906 { .iov_base = ctx->setup_data, .iov_len = ctx->setup_size }, 1907 { .iov_base = ctx->kernel_data, .iov_len = ctx->kernel_size } 1908 }; 1909 if (qcrypto_hash_bytesv(QCRYPTO_HASH_ALG_SHA256, iov, ARRAY_SIZE(iov), 1910 &hashp, &hash_len, errp) < 0) { 1911 return false; 1912 } 1913 assert(hash_len == HASH_SIZE); 1914 1915 ht = &padded_ht->ht; 1916 1917 ht->guid = sev_hash_table_header_guid; 1918 ht->len = sizeof(*ht); 1919 1920 ht->cmdline.guid = sev_cmdline_entry_guid; 1921 ht->cmdline.len = sizeof(ht->cmdline); 1922 memcpy(ht->cmdline.hash, cmdline_hash, sizeof(ht->cmdline.hash)); 1923 1924 ht->initrd.guid = sev_initrd_entry_guid; 1925 ht->initrd.len = sizeof(ht->initrd); 1926 memcpy(ht->initrd.hash, initrd_hash, sizeof(ht->initrd.hash)); 1927 1928 ht->kernel.guid = sev_kernel_entry_guid; 1929 ht->kernel.len = sizeof(ht->kernel); 1930 memcpy(ht->kernel.hash, kernel_hash, sizeof(ht->kernel.hash)); 1931 1932 /* zero the excess data so the measurement can be reliably calculated */ 1933 memset(padded_ht->padding, 0, sizeof(padded_ht->padding)); 1934 1935 return true; 1936 } 1937 1938 static bool sev_snp_build_kernel_loader_hashes(SevCommonState *sev_common, 1939 SevHashTableDescriptor *area, 1940 SevKernelLoaderContext *ctx, 1941 Error **errp) 1942 { 1943 /* 1944 * SNP: Populate the hashes table in an area that later in 1945 * snp_launch_update_kernel_hashes() will be copied to the guest memory 1946 * and encrypted. 1947 */ 1948 SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(sev_common); 1949 sev_snp_guest->kernel_hashes_offset = area->base & ~TARGET_PAGE_MASK; 1950 sev_snp_guest->kernel_hashes_data = g_new0(PaddedSevHashTable, 1); 1951 return build_kernel_loader_hashes(sev_snp_guest->kernel_hashes_data, ctx, errp); 1952 } 1953 1954 static bool sev_build_kernel_loader_hashes(SevCommonState *sev_common, 1955 SevHashTableDescriptor *area, 1956 SevKernelLoaderContext *ctx, 1957 Error **errp) 1958 { 1959 PaddedSevHashTable *padded_ht; 1960 hwaddr mapped_len = sizeof(*padded_ht); 1961 MemTxAttrs attrs = { 0 }; 1962 bool ret = true; 1963 1964 /* 1965 * Populate the hashes table in the guest's memory at the OVMF-designated 1966 * area for the SEV hashes table 1967 */ 1968 padded_ht = address_space_map(&address_space_memory, area->base, 1969 &mapped_len, true, attrs); 1970 if (!padded_ht || mapped_len != sizeof(*padded_ht)) { 1971 error_setg(errp, "SEV: cannot map hashes table guest memory area"); 1972 return false; 1973 } 1974 1975 if (build_kernel_loader_hashes(padded_ht, ctx, errp)) { 1976 if (sev_encrypt_flash(area->base, (uint8_t *)padded_ht, 1977 sizeof(*padded_ht), errp) < 0) { 1978 ret = false; 1979 } 1980 } else { 1981 ret = false; 1982 } 1983 1984 address_space_unmap(&address_space_memory, padded_ht, 1985 mapped_len, true, mapped_len); 1986 1987 return ret; 1988 } 1989 1990 /* 1991 * Add the hashes of the linux kernel/initrd/cmdline to an encrypted guest page 1992 * which is included in SEV's initial memory measurement. 

static const QemuUUID sev_hash_table_header_guid = {
    .data = UUID_LE(0x9438d606, 0x4f22, 0x4cc9, 0xb4, 0x79, 0xa7, 0x93,
                    0xd4, 0x11, 0xfd, 0x21)
};

static const QemuUUID sev_kernel_entry_guid = {
    .data = UUID_LE(0x4de79437, 0xabd2, 0x427f, 0xb8, 0x35, 0xd5, 0xb1,
                    0x72, 0xd2, 0x04, 0x5b)
};
static const QemuUUID sev_initrd_entry_guid = {
    .data = UUID_LE(0x44baf731, 0x3a2f, 0x4bd7, 0x9a, 0xf1, 0x41, 0xe2,
                    0x91, 0x69, 0x78, 0x1d)
};
static const QemuUUID sev_cmdline_entry_guid = {
    .data = UUID_LE(0x97d02dd8, 0xbd20, 0x4c94, 0xaa, 0x78, 0xe7, 0x71,
                    0x4d, 0x36, 0xab, 0x2a)
};

static bool build_kernel_loader_hashes(PaddedSevHashTable *padded_ht,
                                       SevKernelLoaderContext *ctx,
                                       Error **errp)
{
    SevHashTable *ht;
    uint8_t cmdline_hash[HASH_SIZE];
    uint8_t initrd_hash[HASH_SIZE];
    uint8_t kernel_hash[HASH_SIZE];
    uint8_t *hashp;
    size_t hash_len = HASH_SIZE;

    /*
     * Calculate hash of kernel command-line with the terminating null byte.
     * If the user doesn't supply a command-line via -append, the 1-byte "\0"
     * will be used.
     */
    hashp = cmdline_hash;
    if (qcrypto_hash_bytes(QCRYPTO_HASH_ALG_SHA256, ctx->cmdline_data,
                           ctx->cmdline_size, &hashp, &hash_len, errp) < 0) {
        return false;
    }
    assert(hash_len == HASH_SIZE);

    /*
     * Calculate hash of initrd. If the user doesn't supply an initrd via
     * -initrd, an empty buffer will be used (ctx->initrd_size == 0).
     */
    hashp = initrd_hash;
    if (qcrypto_hash_bytes(QCRYPTO_HASH_ALG_SHA256, ctx->initrd_data,
                           ctx->initrd_size, &hashp, &hash_len, errp) < 0) {
        return false;
    }
    assert(hash_len == HASH_SIZE);

    /* Calculate hash of the kernel */
    hashp = kernel_hash;
    struct iovec iov[2] = {
        { .iov_base = ctx->setup_data, .iov_len = ctx->setup_size },
        { .iov_base = ctx->kernel_data, .iov_len = ctx->kernel_size }
    };
    if (qcrypto_hash_bytesv(QCRYPTO_HASH_ALG_SHA256, iov, ARRAY_SIZE(iov),
                            &hashp, &hash_len, errp) < 0) {
        return false;
    }
    assert(hash_len == HASH_SIZE);

    ht = &padded_ht->ht;

    ht->guid = sev_hash_table_header_guid;
    ht->len = sizeof(*ht);

    ht->cmdline.guid = sev_cmdline_entry_guid;
    ht->cmdline.len = sizeof(ht->cmdline);
    memcpy(ht->cmdline.hash, cmdline_hash, sizeof(ht->cmdline.hash));

    ht->initrd.guid = sev_initrd_entry_guid;
    ht->initrd.len = sizeof(ht->initrd);
    memcpy(ht->initrd.hash, initrd_hash, sizeof(ht->initrd.hash));

    ht->kernel.guid = sev_kernel_entry_guid;
    ht->kernel.len = sizeof(ht->kernel);
    memcpy(ht->kernel.hash, kernel_hash, sizeof(ht->kernel.hash));

    /* zero the excess data so the measurement can be reliably calculated */
    memset(padded_ht->padding, 0, sizeof(padded_ht->padding));

    return true;
}

static bool sev_snp_build_kernel_loader_hashes(SevCommonState *sev_common,
                                               SevHashTableDescriptor *area,
                                               SevKernelLoaderContext *ctx,
                                               Error **errp)
{
    /*
     * SNP: Populate the hashes table in an area that later in
     * snp_launch_update_kernel_hashes() will be copied to the guest memory
     * and encrypted.
     */
    SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(sev_common);
    sev_snp_guest->kernel_hashes_offset = area->base & ~TARGET_PAGE_MASK;
    sev_snp_guest->kernel_hashes_data = g_new0(PaddedSevHashTable, 1);
    return build_kernel_loader_hashes(sev_snp_guest->kernel_hashes_data, ctx, errp);
}

static bool sev_build_kernel_loader_hashes(SevCommonState *sev_common,
                                           SevHashTableDescriptor *area,
                                           SevKernelLoaderContext *ctx,
                                           Error **errp)
{
    PaddedSevHashTable *padded_ht;
    hwaddr mapped_len = sizeof(*padded_ht);
    MemTxAttrs attrs = { 0 };
    bool ret = true;

    /*
     * Populate the hashes table in the guest's memory at the OVMF-designated
     * area for the SEV hashes table.
     */
    padded_ht = address_space_map(&address_space_memory, area->base,
                                  &mapped_len, true, attrs);
    if (!padded_ht || mapped_len != sizeof(*padded_ht)) {
        error_setg(errp, "SEV: cannot map hashes table guest memory area");
        return false;
    }

    if (build_kernel_loader_hashes(padded_ht, ctx, errp)) {
        if (sev_encrypt_flash(area->base, (uint8_t *)padded_ht,
                              sizeof(*padded_ht), errp) < 0) {
            ret = false;
        }
    } else {
        ret = false;
    }

    address_space_unmap(&address_space_memory, padded_ht,
                        mapped_len, true, mapped_len);

    return ret;
}

/*
 * Add the hashes of the Linux kernel/initrd/cmdline to an encrypted guest
 * page which is included in SEV's initial memory measurement.
 */
bool sev_add_kernel_loader_hashes(SevKernelLoaderContext *ctx, Error **errp)
{
    uint8_t *data;
    SevHashTableDescriptor *area;
    SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
    SevCommonStateClass *klass = SEV_COMMON_GET_CLASS(sev_common);

    /*
     * Only add the kernel hashes if the sev-guest configuration explicitly
     * stated kernel-hashes=on.
     */
    if (!sev_common->kernel_hashes) {
        return false;
    }

    if (!pc_system_ovmf_table_find(SEV_HASH_TABLE_RV_GUID, &data, NULL)) {
        error_setg(errp, "SEV: kernel specified but guest firmware "
                         "has no hashes table GUID");
        return false;
    }

    area = (SevHashTableDescriptor *)data;
    if (!area->base || area->size < sizeof(PaddedSevHashTable)) {
        error_setg(errp, "SEV: guest firmware hashes table area is invalid "
                         "(base=0x%x size=0x%x)", area->base, area->size);
        return false;
    }

    return klass->build_kernel_loader_hashes(sev_common, area, ctx, errp);
}
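
/*
 * A rough usage sketch (option values below are illustrative; cbitpos and
 * reduced-phys-bits depend on the host CPU): with kernel-hashes=on the
 * SHA-256 digests of the -kernel, -initrd and -append blobs computed above
 * end up in the firmware hashes table and therefore in the launch
 * measurement, e.g.:
 *
 *   qemu-system-x86_64 \
 *     -machine q35,confidential-guest-support=sev0 \
 *     -object sev-guest,id=sev0,cbitpos=51,reduced-phys-bits=1,kernel-hashes=on \
 *     -kernel vmlinuz -initrd initrd.img -append "console=ttyS0"
 */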

static char *
sev_common_get_sev_device(Object *obj, Error **errp)
{
    return g_strdup(SEV_COMMON(obj)->sev_device);
}

static void
sev_common_set_sev_device(Object *obj, const char *value, Error **errp)
{
    SEV_COMMON(obj)->sev_device = g_strdup(value);
}

static bool sev_common_get_kernel_hashes(Object *obj, Error **errp)
{
    return SEV_COMMON(obj)->kernel_hashes;
}

static void sev_common_set_kernel_hashes(Object *obj, bool value, Error **errp)
{
    SEV_COMMON(obj)->kernel_hashes = value;
}

static void
sev_common_class_init(ObjectClass *oc, void *data)
{
    ConfidentialGuestSupportClass *klass = CONFIDENTIAL_GUEST_SUPPORT_CLASS(oc);

    klass->kvm_init = sev_common_kvm_init;

    object_class_property_add_str(oc, "sev-device",
                                  sev_common_get_sev_device,
                                  sev_common_set_sev_device);
    object_class_property_set_description(oc, "sev-device",
            "SEV device to use");
    object_class_property_add_bool(oc, "kernel-hashes",
                                   sev_common_get_kernel_hashes,
                                   sev_common_set_kernel_hashes);
    object_class_property_set_description(oc, "kernel-hashes",
            "add kernel hashes to guest firmware for measured Linux boot");
}

static void
sev_common_instance_init(Object *obj)
{
    SevCommonState *sev_common = SEV_COMMON(obj);

    sev_common->kvm_type = -1;

    sev_common->sev_device = g_strdup(DEFAULT_SEV_DEVICE);

    object_property_add_uint32_ptr(obj, "cbitpos", &sev_common->cbitpos,
                                   OBJ_PROP_FLAG_READWRITE);
    object_property_add_uint32_ptr(obj, "reduced-phys-bits",
                                   &sev_common->reduced_phys_bits,
                                   OBJ_PROP_FLAG_READWRITE);
}

/* SEV guest info common to sev/sev-es/sev-snp */
static const TypeInfo sev_common_info = {
    .parent = TYPE_X86_CONFIDENTIAL_GUEST,
    .name = TYPE_SEV_COMMON,
    .instance_size = sizeof(SevCommonState),
    .instance_init = sev_common_instance_init,
    .class_size = sizeof(SevCommonStateClass),
    .class_init = sev_common_class_init,
    .abstract = true,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_USER_CREATABLE },
        { }
    }
};

static char *
sev_guest_get_dh_cert_file(Object *obj, Error **errp)
{
    return g_strdup(SEV_GUEST(obj)->dh_cert_file);
}

static void
sev_guest_set_dh_cert_file(Object *obj, const char *value, Error **errp)
{
    SEV_GUEST(obj)->dh_cert_file = g_strdup(value);
}

static char *
sev_guest_get_session_file(Object *obj, Error **errp)
{
    SevGuestState *sev_guest = SEV_GUEST(obj);

    return sev_guest->session_file ? g_strdup(sev_guest->session_file) : NULL;
}

static void
sev_guest_set_session_file(Object *obj, const char *value, Error **errp)
{
    SEV_GUEST(obj)->session_file = g_strdup(value);
}

static void sev_guest_get_legacy_vm_type(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    SevGuestState *sev_guest = SEV_GUEST(obj);
    OnOffAuto legacy_vm_type = sev_guest->legacy_vm_type;

    visit_type_OnOffAuto(v, name, &legacy_vm_type, errp);
}

static void sev_guest_set_legacy_vm_type(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    SevGuestState *sev_guest = SEV_GUEST(obj);

    visit_type_OnOffAuto(v, name, &sev_guest->legacy_vm_type, errp);
}

static void
sev_guest_class_init(ObjectClass *oc, void *data)
{
    SevCommonStateClass *klass = SEV_COMMON_CLASS(oc);
    X86ConfidentialGuestClass *x86_klass = X86_CONFIDENTIAL_GUEST_CLASS(oc);

    klass->build_kernel_loader_hashes = sev_build_kernel_loader_hashes;
    klass->launch_start = sev_launch_start;
    klass->launch_finish = sev_launch_finish;
    klass->launch_update_data = sev_launch_update_data;
    klass->kvm_init = sev_kvm_init;
    x86_klass->kvm_type = sev_kvm_type;

    object_class_property_add_str(oc, "dh-cert-file",
                                  sev_guest_get_dh_cert_file,
                                  sev_guest_set_dh_cert_file);
    object_class_property_set_description(oc, "dh-cert-file",
            "guest owner's DH certificate (encoded with base64)");
    object_class_property_add_str(oc, "session-file",
                                  sev_guest_get_session_file,
                                  sev_guest_set_session_file);
    object_class_property_set_description(oc, "session-file",
            "guest owner's session parameters (encoded with base64)");
    object_class_property_add(oc, "legacy-vm-type", "OnOffAuto",
                              sev_guest_get_legacy_vm_type,
                              sev_guest_set_legacy_vm_type, NULL, NULL);
    object_class_property_set_description(oc, "legacy-vm-type",
            "use legacy VM type to maintain measurement compatibility with older QEMU or kernel versions");
}
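
/*
 * Illustrative use of the sev-guest properties registered above (file names
 * and the policy value are placeholders, not recommendations).  The DH
 * certificate and session blob are the base64-encoded launch parameters the
 * guest owner prepares for LAUNCH_START:
 *
 *   -object sev-guest,id=sev0,cbitpos=51,reduced-phys-bits=1,policy=0x1,\
 *           dh-cert-file=godh.b64,session-file=session.b64
 */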

static void
sev_guest_instance_init(Object *obj)
{
    SevGuestState *sev_guest = SEV_GUEST(obj);

    sev_guest->policy = DEFAULT_GUEST_POLICY;
    object_property_add_uint32_ptr(obj, "handle", &sev_guest->handle,
                                   OBJ_PROP_FLAG_READWRITE);
    object_property_add_uint32_ptr(obj, "policy", &sev_guest->policy,
                                   OBJ_PROP_FLAG_READWRITE);
    object_apply_compat_props(obj);

    sev_guest->legacy_vm_type = ON_OFF_AUTO_AUTO;
}

/* guest info specific to sev/sev-es */
static const TypeInfo sev_guest_info = {
    .parent = TYPE_SEV_COMMON,
    .name = TYPE_SEV_GUEST,
    .instance_size = sizeof(SevGuestState),
    .instance_init = sev_guest_instance_init,
    .class_init = sev_guest_class_init,
};

static void
sev_snp_guest_get_policy(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    visit_type_uint64(v, name,
                      (uint64_t *)&SEV_SNP_GUEST(obj)->kvm_start_conf.policy,
                      errp);
}

static void
sev_snp_guest_set_policy(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    visit_type_uint64(v, name,
                      (uint64_t *)&SEV_SNP_GUEST(obj)->kvm_start_conf.policy,
                      errp);
}

static char *
sev_snp_guest_get_guest_visible_workarounds(Object *obj, Error **errp)
{
    return g_strdup(SEV_SNP_GUEST(obj)->guest_visible_workarounds);
}

static void
sev_snp_guest_set_guest_visible_workarounds(Object *obj, const char *value,
                                            Error **errp)
{
    SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
    struct kvm_sev_snp_launch_start *start = &sev_snp_guest->kvm_start_conf;
    g_autofree guchar *blob;
    gsize len;

    g_free(sev_snp_guest->guest_visible_workarounds);

    /* store the base64 str so we don't need to re-encode in getter */
    sev_snp_guest->guest_visible_workarounds = g_strdup(value);

    blob = qbase64_decode(sev_snp_guest->guest_visible_workarounds,
                          -1, &len, errp);
    if (!blob) {
        return;
    }

    if (len != sizeof(start->gosvw)) {
        error_setg(errp, "parameter length of %" G_GSIZE_FORMAT
                   " not equal to %zu",
                   len, sizeof(start->gosvw));
        return;
    }

    memcpy(start->gosvw, blob, len);
}

static char *
sev_snp_guest_get_id_block(Object *obj, Error **errp)
{
    SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);

    return g_strdup(sev_snp_guest->id_block_base64);
}

static void
sev_snp_guest_set_id_block(Object *obj, const char *value, Error **errp)
{
    SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
    struct kvm_sev_snp_launch_finish *finish = &sev_snp_guest->kvm_finish_conf;
    gsize len;

    finish->id_block_en = 0;
    g_free(sev_snp_guest->id_block);
    g_free(sev_snp_guest->id_block_base64);

    /* store the base64 str so we don't need to re-encode in getter */
    sev_snp_guest->id_block_base64 = g_strdup(value);
    sev_snp_guest->id_block =
        qbase64_decode(sev_snp_guest->id_block_base64, -1, &len, errp);

    if (!sev_snp_guest->id_block) {
        return;
    }

    if (len != KVM_SEV_SNP_ID_BLOCK_SIZE) {
        error_setg(errp, "parameter length of %" G_GSIZE_FORMAT
                   " not equal to %u",
                   len, KVM_SEV_SNP_ID_BLOCK_SIZE);
        return;
    }

    finish->id_block_en = 1;
    finish->id_block_uaddr = (uintptr_t)sev_snp_guest->id_block;
}

static char *
sev_snp_guest_get_id_auth(Object *obj, Error **errp)
{
    SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);

    return g_strdup(sev_snp_guest->id_auth_base64);
}

static void
sev_snp_guest_set_id_auth(Object *obj, const char *value, Error **errp)
{
    SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
    struct kvm_sev_snp_launch_finish *finish = &sev_snp_guest->kvm_finish_conf;
    gsize len;

    finish->id_auth_uaddr = 0;
    g_free(sev_snp_guest->id_auth);
    g_free(sev_snp_guest->id_auth_base64);

    /* store the base64 str so we don't need to re-encode in getter */
    sev_snp_guest->id_auth_base64 = g_strdup(value);
    sev_snp_guest->id_auth =
        qbase64_decode(sev_snp_guest->id_auth_base64, -1, &len, errp);

    if (!sev_snp_guest->id_auth) {
        return;
    }

    if (len > KVM_SEV_SNP_ID_AUTH_SIZE) {
        error_setg(errp, "ID_AUTH parameter length of %" G_GSIZE_FORMAT
                   " exceeds max of %u",
                   len, KVM_SEV_SNP_ID_AUTH_SIZE);
        return;
    }

    finish->id_auth_uaddr = (uintptr_t)sev_snp_guest->id_auth;
}
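
/*
 * The ID block and ID auth structures are defined by the SEV-SNP firmware
 * ABI; the setters above only length-check the base64-decoded blobs and
 * forward them to the kernel.  A hypothetical invocation (blob contents
 * elided, produced by an external signing tool):
 *
 *   -object sev-snp-guest,id=snp0,cbitpos=51,reduced-phys-bits=1,\
 *           id-block=<base64 ID block>,id-auth=<base64 ID auth>,\
 *           author-key-enabled=on
 */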

static bool
sev_snp_guest_get_author_key_enabled(Object *obj, Error **errp)
{
    SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);

    return !!sev_snp_guest->kvm_finish_conf.auth_key_en;
}

static void
sev_snp_guest_set_author_key_enabled(Object *obj, bool value, Error **errp)
{
    SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);

    sev_snp_guest->kvm_finish_conf.auth_key_en = value;
}

static bool
sev_snp_guest_get_vcek_disabled(Object *obj, Error **errp)
{
    SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);

    return !!sev_snp_guest->kvm_finish_conf.vcek_disabled;
}

static void
sev_snp_guest_set_vcek_disabled(Object *obj, bool value, Error **errp)
{
    SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);

    sev_snp_guest->kvm_finish_conf.vcek_disabled = value;
}

static char *
sev_snp_guest_get_host_data(Object *obj, Error **errp)
{
    SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);

    return g_strdup(sev_snp_guest->host_data);
}

static void
sev_snp_guest_set_host_data(Object *obj, const char *value, Error **errp)
{
    SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
    struct kvm_sev_snp_launch_finish *finish = &sev_snp_guest->kvm_finish_conf;
    g_autofree guchar *blob;
    gsize len;

    g_free(sev_snp_guest->host_data);

    /* store the base64 str so we don't need to re-encode in getter */
    sev_snp_guest->host_data = g_strdup(value);

    blob = qbase64_decode(sev_snp_guest->host_data, -1, &len, errp);
    if (!blob) {
        return;
    }

    if (len != sizeof(finish->host_data)) {
        error_setg(errp, "parameter length of %" G_GSIZE_FORMAT
                   " not equal to %zu",
                   len, sizeof(finish->host_data));
        return;
    }

    memcpy(finish->host_data, blob, len);
}

static void
sev_snp_guest_class_init(ObjectClass *oc, void *data)
{
    SevCommonStateClass *klass = SEV_COMMON_CLASS(oc);
    X86ConfidentialGuestClass *x86_klass = X86_CONFIDENTIAL_GUEST_CLASS(oc);

    klass->build_kernel_loader_hashes = sev_snp_build_kernel_loader_hashes;
    klass->launch_start = sev_snp_launch_start;
    klass->launch_finish = sev_snp_launch_finish;
    klass->launch_update_data = sev_snp_launch_update_data;
    klass->kvm_init = sev_snp_kvm_init;
    x86_klass->mask_cpuid_features = sev_snp_mask_cpuid_features;
    x86_klass->kvm_type = sev_snp_kvm_type;

    object_class_property_add(oc, "policy", "uint64",
                              sev_snp_guest_get_policy,
                              sev_snp_guest_set_policy, NULL, NULL);
    object_class_property_add_str(oc, "guest-visible-workarounds",
                                  sev_snp_guest_get_guest_visible_workarounds,
                                  sev_snp_guest_set_guest_visible_workarounds);
    object_class_property_add_str(oc, "id-block",
                                  sev_snp_guest_get_id_block,
                                  sev_snp_guest_set_id_block);
    object_class_property_add_str(oc, "id-auth",
                                  sev_snp_guest_get_id_auth,
                                  sev_snp_guest_set_id_auth);
    object_class_property_add_bool(oc, "author-key-enabled",
                                   sev_snp_guest_get_author_key_enabled,
                                   sev_snp_guest_set_author_key_enabled);
    object_class_property_add_bool(oc, "vcek-disabled",
                                   sev_snp_guest_get_vcek_disabled,
                                   sev_snp_guest_set_vcek_disabled);
    object_class_property_add_str(oc, "host-data",
                                  sev_snp_guest_get_host_data,
                                  sev_snp_guest_set_host_data);
}
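
/*
 * Putting it together, a minimal sketch of an SEV-SNP guest (values are
 * illustrative; the policy defaults to DEFAULT_SEV_SNP_POLICY via
 * sev_snp_guest_instance_init() below, and host-data must decode to exactly
 * the size of the host_data field checked in sev_snp_guest_set_host_data()
 * above).  Note that instance_init below sets require_guest_memfd, so in
 * practice the machine also needs guest_memfd-capable memory configuration:
 *
 *   qemu-system-x86_64 \
 *     -machine q35,confidential-guest-support=snp0 \
 *     -object sev-snp-guest,id=snp0,cbitpos=51,reduced-phys-bits=1,\
 *             host-data=<base64 blob>
 */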

static void
sev_snp_guest_instance_init(Object *obj)
{
    ConfidentialGuestSupport *cgs = CONFIDENTIAL_GUEST_SUPPORT(obj);
    SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);

    cgs->require_guest_memfd = true;

    /* default init/start/finish params for kvm */
    sev_snp_guest->kvm_start_conf.policy = DEFAULT_SEV_SNP_POLICY;
}

/* guest info specific to sev-snp */
static const TypeInfo sev_snp_guest_info = {
    .parent = TYPE_SEV_COMMON,
    .name = TYPE_SEV_SNP_GUEST,
    .instance_size = sizeof(SevSnpGuestState),
    .class_init = sev_snp_guest_class_init,
    .instance_init = sev_snp_guest_instance_init,
};

static void
sev_register_types(void)
{
    type_register_static(&sev_common_info);
    type_register_static(&sev_guest_info);
    type_register_static(&sev_snp_guest_info);
}

type_init(sev_register_types);