1 /* 2 * QEMU SEV support 3 * 4 * Copyright Advanced Micro Devices 2016-2018 5 * 6 * Author: 7 * Brijesh Singh <brijesh.singh@amd.com> 8 * 9 * This work is licensed under the terms of the GNU GPL, version 2 or later. 10 * See the COPYING file in the top-level directory. 11 * 12 */ 13 14 #include "qemu/osdep.h" 15 16 #include <linux/kvm.h> 17 #include <linux/kvm_para.h> 18 #include <linux/psp-sev.h> 19 20 #include <sys/ioctl.h> 21 22 #include "qapi/error.h" 23 #include "qom/object_interfaces.h" 24 #include "qemu/base64.h" 25 #include "qemu/module.h" 26 #include "qemu/uuid.h" 27 #include "qemu/error-report.h" 28 #include "crypto/hash.h" 29 #include "sysemu/kvm.h" 30 #include "kvm/kvm_i386.h" 31 #include "sev.h" 32 #include "sysemu/sysemu.h" 33 #include "sysemu/runstate.h" 34 #include "trace.h" 35 #include "migration/blocker.h" 36 #include "qom/object.h" 37 #include "monitor/monitor.h" 38 #include "monitor/hmp-target.h" 39 #include "qapi/qapi-commands-misc-target.h" 40 #include "confidential-guest.h" 41 #include "hw/i386/pc.h" 42 #include "exec/address-spaces.h" 43 #include "qemu/queue.h" 44 45 OBJECT_DECLARE_TYPE(SevCommonState, SevCommonStateClass, SEV_COMMON) 46 OBJECT_DECLARE_TYPE(SevGuestState, SevCommonStateClass, SEV_GUEST) 47 OBJECT_DECLARE_TYPE(SevSnpGuestState, SevCommonStateClass, SEV_SNP_GUEST) 48 49 /* hard code sha256 digest size */ 50 #define HASH_SIZE 32 51 52 typedef struct QEMU_PACKED SevHashTableEntry { 53 QemuUUID guid; 54 uint16_t len; 55 uint8_t hash[HASH_SIZE]; 56 } SevHashTableEntry; 57 58 typedef struct QEMU_PACKED SevHashTable { 59 QemuUUID guid; 60 uint16_t len; 61 SevHashTableEntry cmdline; 62 SevHashTableEntry initrd; 63 SevHashTableEntry kernel; 64 } SevHashTable; 65 66 /* 67 * Data encrypted by sev_encrypt_flash() must be padded to a multiple of 68 * 16 bytes. 
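 * (The SEV LAUNCH_UPDATE_DATA firmware command that measures and encrypts
 * the data operates on 16-byte blocks, hence the padding.)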
69 */ 70 typedef struct QEMU_PACKED PaddedSevHashTable { 71 SevHashTable ht; 72 uint8_t padding[ROUND_UP(sizeof(SevHashTable), 16) - sizeof(SevHashTable)]; 73 } PaddedSevHashTable; 74 75 QEMU_BUILD_BUG_ON(sizeof(PaddedSevHashTable) % 16 != 0); 76 77 #define SEV_INFO_BLOCK_GUID "00f771de-1a7e-4fcb-890e-68c77e2fb44e" 78 typedef struct __attribute__((__packed__)) SevInfoBlock { 79 /* SEV-ES Reset Vector Address */ 80 uint32_t reset_addr; 81 } SevInfoBlock; 82 83 #define SEV_HASH_TABLE_RV_GUID "7255371f-3a3b-4b04-927b-1da6efa8d454" 84 typedef struct QEMU_PACKED SevHashTableDescriptor { 85 /* SEV hash table area guest address */ 86 uint32_t base; 87 /* SEV hash table area size (in bytes) */ 88 uint32_t size; 89 } SevHashTableDescriptor; 90 91 struct SevCommonState { 92 X86ConfidentialGuest parent_obj; 93 94 int kvm_type; 95 96 /* configuration parameters */ 97 char *sev_device; 98 uint32_t cbitpos; 99 uint32_t reduced_phys_bits; 100 bool kernel_hashes; 101 102 /* runtime state */ 103 uint8_t api_major; 104 uint8_t api_minor; 105 uint8_t build_id; 106 int sev_fd; 107 SevState state; 108 109 uint32_t reset_cs; 110 uint32_t reset_ip; 111 bool reset_data_valid; 112 }; 113 114 struct SevCommonStateClass { 115 X86ConfidentialGuestClass parent_class; 116 117 /* public */ 118 bool (*build_kernel_loader_hashes)(SevCommonState *sev_common, 119 SevHashTableDescriptor *area, 120 SevKernelLoaderContext *ctx, 121 Error **errp); 122 int (*launch_start)(SevCommonState *sev_common); 123 void (*launch_finish)(SevCommonState *sev_common); 124 int (*launch_update_data)(SevCommonState *sev_common, hwaddr gpa, uint8_t *ptr, size_t len); 125 int (*kvm_init)(ConfidentialGuestSupport *cgs, Error **errp); 126 }; 127 128 /** 129 * SevGuestState: 130 * 131 * The SevGuestState object is used for creating and managing a SEV 132 * guest. 
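 * For example (illustrative; additional properties such as policy, cbitpos
 * and reduced-phys-bits, defined below, may also be required):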
133 * 134 * # $QEMU \ 135 * -object sev-guest,id=sev0 \ 136 * -machine ...,memory-encryption=sev0 137 */ 138 struct SevGuestState { 139 SevCommonState parent_obj; 140 gchar *measurement; 141 142 /* configuration parameters */ 143 uint32_t handle; 144 uint32_t policy; 145 char *dh_cert_file; 146 char *session_file; 147 bool legacy_vm_type; 148 }; 149 150 struct SevSnpGuestState { 151 SevCommonState parent_obj; 152 153 /* configuration parameters */ 154 char *guest_visible_workarounds; 155 char *id_block_base64; 156 uint8_t *id_block; 157 char *id_auth_base64; 158 uint8_t *id_auth; 159 char *host_data; 160 161 struct kvm_sev_snp_launch_start kvm_start_conf; 162 struct kvm_sev_snp_launch_finish kvm_finish_conf; 163 164 uint32_t kernel_hashes_offset; 165 PaddedSevHashTable *kernel_hashes_data; 166 }; 167 168 #define DEFAULT_GUEST_POLICY 0x1 /* disable debug */ 169 #define DEFAULT_SEV_DEVICE "/dev/sev" 170 #define DEFAULT_SEV_SNP_POLICY 0x30000 171 172 typedef struct SevLaunchUpdateData { 173 QTAILQ_ENTRY(SevLaunchUpdateData) next; 174 hwaddr gpa; 175 void *hva; 176 size_t len; 177 int type; 178 } SevLaunchUpdateData; 179 180 static QTAILQ_HEAD(, SevLaunchUpdateData) launch_update; 181 182 static Error *sev_mig_blocker; 183 184 static const char *const sev_fw_errlist[] = { 185 [SEV_RET_SUCCESS] = "", 186 [SEV_RET_INVALID_PLATFORM_STATE] = "Platform state is invalid", 187 [SEV_RET_INVALID_GUEST_STATE] = "Guest state is invalid", 188 [SEV_RET_INAVLID_CONFIG] = "Platform configuration is invalid", 189 [SEV_RET_INVALID_LEN] = "Buffer too small", 190 [SEV_RET_ALREADY_OWNED] = "Platform is already owned", 191 [SEV_RET_INVALID_CERTIFICATE] = "Certificate is invalid", 192 [SEV_RET_POLICY_FAILURE] = "Policy is not allowed", 193 [SEV_RET_INACTIVE] = "Guest is not active", 194 [SEV_RET_INVALID_ADDRESS] = "Invalid address", 195 [SEV_RET_BAD_SIGNATURE] = "Bad signature", 196 [SEV_RET_BAD_MEASUREMENT] = "Bad measurement", 197 [SEV_RET_ASID_OWNED] = "ASID is already owned", 198 [SEV_RET_INVALID_ASID] = "Invalid ASID", 199 [SEV_RET_WBINVD_REQUIRED] = "WBINVD is required", 200 [SEV_RET_DFFLUSH_REQUIRED] = "DF_FLUSH is required", 201 [SEV_RET_INVALID_GUEST] = "Guest handle is invalid", 202 [SEV_RET_INVALID_COMMAND] = "Invalid command", 203 [SEV_RET_ACTIVE] = "Guest is active", 204 [SEV_RET_HWSEV_RET_PLATFORM] = "Hardware error", 205 [SEV_RET_HWSEV_RET_UNSAFE] = "Hardware unsafe", 206 [SEV_RET_UNSUPPORTED] = "Feature not supported", 207 [SEV_RET_INVALID_PARAM] = "Invalid parameter", 208 [SEV_RET_RESOURCE_LIMIT] = "Required firmware resource depleted", 209 [SEV_RET_SECURE_DATA_INVALID] = "Part-specific integrity check failure", 210 }; 211 212 #define SEV_FW_MAX_ERROR ARRAY_SIZE(sev_fw_errlist) 213 214 /* <linux/kvm.h> doesn't expose this, so re-use the max from kvm.c */ 215 #define KVM_MAX_CPUID_ENTRIES 100 216 217 typedef struct KvmCpuidInfo { 218 struct kvm_cpuid2 cpuid; 219 struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES]; 220 } KvmCpuidInfo; 221 222 #define SNP_CPUID_FUNCTION_MAXCOUNT 64 223 #define SNP_CPUID_FUNCTION_UNKNOWN 0xFFFFFFFF 224 225 typedef struct { 226 uint32_t eax_in; 227 uint32_t ecx_in; 228 uint64_t xcr0_in; 229 uint64_t xss_in; 230 uint32_t eax; 231 uint32_t ebx; 232 uint32_t ecx; 233 uint32_t edx; 234 uint64_t reserved; 235 } __attribute__((packed)) SnpCpuidFunc; 236 237 typedef struct { 238 uint32_t count; 239 uint32_t reserved1; 240 uint64_t reserved2; 241 SnpCpuidFunc entries[SNP_CPUID_FUNCTION_MAXCOUNT]; 242 } __attribute__((packed)) SnpCpuidInfo; 243 244 static int 245 
sev_ioctl(int fd, int cmd, void *data, int *error) 246 { 247 int r; 248 struct kvm_sev_cmd input; 249 250 memset(&input, 0x0, sizeof(input)); 251 252 input.id = cmd; 253 input.sev_fd = fd; 254 input.data = (uintptr_t)data; 255 256 r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_OP, &input); 257 258 if (error) { 259 *error = input.error; 260 } 261 262 return r; 263 } 264 265 static int 266 sev_platform_ioctl(int fd, int cmd, void *data, int *error) 267 { 268 int r; 269 struct sev_issue_cmd arg; 270 271 arg.cmd = cmd; 272 arg.data = (unsigned long)data; 273 r = ioctl(fd, SEV_ISSUE_CMD, &arg); 274 if (error) { 275 *error = arg.error; 276 } 277 278 return r; 279 } 280 281 static const char * 282 fw_error_to_str(int code) 283 { 284 if (code < 0 || code >= SEV_FW_MAX_ERROR) { 285 return "unknown error"; 286 } 287 288 return sev_fw_errlist[code]; 289 } 290 291 static bool 292 sev_check_state(const SevCommonState *sev_common, SevState state) 293 { 294 assert(sev_common); 295 return sev_common->state == state ? true : false; 296 } 297 298 static void 299 sev_set_guest_state(SevCommonState *sev_common, SevState new_state) 300 { 301 assert(new_state < SEV_STATE__MAX); 302 assert(sev_common); 303 304 trace_kvm_sev_change_state(SevState_str(sev_common->state), 305 SevState_str(new_state)); 306 sev_common->state = new_state; 307 } 308 309 static void 310 sev_ram_block_added(RAMBlockNotifier *n, void *host, size_t size, 311 size_t max_size) 312 { 313 int r; 314 struct kvm_enc_region range; 315 ram_addr_t offset; 316 MemoryRegion *mr; 317 318 /* 319 * The RAM device presents a memory region that should be treated 320 * as IO region and should not be pinned. 321 */ 322 mr = memory_region_from_host(host, &offset); 323 if (mr && memory_region_is_ram_device(mr)) { 324 return; 325 } 326 327 range.addr = (uintptr_t)host; 328 range.size = max_size; 329 330 trace_kvm_memcrypt_register_region(host, max_size); 331 r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_REG_REGION, &range); 332 if (r) { 333 error_report("%s: failed to register region (%p+%#zx) error '%s'", 334 __func__, host, max_size, strerror(errno)); 335 exit(1); 336 } 337 } 338 339 static void 340 sev_ram_block_removed(RAMBlockNotifier *n, void *host, size_t size, 341 size_t max_size) 342 { 343 int r; 344 struct kvm_enc_region range; 345 ram_addr_t offset; 346 MemoryRegion *mr; 347 348 /* 349 * The RAM device presents a memory region that should be treated 350 * as IO region and should not have been pinned. 
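     * Such regions are never registered with KVM_MEMORY_ENCRYPT_REG_REGION in
     * sev_ram_block_added(), so there is nothing to unregister here.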
351 */ 352 mr = memory_region_from_host(host, &offset); 353 if (mr && memory_region_is_ram_device(mr)) { 354 return; 355 } 356 357 range.addr = (uintptr_t)host; 358 range.size = max_size; 359 360 trace_kvm_memcrypt_unregister_region(host, max_size); 361 r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_UNREG_REGION, &range); 362 if (r) { 363 error_report("%s: failed to unregister region (%p+%#zx)", 364 __func__, host, max_size); 365 } 366 } 367 368 static struct RAMBlockNotifier sev_ram_notifier = { 369 .ram_block_added = sev_ram_block_added, 370 .ram_block_removed = sev_ram_block_removed, 371 }; 372 373 bool 374 sev_enabled(void) 375 { 376 ConfidentialGuestSupport *cgs = MACHINE(qdev_get_machine())->cgs; 377 378 return !!object_dynamic_cast(OBJECT(cgs), TYPE_SEV_COMMON); 379 } 380 381 bool 382 sev_snp_enabled(void) 383 { 384 ConfidentialGuestSupport *cgs = MACHINE(qdev_get_machine())->cgs; 385 386 return !!object_dynamic_cast(OBJECT(cgs), TYPE_SEV_SNP_GUEST); 387 } 388 389 bool 390 sev_es_enabled(void) 391 { 392 ConfidentialGuestSupport *cgs = MACHINE(qdev_get_machine())->cgs; 393 394 return sev_snp_enabled() || 395 (sev_enabled() && SEV_GUEST(cgs)->policy & SEV_POLICY_ES); 396 } 397 398 uint32_t 399 sev_get_cbit_position(void) 400 { 401 SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs); 402 403 return sev_common ? sev_common->cbitpos : 0; 404 } 405 406 uint32_t 407 sev_get_reduced_phys_bits(void) 408 { 409 SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs); 410 411 return sev_common ? sev_common->reduced_phys_bits : 0; 412 } 413 414 static SevInfo *sev_get_info(void) 415 { 416 SevInfo *info; 417 SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs); 418 419 info = g_new0(SevInfo, 1); 420 info->enabled = sev_enabled(); 421 422 if (info->enabled) { 423 info->api_major = sev_common->api_major; 424 info->api_minor = sev_common->api_minor; 425 info->build_id = sev_common->build_id; 426 info->state = sev_common->state; 427 428 if (sev_snp_enabled()) { 429 info->sev_type = SEV_GUEST_TYPE_SEV_SNP; 430 info->u.sev_snp.snp_policy = 431 object_property_get_uint(OBJECT(sev_common), "policy", NULL); 432 } else { 433 info->sev_type = SEV_GUEST_TYPE_SEV; 434 info->u.sev.handle = SEV_GUEST(sev_common)->handle; 435 info->u.sev.policy = 436 (uint32_t)object_property_get_uint(OBJECT(sev_common), 437 "policy", NULL); 438 } 439 } 440 441 return info; 442 } 443 444 SevInfo *qmp_query_sev(Error **errp) 445 { 446 SevInfo *info; 447 448 info = sev_get_info(); 449 if (!info) { 450 error_setg(errp, "SEV feature is not available"); 451 return NULL; 452 } 453 454 return info; 455 } 456 457 void hmp_info_sev(Monitor *mon, const QDict *qdict) 458 { 459 SevInfo *info = sev_get_info(); 460 461 if (!info || !info->enabled) { 462 monitor_printf(mon, "SEV is not enabled\n"); 463 goto out; 464 } 465 466 monitor_printf(mon, "SEV type: %s\n", SevGuestType_str(info->sev_type)); 467 monitor_printf(mon, "state: %s\n", SevState_str(info->state)); 468 monitor_printf(mon, "build: %d\n", info->build_id); 469 monitor_printf(mon, "api version: %d.%d\n", info->api_major, 470 info->api_minor); 471 472 if (sev_snp_enabled()) { 473 monitor_printf(mon, "debug: %s\n", 474 info->u.sev_snp.snp_policy & SEV_SNP_POLICY_DBG ? "on" 475 : "off"); 476 monitor_printf(mon, "SMT allowed: %s\n", 477 info->u.sev_snp.snp_policy & SEV_SNP_POLICY_SMT ? 
"on" 478 : "off"); 479 } else { 480 monitor_printf(mon, "handle: %d\n", info->u.sev.handle); 481 monitor_printf(mon, "debug: %s\n", 482 info->u.sev.policy & SEV_POLICY_NODBG ? "off" : "on"); 483 monitor_printf(mon, "key-sharing: %s\n", 484 info->u.sev.policy & SEV_POLICY_NOKS ? "off" : "on"); 485 } 486 487 out: 488 qapi_free_SevInfo(info); 489 } 490 491 static int 492 sev_get_pdh_info(int fd, guchar **pdh, size_t *pdh_len, guchar **cert_chain, 493 size_t *cert_chain_len, Error **errp) 494 { 495 guchar *pdh_data = NULL; 496 guchar *cert_chain_data = NULL; 497 struct sev_user_data_pdh_cert_export export = {}; 498 int err, r; 499 500 /* query the certificate length */ 501 r = sev_platform_ioctl(fd, SEV_PDH_CERT_EXPORT, &export, &err); 502 if (r < 0) { 503 if (err != SEV_RET_INVALID_LEN) { 504 error_setg(errp, "SEV: Failed to export PDH cert" 505 " ret=%d fw_err=%d (%s)", 506 r, err, fw_error_to_str(err)); 507 return 1; 508 } 509 } 510 511 pdh_data = g_new(guchar, export.pdh_cert_len); 512 cert_chain_data = g_new(guchar, export.cert_chain_len); 513 export.pdh_cert_address = (unsigned long)pdh_data; 514 export.cert_chain_address = (unsigned long)cert_chain_data; 515 516 r = sev_platform_ioctl(fd, SEV_PDH_CERT_EXPORT, &export, &err); 517 if (r < 0) { 518 error_setg(errp, "SEV: Failed to export PDH cert ret=%d fw_err=%d (%s)", 519 r, err, fw_error_to_str(err)); 520 goto e_free; 521 } 522 523 *pdh = pdh_data; 524 *pdh_len = export.pdh_cert_len; 525 *cert_chain = cert_chain_data; 526 *cert_chain_len = export.cert_chain_len; 527 return 0; 528 529 e_free: 530 g_free(pdh_data); 531 g_free(cert_chain_data); 532 return 1; 533 } 534 535 static int sev_get_cpu0_id(int fd, guchar **id, size_t *id_len, Error **errp) 536 { 537 guchar *id_data; 538 struct sev_user_data_get_id2 get_id2 = {}; 539 int err, r; 540 541 /* query the ID length */ 542 r = sev_platform_ioctl(fd, SEV_GET_ID2, &get_id2, &err); 543 if (r < 0 && err != SEV_RET_INVALID_LEN) { 544 error_setg(errp, "SEV: Failed to get ID ret=%d fw_err=%d (%s)", 545 r, err, fw_error_to_str(err)); 546 return 1; 547 } 548 549 id_data = g_new(guchar, get_id2.length); 550 get_id2.address = (unsigned long)id_data; 551 552 r = sev_platform_ioctl(fd, SEV_GET_ID2, &get_id2, &err); 553 if (r < 0) { 554 error_setg(errp, "SEV: Failed to get ID ret=%d fw_err=%d (%s)", 555 r, err, fw_error_to_str(err)); 556 goto err; 557 } 558 559 *id = id_data; 560 *id_len = get_id2.length; 561 return 0; 562 563 err: 564 g_free(id_data); 565 return 1; 566 } 567 568 static SevCapability *sev_get_capabilities(Error **errp) 569 { 570 SevCapability *cap = NULL; 571 guchar *pdh_data = NULL; 572 guchar *cert_chain_data = NULL; 573 guchar *cpu0_id_data = NULL; 574 size_t pdh_len = 0, cert_chain_len = 0, cpu0_id_len = 0; 575 uint32_t ebx; 576 int fd; 577 SevCommonState *sev_common; 578 char *sev_device; 579 580 if (!kvm_enabled()) { 581 error_setg(errp, "KVM not enabled"); 582 return NULL; 583 } 584 if (kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_OP, NULL) < 0) { 585 error_setg(errp, "SEV is not enabled in KVM"); 586 return NULL; 587 } 588 589 sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs); 590 if (sev_common) { 591 sev_device = object_property_get_str(OBJECT(sev_common), "sev-device", 592 &error_abort); 593 } else { 594 sev_device = g_strdup(DEFAULT_SEV_DEVICE); 595 } 596 597 fd = open(sev_device, O_RDWR); 598 if (fd < 0) { 599 error_setg_errno(errp, errno, "SEV: Failed to open %s", 600 sev_device); 601 g_free(sev_device); 602 return NULL; 603 } 604 g_free(sev_device); 605 606 if 
(sev_get_pdh_info(fd, &pdh_data, &pdh_len,
                          &cert_chain_data, &cert_chain_len, errp)) {
        goto out;
    }

    if (sev_get_cpu0_id(fd, &cpu0_id_data, &cpu0_id_len, errp)) {
        goto out;
    }

    cap = g_new0(SevCapability, 1);
    cap->pdh = g_base64_encode(pdh_data, pdh_len);
    cap->cert_chain = g_base64_encode(cert_chain_data, cert_chain_len);
    cap->cpu0_id = g_base64_encode(cpu0_id_data, cpu0_id_len);

    host_cpuid(0x8000001F, 0, NULL, &ebx, NULL, NULL);
    cap->cbitpos = ebx & 0x3f;

    /*
     * When SEV feature is enabled, we lose one bit in guest physical
     * addressing.
     */
    cap->reduced_phys_bits = 1;

out:
    g_free(cpu0_id_data);
    g_free(pdh_data);
    g_free(cert_chain_data);
    close(fd);
    return cap;
}

SevCapability *qmp_query_sev_capabilities(Error **errp)
{
    return sev_get_capabilities(errp);
}

static OvmfSevMetadata *ovmf_sev_metadata_table;

#define OVMF_SEV_META_DATA_GUID "dc886566-984a-4798-A75e-5585a7bf67cc"
typedef struct __attribute__((__packed__)) OvmfSevMetadataOffset {
    uint32_t offset;
} OvmfSevMetadataOffset;

OvmfSevMetadata *pc_system_get_ovmf_sev_metadata_ptr(void)
{
    return ovmf_sev_metadata_table;
}

void pc_system_parse_sev_metadata(uint8_t *flash_ptr, size_t flash_size)
{
    OvmfSevMetadata *metadata;
    OvmfSevMetadataOffset *data;

    if (!pc_system_ovmf_table_find(OVMF_SEV_META_DATA_GUID, (uint8_t **)&data,
                                   NULL)) {
        return;
    }

    metadata = (OvmfSevMetadata *)(flash_ptr + flash_size - data->offset);
    if (memcmp(metadata->signature, "ASEV", 4) != 0 ||
        metadata->len < sizeof(OvmfSevMetadata) ||
        metadata->len > flash_size - data->offset) {
        return;
    }

    ovmf_sev_metadata_table = g_memdup2(metadata, metadata->len);
}

static SevAttestationReport *sev_get_attestation_report(const char *mnonce,
                                                        Error **errp)
{
    struct kvm_sev_attestation_report input = {};
    SevAttestationReport *report = NULL;
    SevCommonState *sev_common;
    g_autofree guchar *data = NULL;
    g_autofree guchar *buf = NULL;
    gsize len;
    int err = 0, ret;

    if (!sev_enabled()) {
        error_setg(errp, "SEV is not enabled");
        return NULL;
    }

    /* let's decode the mnonce string */
    buf = g_base64_decode(mnonce, &len);
    if (!buf) {
        error_setg(errp, "SEV: failed to decode mnonce input");
        return NULL;
    }

    /* verify the input mnonce length */
    if (len != sizeof(input.mnonce)) {
        error_setg(errp, "SEV: mnonce must be %zu bytes (got %" G_GSIZE_FORMAT ")",
                   sizeof(input.mnonce), len);
        return NULL;
    }

    sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);

    /* Query the report length */
    ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_GET_ATTESTATION_REPORT,
                    &input, &err);
    if (ret < 0) {
        if (err != SEV_RET_INVALID_LEN) {
            error_setg(errp, "SEV: Failed to query the attestation report"
                       " length ret=%d fw_err=%d (%s)",
                       ret, err, fw_error_to_str(err));
            return NULL;
        }
    }

    data = g_malloc(input.len);
    input.uaddr = (unsigned long)data;
    memcpy(input.mnonce, buf, sizeof(input.mnonce));

    /* Query the report */
    ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_GET_ATTESTATION_REPORT,
                    &input, &err);
    if (ret) {
        error_setg_errno(errp, errno, "SEV: Failed to get attestation report"
                         " ret=%d fw_err=%d (%s)", ret, err, fw_error_to_str(err));
        return NULL;
    }

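    /* Base64-encode the report buffer so it can be returned directly via QMP */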
    report = g_new0(SevAttestationReport, 1);
    report->data = g_base64_encode(data, input.len);

    trace_kvm_sev_attestation_report(mnonce, report->data);

    return report;
}

SevAttestationReport *qmp_query_sev_attestation_report(const char *mnonce,
                                                       Error **errp)
{
    return sev_get_attestation_report(mnonce, errp);
}

static int
sev_read_file_base64(const char *filename, guchar **data, gsize *len)
{
    gsize sz;
    g_autofree gchar *base64 = NULL;
    GError *error = NULL;

    if (!g_file_get_contents(filename, &base64, &sz, &error)) {
        error_report("SEV: Failed to read '%s' (%s)", filename, error->message);
        g_error_free(error);
        return -1;
    }

    *data = g_base64_decode(base64, len);
    return 0;
}

static int
sev_snp_launch_start(SevCommonState *sev_common)
{
    int fw_error, rc;
    SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(sev_common);
    struct kvm_sev_snp_launch_start *start = &sev_snp_guest->kvm_start_conf;

    trace_kvm_sev_snp_launch_start(start->policy,
                                   sev_snp_guest->guest_visible_workarounds);

    if (!kvm_enable_hypercall(BIT_ULL(KVM_HC_MAP_GPA_RANGE))) {
        return 1;
    }

    rc = sev_ioctl(sev_common->sev_fd, KVM_SEV_SNP_LAUNCH_START,
                   start, &fw_error);
    if (rc < 0) {
        error_report("%s: SNP_LAUNCH_START ret=%d fw_error=%d '%s'",
                     __func__, rc, fw_error, fw_error_to_str(fw_error));
        return 1;
    }

    QTAILQ_INIT(&launch_update);

    sev_set_guest_state(sev_common, SEV_STATE_LAUNCH_UPDATE);

    return 0;
}

static int
sev_launch_start(SevCommonState *sev_common)
{
    gsize sz;
    int ret = 1;
    int fw_error, rc;
    SevGuestState *sev_guest = SEV_GUEST(sev_common);
    struct kvm_sev_launch_start start = {
        .handle = sev_guest->handle, .policy = sev_guest->policy
    };
    guchar *session = NULL, *dh_cert = NULL;

    if (sev_guest->session_file) {
        if (sev_read_file_base64(sev_guest->session_file, &session, &sz) < 0) {
            goto out;
        }
        start.session_uaddr = (unsigned long)session;
        start.session_len = sz;
    }

    if (sev_guest->dh_cert_file) {
        if (sev_read_file_base64(sev_guest->dh_cert_file, &dh_cert, &sz) < 0) {
            goto out;
        }
        start.dh_uaddr = (unsigned long)dh_cert;
        start.dh_len = sz;
    }

    trace_kvm_sev_launch_start(start.policy, session, dh_cert);
    rc = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_START, &start, &fw_error);
    if (rc < 0) {
        error_report("%s: LAUNCH_START ret=%d fw_error=%d '%s'",
                     __func__, rc, fw_error, fw_error_to_str(fw_error));
        goto out;
    }

    sev_set_guest_state(sev_common, SEV_STATE_LAUNCH_UPDATE);
    sev_guest->handle = start.handle;
    ret = 0;

out:
    g_free(session);
    g_free(dh_cert);
    return ret;
}

static void
sev_snp_cpuid_report_mismatches(SnpCpuidInfo *old,
                                SnpCpuidInfo *new)
{
    size_t i;

    if (old->count != new->count) {
        error_report("SEV-SNP: CPUID validation failed due to count mismatch, "
                     "provided: %d, expected: %d", old->count, new->count);
        return;
    }

    for (i = 0; i < old->count; i++) {
        SnpCpuidFunc *old_func, *new_func;

        old_func = &old->entries[i];
        new_func = &new->entries[i];

        if (memcmp(old_func, new_func, sizeof(SnpCpuidFunc))) {
            error_report("SEV-SNP: CPUID validation failed for function 0x%x, index: 0x%x, "
                         "provided: eax:0x%08x, ebx: 0x%08x, ecx: 0x%08x, edx: 0x%08x, "
                         "expected: eax:0x%08x, ebx: 0x%08x, ecx: 0x%08x, edx: 0x%08x",
                         old_func->eax_in, old_func->ecx_in,
                         old_func->eax, old_func->ebx, old_func->ecx, old_func->edx,
                         new_func->eax, new_func->ebx, new_func->ecx, new_func->edx);
        }
    }
}

static const char *
snp_page_type_to_str(int type)
{
    switch (type) {
    case KVM_SEV_SNP_PAGE_TYPE_NORMAL: return "Normal";
    case KVM_SEV_SNP_PAGE_TYPE_ZERO: return "Zero";
    case KVM_SEV_SNP_PAGE_TYPE_UNMEASURED: return "Unmeasured";
    case KVM_SEV_SNP_PAGE_TYPE_SECRETS: return "Secrets";
    case KVM_SEV_SNP_PAGE_TYPE_CPUID: return "Cpuid";
    default: return "unknown";
    }
}

static int
sev_snp_launch_update(SevSnpGuestState *sev_snp_guest,
                      SevLaunchUpdateData *data)
{
    int ret, fw_error;
    SnpCpuidInfo snp_cpuid_info;
    struct kvm_sev_snp_launch_update update = {0};

    if (!data->hva || !data->len) {
        error_report("SNP_LAUNCH_UPDATE called with invalid address/length: "
                     "%p/%zx", data->hva, data->len);
        return 1;
    }

    if (data->type == KVM_SEV_SNP_PAGE_TYPE_CPUID) {
        /* Save a copy for comparison in case the LAUNCH_UPDATE fails */
        memcpy(&snp_cpuid_info, data->hva, sizeof(snp_cpuid_info));
    }

    update.uaddr = (__u64)(unsigned long)data->hva;
    update.gfn_start = data->gpa >> TARGET_PAGE_BITS;
    update.len = data->len;
    update.type = data->type;

    /*
     * KVM_SEV_SNP_LAUNCH_UPDATE requires that GPA ranges have the private
     * memory attribute set in advance.
     */
    ret = kvm_set_memory_attributes_private(data->gpa, data->len);
    if (ret) {
        error_report("SEV-SNP: failed to configure initial "
                     "private guest memory");
        goto out;
    }

    while (update.len || ret == -EAGAIN) {
        trace_kvm_sev_snp_launch_update(update.uaddr, update.gfn_start <<
                                        TARGET_PAGE_BITS, update.len,
                                        snp_page_type_to_str(update.type));

        ret = sev_ioctl(SEV_COMMON(sev_snp_guest)->sev_fd,
                        KVM_SEV_SNP_LAUNCH_UPDATE,
                        &update, &fw_error);
        if (ret && ret != -EAGAIN) {
            error_report("SNP_LAUNCH_UPDATE ret=%d fw_error=%d '%s'",
                         ret, fw_error, fw_error_to_str(fw_error));

            if (data->type == KVM_SEV_SNP_PAGE_TYPE_CPUID) {
                sev_snp_cpuid_report_mismatches(&snp_cpuid_info, data->hva);
                error_report("SEV-SNP: failed to update CPUID page");
            }
            break;
        }
    }

out:
    if (!ret && update.gfn_start << TARGET_PAGE_BITS != data->gpa + data->len) {
        error_report("SEV-SNP: expected update of GPA range %"
                     HWADDR_PRIx "-%" HWADDR_PRIx ", "
                     "got GPA range %" HWADDR_PRIx "-%llx",
                     data->gpa, data->gpa + data->len, data->gpa,
                     update.gfn_start << TARGET_PAGE_BITS);
        ret = -EIO;
    }

    return ret;
}

static uint32_t
sev_snp_mask_cpuid_features(X86ConfidentialGuest *cg, uint32_t feature, uint32_t index,
                            int reg, uint32_t value)
{
    switch (feature) {
    case 1:
        if (reg == R_ECX) {
            return value & ~CPUID_EXT_TSC_DEADLINE_TIMER;
        }
        break;
    case 7:
        if (index == 0 && reg == R_EBX) {
            return value & ~CPUID_7_0_EBX_TSC_ADJUST;
        }
        if (index == 0 && reg == R_EDX) {
            return value & ~(CPUID_7_0_EDX_SPEC_CTRL |
                             CPUID_7_0_EDX_STIBP |
                             CPUID_7_0_EDX_FLUSH_L1D |
                             CPUID_7_0_EDX_ARCH_CAPABILITIES |
                             CPUID_7_0_EDX_CORE_CAPABILITY |
                             CPUID_7_0_EDX_SPEC_CTRL_SSBD);
        }
        break;
    case 0x80000008:
        if (reg == R_EBX) {
            return value & ~CPUID_8000_0008_EBX_VIRT_SSBD;
        }
        break;
    }
    return value;
}
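
/*
 * Issue LAUNCH_UPDATE_DATA to encrypt a region of guest memory in place;
 * the firmware also folds the plaintext contents into the launch
 * measurement. Used via sev_encrypt_flash() for the firmware image and for
 * the kernel hashes page while the guest is in the LAUNCH_UPDATE state.
 */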
static int
sev_launch_update_data(SevCommonState *sev_common, hwaddr gpa,
                       uint8_t *addr, size_t len)
{
    int ret, fw_error;
    struct kvm_sev_launch_update_data update;

    if (!addr || !len) {
        return 1;
    }

    update.uaddr = (uintptr_t)addr;
    update.len = len;
    trace_kvm_sev_launch_update_data(addr, len);
    ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_UPDATE_DATA,
                    &update, &fw_error);
    if (ret) {
        error_report("%s: LAUNCH_UPDATE ret=%d fw_error=%d '%s'",
                     __func__, ret, fw_error, fw_error_to_str(fw_error));
    }

    return ret;
}

static int
sev_launch_update_vmsa(SevGuestState *sev_guest)
{
    int ret, fw_error;

    ret = sev_ioctl(SEV_COMMON(sev_guest)->sev_fd, KVM_SEV_LAUNCH_UPDATE_VMSA,
                    NULL, &fw_error);
    if (ret) {
        error_report("%s: LAUNCH_UPDATE_VMSA ret=%d fw_error=%d '%s'",
                     __func__, ret, fw_error, fw_error_to_str(fw_error));
    }

    return ret;
}

static void
sev_launch_get_measure(Notifier *notifier, void *unused)
{
    SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
    SevGuestState *sev_guest = SEV_GUEST(sev_common);
    int ret, error;
    g_autofree guchar *data = NULL;
    struct kvm_sev_launch_measure measurement = {};

    if (!sev_check_state(sev_common, SEV_STATE_LAUNCH_UPDATE)) {
        return;
    }

    if (sev_es_enabled()) {
        /* measure all the VM save areas before getting launch_measure */
        ret = sev_launch_update_vmsa(sev_guest);
        if (ret) {
            exit(1);
        }
        kvm_mark_guest_state_protected();
    }

    /* query the measurement blob length */
    ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_MEASURE,
                    &measurement, &error);
    if (!measurement.len) {
        error_report("%s: LAUNCH_MEASURE ret=%d fw_error=%d '%s'",
                     __func__, ret, error, fw_error_to_str(error));
        return;
    }

    data = g_new0(guchar, measurement.len);
    measurement.uaddr = (unsigned long)data;

    /* get the measurement blob */
    ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_MEASURE,
                    &measurement, &error);
    if (ret) {
        error_report("%s: LAUNCH_MEASURE ret=%d fw_error=%d '%s'",
                     __func__, ret, error, fw_error_to_str(error));
        return;
    }

    sev_set_guest_state(sev_common, SEV_STATE_LAUNCH_SECRET);

    /* encode the measurement value and emit the event */
    sev_guest->measurement = g_base64_encode(data, measurement.len);
    trace_kvm_sev_launch_measurement(sev_guest->measurement);
}

static char *sev_get_launch_measurement(void)
{
    ConfidentialGuestSupport *cgs = MACHINE(qdev_get_machine())->cgs;
    SevGuestState *sev_guest =
        (SevGuestState *)object_dynamic_cast(OBJECT(cgs), TYPE_SEV_GUEST);

    if (sev_guest &&
        SEV_COMMON(sev_guest)->state >= SEV_STATE_LAUNCH_SECRET) {
        return g_strdup(sev_guest->measurement);
    }

    return NULL;
}

SevLaunchMeasureInfo *qmp_query_sev_launch_measure(Error **errp)
{
    char *data;
    SevLaunchMeasureInfo *info;

    data = sev_get_launch_measurement();
    if (!data) {
        error_setg(errp, "SEV launch measurement is not available");
        return NULL;
    }

    info = g_malloc0(sizeof(*info));
    info->data = data;

    return info;
}

static Notifier sev_machine_done_notify = {
    .notify = sev_launch_get_measure,
};

static void
sev_launch_finish(SevCommonState *sev_common) 1106 { 1107 int ret, error; 1108 1109 trace_kvm_sev_launch_finish(); 1110 ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_FINISH, 0, 1111 &error); 1112 if (ret) { 1113 error_report("%s: LAUNCH_FINISH ret=%d fw_error=%d '%s'", 1114 __func__, ret, error, fw_error_to_str(error)); 1115 exit(1); 1116 } 1117 1118 sev_set_guest_state(sev_common, SEV_STATE_RUNNING); 1119 1120 /* add migration blocker */ 1121 error_setg(&sev_mig_blocker, 1122 "SEV: Migration is not implemented"); 1123 migrate_add_blocker(&sev_mig_blocker, &error_fatal); 1124 } 1125 1126 static int 1127 snp_launch_update_data(uint64_t gpa, void *hva, size_t len, int type) 1128 { 1129 SevLaunchUpdateData *data; 1130 1131 data = g_new0(SevLaunchUpdateData, 1); 1132 data->gpa = gpa; 1133 data->hva = hva; 1134 data->len = len; 1135 data->type = type; 1136 1137 QTAILQ_INSERT_TAIL(&launch_update, data, next); 1138 1139 return 0; 1140 } 1141 1142 static int 1143 sev_snp_launch_update_data(SevCommonState *sev_common, hwaddr gpa, 1144 uint8_t *ptr, size_t len) 1145 { 1146 int ret = snp_launch_update_data(gpa, ptr, len, 1147 KVM_SEV_SNP_PAGE_TYPE_NORMAL); 1148 return ret; 1149 } 1150 1151 static int 1152 sev_snp_cpuid_info_fill(SnpCpuidInfo *snp_cpuid_info, 1153 const KvmCpuidInfo *kvm_cpuid_info) 1154 { 1155 size_t i; 1156 1157 if (kvm_cpuid_info->cpuid.nent > SNP_CPUID_FUNCTION_MAXCOUNT) { 1158 error_report("SEV-SNP: CPUID entry count (%d) exceeds max (%d)", 1159 kvm_cpuid_info->cpuid.nent, SNP_CPUID_FUNCTION_MAXCOUNT); 1160 return -1; 1161 } 1162 1163 memset(snp_cpuid_info, 0, sizeof(*snp_cpuid_info)); 1164 1165 for (i = 0; i < kvm_cpuid_info->cpuid.nent; i++) { 1166 const struct kvm_cpuid_entry2 *kvm_cpuid_entry; 1167 SnpCpuidFunc *snp_cpuid_entry; 1168 1169 kvm_cpuid_entry = &kvm_cpuid_info->entries[i]; 1170 snp_cpuid_entry = &snp_cpuid_info->entries[i]; 1171 1172 snp_cpuid_entry->eax_in = kvm_cpuid_entry->function; 1173 if (kvm_cpuid_entry->flags == KVM_CPUID_FLAG_SIGNIFCANT_INDEX) { 1174 snp_cpuid_entry->ecx_in = kvm_cpuid_entry->index; 1175 } 1176 snp_cpuid_entry->eax = kvm_cpuid_entry->eax; 1177 snp_cpuid_entry->ebx = kvm_cpuid_entry->ebx; 1178 snp_cpuid_entry->ecx = kvm_cpuid_entry->ecx; 1179 snp_cpuid_entry->edx = kvm_cpuid_entry->edx; 1180 1181 /* 1182 * Guest kernels will calculate EBX themselves using the 0xD 1183 * subfunctions corresponding to the individual XSAVE areas, so only 1184 * encode the base XSAVE size in the initial leaves, corresponding 1185 * to the initial XCR0=1 state. 
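         * (0x240 = 576 bytes: the 512-byte legacy FXSAVE region plus the
         * 64-byte XSAVE header.)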
         */
        if (snp_cpuid_entry->eax_in == 0xD &&
            (snp_cpuid_entry->ecx_in == 0x0 || snp_cpuid_entry->ecx_in == 0x1)) {
            snp_cpuid_entry->ebx = 0x240;
            snp_cpuid_entry->xcr0_in = 1;
            snp_cpuid_entry->xss_in = 0;
        }
    }

    snp_cpuid_info->count = i;

    return 0;
}

static int
snp_launch_update_cpuid(uint32_t cpuid_addr, void *hva, size_t cpuid_len)
{
    KvmCpuidInfo kvm_cpuid_info = {0};
    SnpCpuidInfo snp_cpuid_info;
    CPUState *cs = first_cpu;
    int ret;
    uint32_t i = 0;

    assert(sizeof(snp_cpuid_info) <= cpuid_len);

    /* get the cpuid list from KVM */
    do {
        kvm_cpuid_info.cpuid.nent = ++i;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_CPUID2, &kvm_cpuid_info);
    } while (ret == -E2BIG);

    if (ret) {
        error_report("SEV-SNP: unable to query CPUID values for CPU: '%s'",
                     strerror(-ret));
        return 1;
    }

    ret = sev_snp_cpuid_info_fill(&snp_cpuid_info, &kvm_cpuid_info);
    if (ret) {
        error_report("SEV-SNP: failed to generate CPUID table information");
        return 1;
    }

    memcpy(hva, &snp_cpuid_info, sizeof(snp_cpuid_info));

    return snp_launch_update_data(cpuid_addr, hva, cpuid_len,
                                  KVM_SEV_SNP_PAGE_TYPE_CPUID);
}

static int
snp_launch_update_kernel_hashes(SevSnpGuestState *sev_snp, uint32_t addr,
                                void *hva, uint32_t len)
{
    int type = KVM_SEV_SNP_PAGE_TYPE_ZERO;
    if (sev_snp->parent_obj.kernel_hashes) {
        assert(sev_snp->kernel_hashes_data);
        assert((sev_snp->kernel_hashes_offset +
                sizeof(*sev_snp->kernel_hashes_data)) <= len);
        memset(hva, 0, len);
        memcpy(hva + sev_snp->kernel_hashes_offset, sev_snp->kernel_hashes_data,
               sizeof(*sev_snp->kernel_hashes_data));
        type = KVM_SEV_SNP_PAGE_TYPE_NORMAL;
    }
    return snp_launch_update_data(addr, hva, len, type);
}

static int
snp_metadata_desc_to_page_type(int desc_type)
{
    switch (desc_type) {
    /* Add the unmeasured prevalidated pages as zero pages */
    case SEV_DESC_TYPE_SNP_SEC_MEM: return KVM_SEV_SNP_PAGE_TYPE_ZERO;
    case SEV_DESC_TYPE_SNP_SECRETS: return KVM_SEV_SNP_PAGE_TYPE_SECRETS;
    case SEV_DESC_TYPE_CPUID: return KVM_SEV_SNP_PAGE_TYPE_CPUID;
    default:
        return KVM_SEV_SNP_PAGE_TYPE_ZERO;
    }
}

static void
snp_populate_metadata_pages(SevSnpGuestState *sev_snp,
                            OvmfSevMetadata *metadata)
{
    OvmfSevMetadataDesc *desc;
    int type, ret, i;
    void *hva;
    MemoryRegion *mr = NULL;

    for (i = 0; i < metadata->num_desc; i++) {
        desc = &metadata->descs[i];

        type = snp_metadata_desc_to_page_type(desc->type);

        hva = gpa2hva(&mr, desc->base, desc->len, NULL);
        if (!hva) {
            error_report("%s: Failed to get HVA for GPA 0x%x sz 0x%x",
                         __func__, desc->base, desc->len);
            exit(1);
        }

        if (type == KVM_SEV_SNP_PAGE_TYPE_CPUID) {
            ret = snp_launch_update_cpuid(desc->base, hva, desc->len);
        } else if (desc->type == SEV_DESC_TYPE_SNP_KERNEL_HASHES) {
            ret = snp_launch_update_kernel_hashes(sev_snp, desc->base, hva,
                                                  desc->len);
        } else {
            ret = snp_launch_update_data(desc->base, hva, desc->len, type);
        }

        if (ret) {
            error_report("%s: Failed to add metadata page gpa 0x%x+%x type %d",
                         __func__, desc->base, desc->len, desc->type);
            exit(1);
        }
    }
}

static void
sev_snp_launch_finish(SevCommonState
*sev_common) 1305 { 1306 int ret, error; 1307 Error *local_err = NULL; 1308 OvmfSevMetadata *metadata; 1309 SevLaunchUpdateData *data; 1310 SevSnpGuestState *sev_snp = SEV_SNP_GUEST(sev_common); 1311 struct kvm_sev_snp_launch_finish *finish = &sev_snp->kvm_finish_conf; 1312 1313 /* 1314 * To boot the SNP guest, the hypervisor is required to populate the CPUID 1315 * and Secrets page before finalizing the launch flow. The location of 1316 * the secrets and CPUID page is available through the OVMF metadata GUID. 1317 */ 1318 metadata = pc_system_get_ovmf_sev_metadata_ptr(); 1319 if (metadata == NULL) { 1320 error_report("%s: Failed to locate SEV metadata header", __func__); 1321 exit(1); 1322 } 1323 1324 /* Populate all the metadata pages */ 1325 snp_populate_metadata_pages(sev_snp, metadata); 1326 1327 QTAILQ_FOREACH(data, &launch_update, next) { 1328 ret = sev_snp_launch_update(sev_snp, data); 1329 if (ret) { 1330 exit(1); 1331 } 1332 } 1333 1334 trace_kvm_sev_snp_launch_finish(sev_snp->id_block_base64, sev_snp->id_auth_base64, 1335 sev_snp->host_data); 1336 ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_SNP_LAUNCH_FINISH, 1337 finish, &error); 1338 if (ret) { 1339 error_report("SNP_LAUNCH_FINISH ret=%d fw_error=%d '%s'", 1340 ret, error, fw_error_to_str(error)); 1341 exit(1); 1342 } 1343 1344 kvm_mark_guest_state_protected(); 1345 sev_set_guest_state(sev_common, SEV_STATE_RUNNING); 1346 1347 /* add migration blocker */ 1348 error_setg(&sev_mig_blocker, 1349 "SEV-SNP: Migration is not implemented"); 1350 ret = migrate_add_blocker(&sev_mig_blocker, &local_err); 1351 if (local_err) { 1352 error_report_err(local_err); 1353 error_free(sev_mig_blocker); 1354 exit(1); 1355 } 1356 } 1357 1358 1359 static void 1360 sev_vm_state_change(void *opaque, bool running, RunState state) 1361 { 1362 SevCommonState *sev_common = opaque; 1363 SevCommonStateClass *klass = SEV_COMMON_GET_CLASS(opaque); 1364 1365 if (running) { 1366 if (!sev_check_state(sev_common, SEV_STATE_RUNNING)) { 1367 klass->launch_finish(sev_common); 1368 } 1369 } 1370 } 1371 1372 static int sev_kvm_type(X86ConfidentialGuest *cg) 1373 { 1374 SevCommonState *sev_common = SEV_COMMON(cg); 1375 SevGuestState *sev_guest = SEV_GUEST(sev_common); 1376 int kvm_type; 1377 1378 if (sev_common->kvm_type != -1) { 1379 goto out; 1380 } 1381 1382 kvm_type = (sev_guest->policy & SEV_POLICY_ES) ? 1383 KVM_X86_SEV_ES_VM : KVM_X86_SEV_VM; 1384 if (kvm_is_vm_type_supported(kvm_type) && !sev_guest->legacy_vm_type) { 1385 sev_common->kvm_type = kvm_type; 1386 } else { 1387 sev_common->kvm_type = KVM_X86_DEFAULT_VM; 1388 } 1389 1390 out: 1391 return sev_common->kvm_type; 1392 } 1393 1394 static int sev_snp_kvm_type(X86ConfidentialGuest *cg) 1395 { 1396 return KVM_X86_SNP_VM; 1397 } 1398 1399 static int sev_common_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) 1400 { 1401 char *devname; 1402 int ret, fw_error, cmd; 1403 uint32_t ebx; 1404 uint32_t host_cbitpos; 1405 struct sev_user_data_status status = {}; 1406 SevCommonState *sev_common = SEV_COMMON(cgs); 1407 SevCommonStateClass *klass = SEV_COMMON_GET_CLASS(cgs); 1408 X86ConfidentialGuestClass *x86_klass = 1409 X86_CONFIDENTIAL_GUEST_GET_CLASS(cgs); 1410 1411 sev_common->state = SEV_STATE_UNINIT; 1412 1413 host_cpuid(0x8000001F, 0, NULL, &ebx, NULL, NULL); 1414 host_cbitpos = ebx & 0x3f; 1415 1416 /* 1417 * The cbitpos value will be placed in bit positions 5:0 of the EBX 1418 * register of CPUID 0x8000001F. No need to verify the range as the 1419 * comparison against the host value accomplishes that. 
     */
    if (host_cbitpos != sev_common->cbitpos) {
        error_setg(errp, "%s: cbitpos check failed, host '%d' requested '%d'",
                   __func__, host_cbitpos, sev_common->cbitpos);
        return -1;
    }

    /*
     * The reduced-phys-bits value will be placed in bit positions 11:6 of
     * the EBX register of CPUID 0x8000001F, so verify the supplied value
     * is in the range of 1 to 63.
     */
    if (sev_common->reduced_phys_bits < 1 ||
        sev_common->reduced_phys_bits > 63) {
        error_setg(errp, "%s: reduced_phys_bits check failed,"
                   " it should be in the range of 1 to 63, requested '%d'",
                   __func__, sev_common->reduced_phys_bits);
        return -1;
    }

    devname = object_property_get_str(OBJECT(sev_common), "sev-device", NULL);
    sev_common->sev_fd = open(devname, O_RDWR);
    if (sev_common->sev_fd < 0) {
        error_setg(errp, "%s: Failed to open %s '%s'", __func__,
                   devname, strerror(errno));
        g_free(devname);
        return -1;
    }
    g_free(devname);

    ret = sev_platform_ioctl(sev_common->sev_fd, SEV_PLATFORM_STATUS, &status,
                             &fw_error);
    if (ret) {
        error_setg(errp, "%s: failed to get platform status ret=%d "
                   "fw_error='%d: %s'", __func__, ret, fw_error,
                   fw_error_to_str(fw_error));
        return -1;
    }
    sev_common->build_id = status.build;
    sev_common->api_major = status.api_major;
    sev_common->api_minor = status.api_minor;

    if (sev_es_enabled()) {
        if (!kvm_kernel_irqchip_allowed()) {
            error_setg(errp, "%s: SEV-ES guests require in-kernel irqchip "
                       "support", __func__);
            return -1;
        }
    }

    if (sev_es_enabled() && !sev_snp_enabled()) {
        if (!(status.flags & SEV_STATUS_FLAGS_CONFIG_ES)) {
            error_setg(errp, "%s: guest policy requires SEV-ES, but "
                       "host SEV-ES support unavailable",
                       __func__);
            return -1;
        }
    }

    trace_kvm_sev_init();
    if (x86_klass->kvm_type(X86_CONFIDENTIAL_GUEST(sev_common)) == KVM_X86_DEFAULT_VM) {
        cmd = sev_es_enabled() ? KVM_SEV_ES_INIT : KVM_SEV_INIT;

        ret = sev_ioctl(sev_common->sev_fd, cmd, NULL, &fw_error);
    } else {
        struct kvm_sev_init args = { 0 };

        ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_INIT2, &args, &fw_error);
    }

    if (ret) {
        error_setg(errp, "%s: failed to initialize ret=%d fw_error=%d '%s'",
                   __func__, ret, fw_error, fw_error_to_str(fw_error));
        return -1;
    }

    ret = klass->launch_start(sev_common);

    if (ret) {
        error_setg(errp, "%s: failed to create encryption context", __func__);
        return -1;
    }

    if (klass->kvm_init && klass->kvm_init(cgs, errp)) {
        return -1;
    }

    qemu_add_vm_change_state_handler(sev_vm_state_change, sev_common);

    cgs->ready = true;

    return 0;
}

static int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
{
    int ret;

    /*
     * SEV/SEV-ES rely on pinned memory to back guest RAM so discarding
     * isn't actually possible. With SNP, only guest_memfd pages are used
     * for private guest memory, so discarding of shared memory is still
     * possible.
1523 */ 1524 ret = ram_block_discard_disable(true); 1525 if (ret) { 1526 error_setg(errp, "%s: cannot disable RAM discard", __func__); 1527 return -1; 1528 } 1529 1530 /* 1531 * SEV uses these notifiers to register/pin pages prior to guest use, 1532 * but SNP relies on guest_memfd for private pages, which has its 1533 * own internal mechanisms for registering/pinning private memory. 1534 */ 1535 ram_block_notifier_add(&sev_ram_notifier); 1536 1537 /* 1538 * The machine done notify event is used for SEV guests to get the 1539 * measurement of the encrypted images. When SEV-SNP is enabled, the 1540 * measurement is part of the guest attestation process where it can 1541 * be collected without any reliance on the VMM. So skip registering 1542 * the notifier for SNP in favor of using guest attestation instead. 1543 */ 1544 qemu_add_machine_init_done_notifier(&sev_machine_done_notify); 1545 1546 return 0; 1547 } 1548 1549 static int sev_snp_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) 1550 { 1551 MachineState *ms = MACHINE(qdev_get_machine()); 1552 X86MachineState *x86ms = X86_MACHINE(ms); 1553 1554 if (x86ms->smm == ON_OFF_AUTO_AUTO) { 1555 x86ms->smm = ON_OFF_AUTO_OFF; 1556 } else if (x86ms->smm == ON_OFF_AUTO_ON) { 1557 error_setg(errp, "SEV-SNP does not support SMM."); 1558 return -1; 1559 } 1560 1561 return 0; 1562 } 1563 1564 int 1565 sev_encrypt_flash(hwaddr gpa, uint8_t *ptr, uint64_t len, Error **errp) 1566 { 1567 SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs); 1568 SevCommonStateClass *klass; 1569 1570 if (!sev_common) { 1571 return 0; 1572 } 1573 klass = SEV_COMMON_GET_CLASS(sev_common); 1574 1575 /* if SEV is in update state then encrypt the data else do nothing */ 1576 if (sev_check_state(sev_common, SEV_STATE_LAUNCH_UPDATE)) { 1577 int ret; 1578 1579 ret = klass->launch_update_data(sev_common, gpa, ptr, len); 1580 if (ret < 0) { 1581 error_setg(errp, "SEV: Failed to encrypt pflash rom"); 1582 return ret; 1583 } 1584 } 1585 1586 return 0; 1587 } 1588 1589 int sev_inject_launch_secret(const char *packet_hdr, const char *secret, 1590 uint64_t gpa, Error **errp) 1591 { 1592 ERRP_GUARD(); 1593 struct kvm_sev_launch_secret input; 1594 g_autofree guchar *data = NULL, *hdr = NULL; 1595 int error, ret = 1; 1596 void *hva; 1597 gsize hdr_sz = 0, data_sz = 0; 1598 MemoryRegion *mr = NULL; 1599 SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs); 1600 1601 if (!sev_common) { 1602 error_setg(errp, "SEV not enabled for guest"); 1603 return 1; 1604 } 1605 1606 /* secret can be injected only in this state */ 1607 if (!sev_check_state(sev_common, SEV_STATE_LAUNCH_SECRET)) { 1608 error_setg(errp, "SEV: Not in correct state. 
(LSECRET) %x", 1609 sev_common->state); 1610 return 1; 1611 } 1612 1613 hdr = g_base64_decode(packet_hdr, &hdr_sz); 1614 if (!hdr || !hdr_sz) { 1615 error_setg(errp, "SEV: Failed to decode sequence header"); 1616 return 1; 1617 } 1618 1619 data = g_base64_decode(secret, &data_sz); 1620 if (!data || !data_sz) { 1621 error_setg(errp, "SEV: Failed to decode data"); 1622 return 1; 1623 } 1624 1625 hva = gpa2hva(&mr, gpa, data_sz, errp); 1626 if (!hva) { 1627 error_prepend(errp, "SEV: Failed to calculate guest address: "); 1628 return 1; 1629 } 1630 1631 input.hdr_uaddr = (uint64_t)(unsigned long)hdr; 1632 input.hdr_len = hdr_sz; 1633 1634 input.trans_uaddr = (uint64_t)(unsigned long)data; 1635 input.trans_len = data_sz; 1636 1637 input.guest_uaddr = (uint64_t)(unsigned long)hva; 1638 input.guest_len = data_sz; 1639 1640 trace_kvm_sev_launch_secret(gpa, input.guest_uaddr, 1641 input.trans_uaddr, input.trans_len); 1642 1643 ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_SECRET, 1644 &input, &error); 1645 if (ret) { 1646 error_setg(errp, "SEV: failed to inject secret ret=%d fw_error=%d '%s'", 1647 ret, error, fw_error_to_str(error)); 1648 return ret; 1649 } 1650 1651 return 0; 1652 } 1653 1654 #define SEV_SECRET_GUID "4c2eb361-7d9b-4cc3-8081-127c90d3d294" 1655 struct sev_secret_area { 1656 uint32_t base; 1657 uint32_t size; 1658 }; 1659 1660 void qmp_sev_inject_launch_secret(const char *packet_hdr, 1661 const char *secret, 1662 bool has_gpa, uint64_t gpa, 1663 Error **errp) 1664 { 1665 if (!sev_enabled()) { 1666 error_setg(errp, "SEV not enabled for guest"); 1667 return; 1668 } 1669 if (!has_gpa) { 1670 uint8_t *data; 1671 struct sev_secret_area *area; 1672 1673 if (!pc_system_ovmf_table_find(SEV_SECRET_GUID, &data, NULL)) { 1674 error_setg(errp, "SEV: no secret area found in OVMF," 1675 " gpa must be specified."); 1676 return; 1677 } 1678 area = (struct sev_secret_area *)data; 1679 gpa = area->base; 1680 } 1681 1682 sev_inject_launch_secret(packet_hdr, secret, gpa, errp); 1683 } 1684 1685 static int 1686 sev_es_parse_reset_block(SevInfoBlock *info, uint32_t *addr) 1687 { 1688 if (!info->reset_addr) { 1689 error_report("SEV-ES reset address is zero"); 1690 return 1; 1691 } 1692 1693 *addr = info->reset_addr; 1694 1695 return 0; 1696 } 1697 1698 static int 1699 sev_es_find_reset_vector(void *flash_ptr, uint64_t flash_size, 1700 uint32_t *addr) 1701 { 1702 QemuUUID info_guid, *guid; 1703 SevInfoBlock *info; 1704 uint8_t *data; 1705 uint16_t *len; 1706 1707 /* 1708 * Initialize the address to zero. An address of zero with a successful 1709 * return code indicates that SEV-ES is not active. 1710 */ 1711 *addr = 0; 1712 1713 /* 1714 * Extract the AP reset vector for SEV-ES guests by locating the SEV GUID. 1715 * The SEV GUID is located on its own (original implementation) or within 1716 * the Firmware GUID Table (new implementation), either of which are 1717 * located 32 bytes from the end of the flash. 1718 * 1719 * Check the Firmware GUID Table first. 1720 */ 1721 if (pc_system_ovmf_table_find(SEV_INFO_BLOCK_GUID, &data, NULL)) { 1722 return sev_es_parse_reset_block((SevInfoBlock *)data, addr); 1723 } 1724 1725 /* 1726 * SEV info block not found in the Firmware GUID Table (or there isn't 1727 * a Firmware GUID Table), fall back to the original implementation. 
1728 */ 1729 data = flash_ptr + flash_size - 0x20; 1730 1731 qemu_uuid_parse(SEV_INFO_BLOCK_GUID, &info_guid); 1732 info_guid = qemu_uuid_bswap(info_guid); /* GUIDs are LE */ 1733 1734 guid = (QemuUUID *)(data - sizeof(info_guid)); 1735 if (!qemu_uuid_is_equal(guid, &info_guid)) { 1736 error_report("SEV information block/Firmware GUID Table block not found in pflash rom"); 1737 return 1; 1738 } 1739 1740 len = (uint16_t *)((uint8_t *)guid - sizeof(*len)); 1741 info = (SevInfoBlock *)(data - le16_to_cpu(*len)); 1742 1743 return sev_es_parse_reset_block(info, addr); 1744 } 1745 1746 void sev_es_set_reset_vector(CPUState *cpu) 1747 { 1748 X86CPU *x86; 1749 CPUX86State *env; 1750 ConfidentialGuestSupport *cgs = MACHINE(qdev_get_machine())->cgs; 1751 SevCommonState *sev_common = SEV_COMMON( 1752 object_dynamic_cast(OBJECT(cgs), TYPE_SEV_COMMON)); 1753 1754 /* Only update if we have valid reset information */ 1755 if (!sev_common || !sev_common->reset_data_valid) { 1756 return; 1757 } 1758 1759 /* Do not update the BSP reset state */ 1760 if (cpu->cpu_index == 0) { 1761 return; 1762 } 1763 1764 x86 = X86_CPU(cpu); 1765 env = &x86->env; 1766 1767 cpu_x86_load_seg_cache(env, R_CS, 0xf000, sev_common->reset_cs, 0xffff, 1768 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | 1769 DESC_R_MASK | DESC_A_MASK); 1770 1771 env->eip = sev_common->reset_ip; 1772 } 1773 1774 int sev_es_save_reset_vector(void *flash_ptr, uint64_t flash_size) 1775 { 1776 CPUState *cpu; 1777 uint32_t addr; 1778 int ret; 1779 SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs); 1780 1781 if (!sev_es_enabled()) { 1782 return 0; 1783 } 1784 1785 addr = 0; 1786 ret = sev_es_find_reset_vector(flash_ptr, flash_size, 1787 &addr); 1788 if (ret) { 1789 return ret; 1790 } 1791 1792 if (addr) { 1793 sev_common->reset_cs = addr & 0xffff0000; 1794 sev_common->reset_ip = addr & 0x0000ffff; 1795 sev_common->reset_data_valid = true; 1796 1797 CPU_FOREACH(cpu) { 1798 sev_es_set_reset_vector(cpu); 1799 } 1800 } 1801 1802 return 0; 1803 } 1804 1805 static const QemuUUID sev_hash_table_header_guid = { 1806 .data = UUID_LE(0x9438d606, 0x4f22, 0x4cc9, 0xb4, 0x79, 0xa7, 0x93, 1807 0xd4, 0x11, 0xfd, 0x21) 1808 }; 1809 1810 static const QemuUUID sev_kernel_entry_guid = { 1811 .data = UUID_LE(0x4de79437, 0xabd2, 0x427f, 0xb8, 0x35, 0xd5, 0xb1, 1812 0x72, 0xd2, 0x04, 0x5b) 1813 }; 1814 static const QemuUUID sev_initrd_entry_guid = { 1815 .data = UUID_LE(0x44baf731, 0x3a2f, 0x4bd7, 0x9a, 0xf1, 0x41, 0xe2, 1816 0x91, 0x69, 0x78, 0x1d) 1817 }; 1818 static const QemuUUID sev_cmdline_entry_guid = { 1819 .data = UUID_LE(0x97d02dd8, 0xbd20, 0x4c94, 0xaa, 0x78, 0xe7, 0x71, 1820 0x4d, 0x36, 0xab, 0x2a) 1821 }; 1822 1823 static bool build_kernel_loader_hashes(PaddedSevHashTable *padded_ht, 1824 SevKernelLoaderContext *ctx, 1825 Error **errp) 1826 { 1827 SevHashTable *ht; 1828 uint8_t cmdline_hash[HASH_SIZE]; 1829 uint8_t initrd_hash[HASH_SIZE]; 1830 uint8_t kernel_hash[HASH_SIZE]; 1831 uint8_t *hashp; 1832 size_t hash_len = HASH_SIZE; 1833 1834 /* 1835 * Calculate hash of kernel command-line with the terminating null byte. If 1836 * the user doesn't supply a command-line via -append, the 1-byte "\0" will 1837 * be used. 1838 */ 1839 hashp = cmdline_hash; 1840 if (qcrypto_hash_bytes(QCRYPTO_HASH_ALG_SHA256, ctx->cmdline_data, 1841 ctx->cmdline_size, &hashp, &hash_len, errp) < 0) { 1842 return false; 1843 } 1844 assert(hash_len == HASH_SIZE); 1845 1846 /* 1847 * Calculate hash of initrd. 
If the user doesn't supply an initrd via 1848 * -initrd, an empty buffer will be used (ctx->initrd_size == 0). 1849 */ 1850 hashp = initrd_hash; 1851 if (qcrypto_hash_bytes(QCRYPTO_HASH_ALG_SHA256, ctx->initrd_data, 1852 ctx->initrd_size, &hashp, &hash_len, errp) < 0) { 1853 return false; 1854 } 1855 assert(hash_len == HASH_SIZE); 1856 1857 /* Calculate hash of the kernel */ 1858 hashp = kernel_hash; 1859 struct iovec iov[2] = { 1860 { .iov_base = ctx->setup_data, .iov_len = ctx->setup_size }, 1861 { .iov_base = ctx->kernel_data, .iov_len = ctx->kernel_size } 1862 }; 1863 if (qcrypto_hash_bytesv(QCRYPTO_HASH_ALG_SHA256, iov, ARRAY_SIZE(iov), 1864 &hashp, &hash_len, errp) < 0) { 1865 return false; 1866 } 1867 assert(hash_len == HASH_SIZE); 1868 1869 ht = &padded_ht->ht; 1870 1871 ht->guid = sev_hash_table_header_guid; 1872 ht->len = sizeof(*ht); 1873 1874 ht->cmdline.guid = sev_cmdline_entry_guid; 1875 ht->cmdline.len = sizeof(ht->cmdline); 1876 memcpy(ht->cmdline.hash, cmdline_hash, sizeof(ht->cmdline.hash)); 1877 1878 ht->initrd.guid = sev_initrd_entry_guid; 1879 ht->initrd.len = sizeof(ht->initrd); 1880 memcpy(ht->initrd.hash, initrd_hash, sizeof(ht->initrd.hash)); 1881 1882 ht->kernel.guid = sev_kernel_entry_guid; 1883 ht->kernel.len = sizeof(ht->kernel); 1884 memcpy(ht->kernel.hash, kernel_hash, sizeof(ht->kernel.hash)); 1885 1886 /* zero the excess data so the measurement can be reliably calculated */ 1887 memset(padded_ht->padding, 0, sizeof(padded_ht->padding)); 1888 1889 return true; 1890 } 1891 1892 static bool sev_snp_build_kernel_loader_hashes(SevCommonState *sev_common, 1893 SevHashTableDescriptor *area, 1894 SevKernelLoaderContext *ctx, 1895 Error **errp) 1896 { 1897 /* 1898 * SNP: Populate the hashes table in an area that later in 1899 * snp_launch_update_kernel_hashes() will be copied to the guest memory 1900 * and encrypted. 1901 */ 1902 SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(sev_common); 1903 sev_snp_guest->kernel_hashes_offset = area->base & ~TARGET_PAGE_MASK; 1904 sev_snp_guest->kernel_hashes_data = g_new0(PaddedSevHashTable, 1); 1905 return build_kernel_loader_hashes(sev_snp_guest->kernel_hashes_data, ctx, errp); 1906 } 1907 1908 static bool sev_build_kernel_loader_hashes(SevCommonState *sev_common, 1909 SevHashTableDescriptor *area, 1910 SevKernelLoaderContext *ctx, 1911 Error **errp) 1912 { 1913 PaddedSevHashTable *padded_ht; 1914 hwaddr mapped_len = sizeof(*padded_ht); 1915 MemTxAttrs attrs = { 0 }; 1916 bool ret = true; 1917 1918 /* 1919 * Populate the hashes table in the guest's memory at the OVMF-designated 1920 * area for the SEV hashes table 1921 */ 1922 padded_ht = address_space_map(&address_space_memory, area->base, 1923 &mapped_len, true, attrs); 1924 if (!padded_ht || mapped_len != sizeof(*padded_ht)) { 1925 error_setg(errp, "SEV: cannot map hashes table guest memory area"); 1926 return false; 1927 } 1928 1929 if (build_kernel_loader_hashes(padded_ht, ctx, errp)) { 1930 if (sev_encrypt_flash(area->base, (uint8_t *)padded_ht, 1931 sizeof(*padded_ht), errp) < 0) { 1932 ret = false; 1933 } 1934 } else { 1935 ret = false; 1936 } 1937 1938 address_space_unmap(&address_space_memory, padded_ht, 1939 mapped_len, true, mapped_len); 1940 1941 return ret; 1942 } 1943 1944 /* 1945 * Add the hashes of the linux kernel/initrd/cmdline to an encrypted guest page 1946 * which is included in SEV's initial memory measurement. 
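 * The guest address of that page is published by OVMF via the
 * SEV_HASH_TABLE_RV_GUID entry, and its layout is the SevHashTable
 * structure defined at the top of this file.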
 */
bool sev_add_kernel_loader_hashes(SevKernelLoaderContext *ctx, Error **errp)
{
    uint8_t *data;
    SevHashTableDescriptor *area;
    SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
    SevCommonStateClass *klass = SEV_COMMON_GET_CLASS(sev_common);

    /*
     * Only add the kernel hashes if the sev-guest configuration explicitly
     * requested kernel-hashes=on.
     */
    if (!sev_common->kernel_hashes) {
        return false;
    }

    if (!pc_system_ovmf_table_find(SEV_HASH_TABLE_RV_GUID, &data, NULL)) {
        error_setg(errp, "SEV: kernel specified but guest firmware "
                         "has no hashes table GUID");
        return false;
    }

    area = (SevHashTableDescriptor *)data;
    if (!area->base || area->size < sizeof(PaddedSevHashTable)) {
        error_setg(errp, "SEV: guest firmware hashes table area is invalid "
                         "(base=0x%x size=0x%x)", area->base, area->size);
        return false;
    }

    return klass->build_kernel_loader_hashes(sev_common, area, ctx, errp);
}

static char *
sev_common_get_sev_device(Object *obj, Error **errp)
{
    return g_strdup(SEV_COMMON(obj)->sev_device);
}

static void
sev_common_set_sev_device(Object *obj, const char *value, Error **errp)
{
    SEV_COMMON(obj)->sev_device = g_strdup(value);
}

static bool sev_common_get_kernel_hashes(Object *obj, Error **errp)
{
    return SEV_COMMON(obj)->kernel_hashes;
}

static void sev_common_set_kernel_hashes(Object *obj, bool value, Error **errp)
{
    SEV_COMMON(obj)->kernel_hashes = value;
}

static void
sev_common_class_init(ObjectClass *oc, void *data)
{
    ConfidentialGuestSupportClass *klass = CONFIDENTIAL_GUEST_SUPPORT_CLASS(oc);

    klass->kvm_init = sev_common_kvm_init;

    object_class_property_add_str(oc, "sev-device",
                                  sev_common_get_sev_device,
                                  sev_common_set_sev_device);
    object_class_property_set_description(oc, "sev-device",
            "SEV device to use");
    object_class_property_add_bool(oc, "kernel-hashes",
                                   sev_common_get_kernel_hashes,
                                   sev_common_set_kernel_hashes);
    object_class_property_set_description(oc, "kernel-hashes",
            "add kernel hashes to guest firmware for measured Linux boot");
}

static void
sev_common_instance_init(Object *obj)
{
    SevCommonState *sev_common = SEV_COMMON(obj);

    sev_common->kvm_type = -1;

    sev_common->sev_device = g_strdup(DEFAULT_SEV_DEVICE);

    object_property_add_uint32_ptr(obj, "cbitpos", &sev_common->cbitpos,
                                   OBJ_PROP_FLAG_READWRITE);
    object_property_add_uint32_ptr(obj, "reduced-phys-bits",
                                   &sev_common->reduced_phys_bits,
                                   OBJ_PROP_FLAG_READWRITE);
}

/* sev guest info common to sev/sev-es/sev-snp */
static const TypeInfo sev_common_info = {
    .parent = TYPE_X86_CONFIDENTIAL_GUEST,
    .name = TYPE_SEV_COMMON,
    .instance_size = sizeof(SevCommonState),
    .instance_init = sev_common_instance_init,
    .class_size = sizeof(SevCommonStateClass),
    .class_init = sev_common_class_init,
    .abstract = true,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_USER_CREATABLE },
        { }
    }
};

static char *
sev_guest_get_dh_cert_file(Object *obj, Error **errp)
{
    return g_strdup(SEV_GUEST(obj)->dh_cert_file);
}

static void
sev_guest_set_dh_cert_file(Object *obj, const char *value, Error **errp)
{
    SEV_GUEST(obj)->dh_cert_file = g_strdup(value);
}

static char *
sev_guest_get_session_file(Object *obj, Error **errp)
{
    SevGuestState *sev_guest = SEV_GUEST(obj);

    return sev_guest->session_file ? g_strdup(sev_guest->session_file) : NULL;
}

static void
sev_guest_set_session_file(Object *obj, const char *value, Error **errp)
{
    SEV_GUEST(obj)->session_file = g_strdup(value);
}

static bool sev_guest_get_legacy_vm_type(Object *obj, Error **errp)
{
    return SEV_GUEST(obj)->legacy_vm_type;
}

static void sev_guest_set_legacy_vm_type(Object *obj, bool value, Error **errp)
{
    SEV_GUEST(obj)->legacy_vm_type = value;
}

static void
sev_guest_class_init(ObjectClass *oc, void *data)
{
    SevCommonStateClass *klass = SEV_COMMON_CLASS(oc);
    X86ConfidentialGuestClass *x86_klass = X86_CONFIDENTIAL_GUEST_CLASS(oc);

    klass->build_kernel_loader_hashes = sev_build_kernel_loader_hashes;
    klass->launch_start = sev_launch_start;
    klass->launch_finish = sev_launch_finish;
    klass->launch_update_data = sev_launch_update_data;
    klass->kvm_init = sev_kvm_init;
    x86_klass->kvm_type = sev_kvm_type;

    object_class_property_add_str(oc, "dh-cert-file",
                                  sev_guest_get_dh_cert_file,
                                  sev_guest_set_dh_cert_file);
    object_class_property_set_description(oc, "dh-cert-file",
            "guest owner's DH certificate (encoded with base64)");
    object_class_property_add_str(oc, "session-file",
                                  sev_guest_get_session_file,
                                  sev_guest_set_session_file);
    object_class_property_set_description(oc, "session-file",
            "guest owner's session parameters (encoded with base64)");
    object_class_property_add_bool(oc, "legacy-vm-type",
                                   sev_guest_get_legacy_vm_type,
                                   sev_guest_set_legacy_vm_type);
    object_class_property_set_description(oc, "legacy-vm-type",
            "use legacy VM type to maintain measurement compatibility with older QEMU or kernel versions");
}

static void
sev_guest_instance_init(Object *obj)
{
    SevGuestState *sev_guest = SEV_GUEST(obj);

    sev_guest->policy = DEFAULT_GUEST_POLICY;
    object_property_add_uint32_ptr(obj, "handle", &sev_guest->handle,
                                   OBJ_PROP_FLAG_READWRITE);
    object_property_add_uint32_ptr(obj, "policy", &sev_guest->policy,
                                   OBJ_PROP_FLAG_READWRITE);
    object_apply_compat_props(obj);
}

/* guest info specific to sev/sev-es */
static const TypeInfo sev_guest_info = {
    .parent = TYPE_SEV_COMMON,
    .name = TYPE_SEV_GUEST,
    .instance_size = sizeof(SevGuestState),
    .instance_init = sev_guest_instance_init,
    .class_init = sev_guest_class_init,
};
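/*
 * Illustrative only: a sketch of how the sev-guest properties registered
 * above might be supplied on the command line.  The object id, file paths
 * and values are hypothetical placeholders, not defaults or recommendations;
 * cbitpos and reduced-phys-bits in particular depend on the host CPU, and
 * the certificate/session files must contain base64-encoded blobs.
 *
 *   # ${QEMU} \
 *       -machine ...,confidential-guest-support=sev0 \
 *       -object sev-guest,id=sev0,cbitpos=51,reduced-phys-bits=1,\
 *               kernel-hashes=on,dh-cert-file=/path/dh.b64,\
 *               session-file=/path/session.b64
 */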
static void
sev_snp_guest_get_policy(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    visit_type_uint64(v, name,
                      (uint64_t *)&SEV_SNP_GUEST(obj)->kvm_start_conf.policy,
                      errp);
}

static void
sev_snp_guest_set_policy(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    visit_type_uint64(v, name,
                      (uint64_t *)&SEV_SNP_GUEST(obj)->kvm_start_conf.policy,
                      errp);
}

static char *
sev_snp_guest_get_guest_visible_workarounds(Object *obj, Error **errp)
{
    return g_strdup(SEV_SNP_GUEST(obj)->guest_visible_workarounds);
}

static void
sev_snp_guest_set_guest_visible_workarounds(Object *obj, const char *value,
                                            Error **errp)
{
    SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
    struct kvm_sev_snp_launch_start *start = &sev_snp_guest->kvm_start_conf;
    g_autofree guchar *blob;
    gsize len;

    g_free(sev_snp_guest->guest_visible_workarounds);

    /* store the base64 str so we don't need to re-encode in getter */
    sev_snp_guest->guest_visible_workarounds = g_strdup(value);

    blob = qbase64_decode(sev_snp_guest->guest_visible_workarounds,
                          -1, &len, errp);
    if (!blob) {
        return;
    }

    if (len != sizeof(start->gosvw)) {
        error_setg(errp, "parameter length of %" G_GSIZE_FORMAT
                   " not equal to %zu",
                   len, sizeof(start->gosvw));
        return;
    }

    memcpy(start->gosvw, blob, len);
}

static char *
sev_snp_guest_get_id_block(Object *obj, Error **errp)
{
    SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);

    return g_strdup(sev_snp_guest->id_block_base64);
}

static void
sev_snp_guest_set_id_block(Object *obj, const char *value, Error **errp)
{
    SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
    struct kvm_sev_snp_launch_finish *finish = &sev_snp_guest->kvm_finish_conf;
    gsize len;

    /* Disable the ID block until the new value decodes successfully. */
    finish->id_block_en = 0;
    g_free(sev_snp_guest->id_block);
    g_free(sev_snp_guest->id_block_base64);

    /* store the base64 str so we don't need to re-encode in getter */
    sev_snp_guest->id_block_base64 = g_strdup(value);
    sev_snp_guest->id_block =
        qbase64_decode(sev_snp_guest->id_block_base64, -1, &len, errp);

    if (!sev_snp_guest->id_block) {
        return;
    }

    if (len != KVM_SEV_SNP_ID_BLOCK_SIZE) {
        error_setg(errp, "parameter length of %" G_GSIZE_FORMAT
                   " not equal to %u",
                   len, KVM_SEV_SNP_ID_BLOCK_SIZE);
        return;
    }

    finish->id_block_en = 1;
    finish->id_block_uaddr = (uintptr_t)sev_snp_guest->id_block;
}

static char *
sev_snp_guest_get_id_auth(Object *obj, Error **errp)
{
    SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);

    return g_strdup(sev_snp_guest->id_auth_base64);
}

static void
sev_snp_guest_set_id_auth(Object *obj, const char *value, Error **errp)
{
    SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
    struct kvm_sev_snp_launch_finish *finish = &sev_snp_guest->kvm_finish_conf;
    gsize len;

    /* Clear the stale pointer until the new value decodes successfully. */
    finish->id_auth_uaddr = 0;
    g_free(sev_snp_guest->id_auth);
    g_free(sev_snp_guest->id_auth_base64);

    /* store the base64 str so we don't need to re-encode in getter */
    sev_snp_guest->id_auth_base64 = g_strdup(value);
    sev_snp_guest->id_auth =
        qbase64_decode(sev_snp_guest->id_auth_base64, -1, &len, errp);

    if (!sev_snp_guest->id_auth) {
        return;
    }

    if (len > KVM_SEV_SNP_ID_AUTH_SIZE) {
        error_setg(errp, "id-auth parameter length of %" G_GSIZE_FORMAT
                   " exceeds max of %u",
                   len, KVM_SEV_SNP_ID_AUTH_SIZE);
        return;
    }

    finish->id_auth_uaddr = (uintptr_t)sev_snp_guest->id_auth;
}
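/*
 * Illustrative only: id-block and id-auth are opaque binary structures
 * defined by the SEV-SNP firmware ABI (exactly KVM_SEV_SNP_ID_BLOCK_SIZE
 * bytes and at most KVM_SEV_SNP_ID_AUTH_SIZE bytes respectively, as checked
 * by the setters above) and are typically produced by external signing
 * tooling.  They are passed to QEMU base64-encoded; the file names below
 * are hypothetical placeholders:
 *
 *   $ base64 -w0 id_block.bin > id_block.b64
 *   $ base64 -w0 id_auth.bin  > id_auth.b64
 *   ... -object sev-snp-guest,...,id-block=$(cat id_block.b64),\
 *               id-auth=$(cat id_auth.b64)
 */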
static bool
sev_snp_guest_get_author_key_enabled(Object *obj, Error **errp)
{
    SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);

    return !!sev_snp_guest->kvm_finish_conf.auth_key_en;
}

static void
sev_snp_guest_set_author_key_enabled(Object *obj, bool value, Error **errp)
{
    SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);

    sev_snp_guest->kvm_finish_conf.auth_key_en = value;
}

static bool
sev_snp_guest_get_vcek_disabled(Object *obj, Error **errp)
{
    SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);

    return !!sev_snp_guest->kvm_finish_conf.vcek_disabled;
}

static void
sev_snp_guest_set_vcek_disabled(Object *obj, bool value, Error **errp)
{
    SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);

    sev_snp_guest->kvm_finish_conf.vcek_disabled = value;
}

static char *
sev_snp_guest_get_host_data(Object *obj, Error **errp)
{
    SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);

    return g_strdup(sev_snp_guest->host_data);
}

static void
sev_snp_guest_set_host_data(Object *obj, const char *value, Error **errp)
{
    SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
    struct kvm_sev_snp_launch_finish *finish = &sev_snp_guest->kvm_finish_conf;
    g_autofree guchar *blob;
    gsize len;

    g_free(sev_snp_guest->host_data);

    /* store the base64 str so we don't need to re-encode in getter */
    sev_snp_guest->host_data = g_strdup(value);

    blob = qbase64_decode(sev_snp_guest->host_data, -1, &len, errp);

    if (!blob) {
        return;
    }

    if (len != sizeof(finish->host_data)) {
        error_setg(errp, "parameter length of %" G_GSIZE_FORMAT
                   " not equal to %zu",
                   len, sizeof(finish->host_data));
        return;
    }

    memcpy(finish->host_data, blob, len);
}

static void
sev_snp_guest_class_init(ObjectClass *oc, void *data)
{
    SevCommonStateClass *klass = SEV_COMMON_CLASS(oc);
    X86ConfidentialGuestClass *x86_klass = X86_CONFIDENTIAL_GUEST_CLASS(oc);

    klass->build_kernel_loader_hashes = sev_snp_build_kernel_loader_hashes;
    klass->launch_start = sev_snp_launch_start;
    klass->launch_finish = sev_snp_launch_finish;
    klass->launch_update_data = sev_snp_launch_update_data;
    klass->kvm_init = sev_snp_kvm_init;
    x86_klass->mask_cpuid_features = sev_snp_mask_cpuid_features;
    x86_klass->kvm_type = sev_snp_kvm_type;

    object_class_property_add(oc, "policy", "uint64",
                              sev_snp_guest_get_policy,
                              sev_snp_guest_set_policy, NULL, NULL);
    object_class_property_add_str(oc, "guest-visible-workarounds",
                                  sev_snp_guest_get_guest_visible_workarounds,
                                  sev_snp_guest_set_guest_visible_workarounds);
    object_class_property_add_str(oc, "id-block",
                                  sev_snp_guest_get_id_block,
                                  sev_snp_guest_set_id_block);
    object_class_property_add_str(oc, "id-auth",
                                  sev_snp_guest_get_id_auth,
                                  sev_snp_guest_set_id_auth);
    object_class_property_add_bool(oc, "author-key-enabled",
                                   sev_snp_guest_get_author_key_enabled,
                                   sev_snp_guest_set_author_key_enabled);
    object_class_property_add_bool(oc, "vcek-disabled",
                                   sev_snp_guest_get_vcek_disabled,
                                   sev_snp_guest_set_vcek_disabled);
    object_class_property_add_str(oc, "host-data",
                                  sev_snp_guest_get_host_data,
                                  sev_snp_guest_set_host_data);
}

static void
sev_snp_guest_instance_init(Object *obj)
{
    ConfidentialGuestSupport *cgs = CONFIDENTIAL_GUEST_SUPPORT(obj);
    SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);

    cgs->require_guest_memfd = true;

    /* default init/start/finish params for kvm */
    sev_snp_guest->kvm_start_conf.policy = DEFAULT_SEV_SNP_POLICY;
}
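/*
 * Illustrative only: a sketch of an SNP guest configuration using the
 * properties registered above.  The object id and values are hypothetical
 * placeholders; policy bits are defined by the SEV-SNP firmware ABI, and
 * host-data must decode to exactly the size of the launch-finish host_data
 * field.
 *
 *   # ${QEMU} \
 *       -machine ...,confidential-guest-support=snp0 \
 *       -object sev-snp-guest,id=snp0,cbitpos=51,reduced-phys-bits=1,\
 *               policy=0x30000,host-data=<base64 blob>
 */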
/* guest info specific to sev-snp */
static const TypeInfo sev_snp_guest_info = {
    .parent = TYPE_SEV_COMMON,
    .name = TYPE_SEV_SNP_GUEST,
    .instance_size = sizeof(SevSnpGuestState),
    .class_init = sev_snp_guest_class_init,
    .instance_init = sev_snp_guest_instance_init,
};

static void
sev_register_types(void)
{
    type_register_static(&sev_common_info);
    type_register_static(&sev_guest_info);
    type_register_static(&sev_snp_guest_info);
}

type_init(sev_register_types);