/*
 * QEMU TDX support
 *
 * Copyright (c) 2025 Intel Corporation
 *
 * Author:
 *      Xiaoyao Li <xiaoyao.li@intel.com>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/base64.h"
#include "qemu/mmap-alloc.h"
#include "qapi/error.h"
#include "qom/object_interfaces.h"
#include "crypto/hash.h"
#include "system/kvm_int.h"
#include "system/runstate.h"
#include "system/system.h"
#include "system/ramblock.h"

#include <linux/kvm_para.h>

#include "cpu.h"
#include "cpu-internal.h"
#include "host-cpu.h"
#include "hw/i386/e820_memory_layout.h"
#include "hw/i386/tdvf.h"
#include "hw/i386/x86.h"
#include "hw/i386/tdvf-hob.h"
#include "kvm_i386.h"
#include "tdx.h"

#include "standard-headers/asm-x86/kvm_para.h"

#define TDX_MIN_TSC_FREQUENCY_KHZ   (100 * 1000)
#define TDX_MAX_TSC_FREQUENCY_KHZ   (10 * 1000 * 1000)

#define TDX_TD_ATTRIBUTES_DEBUG             BIT_ULL(0)
#define TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE   BIT_ULL(28)
#define TDX_TD_ATTRIBUTES_PKS               BIT_ULL(30)
#define TDX_TD_ATTRIBUTES_PERFMON           BIT_ULL(63)

#define TDX_SUPPORTED_TD_ATTRS  (TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE | \
                                 TDX_TD_ATTRIBUTES_PKS | \
                                 TDX_TD_ATTRIBUTES_PERFMON)

#define TDX_SUPPORTED_KVM_FEATURES  ((1U << KVM_FEATURE_NOP_IO_DELAY) | \
                                     (1U << KVM_FEATURE_PV_UNHALT) | \
                                     (1U << KVM_FEATURE_PV_TLB_FLUSH) | \
                                     (1U << KVM_FEATURE_PV_SEND_IPI) | \
                                     (1U << KVM_FEATURE_POLL_CONTROL) | \
                                     (1U << KVM_FEATURE_PV_SCHED_YIELD) | \
                                     (1U << KVM_FEATURE_MSI_EXT_DEST_ID))

static TdxGuest *tdx_guest;

static struct kvm_tdx_capabilities *tdx_caps;
static struct kvm_cpuid2 *tdx_supported_cpuid;

/* Valid after kvm_arch_init()->confidential_guest_kvm_init()->tdx_kvm_init() */
bool is_tdx_vm(void)
{
    return !!tdx_guest;
}

enum tdx_ioctl_level {
    TDX_VM_IOCTL,
    TDX_VCPU_IOCTL,
};

static int tdx_ioctl_internal(enum tdx_ioctl_level level, void *state,
                              int cmd_id, __u32 flags, void *data,
                              Error **errp)
{
    struct kvm_tdx_cmd tdx_cmd = {};
    int r;

    const char *tdx_ioctl_name[] = {
        [KVM_TDX_CAPABILITIES] = "KVM_TDX_CAPABILITIES",
        [KVM_TDX_INIT_VM] = "KVM_TDX_INIT_VM",
        [KVM_TDX_INIT_VCPU] = "KVM_TDX_INIT_VCPU",
        [KVM_TDX_INIT_MEM_REGION] = "KVM_TDX_INIT_MEM_REGION",
        [KVM_TDX_FINALIZE_VM] = "KVM_TDX_FINALIZE_VM",
        [KVM_TDX_GET_CPUID] = "KVM_TDX_GET_CPUID",
    };

    tdx_cmd.id = cmd_id;
    tdx_cmd.flags = flags;
    tdx_cmd.data = (__u64)(unsigned long)data;

    switch (level) {
    case TDX_VM_IOCTL:
        r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_OP, &tdx_cmd);
        break;
    case TDX_VCPU_IOCTL:
        r = kvm_vcpu_ioctl(state, KVM_MEMORY_ENCRYPT_OP, &tdx_cmd);
        break;
    default:
        error_setg(errp, "Invalid tdx_ioctl_level %d", level);
        return -EINVAL;
    }

    if (r < 0) {
        error_setg_errno(errp, -r, "TDX ioctl %s failed, hw_errors: 0x%llx",
                         tdx_ioctl_name[cmd_id], tdx_cmd.hw_error);
    }
    return r;
}

static inline int tdx_vm_ioctl(int cmd_id, __u32 flags, void *data,
                               Error **errp)
{
    return tdx_ioctl_internal(TDX_VM_IOCTL, NULL, cmd_id, flags, data, errp);
}

static inline int tdx_vcpu_ioctl(CPUState *cpu, int cmd_id, __u32 flags,
                                 void *data, Error **errp)
{
    return tdx_ioctl_internal(TDX_VCPU_IOCTL, cpu, cmd_id, flags, data, errp);
}
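/*
 * A note on the buffer-sizing protocol (summary, not an external contract):
 * KVM_TDX_CAPABILITIES takes a caller-allocated, flexible-array buffer.  The
 * caller guesses how many CPUID config entries to reserve; when the guess is
 * too small, the ioctl fails with -E2BIG and the caller must retry with a
 * bigger buffer, roughly:
 *
 *     caps = g_malloc0(sizeof(*caps) + n * sizeof(struct kvm_cpuid_entry2));
 *     caps->cpuid.nent = n;
 *     r = tdx_vm_ioctl(KVM_TDX_CAPABILITIES, 0, caps, errp);
 *     ... on -E2BIG: g_free(caps), double n, retry ...
 *
 * get_tdx_capabilities() below implements exactly this loop.
 */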
static int get_tdx_capabilities(Error **errp)
{
    struct kvm_tdx_capabilities *caps;
    /* 1st generation of TDX reports 6 cpuid configs */
    int nr_cpuid_configs = 6;
    size_t size;
    int r;

    do {
        Error *local_err = NULL;
        size = sizeof(struct kvm_tdx_capabilities) +
               nr_cpuid_configs * sizeof(struct kvm_cpuid_entry2);
        caps = g_malloc0(size);
        caps->cpuid.nent = nr_cpuid_configs;

        r = tdx_vm_ioctl(KVM_TDX_CAPABILITIES, 0, caps, &local_err);
        if (r == -E2BIG) {
            g_free(caps);
            nr_cpuid_configs *= 2;
            if (nr_cpuid_configs > KVM_MAX_CPUID_ENTRIES) {
                error_report("KVM TDX seems broken: the number of CPUID entries"
                             " in kvm_tdx_capabilities exceeds the limit: %d",
                             KVM_MAX_CPUID_ENTRIES);
                error_propagate(errp, local_err);
                return r;
            }
            error_free(local_err);
        } else if (r < 0) {
            g_free(caps);
            error_propagate(errp, local_err);
            return r;
        }
    } while (r == -E2BIG);

    tdx_caps = caps;

    return 0;
}

void tdx_set_tdvf_region(MemoryRegion *tdvf_mr)
{
    assert(!tdx_guest->tdvf_mr);
    tdx_guest->tdvf_mr = tdvf_mr;
}

static TdxFirmwareEntry *tdx_get_hob_entry(TdxGuest *tdx)
{
    TdxFirmwareEntry *entry;

    for_each_tdx_fw_entry(&tdx->tdvf, entry) {
        if (entry->type == TDVF_SECTION_TYPE_TD_HOB) {
            return entry;
        }
    }
    error_report("TDVF metadata doesn't specify TD_HOB location.");
    exit(1);
}

static void tdx_add_ram_entry(uint64_t address, uint64_t length,
                              enum TdxRamType type)
{
    uint32_t nr_entries = tdx_guest->nr_ram_entries;
    tdx_guest->ram_entries = g_renew(TdxRamEntry, tdx_guest->ram_entries,
                                     nr_entries + 1);

    tdx_guest->ram_entries[nr_entries].address = address;
    tdx_guest->ram_entries[nr_entries].length = length;
    tdx_guest->ram_entries[nr_entries].type = type;
    tdx_guest->nr_ram_entries++;
}
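/*
 * Worked example (illustrative numbers): accepting [2M, 3M) out of an
 * unaccepted RAM entry [1M, 4M) turns that entry into the accepted range and
 * appends the leftovers as new unaccepted entries:
 *
 *     before: { [1M, 4M) UNACCEPTED }
 *     after:  { [2M, 3M) ADDED, [1M, 2M) UNACCEPTED, [3M, 4M) UNACCEPTED }
 *
 * The list is left unsorted here; tdx_finalize_vm() re-sorts it by address
 * afterwards.
 */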
static int tdx_accept_ram_range(uint64_t address, uint64_t length)
{
    uint64_t head_start, tail_start, head_length, tail_length;
    uint64_t tmp_address, tmp_length;
    TdxRamEntry *e;
    int i = 0;

    do {
        if (i == tdx_guest->nr_ram_entries) {
            return -1;
        }

        e = &tdx_guest->ram_entries[i++];
    } while (address + length <= e->address || address >= e->address + e->length);

    /*
     * The to-be-accepted ram range must be fully contained by one
     * RAM entry.
     */
    if (e->address > address ||
        e->address + e->length < address + length) {
        return -1;
    }

    if (e->type == TDX_RAM_ADDED) {
        return 0;
    }

    tmp_address = e->address;
    tmp_length = e->length;

    e->address = address;
    e->length = length;
    e->type = TDX_RAM_ADDED;

    head_length = address - tmp_address;
    if (head_length > 0) {
        head_start = tmp_address;
        tdx_add_ram_entry(head_start, head_length, TDX_RAM_UNACCEPTED);
    }

    tail_start = address + length;
    if (tail_start < tmp_address + tmp_length) {
        tail_length = tmp_address + tmp_length - tail_start;
        tdx_add_ram_entry(tail_start, tail_length, TDX_RAM_UNACCEPTED);
    }

    return 0;
}

static int tdx_ram_entry_compare(const void *lhs_, const void *rhs_)
{
    const TdxRamEntry *lhs = lhs_;
    const TdxRamEntry *rhs = rhs_;

    if (lhs->address == rhs->address) {
        return 0;
    }
    if (le64_to_cpu(lhs->address) > le64_to_cpu(rhs->address)) {
        return 1;
    }
    return -1;
}

static void tdx_init_ram_entries(void)
{
    unsigned i, j, nr_e820_entries;

    nr_e820_entries = e820_get_table(NULL);
    tdx_guest->ram_entries = g_new(TdxRamEntry, nr_e820_entries);

    for (i = 0, j = 0; i < nr_e820_entries; i++) {
        uint64_t addr, len;

        if (e820_get_entry(i, E820_RAM, &addr, &len)) {
            tdx_guest->ram_entries[j].address = addr;
            tdx_guest->ram_entries[j].length = len;
            tdx_guest->ram_entries[j].type = TDX_RAM_UNACCEPTED;
            j++;
        }
    }
    tdx_guest->nr_ram_entries = j;
}

static void tdx_post_init_vcpus(void)
{
    TdxFirmwareEntry *hob;
    CPUState *cpu;

    hob = tdx_get_hob_entry(tdx_guest);
    CPU_FOREACH(cpu) {
        tdx_vcpu_ioctl(cpu, KVM_TDX_INIT_VCPU, 0, (void *)(uintptr_t)hob->address,
                       &error_fatal);
    }
}
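/*
 * Machine-init-done notifier: build the TD's initial, measured state.  In
 * summary, the steps below are:
 *   1. derive the RAM entry list from the e820 table;
 *   2. map TDVF sections, allocating temporary buffers for TD_HOB/TEMP_MEM;
 *   3. create the TD HOB list and run KVM_TDX_INIT_VCPU on every vCPU;
 *   4. feed each section to KVM_TDX_INIT_MEM_REGION, measured when the
 *      section requests MR_EXTEND;
 *   5. seal the measurement with KVM_TDX_FINALIZE_VM.
 */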
static void tdx_finalize_vm(Notifier *notifier, void *unused)
{
    TdxFirmware *tdvf = &tdx_guest->tdvf;
    TdxFirmwareEntry *entry;
    RAMBlock *ram_block;
    Error *local_err = NULL;
    int r;

    tdx_init_ram_entries();

    for_each_tdx_fw_entry(tdvf, entry) {
        switch (entry->type) {
        case TDVF_SECTION_TYPE_BFV:
        case TDVF_SECTION_TYPE_CFV:
            entry->mem_ptr = tdvf->mem_ptr + entry->data_offset;
            break;
        case TDVF_SECTION_TYPE_TD_HOB:
        case TDVF_SECTION_TYPE_TEMP_MEM:
            entry->mem_ptr = qemu_ram_mmap(-1, entry->size,
                                           qemu_real_host_page_size(), 0, 0);
            if (entry->mem_ptr == MAP_FAILED) {
                error_report("Failed to mmap memory for TDVF section %d",
                             entry->type);
                exit(1);
            }
            if (tdx_accept_ram_range(entry->address, entry->size)) {
                error_report("Failed to accept memory for TDVF section %d",
                             entry->type);
                qemu_ram_munmap(-1, entry->mem_ptr, entry->size);
                exit(1);
            }
            break;
        default:
            error_report("Unsupported TDVF section %d", entry->type);
            exit(1);
        }
    }

    qsort(tdx_guest->ram_entries, tdx_guest->nr_ram_entries,
          sizeof(TdxRamEntry), &tdx_ram_entry_compare);

    tdvf_hob_create(tdx_guest, tdx_get_hob_entry(tdx_guest));

    tdx_post_init_vcpus();

    for_each_tdx_fw_entry(tdvf, entry) {
        struct kvm_tdx_init_mem_region region;
        uint32_t flags;

        region = (struct kvm_tdx_init_mem_region) {
            .source_addr = (uintptr_t)entry->mem_ptr,
            .gpa = entry->address,
            .nr_pages = entry->size >> 12,
        };

        flags = entry->attributes & TDVF_SECTION_ATTRIBUTES_MR_EXTEND ?
                KVM_TDX_MEASURE_MEMORY_REGION : 0;

        do {
            error_free(local_err);
            local_err = NULL;
            r = tdx_vcpu_ioctl(first_cpu, KVM_TDX_INIT_MEM_REGION, flags,
                               &region, &local_err);
        } while (r == -EAGAIN || r == -EINTR);
        if (r < 0) {
            error_report_err(local_err);
            exit(1);
        }

        if (entry->type == TDVF_SECTION_TYPE_TD_HOB ||
            entry->type == TDVF_SECTION_TYPE_TEMP_MEM) {
            qemu_ram_munmap(-1, entry->mem_ptr, entry->size);
            entry->mem_ptr = NULL;
        }
    }

    /*
     * The TDVF image has been copied into the TD's private memory above via
     * KVM_TDX_INIT_MEM_REGION, so the shared copy backing tdvf_mr is no
     * longer needed and can be discarded.
     */
    ram_block = tdx_guest->tdvf_mr->ram_block;
    ram_block_discard_range(ram_block, 0, ram_block->max_length);

    tdx_vm_ioctl(KVM_TDX_FINALIZE_VM, 0, NULL, &error_fatal);
    CONFIDENTIAL_GUEST_SUPPORT(tdx_guest)->ready = true;
}

static Notifier tdx_machine_done_notify = {
    .notify = tdx_finalize_vm,
};
/*
 * Some CPUID bits change from fixed1 to configurable when the TDX module
 * supports TDX_FEATURES0.VE_REDUCTION, e.g., MCA/MCE/MTRR/CORE_CAPABILITY.
 *
 * To make QEMU work with all versions of the TDX module, keep a bit here if
 * it is fixed1 in any version, even when it is not fixed1 in the latest one.
 * Otherwise, with an older TDX module, QEMU may treat the fixed1 bit as
 * unsupported.
 *
 * For a newer TDX module, it does no harm to keep such bits in
 * tdx_fixed1_bits even though they have become configurable, because
 * tdx_fixed1_bits is only used to set up the supported bits.
 */
KvmCpuidInfo tdx_fixed1_bits = {
    .cpuid.nent = 8,
    .entries[0] = {
        .function = 0x1,
        .index = 0,
        .ecx = CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_DTES64 |
               CPUID_EXT_DSCPL | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 |
               CPUID_EXT_PDCM | CPUID_EXT_PCID | CPUID_EXT_SSE41 |
               CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE |
               CPUID_EXT_POPCNT | CPUID_EXT_AES | CPUID_EXT_XSAVE |
               CPUID_EXT_RDRAND | CPUID_EXT_HYPERVISOR,
        .edx = CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC |
               CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC |
               CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
               CPUID_PAT | CPUID_CLFLUSH | CPUID_DTS | CPUID_MMX | CPUID_FXSR |
               CPUID_SSE | CPUID_SSE2,
    },
    .entries[1] = {
        .function = 0x6,
        .index = 0,
        .eax = CPUID_6_EAX_ARAT,
    },
    .entries[2] = {
        .function = 0x7,
        .index = 0,
        .flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX,
        .ebx = CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_FDP_EXCPTN_ONLY |
               CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_INVPCID |
               CPUID_7_0_EBX_ZERO_FCS_FDS | CPUID_7_0_EBX_RDSEED |
               CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
               CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_SHA_NI,
        .ecx = CPUID_7_0_ECX_BUS_LOCK_DETECT | CPUID_7_0_ECX_MOVDIRI |
               CPUID_7_0_ECX_MOVDIR64B,
        .edx = CPUID_7_0_EDX_MD_CLEAR | CPUID_7_0_EDX_SPEC_CTRL |
               CPUID_7_0_EDX_STIBP | CPUID_7_0_EDX_FLUSH_L1D |
               CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_CORE_CAPABILITY |
               CPUID_7_0_EDX_SPEC_CTRL_SSBD,
    },
    .entries[3] = {
        .function = 0x7,
        .index = 2,
        .flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX,
        .edx = CPUID_7_2_EDX_PSFD | CPUID_7_2_EDX_IPRED_CTRL |
               CPUID_7_2_EDX_RRSBA_CTRL | CPUID_7_2_EDX_BHI_CTRL,
    },
    .entries[4] = {
        .function = 0xD,
        .index = 0,
        .flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX,
        .eax = XSTATE_FP_MASK | XSTATE_SSE_MASK,
    },
    .entries[5] = {
        .function = 0xD,
        .index = 1,
        .flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX,
        .eax = CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
               CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES,
    },
    .entries[6] = {
        .function = 0x80000001,
        .index = 0,
        .ecx = CPUID_EXT3_LAHF_LM | CPUID_EXT3_ABM | CPUID_EXT3_3DNOWPREFETCH,
        /*
         * Strictly speaking, SYSCALL is not a fixed1 bit since it depends on
         * the CPU being in 64-bit mode.  But fixed1 here serves the purpose
         * of enumerating the supported bits for TDX, and in that sense
         * SYSCALL is always supported.
         */
        .edx = CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB |
               CPUID_EXT2_RDTSCP | CPUID_EXT2_LM,
    },
    .entries[7] = {
        .function = 0x80000007,
        .index = 0,
        .edx = CPUID_APM_INVTSC,
    },
};

typedef struct TdxAttrsMap {
    uint32_t attr_index;
    uint32_t cpuid_leaf;
    uint32_t cpuid_subleaf;
    int cpuid_reg;
    uint32_t feat_mask;
} TdxAttrsMap;

static TdxAttrsMap tdx_attrs_maps[] = {
    {.attr_index = 27,
     .cpuid_leaf = 7,
     .cpuid_subleaf = 1,
     .cpuid_reg = R_EAX,
     .feat_mask = CPUID_7_1_EAX_LASS,},

    {.attr_index = 30,
     .cpuid_leaf = 7,
     .cpuid_subleaf = 0,
     .cpuid_reg = R_ECX,
     .feat_mask = CPUID_7_0_ECX_PKS,},

    {.attr_index = 31,
     .cpuid_leaf = 7,
     .cpuid_subleaf = 0,
     .cpuid_reg = R_ECX,
     .feat_mask = CPUID_7_0_ECX_KeyLocker,},
};
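/*
 * Example (illustrative): TD ATTRIBUTES bit 30 is PKS.  When the TDX module
 * reports it in supported_attrs, tdx_add_supported_cpuid_by_attrs() below
 * marks CPUID.7.0:ECX.PKS as supported via the mapping above.
 */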
typedef struct TdxXFAMDep {
    int xfam_bit;
    FeatureMask feat_mask;
} TdxXFAMDep;

/*
 * Note, only the CPUID bits whose virtualization type is "XFAM & Native" are
 * defined here.
 *
 * Those whose virtualization type is "XFAM & Configured & Native" are
 * reported as configurable bits, and they are not supported when absent from
 * KVM's configurable-bits list even if the corresponding XFAM bit is
 * supported.
 */
TdxXFAMDep tdx_xfam_deps[] = {
    { XSTATE_YMM_BIT,       { FEAT_1_ECX, CPUID_EXT_FMA }},
    { XSTATE_YMM_BIT,       { FEAT_7_0_EBX, CPUID_7_0_EBX_AVX2 }},
    { XSTATE_OPMASK_BIT,    { FEAT_7_0_ECX, CPUID_7_0_ECX_AVX512_VBMI}},
    { XSTATE_OPMASK_BIT,    { FEAT_7_0_EDX, CPUID_7_0_EDX_AVX512_FP16}},
    { XSTATE_PT_BIT,        { FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT}},
    { XSTATE_PKRU_BIT,      { FEAT_7_0_ECX, CPUID_7_0_ECX_PKU}},
    { XSTATE_XTILE_CFG_BIT, { FEAT_7_0_EDX, CPUID_7_0_EDX_AMX_BF16 }},
    { XSTATE_XTILE_CFG_BIT, { FEAT_7_0_EDX, CPUID_7_0_EDX_AMX_TILE }},
    { XSTATE_XTILE_CFG_BIT, { FEAT_7_0_EDX, CPUID_7_0_EDX_AMX_INT8 }},
};

static struct kvm_cpuid_entry2 *find_in_supported_entry(uint32_t function,
                                                        uint32_t index)
{
    struct kvm_cpuid_entry2 *e;

    e = cpuid_find_entry(tdx_supported_cpuid, function, index);
    if (!e) {
        if (tdx_supported_cpuid->nent >= KVM_MAX_CPUID_ENTRIES) {
            error_report("tdx_supported_cpuid requires more space than %d entries",
                         KVM_MAX_CPUID_ENTRIES);
            exit(1);
        }
        e = &tdx_supported_cpuid->entries[tdx_supported_cpuid->nent++];
        e->function = function;
        e->index = index;
    }

    return e;
}

static void tdx_add_supported_cpuid_by_fixed1_bits(void)
{
    struct kvm_cpuid_entry2 *e, *e1;
    int i;

    for (i = 0; i < tdx_fixed1_bits.cpuid.nent; i++) {
        e = &tdx_fixed1_bits.entries[i];

        e1 = find_in_supported_entry(e->function, e->index);
        e1->eax |= e->eax;
        e1->ebx |= e->ebx;
        e1->ecx |= e->ecx;
        e1->edx |= e->edx;
    }
}

static void tdx_add_supported_cpuid_by_attrs(void)
{
    struct kvm_cpuid_entry2 *e;
    TdxAttrsMap *map;
    int i;

    for (i = 0; i < ARRAY_SIZE(tdx_attrs_maps); i++) {
        map = &tdx_attrs_maps[i];
        if (!((1ULL << map->attr_index) & tdx_caps->supported_attrs)) {
            continue;
        }

        e = find_in_supported_entry(map->cpuid_leaf, map->cpuid_subleaf);

        switch (map->cpuid_reg) {
        case R_EAX:
            e->eax |= map->feat_mask;
            break;
        case R_EBX:
            e->ebx |= map->feat_mask;
            break;
        case R_ECX:
            e->ecx |= map->feat_mask;
            break;
        case R_EDX:
            e->edx |= map->feat_mask;
            break;
        }
    }
}
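/*
 * Example (illustrative): if the TDX module sets XSTATE_YMM_BIT in
 * supported_xfam, the tdx_xfam_deps table marks the features that follow
 * that XFAM bit, e.g. FMA (CPUID.1:ECX) and AVX2 (CPUID.7.0:EBX), as
 * supported, since their availability is tied to XFAM rather than to a
 * directly configurable CPUID bit.
 */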
static void tdx_add_supported_cpuid_by_xfam(void)
{
    struct kvm_cpuid_entry2 *e;
    int i;

    const TdxXFAMDep *xfam_dep;
    const FeatureWordInfo *f;
    for (i = 0; i < ARRAY_SIZE(tdx_xfam_deps); i++) {
        xfam_dep = &tdx_xfam_deps[i];
        if (!((1ULL << xfam_dep->xfam_bit) & tdx_caps->supported_xfam)) {
            continue;
        }

        f = &feature_word_info[xfam_dep->feat_mask.index];
        if (f->type != CPUID_FEATURE_WORD) {
            continue;
        }

        e = find_in_supported_entry(f->cpuid.eax, f->cpuid.ecx);
        switch (f->cpuid.reg) {
        case R_EAX:
            e->eax |= xfam_dep->feat_mask.mask;
            break;
        case R_EBX:
            e->ebx |= xfam_dep->feat_mask.mask;
            break;
        case R_ECX:
            e->ecx |= xfam_dep->feat_mask.mask;
            break;
        case R_EDX:
            e->edx |= xfam_dep->feat_mask.mask;
            break;
        }
    }

    e = find_in_supported_entry(0xd, 0);
    e->eax |= (tdx_caps->supported_xfam & CPUID_XSTATE_XCR0_MASK);
    e->edx |= (tdx_caps->supported_xfam & CPUID_XSTATE_XCR0_MASK) >> 32;

    e = find_in_supported_entry(0xd, 1);
    /*
     * Mark XFD as always supported for TDX; it is cleared in
     * tdx_adjust_cpuid_features() if XFD is unavailable on the hardware,
     * because in that case the original data has it as 0.
     */
    e->eax |= CPUID_XSAVE_XFD;
    e->ecx |= (tdx_caps->supported_xfam & CPUID_XSTATE_XSS_MASK);
    e->edx |= (tdx_caps->supported_xfam & CPUID_XSTATE_XSS_MASK) >> 32;
}

static void tdx_add_supported_kvm_features(void)
{
    struct kvm_cpuid_entry2 *e;

    e = find_in_supported_entry(0x40000001, 0);
    e->eax = TDX_SUPPORTED_KVM_FEATURES;
}
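/*
 * The supported-CPUID set is thus the union of (summarizing the helpers
 * above): KVM's directly configurable bits (tdx_caps->cpuid), the fixed1
 * bits, bits implied by supported TD attributes, bits implied by supported
 * XFAM, and the KVM PV feature leaf (0x40000001).
 */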
static void tdx_setup_supported_cpuid(void)
{
    if (tdx_supported_cpuid) {
        return;
    }

    tdx_supported_cpuid = g_malloc0(sizeof(*tdx_supported_cpuid) +
                    KVM_MAX_CPUID_ENTRIES * sizeof(struct kvm_cpuid_entry2));

    memcpy(tdx_supported_cpuid->entries, tdx_caps->cpuid.entries,
           tdx_caps->cpuid.nent * sizeof(struct kvm_cpuid_entry2));
    tdx_supported_cpuid->nent = tdx_caps->cpuid.nent;

    tdx_add_supported_cpuid_by_fixed1_bits();
    tdx_add_supported_cpuid_by_attrs();
    tdx_add_supported_cpuid_by_xfam();

    tdx_add_supported_kvm_features();
}

static int tdx_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    X86MachineState *x86ms = X86_MACHINE(ms);
    TdxGuest *tdx = TDX_GUEST(cgs);
    int r = 0;

    kvm_mark_guest_state_protected();

    if (x86ms->smm == ON_OFF_AUTO_AUTO) {
        x86ms->smm = ON_OFF_AUTO_OFF;
    } else if (x86ms->smm == ON_OFF_AUTO_ON) {
        error_setg(errp, "TDX VM doesn't support SMM");
        return -EINVAL;
    }

    if (x86ms->pic == ON_OFF_AUTO_AUTO) {
        x86ms->pic = ON_OFF_AUTO_OFF;
    } else if (x86ms->pic == ON_OFF_AUTO_ON) {
        error_setg(errp, "TDX VM doesn't support PIC");
        return -EINVAL;
    }

    if (kvm_state->kernel_irqchip_split == ON_OFF_AUTO_AUTO) {
        kvm_state->kernel_irqchip_split = ON_OFF_AUTO_ON;
    } else if (kvm_state->kernel_irqchip_split != ON_OFF_AUTO_ON) {
        error_setg(errp, "TDX VM requires kernel_irqchip to be split");
        return -EINVAL;
    }

    if (!tdx_caps) {
        r = get_tdx_capabilities(errp);
        if (r) {
            return r;
        }
    }

    tdx_setup_supported_cpuid();

    /* TDX relies on KVM_HC_MAP_GPA_RANGE to handle TDG.VP.VMCALL<MapGPA> */
    if (!kvm_enable_hypercall(BIT_ULL(KVM_HC_MAP_GPA_RANGE))) {
        return -EOPNOTSUPP;
    }

    /*
     * Set kvm_readonly_mem_allowed to false, because TDX only supports
     * readonly memory for shared memory but not for private memory.  Besides,
     * whether a memslot is private or shared is not determined by QEMU.
     *
     * Thus, just mark readonly memory not supported for simplicity.
     */
    kvm_readonly_mem_allowed = false;

    qemu_add_machine_init_done_notifier(&tdx_machine_done_notify);

    tdx_guest = tdx;
    return 0;
}

static int tdx_kvm_type(X86ConfidentialGuest *cg)
{
    /* Do the object check */
    TDX_GUEST(cg);

    return KVM_X86_TDX_VM;
}

static void tdx_cpu_instance_init(X86ConfidentialGuest *cg, CPUState *cpu)
{
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    X86CPU *x86cpu = X86_CPU(cpu);

    if (xcc->model) {
        error_report("Named CPU model is not supported for TDX yet!");
        exit(1);
    }

    object_property_set_bool(OBJECT(cpu), "pmu", false, &error_abort);

    /* invtsc is fixed1 for TD guest */
    object_property_set_bool(OBJECT(cpu), "invtsc", true, &error_abort);

    x86cpu->force_cpuid_0x1f = true;
}

static uint32_t tdx_adjust_cpuid_features(X86ConfidentialGuest *cg,
                                          uint32_t feature, uint32_t index,
                                          int reg, uint32_t value)
{
    struct kvm_cpuid_entry2 *e;

    e = cpuid_find_entry(&tdx_fixed1_bits.cpuid, feature, index);
    if (e) {
        value |= cpuid_entry_get_reg(e, reg);
    }

    if (is_feature_word_cpuid(feature, index, reg)) {
        e = cpuid_find_entry(tdx_supported_cpuid, feature, index);
        if (e) {
            value &= cpuid_entry_get_reg(e, reg);
        }
    }

    return value;
}

static struct kvm_cpuid2 *tdx_fetch_cpuid(CPUState *cpu, int *ret)
{
    struct kvm_cpuid2 *fetch_cpuid;
    int size = KVM_MAX_CPUID_ENTRIES;
    Error *local_err = NULL;
    int r;

    do {
        error_free(local_err);
        local_err = NULL;

        fetch_cpuid = g_malloc0(sizeof(*fetch_cpuid) +
                                sizeof(struct kvm_cpuid_entry2) * size);
        fetch_cpuid->nent = size;
        r = tdx_vcpu_ioctl(cpu, KVM_TDX_GET_CPUID, 0, fetch_cpuid, &local_err);
        if (r == -E2BIG) {
            /* Read the required size before freeing the buffer */
            size = fetch_cpuid->nent;
            g_free(fetch_cpuid);
        }
    } while (r == -E2BIG);

    if (r < 0) {
        error_report_err(local_err);
        *ret = r;
        return NULL;
    }

    return fetch_cpuid;
}
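/*
 * Cross-check the user's CPU configuration against what the TD will really
 * see (KVM_TDX_GET_CPUID).  For example (illustrative): requesting a feature
 * the platform cannot offer is reported as unavailable, while a fixed1 bit
 * the user disabled is reported as forced on; with "-cpu ...,enforce" either
 * kind of mismatch fails vCPU creation.
 */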
static int tdx_check_features(X86ConfidentialGuest *cg, CPUState *cs)
{
    uint64_t actual, requested, unavailable, forced_on;
    g_autofree struct kvm_cpuid2 *fetch_cpuid;
    const char *forced_on_prefix = NULL;
    const char *unav_prefix = NULL;
    struct kvm_cpuid_entry2 *entry;
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    FeatureWordInfo *wi;
    FeatureWord w;
    bool mismatch = false;
    int r;

    fetch_cpuid = tdx_fetch_cpuid(cs, &r);
    if (!fetch_cpuid) {
        return r;
    }

    if (cpu->check_cpuid || cpu->enforce_cpuid) {
        unav_prefix = "TDX doesn't support requested feature";
        forced_on_prefix = "TDX forcibly sets the feature";
    }

    for (w = 0; w < FEATURE_WORDS; w++) {
        wi = &feature_word_info[w];
        actual = 0;

        switch (wi->type) {
        case CPUID_FEATURE_WORD:
            entry = cpuid_find_entry(fetch_cpuid, wi->cpuid.eax, wi->cpuid.ecx);
            if (!entry) {
                /*
                 * If KVM doesn't report it, it means it's totally
                 * configurable by QEMU.
                 */
                continue;
            }

            actual = cpuid_entry_get_reg(entry, wi->cpuid.reg);
            break;
        case MSR_FEATURE_WORD:
            /*
             * TODO:
             * validate MSR features when KVM has an interface to report them.
             */
            continue;
        }

        /* Fixup for special cases */
        switch (w) {
        case FEAT_8000_0001_EDX:
            /*
             * Intel enumerates the SYSCALL bit as 1 only when the processor
             * is in 64-bit mode, and before the vCPU runs it is not in
             * 64-bit mode.
             */
            actual |= CPUID_EXT2_SYSCALL;
            break;
        default:
            break;
        }

        requested = env->features[w];
        unavailable = requested & ~actual;
        mark_unavailable_features(cpu, w, unavailable, unav_prefix);
        if (unavailable) {
            mismatch = true;
        }

        forced_on = actual & ~requested;
        mark_forced_on_features(cpu, w, forced_on, forced_on_prefix);
        if (forced_on) {
            mismatch = true;
        }
    }

    if (cpu->enforce_cpuid && mismatch) {
        return -EINVAL;
    }

    if (cpu->phys_bits != host_cpu_phys_bits()) {
        error_report("TDX requires guest CPU physical bits (%u) "
                     "to match host CPU physical bits (%u)",
                     cpu->phys_bits, host_cpu_phys_bits());
        return -EINVAL;
    }

    return 0;
}

static int tdx_validate_attributes(TdxGuest *tdx, Error **errp)
{
    if ((tdx->attributes & ~tdx_caps->supported_attrs)) {
        error_setg(errp, "Invalid attributes 0x%"PRIx64" for TDX VM "
                   "(KVM supported: 0x%"PRIx64")", tdx->attributes,
                   (uint64_t)tdx_caps->supported_attrs);
        return -1;
    }

    if (tdx->attributes & ~TDX_SUPPORTED_TD_ATTRS) {
        error_setg(errp, "Requested TD attribute bits that QEMU doesn't "
                   "support: 0x%"PRIx64" (QEMU supported: 0x%"PRIx64")",
                   tdx->attributes, (uint64_t)TDX_SUPPORTED_TD_ATTRS);
        return -1;
    }

    return 0;
}

static int setup_td_guest_attributes(X86CPU *x86cpu, Error **errp)
{
    CPUX86State *env = &x86cpu->env;

    tdx_guest->attributes |= (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKS) ?
                             TDX_TD_ATTRIBUTES_PKS : 0;
    tdx_guest->attributes |= x86cpu->enable_pmu ? TDX_TD_ATTRIBUTES_PERFMON : 0;

    return tdx_validate_attributes(tdx_guest, errp);
}

static int setup_td_xfam(X86CPU *x86cpu, Error **errp)
{
    CPUX86State *env = &x86cpu->env;
    uint64_t xfam;

    xfam = env->features[FEAT_XSAVE_XCR0_LO] |
           env->features[FEAT_XSAVE_XCR0_HI] |
           env->features[FEAT_XSAVE_XSS_LO] |
           env->features[FEAT_XSAVE_XSS_HI];

    if (xfam & ~tdx_caps->supported_xfam) {
        error_setg(errp, "Invalid XFAM 0x%"PRIx64" for TDX VM (supported: 0x%"PRIx64")",
                   xfam, (uint64_t)tdx_caps->supported_xfam);
        return -1;
    }

    tdx_guest->xfam = xfam;
    return 0;
}

static void tdx_filter_cpuid(struct kvm_cpuid2 *cpuids)
{
    int i, dest_cnt = 0;
    struct kvm_cpuid_entry2 *src, *dest, *conf;

    for (i = 0; i < cpuids->nent; i++) {
        src = cpuids->entries + i;
        conf = cpuid_find_entry(&tdx_caps->cpuid, src->function, src->index);
        if (!conf) {
            continue;
        }
        dest = cpuids->entries + dest_cnt;

        dest->function = src->function;
        dest->index = src->index;
        dest->flags = src->flags;
        dest->eax = src->eax & conf->eax;
        dest->ebx = src->ebx & conf->ebx;
        dest->ecx = src->ecx & conf->ecx;
        dest->edx = src->edx & conf->edx;

        dest_cnt++;
    }
    cpuids->nent = dest_cnt;
}
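/*
 * One-shot TD-scope initialization, performed before the first vCPU is
 * created.  The KVM_TDX_INIT_VM input assembled below consists of: the
 * optional MRCONFIGID/MROWNER/MROWNERCONFIG SHA-384 digests, the TD
 * attributes and XFAM derived from the CPU configuration, and the CPUID
 * table filtered down to what the TDX module accepts as configurable.
 */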
int tdx_pre_create_vcpu(CPUState *cpu, Error **errp)
{
    X86CPU *x86cpu = X86_CPU(cpu);
    CPUX86State *env = &x86cpu->env;
    g_autofree struct kvm_tdx_init_vm *init_vm = NULL;
    Error *local_err = NULL;
    size_t data_len;
    int retry = 10000;
    int r = 0;

    QEMU_LOCK_GUARD(&tdx_guest->lock);
    if (tdx_guest->initialized) {
        return r;
    }

    init_vm = g_malloc0(sizeof(struct kvm_tdx_init_vm) +
                        sizeof(struct kvm_cpuid_entry2) * KVM_MAX_CPUID_ENTRIES);

    if (!kvm_check_extension(kvm_state, KVM_CAP_X86_APIC_BUS_CYCLES_NS)) {
        error_setg(errp, "KVM doesn't support KVM_CAP_X86_APIC_BUS_CYCLES_NS");
        return -EOPNOTSUPP;
    }

    r = kvm_vm_enable_cap(kvm_state, KVM_CAP_X86_APIC_BUS_CYCLES_NS,
                          0, TDX_APIC_BUS_CYCLES_NS);
    if (r < 0) {
        error_setg_errno(errp, -r,
                         "Unable to set core crystal clock frequency to 25MHz");
        return r;
    }

    if (env->tsc_khz && (env->tsc_khz < TDX_MIN_TSC_FREQUENCY_KHZ ||
                         env->tsc_khz > TDX_MAX_TSC_FREQUENCY_KHZ)) {
        error_setg(errp, "Invalid TSC %"PRId64" kHz, must specify cpu_frequency "
                   "between [%d, %d] kHz", env->tsc_khz,
                   TDX_MIN_TSC_FREQUENCY_KHZ, TDX_MAX_TSC_FREQUENCY_KHZ);
        return -EINVAL;
    }

    if (env->tsc_khz % (25 * 1000)) {
        error_setg(errp, "Invalid TSC %"PRId64" kHz, it must be a multiple of 25MHz",
                   env->tsc_khz);
        return -EINVAL;
    }

    /* It's safe even if env->tsc_khz is 0; KVM uses the host's tsc_khz in that case */
    r = kvm_vm_ioctl(kvm_state, KVM_SET_TSC_KHZ, env->tsc_khz);
    if (r < 0) {
        error_setg_errno(errp, -r, "Unable to set TSC frequency to %"PRId64" kHz",
                         env->tsc_khz);
        return r;
    }

    if (tdx_guest->mrconfigid) {
        g_autofree uint8_t *data = qbase64_decode(tdx_guest->mrconfigid,
                              strlen(tdx_guest->mrconfigid), &data_len, errp);
        if (!data) {
            return -1;
        }
        if (data_len != QCRYPTO_HASH_DIGEST_LEN_SHA384) {
            error_setg(errp, "TDX 'mrconfigid' sha384 digest was %zu bytes, "
                       "expected %d bytes", data_len,
                       QCRYPTO_HASH_DIGEST_LEN_SHA384);
            return -1;
        }
        memcpy(init_vm->mrconfigid, data, data_len);
    }

    if (tdx_guest->mrowner) {
        g_autofree uint8_t *data = qbase64_decode(tdx_guest->mrowner,
                              strlen(tdx_guest->mrowner), &data_len, errp);
        if (!data) {
            return -1;
        }
        if (data_len != QCRYPTO_HASH_DIGEST_LEN_SHA384) {
            error_setg(errp, "TDX 'mrowner' sha384 digest was %zu bytes, "
                       "expected %d bytes", data_len,
                       QCRYPTO_HASH_DIGEST_LEN_SHA384);
            return -1;
        }
        memcpy(init_vm->mrowner, data, data_len);
    }

    if (tdx_guest->mrownerconfig) {
        g_autofree uint8_t *data = qbase64_decode(tdx_guest->mrownerconfig,
                              strlen(tdx_guest->mrownerconfig), &data_len, errp);
        if (!data) {
            return -1;
        }
        if (data_len != QCRYPTO_HASH_DIGEST_LEN_SHA384) {
            error_setg(errp, "TDX 'mrownerconfig' sha384 digest was %zu bytes, "
                       "expected %d bytes", data_len,
                       QCRYPTO_HASH_DIGEST_LEN_SHA384);
            return -1;
        }
        memcpy(init_vm->mrownerconfig, data, data_len);
    }

    r = setup_td_guest_attributes(x86cpu, errp);
    if (r) {
        return r;
    }

    r = setup_td_xfam(x86cpu, errp);
    if (r) {
        return r;
    }

    init_vm->cpuid.nent = kvm_x86_build_cpuid(env, init_vm->cpuid.entries, 0);
    tdx_filter_cpuid(&init_vm->cpuid);

    init_vm->attributes = tdx_guest->attributes;
    init_vm->xfam = tdx_guest->xfam;
    /*
     * KVM_TDX_INIT_VM fails with -EAGAIN when the KVM-side
     * SEAMCALL(TDH_MNG_CREATE) returns TDX_RND_NO_ENTROPY, i.e. the random
     * number generator (RDRAND/RDSEED) is busy.
     *
     * Retry in that case.
     */
    do {
        error_free(local_err);
        local_err = NULL;
        r = tdx_vm_ioctl(KVM_TDX_INIT_VM, 0, init_vm, &local_err);
    } while (r == -EAGAIN && --retry);

    if (r < 0) {
        if (!retry) {
            error_append_hint(&local_err, "The hardware RNG (Random Number "
                "Generator) is kept busy by something (via RDRAND/RDSEED), "
                "possibly maliciously, so KVM_TDX_INIT_VM keeps failing due "
                "to lack of entropy.\n");
        }
        error_propagate(errp, local_err);
        return r;
    }

    tdx_guest->initialized = true;

    return 0;
}

int tdx_parse_tdvf(void *flash_ptr, int size)
{
    return tdvf_parse_metadata(&tdx_guest->tdvf, flash_ptr, size);
}

static void tdx_panicked_on_fatal_error(X86CPU *cpu, uint64_t error_code,
                                        char *message, uint64_t gpa)
{
    GuestPanicInformation *panic_info;

    panic_info = g_new0(GuestPanicInformation, 1);
    panic_info->type = GUEST_PANIC_INFORMATION_TYPE_TDX;
    panic_info->u.tdx.error_code = (uint32_t)error_code;
    panic_info->u.tdx.message = message;
    panic_info->u.tdx.gpa = gpa;

    qemu_system_guest_panicked(panic_info);
}

/*
 * Only 8 registers can contain a valid ASCII byte stream to form the fatal
 * message, and their sequence is: R14, R15, RBX, RDI, RSI, R8, R9, RDX
 */
#define TDX_FATAL_MESSAGE_MAX        64

#define TDX_REPORT_FATAL_ERROR_GPA_VALID    BIT_ULL(63)

int tdx_handle_report_fatal_error(X86CPU *cpu, struct kvm_run *run)
{
    uint64_t error_code = run->system_event.data[R_R12];
    uint64_t reg_mask = run->system_event.data[R_ECX];
    char *message = NULL;
    uint64_t *tmp;
    uint64_t gpa = -1ull;

    if (error_code & 0xffff) {
        error_report("TDX: REPORT_FATAL_ERROR: invalid error code: 0x%"PRIx64,
                     error_code);
        return -1;
    }

    if (reg_mask) {
        message = g_malloc0(TDX_FATAL_MESSAGE_MAX + 1);
        tmp = (uint64_t *)message;

#define COPY_REG(REG)                                   \
    do {                                                \
        if (reg_mask & BIT_ULL(REG)) {                  \
            *(tmp++) = run->system_event.data[REG];     \
        }                                               \
    } while (0)

        COPY_REG(R_R14);
        COPY_REG(R_R15);
        COPY_REG(R_EBX);
        COPY_REG(R_EDI);
        COPY_REG(R_ESI);
        COPY_REG(R_R8);
        COPY_REG(R_R9);
        COPY_REG(R_EDX);
        *((char *)tmp) = '\0';
    }
#undef COPY_REG

    if (error_code & TDX_REPORT_FATAL_ERROR_GPA_VALID) {
        gpa = run->system_event.data[R_R13];
    }

    tdx_panicked_on_fatal_error(cpu, error_code, message, gpa);

    return -1;
}
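/*
 * QOM properties of the tdx-guest object.  Typical command-line usage
 * (illustrative; mrconfigid takes a base64-encoded SHA-384 digest):
 *
 *   qemu-system-x86_64 -accel kvm \
 *       -object tdx-guest,id=tdx0,sept-ve-disable=on,mrconfigid=<base64> \
 *       -machine q35,kernel-irqchip=split,confidential-guest-support=tdx0 ...
 */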
static bool tdx_guest_get_sept_ve_disable(Object *obj, Error **errp)
{
    TdxGuest *tdx = TDX_GUEST(obj);

    return !!(tdx->attributes & TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE);
}

static void tdx_guest_set_sept_ve_disable(Object *obj, bool value, Error **errp)
{
    TdxGuest *tdx = TDX_GUEST(obj);

    if (value) {
        tdx->attributes |= TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE;
    } else {
        tdx->attributes &= ~TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE;
    }
}

static char *tdx_guest_get_mrconfigid(Object *obj, Error **errp)
{
    TdxGuest *tdx = TDX_GUEST(obj);

    return g_strdup(tdx->mrconfigid);
}

static void tdx_guest_set_mrconfigid(Object *obj, const char *value, Error **errp)
{
    TdxGuest *tdx = TDX_GUEST(obj);

    g_free(tdx->mrconfigid);
    tdx->mrconfigid = g_strdup(value);
}

static char *tdx_guest_get_mrowner(Object *obj, Error **errp)
{
    TdxGuest *tdx = TDX_GUEST(obj);

    return g_strdup(tdx->mrowner);
}

static void tdx_guest_set_mrowner(Object *obj, const char *value, Error **errp)
{
    TdxGuest *tdx = TDX_GUEST(obj);

    g_free(tdx->mrowner);
    tdx->mrowner = g_strdup(value);
}

static char *tdx_guest_get_mrownerconfig(Object *obj, Error **errp)
{
    TdxGuest *tdx = TDX_GUEST(obj);

    return g_strdup(tdx->mrownerconfig);
}

static void tdx_guest_set_mrownerconfig(Object *obj, const char *value, Error **errp)
{
    TdxGuest *tdx = TDX_GUEST(obj);

    g_free(tdx->mrownerconfig);
    tdx->mrownerconfig = g_strdup(value);
}

/* tdx guest */
OBJECT_DEFINE_TYPE_WITH_INTERFACES(TdxGuest,
                                   tdx_guest,
                                   TDX_GUEST,
                                   X86_CONFIDENTIAL_GUEST,
                                   { TYPE_USER_CREATABLE },
                                   { NULL })

static void tdx_guest_init(Object *obj)
{
    ConfidentialGuestSupport *cgs = CONFIDENTIAL_GUEST_SUPPORT(obj);
    TdxGuest *tdx = TDX_GUEST(obj);

    qemu_mutex_init(&tdx->lock);

    cgs->require_guest_memfd = true;
    tdx->attributes = TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE;

    object_property_add_uint64_ptr(obj, "attributes", &tdx->attributes,
                                   OBJ_PROP_FLAG_READWRITE);
    object_property_add_bool(obj, "sept-ve-disable",
                             tdx_guest_get_sept_ve_disable,
                             tdx_guest_set_sept_ve_disable);
    object_property_add_str(obj, "mrconfigid",
                            tdx_guest_get_mrconfigid,
                            tdx_guest_set_mrconfigid);
    object_property_add_str(obj, "mrowner",
                            tdx_guest_get_mrowner, tdx_guest_set_mrowner);
    object_property_add_str(obj, "mrownerconfig",
                            tdx_guest_get_mrownerconfig,
                            tdx_guest_set_mrownerconfig);
}

static void tdx_guest_finalize(Object *obj)
{
}

static void tdx_guest_class_init(ObjectClass *oc, const void *data)
{
    ConfidentialGuestSupportClass *klass = CONFIDENTIAL_GUEST_SUPPORT_CLASS(oc);
    X86ConfidentialGuestClass *x86_klass = X86_CONFIDENTIAL_GUEST_CLASS(oc);

    klass->kvm_init = tdx_kvm_init;
    x86_klass->kvm_type = tdx_kvm_type;
    x86_klass->cpu_instance_init = tdx_cpu_instance_init;
    x86_klass->adjust_cpuid_features = tdx_adjust_cpuid_features;
    x86_klass->check_features = tdx_check_features;
}