// SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V Isolation VM interface with paravisor and hypervisor
 *
 * Author:
 *  Tianyu Lan <Tianyu.Lan@microsoft.com>
 */

#include <linux/bitfield.h>
#include <linux/hyperv.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/svm.h>
#include <asm/sev.h>
#include <asm/io.h>
#include <asm/coco.h>
#include <asm/mem_encrypt.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
#include <asm/mtrr.h>
#include <asm/io_apic.h>
#include <asm/realmode.h>
#include <asm/e820/api.h>
#include <asm/desc.h>
#include <uapi/asm/vmx.h>

#ifdef CONFIG_AMD_MEM_ENCRYPT

#define GHCB_USAGE_HYPERV_CALL	1

union hv_ghcb {
	struct ghcb ghcb;
	struct {
		u64 hypercalldata[509];
		u64 outputgpa;
		union {
			union {
				struct {
					u32 callcode : 16;
					u32 isfast : 1;
					u32 reserved1 : 14;
					u32 isnested : 1;
					u32 countofelements : 12;
					u32 reserved2 : 4;
					u32 repstartindex : 12;
					u32 reserved3 : 4;
				};
				u64 asuint64;
			} hypercallinput;
			union {
				struct {
					u16 callstatus;
					u16 reserved1;
					u32 elementsprocessed : 12;
					u32 reserved2 : 20;
				};
				u64 asunit64;
			} hypercalloutput;
		};
		u64 reserved2;
	} hypercall;
} __packed __aligned(HV_HYP_PAGE_SIZE);

/* Only used in an SNP VM with the paravisor */
static u16 hv_ghcb_version __ro_after_init;

/* Functions only used in an SNP VM with the paravisor go here. */
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;
	u64 status;

	if (!hv_ghcb_pg)
		return -EFAULT;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return -EFAULT;
	}

	hv_ghcb->ghcb.protocol_version = GHCB_PROTOCOL_MAX;
	hv_ghcb->ghcb.ghcb_usage = GHCB_USAGE_HYPERV_CALL;

	hv_ghcb->hypercall.outputgpa = (u64)output;
	hv_ghcb->hypercall.hypercallinput.asuint64 = 0;
	hv_ghcb->hypercall.hypercallinput.callcode = control;

	if (input_size)
		memcpy(hv_ghcb->hypercall.hypercalldata, input, input_size);

	VMGEXIT();

	hv_ghcb->ghcb.ghcb_usage = 0xffffffff;
	memset(hv_ghcb->ghcb.save.valid_bitmap, 0,
	       sizeof(hv_ghcb->ghcb.save.valid_bitmap));

	status = hv_ghcb->hypercall.hypercalloutput.callstatus;

	local_irq_restore(flags);

	return status;
}

static inline u64 rd_ghcb_msr(void)
{
	return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
}

static inline void wr_ghcb_msr(u64 val)
{
	native_wrmsrl(MSR_AMD64_SEV_ES_GHCB, val);
}

static enum es_result hv_ghcb_hv_call(struct ghcb *ghcb, u64 exit_code,
				      u64 exit_info_1, u64 exit_info_2)
{
	/* Fill in protocol and format specifiers */
	ghcb->protocol_version = hv_ghcb_version;
	ghcb->ghcb_usage = GHCB_DEFAULT_USAGE;

	ghcb_set_sw_exit_code(ghcb, exit_code);
	ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
	ghcb_set_sw_exit_info_2(ghcb, exit_info_2);

	VMGEXIT();

	if (ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0))
		return ES_VMM_ERROR;
	else
		return ES_OK;
}

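/*
 * hv_ghcb_terminate() - request guest termination via the GHCB MSR protocol.
 * @set:    termination reason-code set
 * @reason: termination reason within that set
 *
 * Both values are folded into the GHCB MSR value together with
 * GHCB_MSR_TERM_REQ so the hypervisor knows why the guest gave up.
 * Never returns.
 */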
void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason)
{
	u64 val = GHCB_MSR_TERM_REQ;

	/* Tell the hypervisor what went wrong. */
	val |= GHCB_SEV_TERM_REASON(set, reason);

	/* Request Guest Termination from Hypervisor */
	wr_ghcb_msr(val);
	VMGEXIT();

	while (true)
		asm volatile("hlt\n" : : : "memory");
}

bool hv_ghcb_negotiate_protocol(void)
{
	u64 ghcb_gpa;
	u64 val;

	/* Save ghcb page gpa. */
	ghcb_gpa = rd_ghcb_msr();

	/* Do the GHCB protocol version negotiation */
	wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
	VMGEXIT();
	val = rd_ghcb_msr();

	if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
		return false;

	if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTOCOL_MIN ||
	    GHCB_MSR_PROTO_MIN(val) > GHCB_PROTOCOL_MAX)
		return false;

	hv_ghcb_version = min_t(size_t, GHCB_MSR_PROTO_MAX(val),
				GHCB_PROTOCOL_MAX);

	/* Write ghcb page back after negotiating protocol. */
	wr_ghcb_msr(ghcb_gpa);
	VMGEXIT();

	return true;
}

static void hv_ghcb_msr_write(u64 msr, u64 value)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;

	if (!hv_ghcb_pg)
		return;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return;
	}

	ghcb_set_rcx(&hv_ghcb->ghcb, msr);
	ghcb_set_rax(&hv_ghcb->ghcb, lower_32_bits(value));
	ghcb_set_rdx(&hv_ghcb->ghcb, upper_32_bits(value));

	if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 1, 0))
		pr_warn("Fail to write msr via ghcb %llx.\n", msr);

	local_irq_restore(flags);
}

static void hv_ghcb_msr_read(u64 msr, u64 *value)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;

	/* Check size of union hv_ghcb here. */
	BUILD_BUG_ON(sizeof(union hv_ghcb) != HV_HYP_PAGE_SIZE);

	if (!hv_ghcb_pg)
		return;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return;
	}

	ghcb_set_rcx(&hv_ghcb->ghcb, msr);
	if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 0, 0))
		pr_warn("Fail to read msr via ghcb %llx.\n", msr);
	else
		*value = (u64)lower_32_bits(hv_ghcb->ghcb.save.rax)
			| ((u64)lower_32_bits(hv_ghcb->ghcb.save.rdx) << 32);
	local_irq_restore(flags);
}

/* Only used in a fully enlightened SNP VM, i.e. without the paravisor */
static u8 ap_start_input_arg[PAGE_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
static u8 ap_start_stack[PAGE_SIZE] __aligned(PAGE_SIZE);
static DEFINE_PER_CPU(struct sev_es_save_area *, hv_sev_vmsa);

/* Functions only used in an SNP VM without the paravisor go here. */

#define hv_populate_vmcb_seg(seg, gdtr_base)					\
do {										\
	if (seg.selector) {							\
		seg.base = 0;							\
		seg.limit = HV_AP_SEGMENT_LIMIT;				\
		seg.attrib = *(u16 *)(gdtr_base + seg.selector + 5);		\
		seg.attrib = (seg.attrib & 0xFF) | ((seg.attrib >> 4) & 0xF00);	\
	}									\
} while (0)

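/*
 * AP bring-up in a fully enlightened SNP VM: hv_snp_boot_ap() below builds a
 * VMSA describing the AP's initial register state, marks its backing page as
 * a VMSA page via RMPADJUST (snp_set_vmsa()), and then asks Hyper-V to start
 * the virtual processor with the HVCALL_START_VP hypercall.
 */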
static int snp_set_vmsa(void *va, bool vmsa)
{
	u64 attrs;

	/*
	 * Running at VMPL0 allows the kernel to change the VMSA bit for a page
	 * using the RMPADJUST instruction. However, for the instruction to
	 * succeed it must target the permissions of a lesser privileged
	 * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
	 * instruction in the AMD64 APM Volume 3).
	 */
	attrs = 1;
	if (vmsa)
		attrs |= RMPADJUST_VMSA_PAGE_BIT;

	return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
}

static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
{
	int err;

	err = snp_set_vmsa(vmsa, false);
	if (err)
		pr_err("clear VMSA page failed (%u), leaking page\n", err);
	else
		free_page((unsigned long)vmsa);
}

int hv_snp_boot_ap(int cpu, unsigned long start_ip)
{
	struct sev_es_save_area *vmsa = (struct sev_es_save_area *)
		__get_free_page(GFP_KERNEL | __GFP_ZERO);
	struct sev_es_save_area *cur_vmsa;
	struct desc_ptr gdtr;
	u64 ret, retry = 5;
	struct hv_enable_vp_vtl *start_vp_input;
	unsigned long flags;

	if (!vmsa)
		return -ENOMEM;

	native_store_gdt(&gdtr);

	vmsa->gdtr.base = gdtr.address;
	vmsa->gdtr.limit = gdtr.size;

	asm volatile("movl %%es, %%eax;" : "=a" (vmsa->es.selector));
	hv_populate_vmcb_seg(vmsa->es, vmsa->gdtr.base);

	asm volatile("movl %%cs, %%eax;" : "=a" (vmsa->cs.selector));
	hv_populate_vmcb_seg(vmsa->cs, vmsa->gdtr.base);

	asm volatile("movl %%ss, %%eax;" : "=a" (vmsa->ss.selector));
	hv_populate_vmcb_seg(vmsa->ss, vmsa->gdtr.base);

	asm volatile("movl %%ds, %%eax;" : "=a" (vmsa->ds.selector));
	hv_populate_vmcb_seg(vmsa->ds, vmsa->gdtr.base);

	vmsa->efer = native_read_msr(MSR_EFER);

	asm volatile("movq %%cr4, %%rax;" : "=a" (vmsa->cr4));
	asm volatile("movq %%cr3, %%rax;" : "=a" (vmsa->cr3));
	asm volatile("movq %%cr0, %%rax;" : "=a" (vmsa->cr0));

	vmsa->xcr0 = 1;
	vmsa->g_pat = HV_AP_INIT_GPAT_DEFAULT;
	vmsa->rip = (u64)secondary_startup_64_no_verify;
	vmsa->rsp = (u64)&ap_start_stack[PAGE_SIZE];

	/*
	 * Set the SNP-specific fields for this VMSA:
	 *   VMPL level
	 *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
	 */
	vmsa->vmpl = 0;
	vmsa->sev_features = sev_status >> 2;

	ret = snp_set_vmsa(vmsa, true);
	if (ret) {
		pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret);
		free_page((u64)vmsa);
		return ret;
	}

	local_irq_save(flags);
	start_vp_input = (struct hv_enable_vp_vtl *)ap_start_input_arg;
	memset(start_vp_input, 0, sizeof(*start_vp_input));
	start_vp_input->partition_id = -1;
	start_vp_input->vp_index = cpu;
	start_vp_input->target_vtl.target_vtl = ms_hyperv.vtl;
	*(u64 *)&start_vp_input->vp_context = __pa(vmsa) | 1;

	do {
		ret = hv_do_hypercall(HVCALL_START_VP,
				      start_vp_input, NULL);
	} while (hv_result(ret) == HV_STATUS_TIME_OUT && retry--);

	local_irq_restore(flags);

	if (!hv_result_success(ret)) {
		pr_err("HvCallStartVirtualProcessor failed: %llx\n", ret);
		snp_cleanup_vmsa(vmsa);
		vmsa = NULL;
	}

	cur_vmsa = per_cpu(hv_sev_vmsa, cpu);
	/* Free up any previous VMSA page */
	if (cur_vmsa)
		snp_cleanup_vmsa(cur_vmsa);

	/* Record the current VMSA page */
	per_cpu(hv_sev_vmsa, cpu) = vmsa;

	return ret;
}

#else
static inline void hv_ghcb_msr_write(u64 msr, u64 value) {}
static inline void hv_ghcb_msr_read(u64 msr, u64 *value) {}
#endif /* CONFIG_AMD_MEM_ENCRYPT */

#ifdef CONFIG_INTEL_TDX_GUEST
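/*
 * hv_tdx_msr_write()/hv_tdx_msr_read() forward an MSR access to the
 * hypervisor as a TDVMCALL (EXIT_REASON_MSR_WRITE/READ): a TDX guest's
 * register state is not directly visible to the hypervisor, so ordinary
 * RDMSR/WRMSR interception and emulation cannot be used here.
 */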
static void hv_tdx_msr_write(u64 msr, u64 val)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = EXIT_REASON_MSR_WRITE,
		.r12 = msr,
		.r13 = val,
	};

	u64 ret = __tdx_hypercall(&args);

	WARN_ONCE(ret, "Failed to emulate MSR write: %lld\n", ret);
}

static void hv_tdx_msr_read(u64 msr, u64 *val)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = EXIT_REASON_MSR_READ,
		.r12 = msr,
	};

	u64 ret = __tdx_hypercall_ret(&args);

	if (WARN_ONCE(ret, "Failed to emulate MSR read: %lld\n", ret))
		*val = 0;
	else
		*val = args.r11;
}

u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
{
	struct tdx_hypercall_args args = { };

	args.r10 = control;
	args.rdx = param1;
	args.r8 = param2;

	(void)__tdx_hypercall_ret(&args);

	return args.r11;
}

#else
static inline void hv_tdx_msr_write(u64 msr, u64 value) {}
static inline void hv_tdx_msr_read(u64 msr, u64 *value) {}
#endif /* CONFIG_INTEL_TDX_GUEST */

#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
void hv_ivm_msr_write(u64 msr, u64 value)
{
	if (!ms_hyperv.paravisor_present)
		return;

	if (hv_isolation_type_tdx())
		hv_tdx_msr_write(msr, value);
	else if (hv_isolation_type_snp())
		hv_ghcb_msr_write(msr, value);
}

void hv_ivm_msr_read(u64 msr, u64 *value)
{
	if (!ms_hyperv.paravisor_present)
		return;

	if (hv_isolation_type_tdx())
		hv_tdx_msr_read(msr, value);
	else if (hv_isolation_type_snp())
		hv_ghcb_msr_read(msr, value);
}

/*
 * hv_mark_gpa_visibility - Set pages visible to host via hvcall.
 *
 * In an Isolation VM, all guest memory is encrypted from the host, so the
 * guest must set memory visible to the host via hypercall before sharing
 * memory with the host.
 */
static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
				  enum hv_mem_host_visibility visibility)
{
	struct hv_gpa_range_for_visibility **input_pcpu, *input;
	u16 pages_processed;
	u64 hv_status;
	unsigned long flags;

	/* no-op if partition isolation is not enabled */
	if (!hv_is_isolation_supported())
		return 0;

	if (count > HV_MAX_MODIFY_GPA_REP_COUNT) {
		pr_err("Hyper-V: GPA count:%d exceeds supported:%lu\n", count,
		       HV_MAX_MODIFY_GPA_REP_COUNT);
		return -EINVAL;
	}

	local_irq_save(flags);
	input_pcpu = (struct hv_gpa_range_for_visibility **)
			this_cpu_ptr(hyperv_pcpu_input_arg);
	input = *input_pcpu;
	if (unlikely(!input)) {
		local_irq_restore(flags);
		return -EINVAL;
	}

	input->partition_id = HV_PARTITION_ID_SELF;
	input->host_visibility = visibility;
	input->reserved0 = 0;
	input->reserved1 = 0;
	memcpy((void *)input->gpa_page_list, pfn, count * sizeof(*pfn));
	hv_status = hv_do_rep_hypercall(
			HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY, count,
			0, input, &pages_processed);
	local_irq_restore(flags);

	if (hv_result_success(hv_status))
		return 0;
	else
		return -EFAULT;
}

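/*
 * hv_mark_gpa_visibility() handles at most HV_MAX_MODIFY_GPA_REP_COUNT PFNs
 * per hypercall; hv_vtom_set_host_visibility() below batches larger buffers
 * into multiple hypercalls of at most that many pages each.
 */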
/*
 * hv_vtom_set_host_visibility - Set specified memory visible to host.
 *
 * In an Isolation VM, all guest memory is encrypted from the host, so the
 * guest must set memory visible to the host via hypercall before sharing
 * memory with the host. This function is a wrapper around
 * hv_mark_gpa_visibility() that takes a memory base address and page count.
 */
static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
{
	enum hv_mem_host_visibility visibility = enc ?
			VMBUS_PAGE_NOT_VISIBLE : VMBUS_PAGE_VISIBLE_READ_WRITE;
	u64 *pfn_array;
	int ret = 0;
	bool result = true;
	int i, pfn;

	pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
	if (!pfn_array)
		return false;

	for (i = 0, pfn = 0; i < pagecount; i++) {
		pfn_array[pfn] = virt_to_hvpfn((void *)kbuffer + i * HV_HYP_PAGE_SIZE);
		pfn++;

		if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
			ret = hv_mark_gpa_visibility(pfn, pfn_array,
						     visibility);
			if (ret) {
				result = false;
				goto err_free_pfn_array;
			}
			pfn = 0;
		}
	}

 err_free_pfn_array:
	kfree(pfn_array);
	return result;
}

static bool hv_vtom_tlb_flush_required(bool private)
{
	return true;
}

static bool hv_vtom_cache_flush_required(void)
{
	return false;
}

static bool hv_is_private_mmio(u64 addr)
{
	/*
	 * Hyper-V always provides a single IO-APIC in a guest VM.
	 * When a paravisor is used, it is emulated by the paravisor
	 * in the guest context and must be mapped private.
	 */
	if (addr >= HV_IOAPIC_BASE_ADDRESS &&
	    addr < (HV_IOAPIC_BASE_ADDRESS + PAGE_SIZE))
		return true;

	/* Same with a vTPM */
	if (addr >= VTPM_BASE_ADDRESS &&
	    addr < (VTPM_BASE_ADDRESS + PAGE_SIZE))
		return true;

	return false;
}

void __init hv_vtom_init(void)
{
	enum hv_isolation_type type = hv_get_isolation_type();

	switch (type) {
	case HV_ISOLATION_TYPE_VBS:
		fallthrough;
	/*
	 * By design, a VM using vTOM doesn't see the SEV setting,
	 * so SEV initialization is bypassed and sev_status isn't set.
	 * Set it here to indicate a vTOM VM.
	 *
	 * Note: if CONFIG_AMD_MEM_ENCRYPT is not set, sev_status is
	 * defined as 0ULL, to which we can't assign a value.
	 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
	case HV_ISOLATION_TYPE_SNP:
		sev_status = MSR_AMD64_SNP_VTOM;
		cc_vendor = CC_VENDOR_AMD;
		break;
#endif

	case HV_ISOLATION_TYPE_TDX:
		cc_vendor = CC_VENDOR_INTEL;
		break;

	default:
		panic("hv_vtom_init: unsupported isolation type %d\n", type);
	}

	cc_set_mask(ms_hyperv.shared_gpa_boundary);
	physical_mask &= ms_hyperv.shared_gpa_boundary - 1;

	x86_platform.hyper.is_private_mmio = hv_is_private_mmio;
	x86_platform.guest.enc_cache_flush_required = hv_vtom_cache_flush_required;
	x86_platform.guest.enc_tlb_flush_required = hv_vtom_tlb_flush_required;
	x86_platform.guest.enc_status_change_finish = hv_vtom_set_host_visibility;

	/* Set WB as the default cache mode. */
	mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK);
}

#endif /* defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST) */

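/*
 * hv_get_isolation_type - Return the isolation type reported by Hyper-V.
 *
 * Uses the isolation configuration that ms_hyperv_init_platform() cached
 * from the Hyper-V CPUID leaves; returns HV_ISOLATION_TYPE_NONE when the
 * partition does not advertise the isolation privilege.
 */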
631 */ 632 bool hv_is_isolation_supported(void) 633 { 634 if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) 635 return false; 636 637 if (!hypervisor_is_type(X86_HYPER_MS_HYPERV)) 638 return false; 639 640 return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE; 641 } 642 643 DEFINE_STATIC_KEY_FALSE(isolation_type_snp); 644 645 /* 646 * hv_isolation_type_snp - Check if the system runs in an AMD SEV-SNP based 647 * isolation VM. 648 */ 649 bool hv_isolation_type_snp(void) 650 { 651 return static_branch_unlikely(&isolation_type_snp); 652 } 653 654 DEFINE_STATIC_KEY_FALSE(isolation_type_tdx); 655 /* 656 * hv_isolation_type_tdx - Check if the system runs in an Intel TDX based 657 * isolated VM. 658 */ 659 bool hv_isolation_type_tdx(void) 660 { 661 return static_branch_unlikely(&isolation_type_tdx); 662 } 663