// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021-2022 Intel Corporation */

#undef pr_fmt
#define pr_fmt(fmt)	"tdx: " fmt

#include <linux/cpufeature.h>
#include <linux/export.h>
#include <linux/io.h>
#include <asm/coco.h>
#include <asm/tdx.h>
#include <asm/vmx.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/pgtable.h>

/* TDX module Call Leaf IDs */
#define TDX_GET_INFO			1
#define TDX_GET_VEINFO			3
#define TDX_GET_REPORT			4
#define TDX_ACCEPT_PAGE			6

/* TDX hypercall Leaf IDs */
#define TDVMCALL_MAP_GPA		0x10001

/* MMIO direction */
#define EPT_READ	0
#define EPT_WRITE	1

/* Port I/O direction */
#define PORT_READ	0
#define PORT_WRITE	1

/* See Exit Qualification for I/O Instructions in VMX documentation */
#define VE_IS_IO_IN(e)		((e) & BIT(3))
#define VE_GET_IO_SIZE(e)	(((e) & GENMASK(2, 0)) + 1)
#define VE_GET_PORT_NUM(e)	((e) >> 16)
#define VE_IS_IO_STRING(e)	((e) & BIT(4))

#define ATTR_SEPT_VE_DISABLE	BIT(28)

/* TDX Module call error codes */
#define TDCALL_RETURN_CODE(a)	((a) >> 32)
#define TDCALL_INVALID_OPERAND	0xc0000100

#define TDREPORT_SUBTYPE_0	0

/*
 * Wrapper for standard use of __tdx_hypercall with no output aside from
 * return code.
 */
static inline u64 _tdx_hypercall(u64 fn, u64 r12, u64 r13, u64 r14, u64 r15)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = fn,
		.r12 = r12,
		.r13 = r13,
		.r14 = r14,
		.r15 = r15,
	};

	return __tdx_hypercall(&args, 0);
}

/* Called from __tdx_hypercall() for unrecoverable failure */
void __tdx_hypercall_failed(void)
{
	panic("TDVMCALL failed. TDX module bug?");
}

/*
 * The TDG.VP.VMCALL-Instruction-execution sub-functions are defined
 * independently from but are currently matched 1:1 with VMX EXIT_REASONs.
 * Reusing the KVM EXIT_REASON macros makes it easier to connect the host and
 * guest sides of these calls.
 */
static u64 hcall_func(u64 exit_reason)
{
	return exit_reason;
}

#ifdef CONFIG_KVM_GUEST
long tdx_kvm_hypercall(unsigned int nr, unsigned long p1, unsigned long p2,
		       unsigned long p3, unsigned long p4)
{
	struct tdx_hypercall_args args = {
		.r10 = nr,
		.r11 = p1,
		.r12 = p2,
		.r13 = p3,
		.r14 = p4,
	};

	return __tdx_hypercall(&args, 0);
}
EXPORT_SYMBOL_GPL(tdx_kvm_hypercall);
#endif

/*
 * Used for TDX guests to make calls directly to the TD module. This
 * should only be used for calls that have no legitimate reason to fail
 * or where the kernel can not survive the call failing.
 */
static inline void tdx_module_call(u64 fn, u64 rcx, u64 rdx, u64 r8, u64 r9,
				   struct tdx_module_output *out)
{
	if (__tdx_module_call(fn, rcx, rdx, r8, r9, out))
		panic("TDCALL %lld failed (Buggy TDX module!)\n", fn);
}
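/*
 * Illustrative sketch, not part of the driver logic: per the GHCI, a
 * standard TDVMCALL carries the sub-function in R11 and arguments in
 * R12-R15; on return, R10 holds the TDVMCALL status and R11 the first
 * output value. Reading MSR 0x1b (IA32_APIC_BASE), for example, would
 * roughly look like:
 *
 *	struct tdx_hypercall_args args = {
 *		.r10 = TDX_HYPERCALL_STANDARD,
 *		.r11 = hcall_func(EXIT_REASON_MSR_READ),
 *		.r12 = 0x1b,			// MSR index
 *	};
 *	if (!__tdx_hypercall(&args, TDX_HCALL_HAS_OUTPUT))
 *		msr_val = args.r11;		// output lands in R11
 *
 * read_msr() below is the real in-tree version of this pattern.
 */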
/**
 * tdx_mcall_get_report0() - Wrapper to get TDREPORT0 (a.k.a. TDREPORT
 *                           subtype 0) using TDG.MR.REPORT TDCALL.
 * @reportdata: Address of the input buffer which contains user-defined
 *              REPORTDATA to be included into TDREPORT.
 * @tdreport: Address of the output buffer to store TDREPORT.
 *
 * Refer to the section titled "TDG.MR.REPORT leaf" in the TDX Module
 * v1.0 specification for more information on the TDG.MR.REPORT TDCALL.
 * It is used in the TDX guest driver module to get the TDREPORT0.
 *
 * Return: 0 on success, -EINVAL for invalid operands, or -EIO on
 * other TDCALL failures.
 */
int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport)
{
	u64 ret;

	ret = __tdx_module_call(TDX_GET_REPORT, virt_to_phys(tdreport),
				virt_to_phys(reportdata), TDREPORT_SUBTYPE_0,
				0, NULL);
	if (ret) {
		if (TDCALL_RETURN_CODE(ret) == TDCALL_INVALID_OPERAND)
			return -EINVAL;
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tdx_mcall_get_report0);

static void tdx_parse_tdinfo(u64 *cc_mask)
{
	struct tdx_module_output out;
	unsigned int gpa_width;
	u64 td_attr;

	/*
	 * TDINFO TDX module call is used to get the TD execution environment
	 * information like GPA width, number of available vcpus, debug mode
	 * information, etc. More details about the ABI can be found in TDX
	 * Guest-Host-Communication Interface (GHCI), section 2.4.2 TDCALL
	 * [TDG.VP.INFO].
	 */
	tdx_module_call(TDX_GET_INFO, 0, 0, 0, 0, &out);

	/*
	 * The highest bit of a guest physical address is the "sharing" bit.
	 * Set it for shared pages and clear it for private pages.
	 *
	 * The GPA width that comes out of this call is critical. TDX guests
	 * can not meaningfully run without it.
	 */
	gpa_width = out.rcx & GENMASK(5, 0);
	*cc_mask = BIT_ULL(gpa_width - 1);

	/*
	 * The kernel can not handle #VE's when accessing normal kernel
	 * memory. Ensure that no #VE will be delivered for accesses to
	 * TD-private memory. Only VMM-shared memory (MMIO) will #VE.
	 */
	td_attr = out.rdx;
	if (!(td_attr & ATTR_SEPT_VE_DISABLE))
		panic("TD misconfiguration: SEPT_VE_DISABLE attribute must be set.\n");
}
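/*
 * Worked example (illustrative): with a 52-bit GPA width, out.rcx &
 * GENMASK(5, 0) yields gpa_width = 52, so cc_mask = BIT_ULL(51) and bit 51
 * is the shared/private bit. tdx_early_init() later trims physical_mask
 * with (cc_mask - 1) so that bit 51 and everything above it are treated
 * as flags/reserved rather than as part of the physical address.
 */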
/*
 * The TDX module spec states that #VE may be injected for a limited set of
 * reasons:
 *
 *  - Emulation of the architectural #VE injection on EPT violation;
 *
 *  - As a result of guest TD execution of a disallowed instruction,
 *    a disallowed MSR access, or CPUID virtualization;
 *
 *  - A notification to the guest TD about anomalous behavior;
 *
 * The last one is opt-in and is not used by the kernel.
 *
 * The Intel Software Developer's Manual describes cases when the instruction
 * length field can be used in the section "Information for VM Exits Due to
 * Instruction Execution".
 *
 * For TDX, it ultimately means GET_VEINFO provides reliable instruction length
 * information if #VE occurred due to instruction execution, but not for EPT
 * violations.
 */
static int ve_instr_len(struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
	case EXIT_REASON_MSR_READ:
	case EXIT_REASON_MSR_WRITE:
	case EXIT_REASON_CPUID:
	case EXIT_REASON_IO_INSTRUCTION:
		/* It is safe to use ve->instr_len for #VE due to instructions */
		return ve->instr_len;
	case EXIT_REASON_EPT_VIOLATION:
		/*
		 * For EPT violations, ve->instr_len is not defined. For those,
		 * the kernel must decode instructions manually and should not
		 * be using this function.
		 */
		WARN_ONCE(1, "ve->instr_len is not defined for EPT violations");
		return 0;
	default:
		WARN_ONCE(1, "Unexpected #VE-type: %lld\n", ve->exit_reason);
		return ve->instr_len;
	}
}

static u64 __cpuidle __halt(const bool irq_disabled, const bool do_sti)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_HLT),
		.r12 = irq_disabled,
	};

	/*
	 * Emulate HLT operation via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section 3.8 TDG.VP.VMCALL<Instruction.HLT>.
	 *
	 * The VMM uses the "IRQ disabled" param to understand IRQ
	 * enabled status (RFLAGS.IF) of the TD guest and to determine
	 * whether or not it should schedule the halted vCPU if an
	 * IRQ becomes pending. E.g. if IRQs are disabled, the VMM
	 * can keep the vCPU in virtual HLT, even if an IRQ is
	 * pending, without hanging/breaking the guest.
	 */
	return __tdx_hypercall(&args, do_sti ? TDX_HCALL_ISSUE_STI : 0);
}

static int handle_halt(struct ve_info *ve)
{
	/*
	 * Since non-safe halt is mainly used in CPU offlining
	 * and the guest will always stay in the halt state, don't
	 * call the STI instruction (set do_sti to false).
	 */
	const bool irq_disabled = irqs_disabled();
	const bool do_sti = false;

	if (__halt(irq_disabled, do_sti))
		return -EIO;

	return ve_instr_len(ve);
}

void __cpuidle tdx_safe_halt(void)
{
	/*
	 * In the do_sti=true case, __tdx_hypercall() enables interrupts
	 * using the STI instruction before the TDCALL. So set
	 * irq_disabled to false.
	 */
	const bool irq_disabled = false;
	const bool do_sti = true;

	/*
	 * Use WARN_ONCE() to report the failure.
	 */
	if (__halt(irq_disabled, do_sti))
		WARN_ONCE(1, "HLT instruction emulation failed\n");
}

static int read_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_READ),
		.r12 = regs->cx,
	};

	/*
	 * Emulate the MSR read via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "TDG.VP.VMCALL<Instruction.RDMSR>".
	 */
	if (__tdx_hypercall(&args, TDX_HCALL_HAS_OUTPUT))
		return -EIO;

	regs->ax = lower_32_bits(args.r11);
	regs->dx = upper_32_bits(args.r11);
	return ve_instr_len(ve);
}

static int write_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_WRITE),
		.r12 = regs->cx,
		.r13 = (u64)regs->dx << 32 | regs->ax,
	};

	/*
	 * Emulate the MSR write via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI) section titled "TDG.VP.VMCALL<Instruction.WRMSR>".
	 */
	if (__tdx_hypercall(&args, 0))
		return -EIO;

	return ve_instr_len(ve);
}
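/*
 * Illustrative example, not driver logic: WRMSR traps with the MSR index
 * in ECX and the value split across EDX:EAX, so a guest write of
 * 0x1122334455667788 arrives as regs->dx == 0x11223344 and
 * regs->ax == 0x55667788; write_msr() reassembles it into R13 as
 * (u64)regs->dx << 32 | regs->ax == 0x1122334455667788 before issuing
 * the hypercall.
 */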
static int handle_cpuid(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_CPUID),
		.r12 = regs->ax,
		.r13 = regs->cx,
	};

	/*
	 * Only allow VMM to control range reserved for hypervisor
	 * communication.
	 *
	 * Return all-zeros for any CPUID outside the range. This matches
	 * CPU behaviour for an unsupported leaf.
	 */
	if (regs->ax < 0x40000000 || regs->ax > 0x4FFFFFFF) {
		regs->ax = regs->bx = regs->cx = regs->dx = 0;
		return ve_instr_len(ve);
	}

	/*
	 * Emulate the CPUID instruction via a hypercall. More info about
	 * ABI can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "VP.VMCALL<Instruction.CPUID>".
	 */
	if (__tdx_hypercall(&args, TDX_HCALL_HAS_OUTPUT))
		return -EIO;

	/*
	 * As per TDX GHCI CPUID ABI, R12-R15 registers contain contents of
	 * EAX, EBX, ECX, EDX registers after the CPUID instruction execution.
	 * So copy the register contents back to pt_regs.
	 */
	regs->ax = args.r12;
	regs->bx = args.r13;
	regs->cx = args.r14;
	regs->dx = args.r15;

	return ve_instr_len(ve);
}

static bool mmio_read(int size, unsigned long addr, unsigned long *val)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_EPT_VIOLATION),
		.r12 = size,
		.r13 = EPT_READ,
		.r14 = addr,
		.r15 = *val,
	};

	if (__tdx_hypercall(&args, TDX_HCALL_HAS_OUTPUT))
		return false;
	*val = args.r11;
	return true;
}

static bool mmio_write(int size, unsigned long addr, unsigned long val)
{
	return !_tdx_hypercall(hcall_func(EXIT_REASON_EPT_VIOLATION), size,
			       EPT_WRITE, addr, val);
}
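/*
 * Illustrative sketch (assumed example values): per the GHCI, a 4-byte MMIO
 * read of a shared GPA such as 0xfebd0000 becomes a hypercall with
 * R12 = 4 (size), R13 = EPT_READ and R14 = 0xfebd0000; on success the
 * value read comes back in R11, which mmio_read() stores through *val.
 * Writes pass the value in R15 instead and return no data.
 */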
static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
{
	unsigned long *reg, val, vaddr;
	char buffer[MAX_INSN_SIZE];
	enum insn_mmio_type mmio;
	struct insn insn = {};
	int size, extend_size;
	u8 extend_val = 0;

	/* Only in-kernel MMIO is supported */
	if (WARN_ON_ONCE(user_mode(regs)))
		return -EFAULT;

	if (copy_from_kernel_nofault(buffer, (void *)regs->ip, MAX_INSN_SIZE))
		return -EFAULT;

	if (insn_decode(&insn, buffer, MAX_INSN_SIZE, INSN_MODE_64))
		return -EINVAL;

	mmio = insn_decode_mmio(&insn, &size);
	if (WARN_ON_ONCE(mmio == INSN_MMIO_DECODE_FAILED))
		return -EINVAL;

	if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
		reg = insn_get_modrm_reg_ptr(&insn, regs);
		if (!reg)
			return -EINVAL;
	}

	/*
	 * Reject EPT violation #VEs that split pages.
	 *
	 * MMIO accesses are supposed to be naturally aligned and therefore
	 * never cross page boundaries. Seeing split page accesses indicates
	 * a bug or a load_unaligned_zeropad() that stepped into an MMIO page.
	 *
	 * load_unaligned_zeropad() will recover using exception fixups.
	 */
	vaddr = (unsigned long)insn_get_addr_ref(&insn, regs);
	if (vaddr / PAGE_SIZE != (vaddr + size - 1) / PAGE_SIZE)
		return -EFAULT;

	/* Handle writes first */
	switch (mmio) {
	case INSN_MMIO_WRITE:
		memcpy(&val, reg, size);
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case INSN_MMIO_WRITE_IMM:
		val = insn.immediate.value;
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case INSN_MMIO_READ:
	case INSN_MMIO_READ_ZERO_EXTEND:
	case INSN_MMIO_READ_SIGN_EXTEND:
		/* Reads are handled below */
		break;
	case INSN_MMIO_MOVS:
	case INSN_MMIO_DECODE_FAILED:
		/*
		 * MMIO was accessed with an instruction that could not be
		 * decoded or handled properly. It was likely not using io.h
		 * helpers or accessed MMIO accidentally.
		 */
		return -EINVAL;
	default:
		WARN_ONCE(1, "Unknown insn_decode_mmio() decode value?");
		return -EINVAL;
	}

	/* Handle reads */
	if (!mmio_read(size, ve->gpa, &val))
		return -EIO;

	switch (mmio) {
	case INSN_MMIO_READ:
		/* Zero-extend for 32-bit operation */
		extend_size = size == 4 ? sizeof(*reg) : 0;
		break;
	case INSN_MMIO_READ_ZERO_EXTEND:
		/* Zero extend based on operand size */
		extend_size = insn.opnd_bytes;
		break;
	case INSN_MMIO_READ_SIGN_EXTEND:
		/* Sign extend based on operand size */
		extend_size = insn.opnd_bytes;
		if (size == 1 && val & BIT(7))
			extend_val = 0xFF;
		else if (size > 1 && val & BIT(15))
			extend_val = 0xFF;
		break;
	default:
		/* All other cases have to be covered by the first switch() */
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	if (extend_size)
		memset(reg, extend_val, extend_size);
	memcpy(reg, &val, size);
	return insn.length;
}

static bool handle_in(struct pt_regs *regs, int size, int port)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_IO_INSTRUCTION),
		.r12 = size,
		.r13 = PORT_READ,
		.r14 = port,
	};
	u64 mask = GENMASK(BITS_PER_BYTE * size - 1, 0);
	bool success;

	/*
	 * Emulate the I/O read via hypercall. More info about ABI can be found
	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
	 * "TDG.VP.VMCALL<Instruction.IO>".
	 */
	success = !__tdx_hypercall(&args, TDX_HCALL_HAS_OUTPUT);

	/* Update part of the register affected by the emulated instruction */
	regs->ax &= ~mask;
	if (success)
		regs->ax |= args.r11 & mask;

	return success;
}

static bool handle_out(struct pt_regs *regs, int size, int port)
{
	u64 mask = GENMASK(BITS_PER_BYTE * size - 1, 0);

	/*
	 * Emulate the I/O write via hypercall. More info about ABI can be
	 * found in TDX Guest-Host-Communication Interface (GHCI) section
	 * titled "TDG.VP.VMCALL<Instruction.IO>".
	 */
	return !_tdx_hypercall(hcall_func(EXIT_REASON_IO_INSTRUCTION), size,
			       PORT_WRITE, port, regs->ax & mask);
}

/*
 * Emulate I/O using hypercall.
 *
 * Assumes the IO instruction was using ax, which is enforced
 * by the standard io.h macros.
 *
 * Return the number of bytes to advance RIP on success or -errno on
 * failure.
 */
static int handle_io(struct pt_regs *regs, struct ve_info *ve)
{
	u32 exit_qual = ve->exit_qual;
	int size, port;
	bool in, ret;

	if (VE_IS_IO_STRING(exit_qual))
		return -EIO;

	in   = VE_IS_IO_IN(exit_qual);
	size = VE_GET_IO_SIZE(exit_qual);
	port = VE_GET_PORT_NUM(exit_qual);

	if (in)
		ret = handle_in(regs, size, port);
	else
		ret = handle_out(regs, size, port);
	if (!ret)
		return -EIO;

	return ve_instr_len(ve);
}
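/*
 * Worked example (illustrative): for a one-byte "inb" from port 0x3f8
 * (the usual earlyprintk serial port), the VMX exit qualification is
 * (0x3f8 << 16) | BIT(3), so VE_GET_IO_SIZE() = (0 & 7) + 1 = 1,
 * VE_IS_IO_IN() is true, VE_GET_PORT_NUM() = 0x3f8 and VE_IS_IO_STRING()
 * is false, i.e. the access is accepted by handle_io() above.
 */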
/*
 * Early #VE exception handler. Only handles a subset of port I/O.
 * Intended only for earlyprintk. Returns false on failure.
 */
__init bool tdx_early_handle_ve(struct pt_regs *regs)
{
	struct ve_info ve;
	int insn_len;

	tdx_get_ve_info(&ve);

	if (ve.exit_reason != EXIT_REASON_IO_INSTRUCTION)
		return false;

	insn_len = handle_io(regs, &ve);
	if (insn_len < 0)
		return false;

	regs->ip += insn_len;
	return true;
}

void tdx_get_ve_info(struct ve_info *ve)
{
	struct tdx_module_output out;

	/*
	 * Called during #VE handling to retrieve the #VE info from the
	 * TDX module.
	 *
	 * This has to be called early in #VE handling. A "nested" #VE which
	 * occurs before this will raise a #DF and is not recoverable.
	 *
	 * The call retrieves the #VE info from the TDX module, which also
	 * clears the "#VE valid" flag. This must be done before anything else
	 * because any #VE that occurs while the valid flag is set will lead to
	 * #DF.
	 *
	 * Note, the TDX module treats virtual NMIs as inhibited if the #VE
	 * valid flag is set. This means that NMI=>#VE will not result in a #DF.
	 */
	tdx_module_call(TDX_GET_VEINFO, 0, 0, 0, 0, &out);

	/* Transfer the output parameters */
	ve->exit_reason = out.rcx;
	ve->exit_qual = out.rdx;
	ve->gla = out.r8;
	ve->gpa = out.r9;
	ve->instr_len = lower_32_bits(out.r10);
	ve->instr_info = upper_32_bits(out.r10);
}

/*
 * Handle the user-initiated #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_user(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}

/*
 * Handle the kernel #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_kernel(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
		return handle_halt(ve);
	case EXIT_REASON_MSR_READ:
		return read_msr(regs, ve);
	case EXIT_REASON_MSR_WRITE:
		return write_msr(regs, ve);
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	case EXIT_REASON_EPT_VIOLATION:
		return handle_mmio(regs, ve);
	case EXIT_REASON_IO_INSTRUCTION:
		return handle_io(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}

bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve)
{
	int insn_len;

	if (user_mode(regs))
		insn_len = virt_exception_user(regs, ve);
	else
		insn_len = virt_exception_kernel(regs, ve);
	if (insn_len < 0)
		return false;

	/* After successful #VE handling, move the IP */
	regs->ip += insn_len;

	return true;
}
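/*
 * Illustrative example (assumed values): for an in-kernel "rdmsr", the
 * dispatch above sees ve->exit_reason == EXIT_REASON_MSR_READ and
 * ve->instr_len == 2 (RDMSR encodes as 0f 32), so after read_msr()
 * succeeds, RIP is advanced two bytes past the trapping instruction and
 * execution resumes as if the MSR read had completed natively.
 */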
static bool tdx_tlb_flush_required(bool private)
{
	/*
	 * TDX guest is responsible for flushing TLB on private->shared
	 * transition. VMM is responsible for flushing on shared->private.
	 *
	 * The VMM _can't_ flush private addresses as it can't generate PAs
	 * with the guest's HKID. Shared memory isn't subject to integrity
	 * checking, i.e. the VMM doesn't need to flush for its own protection.
	 *
	 * There's no need to flush when converting from shared to private,
	 * as flushing is the VMM's responsibility in this case, e.g. it must
	 * flush to avoid integrity failures in the face of a buggy or
	 * malicious guest.
	 */
	return !private;
}

static bool tdx_cache_flush_required(void)
{
	/*
	 * AMD SME/SEV can avoid cache flushing if HW enforces cache coherence.
	 * TDX doesn't have such capability.
	 *
	 * Flush cache unconditionally.
	 */
	return true;
}

static bool try_accept_one(phys_addr_t *start, unsigned long len,
			   enum pg_level pg_level)
{
	unsigned long accept_size = page_level_size(pg_level);
	u64 tdcall_rcx;
	u8 page_size;

	if (!IS_ALIGNED(*start, accept_size))
		return false;

	if (len < accept_size)
		return false;

	/*
	 * Pass the page physical address to the TDX module to accept the
	 * pending, private page.
	 *
	 * Bits 2:0 of RCX encode page size: 0 - 4K, 1 - 2M, 2 - 1G.
	 */
	switch (pg_level) {
	case PG_LEVEL_4K:
		page_size = 0;
		break;
	case PG_LEVEL_2M:
		page_size = 1;
		break;
	case PG_LEVEL_1G:
		page_size = 2;
		break;
	default:
		return false;
	}

	tdcall_rcx = *start | page_size;
	if (__tdx_module_call(TDX_ACCEPT_PAGE, tdcall_rcx, 0, 0, 0, NULL))
		return false;

	*start += accept_size;
	return true;
}

/*
 * Inform the VMM of the guest's intent for this physical page: shared with
 * the VMM or private to the guest. The VMM is expected to change its mapping
 * of the page in response.
 */
static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
{
	phys_addr_t start = __pa(vaddr);
	phys_addr_t end = __pa(vaddr + numpages * PAGE_SIZE);

	if (!enc) {
		/* Set the shared (decrypted) bits: */
		start |= cc_mkdec(0);
		end |= cc_mkdec(0);
	}

	/*
	 * Notify the VMM about page mapping conversion. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface (GHCI),
	 * section "TDG.VP.VMCALL<MapGPA>".
	 */
	if (_tdx_hypercall(TDVMCALL_MAP_GPA, start, end - start, 0, 0))
		return false;

	/* A private->shared conversion requires only the MapGPA call */
	if (!enc)
		return true;

	/*
	 * For shared->private conversion, accept the page using
	 * TDX_ACCEPT_PAGE TDX module call.
	 */
	while (start < end) {
		unsigned long len = end - start;

		/*
		 * Try larger accepts first. It gives the VMM a chance to keep
		 * 1G/2M SEPT entries where possible and speeds up the process
		 * by cutting the number of hypercalls (if successful).
		 */
		if (try_accept_one(&start, len, PG_LEVEL_1G))
			continue;

		if (try_accept_one(&start, len, PG_LEVEL_2M))
			continue;

		if (!try_accept_one(&start, len, PG_LEVEL_4K))
			return false;
	}

	return true;
}
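/*
 * Worked example (illustrative): converting a 2M+4K range back to private
 * costs one MapGPA hypercall plus a few accepts. If start happens to be
 * 2M-aligned (but not 1G-aligned), the loop above accepts the first 2M
 * with a single PG_LEVEL_2M call (PG_LEVEL_1G fails the alignment/length
 * checks) and the trailing 4K with one PG_LEVEL_4K call: two TDCALLs
 * instead of the 513 that per-4K acceptance would need.
 */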
void __init tdx_early_init(void)
{
	u64 cc_mask;
	u32 eax, sig[3];

	cpuid_count(TDX_CPUID_LEAF_ID, 0, &eax, &sig[0], &sig[2], &sig[1]);

	if (memcmp(TDX_IDENT, sig, sizeof(sig)))
		return;

	setup_force_cpu_cap(X86_FEATURE_TDX_GUEST);

	cc_set_vendor(CC_VENDOR_INTEL);
	tdx_parse_tdinfo(&cc_mask);
	cc_set_mask(cc_mask);

	/*
	 * All bits above the GPA width are reserved, and the kernel treats
	 * the shared bit as a flag, not as part of the physical address.
	 *
	 * Adjust the physical mask to only cover valid GPA bits.
	 */
	physical_mask &= cc_mask - 1;

	x86_platform.guest.enc_cache_flush_required = tdx_cache_flush_required;
	x86_platform.guest.enc_tlb_flush_required = tdx_tlb_flush_required;
	x86_platform.guest.enc_status_change_finish = tdx_enc_status_changed;

	pr_info("Guest detected\n");
}
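/*
 * Usage sketch (illustrative, from outside this file): drivers convert
 * memory for VMM sharing via the generic set_memory helpers, which reach
 * tdx_enc_status_changed() through the x86_platform.guest hooks above,
 * e.g.:
 *
 *	// share a buffer with the host (clears the encryption attribute)
 *	ret = set_memory_decrypted((unsigned long)vaddr, nr_pages);
 *	...
 *	// return it to private use (MapGPA + TDX_ACCEPT_PAGE)
 *	ret = set_memory_encrypted((unsigned long)vaddr, nr_pages);
 */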