// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021-2022 Intel Corporation */

#undef pr_fmt
#define pr_fmt(fmt)	"tdx: " fmt

#include <linux/cpufeature.h>
#include <linux/export.h>
#include <linux/io.h>
#include <asm/coco.h>
#include <asm/tdx.h>
#include <asm/vmx.h>
#include <asm/ia32.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/pgtable.h>
#include <asm/traps.h>

/* MMIO direction */
#define EPT_READ	0
#define EPT_WRITE	1

/* Port I/O direction */
#define PORT_READ	0
#define PORT_WRITE	1

/* See Exit Qualification for I/O Instructions in VMX documentation */
#define VE_IS_IO_IN(e)		((e) & BIT(3))
#define VE_GET_IO_SIZE(e)	(((e) & GENMASK(2, 0)) + 1)
#define VE_GET_PORT_NUM(e)	((e) >> 16)
#define VE_IS_IO_STRING(e)	((e) & BIT(4))

#define ATTR_DEBUG		BIT(0)
#define ATTR_SEPT_VE_DISABLE	BIT(28)

/* TDX Module call error codes */
#define TDCALL_RETURN_CODE(a)	((a) >> 32)
#define TDCALL_INVALID_OPERAND	0xc0000100

#define TDREPORT_SUBTYPE_0	0

/* Called from __tdx_hypercall() for unrecoverable failure */
noinstr void __tdx_hypercall_failed(void)
{
	instrumentation_begin();
	panic("TDVMCALL failed. TDX module bug?");
}

#ifdef CONFIG_KVM_GUEST
long tdx_kvm_hypercall(unsigned int nr, unsigned long p1, unsigned long p2,
		       unsigned long p3, unsigned long p4)
{
	struct tdx_hypercall_args args = {
		.r10 = nr,
		.r11 = p1,
		.r12 = p2,
		.r13 = p3,
		.r14 = p4,
	};

	return __tdx_hypercall(&args);
}
EXPORT_SYMBOL_GPL(tdx_kvm_hypercall);
#endif

/*
 * Used for TDX guests to make calls directly to the TD module. This
 * should only be used for calls that have no legitimate reason to fail
 * or where the kernel can not survive the call failing.
 */
static inline void tdcall(u64 fn, struct tdx_module_args *args)
{
	if (__tdcall_ret(fn, args))
		panic("TDCALL %lld failed (Buggy TDX module!)\n", fn);
}

/* Read TD-scoped metadata */
static inline u64 __maybe_unused tdg_vm_rd(u64 field, u64 *value)
{
	struct tdx_module_args args = {
		.rdx = field,
	};
	u64 ret;

	ret = __tdcall_ret(TDG_VM_RD, &args);
	*value = args.r8;

	return ret;
}

/* Write TD-scoped metadata */
static inline u64 tdg_vm_wr(u64 field, u64 value, u64 mask)
{
	struct tdx_module_args args = {
		.rdx = field,
		.r8 = value,
		.r9 = mask,
	};

	return __tdcall(TDG_VM_WR, &args);
}
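/*
 * Illustrative sketch (not called anywhere in this file): the mask in
 * tdg_vm_wr() selects which bits of the field get written, so updating
 * only bit 0 of a TD-scoped field while preserving the rest would be:
 *
 *	tdg_vm_wr(TDCS_NOTIFY_ENABLES, 0, BIT_ULL(0));
 *
 * tdx_early_init() below passes a mask of -1ULL to replace the whole field.
 */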
/**
 * tdx_mcall_get_report0() - Wrapper to get TDREPORT0 (a.k.a. TDREPORT
 *                           subtype 0) using TDG.MR.REPORT TDCALL.
 * @reportdata: Address of the input buffer which contains user-defined
 *              REPORTDATA to be included into TDREPORT.
 * @tdreport: Address of the output buffer to store TDREPORT.
 *
 * Refer to section titled "TDG.MR.REPORT leaf" in the TDX Module
 * v1.0 specification for more information on TDG.MR.REPORT TDCALL.
 * It is used in the TDX guest driver module to get the TDREPORT0.
 *
 * Return 0 on success, -EINVAL for invalid operands, or -EIO on
 * other TDCALL failures.
 */
int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport)
{
	struct tdx_module_args args = {
		.rcx = virt_to_phys(tdreport),
		.rdx = virt_to_phys(reportdata),
		.r8 = TDREPORT_SUBTYPE_0,
	};
	u64 ret;

	ret = __tdcall(TDG_MR_REPORT, &args);
	if (ret) {
		if (TDCALL_RETURN_CODE(ret) == TDCALL_INVALID_OPERAND)
			return -EINVAL;
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tdx_mcall_get_report0);

static void __noreturn tdx_panic(const char *msg)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = TDVMCALL_REPORT_FATAL_ERROR,
		.r12 = 0, /* Error code: 0 is Panic */
	};
	union {
		/* Define register order according to the GHCI */
		struct { u64 r14, r15, rbx, rdi, rsi, r8, r9, rdx; };

		char str[64];
	} message;

	/* VMM assumes '\0' in byte 65, if the message took all 64 bytes */
	strncpy(message.str, msg, 64);

	args.r8 = message.r8;
	args.r9 = message.r9;
	args.r14 = message.r14;
	args.r15 = message.r15;
	args.rdi = message.rdi;
	args.rsi = message.rsi;
	args.rbx = message.rbx;
	args.rdx = message.rdx;

	/*
	 * This hypercall should never return and it is not safe
	 * to keep the guest running. Call it forever if it
	 * happens to return.
	 */
	while (1)
		__tdx_hypercall(&args);
}

static void tdx_parse_tdinfo(u64 *cc_mask)
{
	struct tdx_module_args args = {};
	unsigned int gpa_width;
	u64 td_attr;

	/*
	 * TDINFO TDX module call is used to get the TD execution environment
	 * information like GPA width, number of available vcpus, debug mode
	 * information, etc. More details about the ABI can be found in TDX
	 * Guest-Host-Communication Interface (GHCI), section 2.4.2 TDCALL
	 * [TDG.VP.INFO].
	 */
	tdcall(TDG_VP_INFO, &args);

	/*
	 * The highest bit of a guest physical address is the "sharing" bit.
	 * Set it for shared pages and clear it for private pages.
	 *
	 * The GPA width that comes out of this call is critical. TDX guests
	 * can not meaningfully run without it.
	 */
	gpa_width = args.rcx & GENMASK(5, 0);
	*cc_mask = BIT_ULL(gpa_width - 1);

	/*
	 * The kernel can not handle #VE's when accessing normal kernel
	 * memory. Ensure that no #VE will be delivered for accesses to
	 * TD-private memory. Only VMM-shared memory (MMIO) will #VE.
	 */
	td_attr = args.rdx;
	if (!(td_attr & ATTR_SEPT_VE_DISABLE)) {
		const char *msg = "TD misconfiguration: SEPT_VE_DISABLE attribute must be set.";

		/* Relax SEPT_VE_DISABLE check for debug TD. */
		if (td_attr & ATTR_DEBUG)
			pr_warn("%s\n", msg);
		else
			tdx_panic(msg);
	}
}
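/*
 * Worked example (values illustrative, the real width comes from
 * TDG.VP.INFO at runtime): with a 52-bit GPA width, tdx_parse_tdinfo()
 * sets cc_mask = BIT_ULL(51). cc_mkdec() then ORs that bit into an
 * address to mark it shared with the VMM, and cc_mkenc() clears it
 * for TD-private memory.
 */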
/*
 * The TDX module spec states that #VE may be injected for a limited set of
 * reasons:
 *
 *  - Emulation of the architectural #VE injection on EPT violation;
 *
 *  - As a result of guest TD execution of a disallowed instruction,
 *    a disallowed MSR access, or CPUID virtualization;
 *
 *  - A notification to the guest TD about anomalous behavior;
 *
 * The last one is opt-in and is not used by the kernel.
 *
 * The Intel Software Developer's Manual describes cases when instruction
 * length field can be used in section "Information for VM Exits Due to
 * Instruction Execution".
 *
 * For TDX, it ultimately means GET_VEINFO provides reliable instruction length
 * information if #VE occurred due to instruction execution, but not for EPT
 * violations.
 */
static int ve_instr_len(struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
	case EXIT_REASON_MSR_READ:
	case EXIT_REASON_MSR_WRITE:
	case EXIT_REASON_CPUID:
	case EXIT_REASON_IO_INSTRUCTION:
		/* It is safe to use ve->instr_len for #VE due to instructions */
		return ve->instr_len;
	case EXIT_REASON_EPT_VIOLATION:
		/*
		 * For EPT violations, ve->instr_len is not defined. For those,
		 * the kernel must decode instructions manually and should not
		 * be using this function.
		 */
		WARN_ONCE(1, "ve->instr_len is not defined for EPT violations");
		return 0;
	default:
		WARN_ONCE(1, "Unexpected #VE-type: %lld\n", ve->exit_reason);
		return ve->instr_len;
	}
}

static u64 __cpuidle __halt(const bool irq_disabled)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_HLT),
		.r12 = irq_disabled,
	};

	/*
	 * Emulate HLT operation via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section 3.8 TDG.VP.VMCALL<Instruction.HLT>.
	 *
	 * The VMM uses the "IRQ disabled" param to understand IRQ
	 * enabled status (RFLAGS.IF) of the TD guest and to determine
	 * whether or not it should schedule the halted vCPU if an
	 * IRQ becomes pending. E.g. if IRQs are disabled, the VMM
	 * can keep the vCPU in virtual HLT, even if an IRQ is
	 * pending, without hanging/breaking the guest.
	 */
	return __tdx_hypercall(&args);
}

static int handle_halt(struct ve_info *ve)
{
	const bool irq_disabled = irqs_disabled();

	if (__halt(irq_disabled))
		return -EIO;

	return ve_instr_len(ve);
}

void __cpuidle tdx_safe_halt(void)
{
	const bool irq_disabled = false;

	/*
	 * Use WARN_ONCE() to report the failure.
	 */
	if (__halt(irq_disabled))
		WARN_ONCE(1, "HLT instruction emulation failed\n");
}

static int read_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_READ),
		.r12 = regs->cx,
	};

	/*
	 * Emulate the MSR read via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "TDG.VP.VMCALL<Instruction.RDMSR>".
	 */
	if (__tdx_hypercall_ret(&args))
		return -EIO;

	regs->ax = lower_32_bits(args.r11);
	regs->dx = upper_32_bits(args.r11);
	return ve_instr_len(ve);
}
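/*
 * Worked example (illustrative): a #VE for "rdmsr" of MSR_IA32_MISC_ENABLE
 * (0x1a0) reaches read_msr() with regs->cx == 0x1a0. The VMM returns the
 * 64-bit value in r11, which read_msr() splits into the architectural
 * EDX:EAX pair. Any non-passthrough MSR takes the same path.
 */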
static int write_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_WRITE),
		.r12 = regs->cx,
		.r13 = (u64)regs->dx << 32 | regs->ax,
	};

	/*
	 * Emulate the MSR write via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI) section titled "TDG.VP.VMCALL<Instruction.WRMSR>".
	 */
	if (__tdx_hypercall(&args))
		return -EIO;

	return ve_instr_len(ve);
}

static int handle_cpuid(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_CPUID),
		.r12 = regs->ax,
		.r13 = regs->cx,
	};

	/*
	 * Only allow VMM to control range reserved for hypervisor
	 * communication.
	 *
	 * Return all-zeros for any CPUID outside the range. It matches CPU
	 * behaviour for non-supported leaf.
	 */
	if (regs->ax < 0x40000000 || regs->ax > 0x4FFFFFFF) {
		regs->ax = regs->bx = regs->cx = regs->dx = 0;
		return ve_instr_len(ve);
	}

	/*
	 * Emulate the CPUID instruction via a hypercall. More info about
	 * ABI can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "VP.VMCALL<Instruction.CPUID>".
	 */
	if (__tdx_hypercall_ret(&args))
		return -EIO;

	/*
	 * As per TDX GHCI CPUID ABI, r12-r15 registers contain contents of
	 * EAX, EBX, ECX, EDX registers after the CPUID instruction execution.
	 * So copy the register contents back to pt_regs.
	 */
	regs->ax = args.r12;
	regs->bx = args.r13;
	regs->cx = args.r14;
	regs->dx = args.r15;

	return ve_instr_len(ve);
}

static bool mmio_read(int size, unsigned long addr, unsigned long *val)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_EPT_VIOLATION),
		.r12 = size,
		.r13 = EPT_READ,
		.r14 = addr,
	};

	if (__tdx_hypercall_ret(&args))
		return false;
	*val = args.r11;
	return true;
}

static bool mmio_write(int size, unsigned long addr, unsigned long val)
{
	return !_tdx_hypercall(hcall_func(EXIT_REASON_EPT_VIOLATION), size,
			       EPT_WRITE, addr, val);
}
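/*
 * Both helpers above implement TDG.VP.VMCALL<#VE.RequestMMIO> from the
 * GHCI: r12 carries the access size (1, 2, 4 or 8 bytes), r13 the
 * direction and r14 the shared GPA. For example, a readl() on an
 * emulated device register ends up as mmio_read(4, ve->gpa, &val) via
 * handle_mmio() below (illustrative; the exact flow depends on the
 * decoded instruction).
 */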
static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
{
	unsigned long *reg, val, vaddr;
	char buffer[MAX_INSN_SIZE];
	enum insn_mmio_type mmio;
	struct insn insn = {};
	int size, extend_size;
	u8 extend_val = 0;

	/* Only in-kernel MMIO is supported */
	if (WARN_ON_ONCE(user_mode(regs)))
		return -EFAULT;

	if (copy_from_kernel_nofault(buffer, (void *)regs->ip, MAX_INSN_SIZE))
		return -EFAULT;

	if (insn_decode(&insn, buffer, MAX_INSN_SIZE, INSN_MODE_64))
		return -EINVAL;

	mmio = insn_decode_mmio(&insn, &size);
	if (WARN_ON_ONCE(mmio == INSN_MMIO_DECODE_FAILED))
		return -EINVAL;

	if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
		reg = insn_get_modrm_reg_ptr(&insn, regs);
		if (!reg)
			return -EINVAL;
	}

	if (!fault_in_kernel_space(ve->gla)) {
		WARN_ONCE(1, "Access to userspace address is not supported");
		return -EINVAL;
	}

	/*
	 * Reject EPT violation #VEs that split pages.
	 *
	 * MMIO accesses are supposed to be naturally aligned and therefore
	 * never cross page boundaries. Seeing split page accesses indicates
	 * a bug or a load_unaligned_zeropad() that stepped into an MMIO page.
	 *
	 * load_unaligned_zeropad() will recover using exception fixups.
	 */
	vaddr = (unsigned long)insn_get_addr_ref(&insn, regs);
	if (vaddr / PAGE_SIZE != (vaddr + size - 1) / PAGE_SIZE)
		return -EFAULT;

	/* Handle writes first */
	switch (mmio) {
	case INSN_MMIO_WRITE:
		memcpy(&val, reg, size);
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case INSN_MMIO_WRITE_IMM:
		val = insn.immediate.value;
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case INSN_MMIO_READ:
	case INSN_MMIO_READ_ZERO_EXTEND:
	case INSN_MMIO_READ_SIGN_EXTEND:
		/* Reads are handled below */
		break;
	case INSN_MMIO_MOVS:
	case INSN_MMIO_DECODE_FAILED:
		/*
		 * MMIO was accessed with an instruction that could not be
		 * decoded or handled properly. It was likely not using io.h
		 * helpers or accessed MMIO accidentally.
		 */
		return -EINVAL;
	default:
		WARN_ONCE(1, "Unknown insn_decode_mmio() decode value?");
		return -EINVAL;
	}

	/* Handle reads */
	if (!mmio_read(size, ve->gpa, &val))
		return -EIO;

	switch (mmio) {
	case INSN_MMIO_READ:
		/* Zero-extend for 32-bit operation */
		extend_size = size == 4 ? sizeof(*reg) : 0;
		break;
	case INSN_MMIO_READ_ZERO_EXTEND:
		/* Zero extend based on operand size */
		extend_size = insn.opnd_bytes;
		break;
	case INSN_MMIO_READ_SIGN_EXTEND:
		/* Sign extend based on operand size */
		extend_size = insn.opnd_bytes;
		if (size == 1 && val & BIT(7))
			extend_val = 0xFF;
		else if (size > 1 && val & BIT(15))
			extend_val = 0xFF;
		break;
	default:
		/* All other cases have to be covered with the first switch() */
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	if (extend_size)
		memset(reg, extend_val, extend_size);
	memcpy(reg, &val, size);
	return insn.length;
}

static bool handle_in(struct pt_regs *regs, int size, int port)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_IO_INSTRUCTION),
		.r12 = size,
		.r13 = PORT_READ,
		.r14 = port,
	};
	u64 mask = GENMASK(BITS_PER_BYTE * size - 1, 0);
	bool success;

	/*
	 * Emulate the I/O read via hypercall. More info about ABI can be found
	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
	 * "TDG.VP.VMCALL<Instruction.IO>".
	 */
	success = !__tdx_hypercall_ret(&args);

	/* Update part of the register affected by the emulated instruction */
	regs->ax &= ~mask;
	if (success)
		regs->ax |= args.r11 & mask;

	return success;
}

static bool handle_out(struct pt_regs *regs, int size, int port)
{
	u64 mask = GENMASK(BITS_PER_BYTE * size - 1, 0);

	/*
	 * Emulate the I/O write via hypercall. More info about ABI can be found
	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
	 * "TDG.VP.VMCALL<Instruction.IO>".
	 */
	return !_tdx_hypercall(hcall_func(EXIT_REASON_IO_INSTRUCTION), size,
			       PORT_WRITE, port, regs->ax & mask);
}
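/*
 * Worked example (values follow the VMX exit-qualification layout encoded
 * in the macros at the top of this file): "outb %al, $0x3f8" for
 * earlyprintk exits with exit_qual == 0x03f80000: size field 0 => 1 byte,
 * bit 3 clear => OUT, port 0x3f8 in bits 31:16. handle_io() below decodes
 * this into handle_out(regs, 1, 0x3f8).
 */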
/*
 * Emulate I/O using hypercall.
 *
 * Assumes the IO instruction was using ax, which is enforced
 * by the standard io.h macros.
 *
 * Return the number of bytes RIP should be incremented on success,
 * or -errno on failure.
 */
static int handle_io(struct pt_regs *regs, struct ve_info *ve)
{
	u32 exit_qual = ve->exit_qual;
	int size, port;
	bool in, ret;

	if (VE_IS_IO_STRING(exit_qual))
		return -EIO;

	in = VE_IS_IO_IN(exit_qual);
	size = VE_GET_IO_SIZE(exit_qual);
	port = VE_GET_PORT_NUM(exit_qual);

	if (in)
		ret = handle_in(regs, size, port);
	else
		ret = handle_out(regs, size, port);
	if (!ret)
		return -EIO;

	return ve_instr_len(ve);
}

/*
 * Early #VE exception handler. Only handles a subset of port I/O.
 * Intended only for earlyprintk. On failure, return false.
 */
__init bool tdx_early_handle_ve(struct pt_regs *regs)
{
	struct ve_info ve;
	int insn_len;

	tdx_get_ve_info(&ve);

	if (ve.exit_reason != EXIT_REASON_IO_INSTRUCTION)
		return false;

	insn_len = handle_io(regs, &ve);
	if (insn_len < 0)
		return false;

	regs->ip += insn_len;
	return true;
}

void tdx_get_ve_info(struct ve_info *ve)
{
	struct tdx_module_args args = {};

	/*
	 * Called during #VE handling to retrieve the #VE info from the
	 * TDX module.
	 *
	 * This has to be called early in #VE handling. A "nested" #VE which
	 * occurs before this will raise a #DF and is not recoverable.
	 *
	 * The call retrieves the #VE info from the TDX module, which also
	 * clears the "#VE valid" flag. This must be done before anything else
	 * because any #VE that occurs while the valid flag is set will lead to
	 * #DF.
	 *
	 * Note, the TDX module treats virtual NMIs as inhibited if the #VE
	 * valid flag is set. It means that NMI=>#VE will not result in a #DF.
	 */
	tdcall(TDG_VP_VEINFO_GET, &args);

	/* Transfer the output parameters */
	ve->exit_reason = args.rcx;
	ve->exit_qual = args.rdx;
	ve->gla = args.r8;
	ve->gpa = args.r9;
	ve->instr_len = lower_32_bits(args.r10);
	ve->instr_info = upper_32_bits(args.r10);
}

/*
 * Handle the user-initiated #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_user(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}

static inline bool is_private_gpa(u64 gpa)
{
	return gpa == cc_mkenc(gpa);
}
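/*
 * Example (assuming cc_mask == BIT_ULL(51)): an MMIO GPA such as
 * (BIT_ULL(51) | 0xfe000000) is not private, because cc_mkenc() clears
 * the shared bit and changes the value, while a plain 0xfe000000 is.
 * virt_exception_kernel() below uses this to tell MMIO apart from
 * unexpected faults on TD-private memory.
 */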
/*
 * Handle the kernel #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_kernel(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
		return handle_halt(ve);
	case EXIT_REASON_MSR_READ:
		return read_msr(regs, ve);
	case EXIT_REASON_MSR_WRITE:
		return write_msr(regs, ve);
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	case EXIT_REASON_EPT_VIOLATION:
		if (is_private_gpa(ve->gpa))
			panic("Unexpected EPT-violation on private memory.");
		return handle_mmio(regs, ve);
	case EXIT_REASON_IO_INSTRUCTION:
		return handle_io(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}

bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve)
{
	int insn_len;

	if (user_mode(regs))
		insn_len = virt_exception_user(regs, ve);
	else
		insn_len = virt_exception_kernel(regs, ve);
	if (insn_len < 0)
		return false;

	/* After successful #VE handling, move the IP */
	regs->ip += insn_len;

	return true;
}

static bool tdx_tlb_flush_required(bool private)
{
	/*
	 * TDX guest is responsible for flushing TLB on private->shared
	 * transition. VMM is responsible for flushing on shared->private.
	 *
	 * The VMM _can't_ flush private addresses as it can't generate PAs
	 * with the guest's HKID. Shared memory isn't subject to integrity
	 * checking, i.e. the VMM doesn't need to flush for its own protection.
	 *
	 * There's no need to flush when converting from shared to private,
	 * as flushing is the VMM's responsibility in this case, e.g. it must
	 * flush to avoid integrity failures in the face of a buggy or
	 * malicious guest.
	 */
	return !private;
}

static bool tdx_cache_flush_required(void)
{
	/*
	 * AMD SME/SEV can avoid cache flushing if HW enforces cache coherence.
	 * TDX doesn't have such capability.
	 *
	 * Flush cache unconditionally.
	 */
	return true;
}

/*
 * Inform the VMM of the guest's intent for this physical page: shared with
 * the VMM or private to the guest. The VMM is expected to change its mapping
 * of the page in response.
 */
static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
{
	phys_addr_t start = __pa(vaddr);
	phys_addr_t end = __pa(vaddr + numpages * PAGE_SIZE);

	if (!enc) {
		/* Set the shared (decrypted) bits: */
		start |= cc_mkdec(0);
		end |= cc_mkdec(0);
	}

	/*
	 * Notify the VMM about page mapping conversion. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface (GHCI),
	 * section "TDG.VP.VMCALL<MapGPA>".
	 */
	if (_tdx_hypercall(TDVMCALL_MAP_GPA, start, end - start, 0, 0))
		return false;

	/* shared->private conversion requires memory to be accepted before use */
	if (enc)
		return tdx_accept_memory(start, end);

	return true;
}

static bool tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
					  bool enc)
{
	/*
	 * Only handle shared->private conversion here.
	 * See the comment in tdx_early_init().
	 */
	if (enc)
		return tdx_enc_status_changed(vaddr, numpages, enc);
	return true;
}
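/*
 * Note on ordering (see also the comment in tdx_early_init()): e.g.
 * set_memory_decrypted() first remaps the range as shared and only then
 * runs the finish() callback below, while set_memory_encrypted() runs
 * the prepare() callback above before the mapping turns private. Either
 * way the transient state is a shared mapping of a private page, which
 * is recoverable, never a private mapping of a shared page.
 */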
static bool tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
					 bool enc)
{
	/*
	 * Only handle private->shared conversion here.
	 * See the comment in tdx_early_init().
	 */
	if (!enc)
		return tdx_enc_status_changed(vaddr, numpages, enc);
	return true;
}

void __init tdx_early_init(void)
{
	u64 cc_mask;
	u32 eax, sig[3];

	cpuid_count(TDX_CPUID_LEAF_ID, 0, &eax, &sig[0], &sig[2], &sig[1]);

	if (memcmp(TDX_IDENT, sig, sizeof(sig)))
		return;

	setup_force_cpu_cap(X86_FEATURE_TDX_GUEST);

	cc_vendor = CC_VENDOR_INTEL;
	tdx_parse_tdinfo(&cc_mask);
	cc_set_mask(cc_mask);

	/* Kernel does not use NOTIFY_ENABLES and does not need random #VEs */
	tdg_vm_wr(TDCS_NOTIFY_ENABLES, 0, -1ULL);

	/*
	 * All bits above GPA width are reserved and kernel treats shared bit
	 * as flag, not as part of physical address.
	 *
	 * Adjust physical mask to only cover valid GPA bits.
	 */
	physical_mask &= cc_mask - 1;

	/*
	 * The kernel mapping should match the TDX metadata for the page.
	 * load_unaligned_zeropad() can touch memory *adjacent* to that which is
	 * owned by the caller and can catch even _momentary_ mismatches. Bad
	 * things happen on mismatch:
	 *
	 *   - Private mapping => Shared Page  == Guest shutdown
	 *   - Shared mapping  => Private Page == Recoverable #VE
	 *
	 * guest.enc_status_change_prepare() converts the page from
	 * shared=>private before the mapping becomes private.
	 *
	 * guest.enc_status_change_finish() converts the page from
	 * private=>shared after the mapping becomes shared.
	 *
	 * In both cases there is a temporary shared mapping to a private page,
	 * which can result in a #VE. But, there is never a private mapping to
	 * a shared page.
	 */
	x86_platform.guest.enc_status_change_prepare = tdx_enc_status_change_prepare;
	x86_platform.guest.enc_status_change_finish = tdx_enc_status_change_finish;

	x86_platform.guest.enc_cache_flush_required = tdx_cache_flush_required;
	x86_platform.guest.enc_tlb_flush_required = tdx_tlb_flush_required;

	/*
	 * TDX intercepts the RDMSR to read the X2APIC ID in the parallel
	 * bringup low level code. That raises #VE which cannot be handled
	 * there.
	 *
	 * Intel-TDX has a secure RDMSR hypercall, but that needs to be
	 * implemented separately in the low level startup ASM code.
	 * Until that is in place, disable parallel bringup for TDX.
	 */
	x86_cpuinit.parallel_bringup = false;

	pr_info("Guest detected\n");
}