// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021-2022 Intel Corporation */

#undef pr_fmt
#define pr_fmt(fmt)	"tdx: " fmt

#include <linux/cpufeature.h>
#include <linux/export.h>
#include <linux/io.h>
#include <asm/coco.h>
#include <asm/tdx.h>
#include <asm/vmx.h>
#include <asm/ia32.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/paravirt_types.h>
#include <asm/pgtable.h>
#include <asm/traps.h>

/* MMIO direction */
#define EPT_READ	0
#define EPT_WRITE	1

/* Port I/O direction */
#define PORT_READ	0
#define PORT_WRITE	1

/* See Exit Qualification for I/O Instructions in VMX documentation */
#define VE_IS_IO_IN(e)		((e) & BIT(3))
#define VE_GET_IO_SIZE(e)	(((e) & GENMASK(2, 0)) + 1)
#define VE_GET_PORT_NUM(e)	((e) >> 16)
#define VE_IS_IO_STRING(e)	((e) & BIT(4))
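
/*
 * Worked example (illustrative, not from the spec text): a single-byte
 * "in %dx, %al" from port 0x3f8 produces exit qualification 0x03f80008:
 * bits 2:0 = 0 (size - 1, i.e. a 1-byte access), bit 3 = 1 (IN),
 * bit 4 = 0 (not a string instruction) and bits 31:16 = 0x03f8 (port
 * number). The macros above then yield VE_GET_IO_SIZE() == 1,
 * VE_IS_IO_IN() != 0, VE_IS_IO_STRING() == 0 and
 * VE_GET_PORT_NUM() == 0x3f8.
 */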

#define ATTR_DEBUG		BIT(0)
#define ATTR_SEPT_VE_DISABLE	BIT(28)

/* TDX Module call error codes */
#define TDCALL_RETURN_CODE(a)	((a) >> 32)
#define TDCALL_INVALID_OPERAND	0xc0000100
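
/*
 * Example (illustrative): a TDCALL status of 0xc000010000000000 carries
 * its error class in the upper 32 bits, so TDCALL_RETURN_CODE() extracts
 * 0xc0000100 == TDCALL_INVALID_OPERAND, which tdx_mcall_get_report0()
 * below maps to -EINVAL.
 */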

#define TDREPORT_SUBTYPE_0	0

/* Called from __tdx_hypercall() for unrecoverable failure */
noinstr void __tdx_hypercall_failed(void)
{
	instrumentation_begin();
	panic("TDVMCALL failed. TDX module bug?");
}

#ifdef CONFIG_KVM_GUEST
long tdx_kvm_hypercall(unsigned int nr, unsigned long p1, unsigned long p2,
		       unsigned long p3, unsigned long p4)
{
	struct tdx_hypercall_args args = {
		.r10 = nr,
		.r11 = p1,
		.r12 = p2,
		.r13 = p3,
		.r14 = p4,
	};

	return __tdx_hypercall(&args);
}
EXPORT_SYMBOL_GPL(tdx_kvm_hypercall);
#endif

/*
 * Used for TDX guests to make calls directly to the TD module. This
 * should only be used for calls that have no legitimate reason to fail
 * or where the kernel can not survive the call failing.
 */
static inline void tdcall(u64 fn, struct tdx_module_args *args)
{
	if (__tdcall_ret(fn, args))
		panic("TDCALL %lld failed (Buggy TDX module!)\n", fn);
}

/* Read TD-scoped metadata */
static inline u64 tdg_vm_rd(u64 field, u64 *value)
{
	struct tdx_module_args args = {
		.rdx = field,
	};
	u64 ret;

	ret = __tdcall_ret(TDG_VM_RD, &args);
	*value = args.r8;

	return ret;
}

/* Write TD-scoped metadata */
static inline u64 tdg_vm_wr(u64 field, u64 value, u64 mask)
{
	struct tdx_module_args args = {
		.rdx = field,
		.r8 = value,
		.r9 = mask,
	};

	return __tdcall(TDG_VM_WR, &args);
}
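
/*
 * Usage sketch (mirrors disable_sept_ve() below): @mask selects which
 * bits of the field the write may touch, so setting a single control bit
 * without disturbing its neighbours looks like:
 *
 *	tdg_vm_wr(TDCS_TD_CTLS, TD_CTLS_PENDING_VE_DISABLE,
 *		  TD_CTLS_PENDING_VE_DISABLE);
 */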

/**
 * tdx_mcall_get_report0() - Wrapper to get TDREPORT0 (a.k.a. TDREPORT
 *                           subtype 0) using TDG.MR.REPORT TDCALL.
 * @reportdata: Address of the input buffer which contains user-defined
 *              REPORTDATA to be included into TDREPORT.
 * @tdreport: Address of the output buffer to store TDREPORT.
 *
 * Refer to section titled "TDG.MR.REPORT leaf" in the TDX Module
 * v1.0 specification for more information on TDG.MR.REPORT TDCALL.
 * It is used in the TDX guest driver module to get the TDREPORT0.
 *
 * Return 0 on success, -EINVAL for invalid operands, or -EIO on
 * other TDCALL failures.
 */
int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport)
{
	struct tdx_module_args args = {
		.rcx = virt_to_phys(tdreport),
		.rdx = virt_to_phys(reportdata),
		.r8 = TDREPORT_SUBTYPE_0,
	};
	u64 ret;

	ret = __tdcall(TDG_MR_REPORT, &args);
	if (ret) {
		if (TDCALL_RETURN_CODE(ret) == TDCALL_INVALID_OPERAND)
			return -EINVAL;
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tdx_mcall_get_report0);
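
/*
 * Illustrative caller sketch, not a prescribed API use. It assumes the
 * 64-byte REPORTDATA and 1024-byte TDREPORT buffer sizes used by the TDX
 * guest driver; both buffers must be physically contiguous because they
 * are passed to the TDX module by physical address:
 *
 *	u8 *reportdata = kzalloc(64, GFP_KERNEL);
 *	u8 *tdreport = kzalloc(1024, GFP_KERNEL);
 *
 *	if (reportdata && tdreport &&
 *	    !tdx_mcall_get_report0(reportdata, tdreport))
 *		...;	// tdreport now holds TDREPORT0
 */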

static void __noreturn tdx_panic(const char *msg)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = TDVMCALL_REPORT_FATAL_ERROR,
		.r12 = 0, /* Error code: 0 is Panic */
	};
	union {
		/* Define register order according to the GHCI */
		struct { u64 r14, r15, rbx, rdi, rsi, r8, r9, rdx; };

		char str[64];
	} message;

	/* VMM assumes '\0' in byte 65, if the message took all 64 bytes */
	strncpy(message.str, msg, 64);

	args.r8 = message.r8;
	args.r9 = message.r9;
	args.r14 = message.r14;
	args.r15 = message.r15;
	args.rdi = message.rdi;
	args.rsi = message.rsi;
	args.rbx = message.rbx;
	args.rdx = message.rdx;

	/*
	 * This hypercall should never return and it is not safe
	 * to keep the guest running. Call it forever if it
	 * happens to return.
	 */
	while (1)
		__tdx_hypercall(&args);
}
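
/*
 * Layout example (illustrative): the union overlays the message string on
 * the registers in GHCI order, so str[0..7] travel in r14, str[8..15] in
 * r15, and so on through rdx. For msg == "Oops", r14 holds the
 * little-endian bytes 'O','o','p','s',0,0,0,0, i.e. 0x73706f4f.
 */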

/*
 * The kernel cannot handle #VEs when accessing normal kernel memory. Ensure
 * that no #VE will be delivered for accesses to TD-private memory.
 *
 * TDX 1.0 does not allow the guest to disable SEPT #VE on its own. The VMM
 * controls if the guest will receive such #VE with TD attribute
 * ATTR_SEPT_VE_DISABLE.
 *
 * Newer TDX modules allow the guest to control if it wants to receive SEPT
 * violation #VEs.
 *
 * Check if the feature is available and disable SEPT #VE if possible.
 *
 * If the TD is allowed to disable/enable SEPT #VEs, the ATTR_SEPT_VE_DISABLE
 * attribute is no longer reliable. It reflects the initial state of the
 * control for the TD, but it will not be updated if someone (e.g. bootloader)
 * changes it before the kernel starts. Kernel must check TDCS_TD_CTLS bit to
 * determine if SEPT #VEs are enabled or disabled.
 */
static void disable_sept_ve(u64 td_attr)
{
	const char *msg = "TD misconfiguration: SEPT #VE has to be disabled";
	bool debug = td_attr & ATTR_DEBUG;
	u64 config, controls;

	/* Is this TD allowed to disable SEPT #VE */
	tdg_vm_rd(TDCS_CONFIG_FLAGS, &config);
	if (!(config & TDCS_CONFIG_FLEXIBLE_PENDING_VE)) {
		/* No SEPT #VE controls for the guest: check the attribute */
		if (td_attr & ATTR_SEPT_VE_DISABLE)
			return;

		/* Relax SEPT_VE_DISABLE check for debug TD for backtraces */
		if (debug)
			pr_warn("%s\n", msg);
		else
			tdx_panic(msg);
		return;
	}

	/* Check if SEPT #VE has been disabled before us */
	tdg_vm_rd(TDCS_TD_CTLS, &controls);
	if (controls & TD_CTLS_PENDING_VE_DISABLE)
		return;

	/* Keep #VEs enabled for splats in debugging environments */
	if (debug)
		return;

	/* Disable SEPT #VEs */
	tdg_vm_wr(TDCS_TD_CTLS, TD_CTLS_PENDING_VE_DISABLE,
		  TD_CTLS_PENDING_VE_DISABLE);
}

static void tdx_setup(u64 *cc_mask)
{
	struct tdx_module_args args = {};
	unsigned int gpa_width;
	u64 td_attr;

	/*
	 * TDINFO TDX module call is used to get the TD execution environment
	 * information like GPA width, number of available vcpus, debug mode
	 * information, etc. More details about the ABI can be found in TDX
	 * Guest-Host-Communication Interface (GHCI), section 2.4.2 TDCALL
	 * [TDG.VP.INFO].
	 */
	tdcall(TDG_VP_INFO, &args);

	/*
	 * The highest bit of a guest physical address is the "sharing" bit.
	 * Set it for shared pages and clear it for private pages.
	 *
	 * The GPA width that comes out of this call is critical. TDX guests
	 * can not meaningfully run without it.
	 */
	gpa_width = args.rcx & GENMASK(5, 0);
	*cc_mask = BIT_ULL(gpa_width - 1);

	td_attr = args.rdx;

	/* Kernel does not use NOTIFY_ENABLES and does not need random #VEs */
	tdg_vm_wr(TDCS_NOTIFY_ENABLES, 0, -1ULL);

	disable_sept_ve(td_attr);
}
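
/*
 * Worked example (illustrative): on a TD with a 52-bit GPA width,
 * TDG.VP.INFO returns gpa_width == 52 in the low six bits of RCX, so
 * cc_mask becomes BIT_ULL(51) == 0x0008000000000000. GPAs with bit 51
 * set are shared with the VMM; GPAs with it clear are TD-private.
 */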

/*
 * The TDX module spec states that #VE may be injected for a limited set of
 * reasons:
 *
 *  - Emulation of the architectural #VE injection on EPT violation;
 *
 *  - As a result of guest TD execution of a disallowed instruction,
 *    a disallowed MSR access, or CPUID virtualization;
 *
 *  - A notification to the guest TD about anomalous behavior;
 *
 * The last one is opt-in and is not used by the kernel.
 *
 * The Intel Software Developer's Manual describes cases when instruction
 * length field can be used in section "Information for VM Exits Due to
 * Instruction Execution".
 *
 * For TDX, it ultimately means GET_VEINFO provides reliable instruction length
 * information if #VE occurred due to instruction execution, but not for EPT
 * violations.
 */
static int ve_instr_len(struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
	case EXIT_REASON_MSR_READ:
	case EXIT_REASON_MSR_WRITE:
	case EXIT_REASON_CPUID:
	case EXIT_REASON_IO_INSTRUCTION:
		/* It is safe to use ve->instr_len for #VE due to instructions */
		return ve->instr_len;
	case EXIT_REASON_EPT_VIOLATION:
		/*
		 * For EPT violations, ve->instr_len is not defined. For those,
		 * the kernel must decode instructions manually and should not
		 * be using this function.
		 */
		WARN_ONCE(1, "ve->instr_len is not defined for EPT violations");
		return 0;
	default:
		WARN_ONCE(1, "Unexpected #VE-type: %lld\n", ve->exit_reason);
		return ve->instr_len;
	}
}

static u64 __cpuidle __halt(const bool irq_disabled)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_HLT),
		.r12 = irq_disabled,
	};

	/*
	 * Emulate HLT operation via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section 3.8 TDG.VP.VMCALL<Instruction.HLT>.
	 *
	 * The VMM uses the "IRQ disabled" param to understand IRQ
	 * enabled status (RFLAGS.IF) of the TD guest and to determine
	 * whether or not it should schedule the halted vCPU if an
	 * IRQ becomes pending. E.g. if IRQs are disabled, the VMM
	 * can keep the vCPU in virtual HLT, even if an IRQ is
	 * pending, without hanging/breaking the guest.
	 */
	return __tdx_hypercall(&args);
}

static int handle_halt(struct ve_info *ve)
{
	const bool irq_disabled = irqs_disabled();

	if (__halt(irq_disabled))
		return -EIO;

	return ve_instr_len(ve);
}

void __cpuidle tdx_halt(void)
{
	const bool irq_disabled = false;

	/*
	 * Use WARN_ONCE() to report the failure.
	 */
	if (__halt(irq_disabled))
		WARN_ONCE(1, "HLT instruction emulation failed\n");
}

static void __cpuidle tdx_safe_halt(void)
{
	tdx_halt();
	/*
	 * "__cpuidle" section doesn't support instrumentation, so stick
	 * with raw_* variant that avoids tracing hooks.
	 */
	raw_local_irq_enable();
}

static int read_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_READ),
		.r12 = regs->cx,
	};

	/*
	 * Emulate the MSR read via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "TDG.VP.VMCALL<Instruction.RDMSR>".
	 */
	if (__tdx_hypercall_ret(&args))
		return -EIO;

	regs->ax = lower_32_bits(args.r11);
	regs->dx = upper_32_bits(args.r11);
	return ve_instr_len(ve);
}

static int write_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_WRITE),
		.r12 = regs->cx,
		.r13 = (u64)regs->dx << 32 | regs->ax,
	};

	/*
	 * Emulate the MSR write via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI) section titled "TDG.VP.VMCALL<Instruction.WRMSR>".
	 */
	if (__tdx_hypercall(&args))
		return -EIO;

	return ve_instr_len(ve);
}
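
/*
 * Packing example (illustrative): WRMSR supplies the 64-bit value as
 * EDX:EAX, so for regs->dx == 0x00000001 and regs->ax == 0x00000f00 the
 * hypercall carries r13 == 0x0000000100000f00. read_msr() performs the
 * inverse split with upper_32_bits()/lower_32_bits().
 */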

static int handle_cpuid(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_CPUID),
		.r12 = regs->ax,
		.r13 = regs->cx,
	};

	/*
	 * Only allow the VMM to control the range reserved for hypervisor
	 * communication.
	 *
	 * Return all-zeros for any CPUID outside the range. This matches
	 * CPU behaviour for an unsupported leaf.
	 */
	if (regs->ax < 0x40000000 || regs->ax > 0x4FFFFFFF) {
		regs->ax = regs->bx = regs->cx = regs->dx = 0;
		return ve_instr_len(ve);
	}

	/*
	 * Emulate the CPUID instruction via a hypercall. More info about
	 * ABI can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "VP.VMCALL<Instruction.CPUID>".
	 */
	if (__tdx_hypercall_ret(&args))
		return -EIO;

	/*
	 * As per TDX GHCI CPUID ABI, r12-r15 registers contain contents of
	 * EAX, EBX, ECX, EDX registers after the CPUID instruction execution.
	 * So copy the register contents back to pt_regs.
	 */
	regs->ax = args.r12;
	regs->bx = args.r13;
	regs->cx = args.r14;
	regs->dx = args.r15;

	return ve_instr_len(ve);
}

static bool mmio_read(int size, unsigned long addr, unsigned long *val)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_EPT_VIOLATION),
		.r12 = size,
		.r13 = EPT_READ,
		.r14 = addr,
	};

	if (__tdx_hypercall_ret(&args))
		return false;
	*val = args.r11;
	return true;
}

static bool mmio_write(int size, unsigned long addr, unsigned long val)
{
	return !_tdx_hypercall(hcall_func(EXIT_REASON_EPT_VIOLATION), size,
			       EPT_WRITE, addr, val);
}

static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
{
	unsigned long *reg, val, vaddr;
	char buffer[MAX_INSN_SIZE];
	enum insn_mmio_type mmio;
	struct insn insn = {};
	int size, extend_size;
	u8 extend_val = 0;

	/* Only in-kernel MMIO is supported */
	if (WARN_ON_ONCE(user_mode(regs)))
		return -EFAULT;

	if (copy_from_kernel_nofault(buffer, (void *)regs->ip, MAX_INSN_SIZE))
		return -EFAULT;

	if (insn_decode(&insn, buffer, MAX_INSN_SIZE, INSN_MODE_64))
		return -EINVAL;

	mmio = insn_decode_mmio(&insn, &size);
	if (WARN_ON_ONCE(mmio == INSN_MMIO_DECODE_FAILED))
		return -EINVAL;

	if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
		reg = insn_get_modrm_reg_ptr(&insn, regs);
		if (!reg)
			return -EINVAL;
	}

	if (!fault_in_kernel_space(ve->gla)) {
		WARN_ONCE(1, "Access to userspace address is not supported");
		return -EINVAL;
	}

	/*
	 * Reject EPT violation #VEs that split pages.
	 *
	 * MMIO accesses are supposed to be naturally aligned and therefore
	 * never cross page boundaries. Seeing split page accesses indicates
	 * a bug or a load_unaligned_zeropad() that stepped into an MMIO page.
	 *
	 * load_unaligned_zeropad() will recover using exception fixups.
	 */
	vaddr = (unsigned long)insn_get_addr_ref(&insn, regs);
	if (vaddr / PAGE_SIZE != (vaddr + size - 1) / PAGE_SIZE)
		return -EFAULT;

	/* Handle writes first */
	switch (mmio) {
	case INSN_MMIO_WRITE:
		memcpy(&val, reg, size);
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case INSN_MMIO_WRITE_IMM:
		val = insn.immediate.value;
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case INSN_MMIO_READ:
	case INSN_MMIO_READ_ZERO_EXTEND:
	case INSN_MMIO_READ_SIGN_EXTEND:
		/* Reads are handled below */
		break;
	case INSN_MMIO_MOVS:
	case INSN_MMIO_DECODE_FAILED:
		/*
		 * MMIO was accessed with an instruction that could not be
		 * decoded or handled properly. It was likely not using io.h
		 * helpers or accessed MMIO accidentally.
		 */
		return -EINVAL;
	default:
		WARN_ONCE(1, "Unknown insn_decode_mmio() decode value?");
		return -EINVAL;
	}

	/* Handle reads */
	if (!mmio_read(size, ve->gpa, &val))
		return -EIO;

	switch (mmio) {
	case INSN_MMIO_READ:
		/* Zero-extend for 32-bit operation */
		extend_size = size == 4 ? sizeof(*reg) : 0;
		break;
	case INSN_MMIO_READ_ZERO_EXTEND:
		/* Zero extend based on operand size */
		extend_size = insn.opnd_bytes;
		break;
	case INSN_MMIO_READ_SIGN_EXTEND:
		/* Sign extend based on operand size */
		extend_size = insn.opnd_bytes;
		if (size == 1 && val & BIT(7))
			extend_val = 0xFF;
		else if (size > 1 && val & BIT(15))
			extend_val = 0xFF;
		break;
	default:
		/* All other cases have to be covered with the first switch() */
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	if (extend_size)
		memset(reg, extend_val, extend_size);
	memcpy(reg, &val, size);
	return insn.length;
}
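
/*
 * Two worked examples (illustrative):
 *
 * - Page-split rejection: a 4-byte read at vaddr 0x...ffe spans two pages
 *   since 0xffe / PAGE_SIZE != (0xffe + 3) / PAGE_SIZE, so the handler
 *   returns -EFAULT rather than emulating a split access.
 *
 * - Sign extension: "movsbq (%rbx), %rax" on a 1-byte MMIO value of 0x80
 *   gives size == 1 and insn.opnd_bytes == 8; bit 7 is set, so the
 *   destination is filled with 0xFF and the low byte copied in, leaving
 *   0xffffffffffffff80 in the register.
 */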

static bool handle_in(struct pt_regs *regs, int size, int port)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_IO_INSTRUCTION),
		.r12 = size,
		.r13 = PORT_READ,
		.r14 = port,
	};
	u64 mask = GENMASK(BITS_PER_BYTE * size - 1, 0);
	bool success;

	/*
	 * Emulate the I/O read via hypercall. More info about ABI can be found
	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
	 * "TDG.VP.VMCALL<Instruction.IO>".
	 */
	success = !__tdx_hypercall_ret(&args);

	/* Update part of the register affected by the emulated instruction */
	regs->ax &= ~mask;
	if (success)
		regs->ax |= args.r11 & mask;

	return success;
}

static bool handle_out(struct pt_regs *regs, int size, int port)
{
	u64 mask = GENMASK(BITS_PER_BYTE * size - 1, 0);

	/*
	 * Emulate the I/O write via hypercall. More info about ABI can be found
	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
	 * "TDG.VP.VMCALL<Instruction.IO>".
	 */
	return !_tdx_hypercall(hcall_func(EXIT_REASON_IO_INSTRUCTION), size,
			       PORT_WRITE, port, regs->ax & mask);
}
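
/*
 * Mask example (illustrative): a 2-byte "out %ax, %dx" yields
 * mask == GENMASK(15, 0) == 0xffff, so only the AX portion of the
 * register is transmitted; likewise handle_in() updates only the low
 * "size" bytes of regs->ax and leaves the rest untouched.
 */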

/*
 * Emulate I/O using hypercall.
 *
 * Assumes the IO instruction was using ax, which is enforced
 * by the standard io.h macros.
 *
 * Return the instruction length on success or -errno on failure.
 */
static int handle_io(struct pt_regs *regs, struct ve_info *ve)
{
	u32 exit_qual = ve->exit_qual;
	int size, port;
	bool in, ret;

	if (VE_IS_IO_STRING(exit_qual))
		return -EIO;

	in = VE_IS_IO_IN(exit_qual);
	size = VE_GET_IO_SIZE(exit_qual);
	port = VE_GET_PORT_NUM(exit_qual);

	if (in)
		ret = handle_in(regs, size, port);
	else
		ret = handle_out(regs, size, port);
	if (!ret)
		return -EIO;

	return ve_instr_len(ve);
}

/*
 * Early #VE exception handler. Only handles a subset of port I/O.
 * Intended only for earlyprintk. On failure, return false.
 */
__init bool tdx_early_handle_ve(struct pt_regs *regs)
{
	struct ve_info ve;
	int insn_len;

	tdx_get_ve_info(&ve);

	if (ve.exit_reason != EXIT_REASON_IO_INSTRUCTION)
		return false;

	insn_len = handle_io(regs, &ve);
	if (insn_len < 0)
		return false;

	regs->ip += insn_len;
	return true;
}

void tdx_get_ve_info(struct ve_info *ve)
{
	struct tdx_module_args args = {};

	/*
	 * Called during #VE handling to retrieve the #VE info from the
	 * TDX module.
	 *
	 * This has to be called early in #VE handling. A "nested" #VE which
	 * occurs before this will raise a #DF and is not recoverable.
	 *
	 * The call retrieves the #VE info from the TDX module, which also
	 * clears the "#VE valid" flag. This must be done before anything else
	 * because any #VE that occurs while the valid flag is set will lead to
	 * #DF.
	 *
	 * Note, the TDX module treats virtual NMIs as inhibited if the #VE
	 * valid flag is set. It means that NMI=>#VE will not result in a #DF.
	 */
	tdcall(TDG_VP_VEINFO_GET, &args);

	/* Transfer the output parameters */
	ve->exit_reason = args.rcx;
	ve->exit_qual = args.rdx;
	ve->gla = args.r8;
	ve->gpa = args.r9;
	ve->instr_len = lower_32_bits(args.r10);
	ve->instr_info = upper_32_bits(args.r10);
}

/*
 * Handle the user initiated #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_user(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}

static inline bool is_private_gpa(u64 gpa)
{
	return gpa == cc_mkenc(gpa);
}
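
/*
 * Example (illustrative): with cc_mask == BIT_ULL(51), cc_mkenc() clears
 * the shared bit on Intel, so a GPA with bit 51 already clear compares
 * equal to its encrypted form and is private; a GPA with bit 51 set does
 * not, and is shared.
 */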

/*
 * Handle the kernel #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_kernel(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
		return handle_halt(ve);
	case EXIT_REASON_MSR_READ:
		return read_msr(regs, ve);
	case EXIT_REASON_MSR_WRITE:
		return write_msr(regs, ve);
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	case EXIT_REASON_EPT_VIOLATION:
		if (is_private_gpa(ve->gpa))
			panic("Unexpected EPT-violation on private memory.");
		return handle_mmio(regs, ve);
	case EXIT_REASON_IO_INSTRUCTION:
		return handle_io(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}

bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve)
{
	int insn_len;

	if (user_mode(regs))
		insn_len = virt_exception_user(regs, ve);
	else
		insn_len = virt_exception_kernel(regs, ve);
	if (insn_len < 0)
		return false;

	/* After successful #VE handling, move the IP */
	regs->ip += insn_len;

	return true;
}

static bool tdx_tlb_flush_required(bool private)
{
	/*
	 * TDX guest is responsible for flushing TLB on private->shared
	 * transition. VMM is responsible for flushing on shared->private.
	 *
	 * The VMM _can't_ flush private addresses as it can't generate PAs
	 * with the guest's HKID. Shared memory isn't subject to integrity
	 * checking, i.e. the VMM doesn't need to flush for its own protection.
	 *
	 * There's no need to flush when converting from shared to private,
	 * as flushing is the VMM's responsibility in this case, e.g. it must
	 * flush to avoid integrity failures in the face of a buggy or
	 * malicious guest.
	 */
	return !private;
}

static bool tdx_cache_flush_required(void)
{
	/*
	 * AMD SME/SEV can avoid cache flushing if HW enforces cache coherence.
	 * TDX doesn't have such capability.
	 *
	 * Flush cache unconditionally.
	 */
	return true;
}

/*
 * Inform the VMM of the guest's intent for this physical page: shared with
 * the VMM or private to the guest. The VMM is expected to change its mapping
 * of the page in response.
 */
static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
{
	phys_addr_t start = __pa(vaddr);
	phys_addr_t end = __pa(vaddr + numpages * PAGE_SIZE);

	if (!enc) {
		/* Set the shared (decrypted) bits: */
		start |= cc_mkdec(0);
		end |= cc_mkdec(0);
	}

	/*
	 * Notify the VMM about page mapping conversion. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface (GHCI),
	 * section "TDG.VP.VMCALL<MapGPA>".
	 */
	if (_tdx_hypercall(TDVMCALL_MAP_GPA, start, end - start, 0, 0))
		return false;

	/* shared->private conversion requires memory to be accepted before use */
	if (enc)
		return tdx_accept_memory(start, end);

	return true;
}
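
/*
 * Worked example (illustrative): converting two private pages at vaddr to
 * shared with cc_mask == BIT_ULL(51) passes MapGPA a start of
 * __pa(vaddr) | BIT_ULL(51) and a length of 2 * PAGE_SIZE; the set shared
 * bit tells the VMM the range is being made shared, so no acceptance is
 * needed afterwards.
 */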

static bool tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
					  bool enc)
{
	/*
	 * Only handle shared->private conversion here.
	 * See the comment in tdx_early_init().
	 */
	if (enc)
		return tdx_enc_status_changed(vaddr, numpages, enc);
	return true;
}

static bool tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
					 bool enc)
{
	/*
	 * Only handle private->shared conversion here.
	 * See the comment in tdx_early_init().
	 */
	if (!enc)
		return tdx_enc_status_changed(vaddr, numpages, enc);
	return true;
}

void __init tdx_early_init(void)
{
	u64 cc_mask;
	u32 eax, sig[3];

	cpuid_count(TDX_CPUID_LEAF_ID, 0, &eax, &sig[0], &sig[2], &sig[1]);

	if (memcmp(TDX_IDENT, sig, sizeof(sig)))
		return;

	setup_force_cpu_cap(X86_FEATURE_TDX_GUEST);

	cc_vendor = CC_VENDOR_INTEL;

	/* Configure the TD */
	tdx_setup(&cc_mask);

	cc_set_mask(cc_mask);

	/*
	 * All bits above GPA width are reserved and kernel treats shared bit
	 * as flag, not as part of physical address.
	 *
	 * Adjust physical mask to only cover valid GPA bits.
	 */
	physical_mask &= cc_mask - 1;
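
	/*
	 * Example (illustrative): with cc_mask == BIT_ULL(51),
	 * cc_mask - 1 == GENMASK_ULL(50, 0), so physical_mask is clamped
	 * to bits 50:0 and the shared bit never leaks into physical
	 * addresses the kernel computes.
	 */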

	/*
	 * The kernel mapping should match the TDX metadata for the page.
	 * load_unaligned_zeropad() can touch memory *adjacent* to that which is
	 * owned by the caller and can catch even _momentary_ mismatches. Bad
	 * things happen on mismatch:
	 *
	 *   - Private mapping => Shared Page  == Guest shutdown
	 *   - Shared mapping  => Private Page == Recoverable #VE
	 *
	 * guest.enc_status_change_prepare() converts the page from
	 * shared=>private before the mapping becomes private.
	 *
	 * guest.enc_status_change_finish() converts the page from
	 * private=>shared after the mapping becomes shared.
	 *
	 * In both cases there is a temporary shared mapping to a private page,
	 * which can result in a #VE. But, there is never a private mapping to
	 * a shared page.
	 */
	x86_platform.guest.enc_status_change_prepare = tdx_enc_status_change_prepare;
	x86_platform.guest.enc_status_change_finish = tdx_enc_status_change_finish;

	x86_platform.guest.enc_cache_flush_required = tdx_cache_flush_required;
	x86_platform.guest.enc_tlb_flush_required = tdx_tlb_flush_required;

	/*
	 * Avoid "sti;hlt" execution in TDX guests as HLT induces a #VE that
	 * will enable interrupts before HLT TDCALL invocation if executed
	 * in STI-shadow, possibly resulting in missed wakeup events.
	 *
	 * Modify all possible HLT execution paths to use TDX specific routines
	 * that directly execute TDCALL and toggle the interrupt state as
	 * needed after TDCALL completion. This also reduces HLT related #VEs
	 * in addition to having a reliable halt logic execution.
	 */
	pv_ops.irq.safe_halt = tdx_safe_halt;
	pv_ops.irq.halt = tdx_halt;

	/*
	 * TDX intercepts the RDMSR to read the X2APIC ID in the parallel
	 * bringup low level code. That raises #VE which cannot be handled
	 * there.
	 *
	 * Intel-TDX has a secure RDMSR hypercall, but that needs to be
	 * implemented separately in the low level startup ASM code.
	 * Until that is in place, disable parallel bringup for TDX.
	 */
	x86_cpuinit.parallel_bringup = false;

	pr_info("Guest detected\n");
}