// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021-2022 Intel Corporation */

#undef pr_fmt
#define pr_fmt(fmt)	"tdx: " fmt

#include <linux/cpufeature.h>
#include <linux/export.h>
#include <linux/io.h>
#include <asm/coco.h>
#include <asm/tdx.h>
#include <asm/vmx.h>
#include <asm/ia32.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/pgtable.h>
#include <asm/traps.h>

/* MMIO direction */
#define EPT_READ	0
#define EPT_WRITE	1

/* Port I/O direction */
#define PORT_READ	0
#define PORT_WRITE	1

/* See Exit Qualification for I/O Instructions in VMX documentation */
#define VE_IS_IO_IN(e)		((e) & BIT(3))
#define VE_GET_IO_SIZE(e)	(((e) & GENMASK(2, 0)) + 1)
#define VE_GET_PORT_NUM(e)	((e) >> 16)
#define VE_IS_IO_STRING(e)	((e) & BIT(4))
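
/*
 * Worked example (illustrative, not from the spec text): an exit
 * qualification of 0x00710008 decodes as a single-byte, non-string IN
 * from port 0x71: VE_GET_IO_SIZE() == 1, VE_IS_IO_IN() != 0,
 * VE_IS_IO_STRING() == 0, VE_GET_PORT_NUM() == 0x71.
 */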

#define ATTR_DEBUG		BIT(0)
#define ATTR_SEPT_VE_DISABLE	BIT(28)

/* TDX Module call error codes */
#define TDCALL_RETURN_CODE(a)	((a) >> 32)
#define TDCALL_INVALID_OPERAND	0xc0000100
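/*
 * For example (illustrative), a raw TDCALL status of 0xc000010000000000
 * has TDCALL_RETURN_CODE() == 0xc0000100, i.e. TDCALL_INVALID_OPERAND.
 */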

#define TDREPORT_SUBTYPE_0	0

/* Called from __tdx_hypercall() for unrecoverable failure */
noinstr void __tdx_hypercall_failed(void)
{
	instrumentation_begin();
	panic("TDVMCALL failed. TDX module bug?");
}

#ifdef CONFIG_KVM_GUEST
long tdx_kvm_hypercall(unsigned int nr, unsigned long p1, unsigned long p2,
		       unsigned long p3, unsigned long p4)
{
	struct tdx_hypercall_args args = {
		.r10 = nr,
		.r11 = p1,
		.r12 = p2,
		.r13 = p3,
		.r14 = p4,
	};

	return __tdx_hypercall(&args);
}
EXPORT_SYMBOL_GPL(tdx_kvm_hypercall);
#endif
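
/*
 * Illustrative usage sketch (not from this file): a KVM TDX guest issuing
 * the standard KVM_HC_SEND_IPI hypercall would call:
 *
 *	ret = tdx_kvm_hypercall(KVM_HC_SEND_IPI, ipi_bitmap_low,
 *				ipi_bitmap_high, min, icr);
 *
 * The wrapper forwards nr in R10 and p1-p4 in R11-R14; a non-zero R10
 * marks the TDVMCALL as vendor-specific (here, KVM) rather than a
 * standard GHCI call.
 */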

/*
 * Used for TDX guests to make calls directly to the TD module. This
 * should only be used for calls that have no legitimate reason to fail
 * or where the kernel can not survive the call failing.
 */
static inline void tdx_module_call(u64 fn, u64 rcx, u64 rdx, u64 r8, u64 r9,
				   struct tdx_module_output *out)
{
	if (__tdx_module_call(fn, rcx, rdx, r8, r9, out))
		panic("TDCALL %lld failed (Buggy TDX module!)\n", fn);
}

/**
 * tdx_mcall_get_report0() - Wrapper to get TDREPORT0 (a.k.a. TDREPORT
 *                           subtype 0) using TDG.MR.REPORT TDCALL.
 * @reportdata: Address of the input buffer which contains user-defined
 *              REPORTDATA to be included into TDREPORT.
 * @tdreport: Address of the output buffer to store TDREPORT.
 *
 * Refer to section titled "TDG.MR.REPORT leaf" in the TDX Module
 * v1.0 specification for more information on TDG.MR.REPORT TDCALL.
 * It is used in the TDX guest driver module to get the TDREPORT0.
 *
 * Return 0 on success, -EINVAL for invalid operands, or -EIO on
 * other TDCALL failures.
 */
int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport)
{
	u64 ret;

	ret = __tdx_module_call(TDX_GET_REPORT, virt_to_phys(tdreport),
				virt_to_phys(reportdata), TDREPORT_SUBTYPE_0,
				0, NULL);
	if (ret) {
		if (TDCALL_RETURN_CODE(ret) == TDCALL_INVALID_OPERAND)
			return -EINVAL;
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tdx_mcall_get_report0);

static void __noreturn tdx_panic(const char *msg)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = TDVMCALL_REPORT_FATAL_ERROR,
		.r12 = 0, /* Error code: 0 is Panic */
	};
	union {
		/* Define register order according to the GHCI */
		struct { u64 r14, r15, rbx, rdi, rsi, r8, r9, rdx; };

		char str[64];
	} message;

	/* VMM assumes '\0' in byte 65, if the message took all 64 bytes */
	strncpy(message.str, msg, 64);

	args.r8 = message.r8;
	args.r9 = message.r9;
	args.r14 = message.r14;
	args.r15 = message.r15;
	args.rdi = message.rdi;
	args.rsi = message.rsi;
	args.rbx = message.rbx;
	args.rdx = message.rdx;

	/*
	 * This hypercall should never return and it is not safe
	 * to keep the guest running. Call it forever if it
	 * happens to return.
	 */
	while (1)
		__tdx_hypercall(&args);
}

static void tdx_parse_tdinfo(u64 *cc_mask)
{
	struct tdx_module_output out;
	unsigned int gpa_width;
	u64 td_attr;

	/*
	 * TDINFO TDX module call is used to get the TD execution environment
	 * information like GPA width, number of available vcpus, debug mode
	 * information, etc. More details about the ABI can be found in TDX
	 * Guest-Host-Communication Interface (GHCI), section 2.4.2 TDCALL
	 * [TDG.VP.INFO].
	 */
	tdx_module_call(TDX_GET_INFO, 0, 0, 0, 0, &out);

	/*
	 * The highest bit of a guest physical address is the "sharing" bit.
	 * Set it for shared pages and clear it for private pages.
	 *
	 * The GPA width that comes out of this call is critical. TDX guests
	 * can not meaningfully run without it.
	 */
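	/*
	 * For example (illustrative): gpa_width == 52 yields
	 * cc_mask == BIT_ULL(51), i.e. bit 51 is the shared bit.
	 */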
	gpa_width = out.rcx & GENMASK(5, 0);
	*cc_mask = BIT_ULL(gpa_width - 1);

	/*
	 * The kernel can not handle #VE's when accessing normal kernel
	 * memory. Ensure that no #VE will be delivered for accesses to
	 * TD-private memory. Only VMM-shared memory (MMIO) will #VE.
	 */
	td_attr = out.rdx;
	if (!(td_attr & ATTR_SEPT_VE_DISABLE)) {
		const char *msg = "TD misconfiguration: SEPT_VE_DISABLE attribute must be set.";

		/* Relax SEPT_VE_DISABLE check for debug TD. */
		if (td_attr & ATTR_DEBUG)
			pr_warn("%s\n", msg);
		else
			tdx_panic(msg);
	}
}

/*
 * The TDX module spec states that #VE may be injected for a limited set of
 * reasons:
 *
 *  - Emulation of the architectural #VE injection on EPT violation;
 *
 *  - As a result of guest TD execution of a disallowed instruction,
 *    a disallowed MSR access, or CPUID virtualization;
 *
 *  - A notification to the guest TD about anomalous behavior;
 *
 * The last one is opt-in and is not used by the kernel.
 *
 * The Intel Software Developer's Manual describes cases when instruction
 * length field can be used in section "Information for VM Exits Due to
 * Instruction Execution".
 *
 * For TDX, it ultimately means GET_VEINFO provides reliable instruction length
 * information if #VE occurred due to instruction execution, but not for EPT
 * violations.
 */
static int ve_instr_len(struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
	case EXIT_REASON_MSR_READ:
	case EXIT_REASON_MSR_WRITE:
	case EXIT_REASON_CPUID:
	case EXIT_REASON_IO_INSTRUCTION:
		/* It is safe to use ve->instr_len for #VE due to instructions */
		return ve->instr_len;
	case EXIT_REASON_EPT_VIOLATION:
		/*
		 * For EPT violations, ve->insn_len is not defined. For those,
		 * the kernel must decode instructions manually and should not
		 * be using this function.
		 */
		WARN_ONCE(1, "ve->instr_len is not defined for EPT violations");
		return 0;
	default:
		WARN_ONCE(1, "Unexpected #VE-type: %lld\n", ve->exit_reason);
		return ve->instr_len;
	}
}

static u64 __cpuidle __halt(const bool irq_disabled)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_HLT),
		.r12 = irq_disabled,
	};

	/*
	 * Emulate HLT operation via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section 3.8 TDG.VP.VMCALL<Instruction.HLT>.
	 *
	 * The VMM uses the "IRQ disabled" param to understand IRQ
	 * enabled status (RFLAGS.IF) of the TD guest and to determine
	 * whether or not it should schedule the halted vCPU if an
	 * IRQ becomes pending. E.g. if IRQs are disabled, the VMM
	 * can keep the vCPU in virtual HLT, even if an IRQ is
	 * pending, without hanging/breaking the guest.
	 */
	return __tdx_hypercall(&args);
}

static int handle_halt(struct ve_info *ve)
{
	const bool irq_disabled = irqs_disabled();

	if (__halt(irq_disabled))
		return -EIO;

	return ve_instr_len(ve);
}

void __cpuidle tdx_safe_halt(void)
{
	const bool irq_disabled = false;

	/*
	 * Use WARN_ONCE() to report the failure.
	 */
	if (__halt(irq_disabled))
		WARN_ONCE(1, "HLT instruction emulation failed\n");
}

static int read_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_READ),
		.r12 = regs->cx,
	};

	/*
	 * Emulate the MSR read via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "TDG.VP.VMCALL<Instruction.RDMSR>".
	 */
	if (__tdx_hypercall_ret(&args))
		return -EIO;

	regs->ax = lower_32_bits(args.r11);
	regs->dx = upper_32_bits(args.r11);
	return ve_instr_len(ve);
}

static int write_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_WRITE),
		.r12 = regs->cx,
		.r13 = (u64)regs->dx << 32 | regs->ax,
	};

	/*
	 * Emulate the MSR write via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI) section titled "TDG.VP.VMCALL<Instruction.WRMSR>".
	 */
	if (__tdx_hypercall(&args))
		return -EIO;

	return ve_instr_len(ve);
}

static int handle_cpuid(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_CPUID),
		.r12 = regs->ax,
		.r13 = regs->cx,
	};

	/*
	 * Only allow VMM to control range reserved for hypervisor
	 * communication.
	 *
	 * Return all-zeros for any CPUID outside the range. This matches
	 * CPU behaviour for an unsupported leaf.
	 */
	if (regs->ax < 0x40000000 || regs->ax > 0x4FFFFFFF) {
		regs->ax = regs->bx = regs->cx = regs->dx = 0;
		return ve_instr_len(ve);
	}

	/*
	 * Emulate the CPUID instruction via a hypercall. More info about
	 * ABI can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "VP.VMCALL<Instruction.CPUID>".
	 */
	if (__tdx_hypercall_ret(&args))
		return -EIO;

	/*
	 * As per TDX GHCI CPUID ABI, r12-r15 registers contain contents of
	 * EAX, EBX, ECX, EDX registers after the CPUID instruction execution.
	 * So copy the register contents back to pt_regs.
	 */
	regs->ax = args.r12;
	regs->bx = args.r13;
	regs->cx = args.r14;
	regs->dx = args.r15;

	return ve_instr_len(ve);
}

static bool mmio_read(int size, unsigned long addr, unsigned long *val)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_EPT_VIOLATION),
		.r12 = size,
		.r13 = EPT_READ,
		.r14 = addr,
	};

	if (__tdx_hypercall_ret(&args))
		return false;
	*val = args.r11;
	return true;
}

static bool mmio_write(int size, unsigned long addr, unsigned long val)
{
	return !_tdx_hypercall(hcall_func(EXIT_REASON_EPT_VIOLATION), size,
			       EPT_WRITE, addr, val);
}

static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
{
	unsigned long *reg, val, vaddr;
	char buffer[MAX_INSN_SIZE];
	enum insn_mmio_type mmio;
	struct insn insn = {};
	int size, extend_size;
	u8 extend_val = 0;

	/* Only in-kernel MMIO is supported */
	if (WARN_ON_ONCE(user_mode(regs)))
		return -EFAULT;

	if (copy_from_kernel_nofault(buffer, (void *)regs->ip, MAX_INSN_SIZE))
		return -EFAULT;

	if (insn_decode(&insn, buffer, MAX_INSN_SIZE, INSN_MODE_64))
		return -EINVAL;

	mmio = insn_decode_mmio(&insn, &size);
	if (WARN_ON_ONCE(mmio == INSN_MMIO_DECODE_FAILED))
		return -EINVAL;

	if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
		reg = insn_get_modrm_reg_ptr(&insn, regs);
		if (!reg)
			return -EINVAL;
	}

	if (!fault_in_kernel_space(ve->gla)) {
		WARN_ONCE(1, "Access to userspace address is not supported");
		return -EINVAL;
	}

	/*
	 * Reject EPT violation #VEs that split pages.
	 *
	 * MMIO accesses are supposed to be naturally aligned and therefore
	 * never cross page boundaries. Seeing split page accesses indicates
	 * a bug or a load_unaligned_zeropad() that stepped into an MMIO page.
	 *
	 * load_unaligned_zeropad() will recover using exception fixups.
	 */
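	/*
	 * Illustrative example: an 8-byte read at a vaddr ending in 0xffc
	 * spans two pages (vaddr / PAGE_SIZE != (vaddr + 7) / PAGE_SIZE)
	 * and is rejected below.
	 */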
	vaddr = (unsigned long)insn_get_addr_ref(&insn, regs);
	if (vaddr / PAGE_SIZE != (vaddr + size - 1) / PAGE_SIZE)
		return -EFAULT;

	/* Handle writes first */
	switch (mmio) {
	case INSN_MMIO_WRITE:
		memcpy(&val, reg, size);
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case INSN_MMIO_WRITE_IMM:
		val = insn.immediate.value;
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case INSN_MMIO_READ:
	case INSN_MMIO_READ_ZERO_EXTEND:
	case INSN_MMIO_READ_SIGN_EXTEND:
		/* Reads are handled below */
		break;
	case INSN_MMIO_MOVS:
	case INSN_MMIO_DECODE_FAILED:
		/*
		 * MMIO was accessed with an instruction that could not be
		 * decoded or handled properly. It was likely not using io.h
		 * helpers or accessed MMIO accidentally.
		 */
		return -EINVAL;
	default:
		WARN_ONCE(1, "Unknown insn_decode_mmio() decode value?");
		return -EINVAL;
	}

	/* Handle reads */
	if (!mmio_read(size, ve->gpa, &val))
		return -EIO;

	switch (mmio) {
	case INSN_MMIO_READ:
		/* Zero-extend for 32-bit operation */
		extend_size = size == 4 ? sizeof(*reg) : 0;
		break;
	case INSN_MMIO_READ_ZERO_EXTEND:
		/* Zero extend based on operand size */
		extend_size = insn.opnd_bytes;
		break;
	case INSN_MMIO_READ_SIGN_EXTEND:
		/* Sign extend based on operand size */
		extend_size = insn.opnd_bytes;
		if (size == 1 && val & BIT(7))
			extend_val = 0xFF;
		else if (size > 1 && val & BIT(15))
			extend_val = 0xFF;
		break;
	default:
		/* All other cases have to be covered by the first switch() */
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

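	/*
	 * Illustrative example: a 1-byte sign-extending read that returned
	 * 0x80 sets extend_val to 0xFF; the destination is filled with 0xFF
	 * below before the low byte is overwritten with the value read.
	 */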
	if (extend_size)
		memset(reg, extend_val, extend_size);
	memcpy(reg, &val, size);
	return insn.length;
}

static bool handle_in(struct pt_regs *regs, int size, int port)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_IO_INSTRUCTION),
		.r12 = size,
		.r13 = PORT_READ,
		.r14 = port,
	};
	u64 mask = GENMASK(BITS_PER_BYTE * size, 0);
	bool success;

	/*
	 * Emulate the I/O read via hypercall. More info about ABI can be found
	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
	 * "TDG.VP.VMCALL<Instruction.IO>".
	 */
	success = !__tdx_hypercall_ret(&args);

	/* Update part of the register affected by the emulated instruction */
	regs->ax &= ~mask;
	if (success)
		regs->ax |= args.r11 & mask;

	return success;
}

static bool handle_out(struct pt_regs *regs, int size, int port)
{
	u64 mask = GENMASK(BITS_PER_BYTE * size, 0);

	/*
	 * Emulate the I/O write via hypercall. More info about ABI can be found
	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
	 * "TDG.VP.VMCALL<Instruction.IO>".
	 */
	return !_tdx_hypercall(hcall_func(EXIT_REASON_IO_INSTRUCTION), size,
			       PORT_WRITE, port, regs->ax & mask);
}

/*
 * Emulate I/O using hypercall.
 *
 * Assumes the IO instruction was using ax, which is enforced
 * by the standard io.h macros.
 *
 * Returns the instruction length on success, or -errno on failure.
 */
static int handle_io(struct pt_regs *regs, struct ve_info *ve)
{
	u32 exit_qual = ve->exit_qual;
	int size, port;
	bool in, ret;

	if (VE_IS_IO_STRING(exit_qual))
		return -EIO;

	in = VE_IS_IO_IN(exit_qual);
	size = VE_GET_IO_SIZE(exit_qual);
	port = VE_GET_PORT_NUM(exit_qual);

	if (in)
		ret = handle_in(regs, size, port);
	else
		ret = handle_out(regs, size, port);
	if (!ret)
		return -EIO;

	return ve_instr_len(ve);
}

/*
 * Early #VE exception handler. Only handles a subset of port I/O.
 * Intended only for earlyprintk. Returns false on failure.
 */
__init bool tdx_early_handle_ve(struct pt_regs *regs)
{
	struct ve_info ve;
	int insn_len;

	tdx_get_ve_info(&ve);

	if (ve.exit_reason != EXIT_REASON_IO_INSTRUCTION)
		return false;

	insn_len = handle_io(regs, &ve);
	if (insn_len < 0)
		return false;

	regs->ip += insn_len;
	return true;
}

void tdx_get_ve_info(struct ve_info *ve)
{
	struct tdx_module_output out;

	/*
	 * Called during #VE handling to retrieve the #VE info from the
	 * TDX module.
	 *
	 * This has to be called early in #VE handling. A "nested" #VE which
	 * occurs before this will raise a #DF and is not recoverable.
	 *
	 * The call retrieves the #VE info from the TDX module, which also
	 * clears the "#VE valid" flag. This must be done before anything else
	 * because any #VE that occurs while the valid flag is set will lead to
	 * #DF.
	 *
	 * Note, the TDX module treats virtual NMIs as inhibited if the #VE
	 * valid flag is set. It means that NMI=>#VE will not result in a #DF.
	 */
	tdx_module_call(TDX_GET_VEINFO, 0, 0, 0, 0, &out);

	/* Transfer the output parameters */
	ve->exit_reason = out.rcx;
	ve->exit_qual = out.rdx;
	ve->gla = out.r8;
	ve->gpa = out.r9;
	ve->instr_len = lower_32_bits(out.r10);
	ve->instr_info = upper_32_bits(out.r10);
}

/*
 * Handle the user initiated #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_user(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}

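/*
 * On TDX, cc_mkenc() clears the shared bit, so a GPA is private iff it is
 * unchanged by cc_mkenc(). E.g. (illustrative, 52-bit GPA width): GPA
 * 0x8001000 is private, while 0x8001000 | BIT_ULL(51) is shared.
 */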
static inline bool is_private_gpa(u64 gpa)
{
	return gpa == cc_mkenc(gpa);
}

/*
 * Handle the kernel #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_kernel(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
		return handle_halt(ve);
	case EXIT_REASON_MSR_READ:
		return read_msr(regs, ve);
	case EXIT_REASON_MSR_WRITE:
		return write_msr(regs, ve);
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	case EXIT_REASON_EPT_VIOLATION:
		if (is_private_gpa(ve->gpa))
			panic("Unexpected EPT-violation on private memory.");
		return handle_mmio(regs, ve);
	case EXIT_REASON_IO_INSTRUCTION:
		return handle_io(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}

bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve)
{
	int insn_len;

	if (user_mode(regs))
		insn_len = virt_exception_user(regs, ve);
	else
		insn_len = virt_exception_kernel(regs, ve);
	if (insn_len < 0)
		return false;

	/* After successful #VE handling, move the IP */
	regs->ip += insn_len;

	return true;
}

static bool tdx_tlb_flush_required(bool private)
{
	/*
	 * TDX guest is responsible for flushing TLB on private->shared
	 * transition. VMM is responsible for flushing on shared->private.
	 *
	 * The VMM _can't_ flush private addresses as it can't generate PAs
	 * with the guest's HKID. Shared memory isn't subject to integrity
	 * checking, i.e. the VMM doesn't need to flush for its own protection.
	 *
	 * There's no need to flush when converting from shared to private,
	 * as flushing is the VMM's responsibility in this case, e.g. it must
	 * flush to avoid integrity failures in the face of a buggy or
	 * malicious guest.
	 */
	return !private;
}

static bool tdx_cache_flush_required(void)
{
	/*
	 * AMD SME/SEV can avoid cache flushing if HW enforces cache coherence.
	 * TDX doesn't have such capability.
	 *
	 * Flush cache unconditionally.
	 */
	return true;
}

/*
 * Inform the VMM of the guest's intent for this physical page: shared with
 * the VMM or private to the guest. The VMM is expected to change its mapping
 * of the page in response.
 */
static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
{
	phys_addr_t start = __pa(vaddr);
	phys_addr_t end = __pa(vaddr + numpages * PAGE_SIZE);

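	/*
	 * For example (illustrative), with a 52-bit GPA width cc_mkdec(0)
	 * is BIT_ULL(51), so ORing it in below marks the range as shared
	 * in the GPAs passed to the VMM.
	 */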
	if (!enc) {
		/* Set the shared (decrypted) bits: */
		start |= cc_mkdec(0);
		end |= cc_mkdec(0);
	}

	/*
	 * Notify the VMM about page mapping conversion. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface (GHCI),
	 * section "TDG.VP.VMCALL<MapGPA>"
	 */
	if (_tdx_hypercall(TDVMCALL_MAP_GPA, start, end - start, 0, 0))
		return false;

	/* shared->private conversion requires memory to be accepted before use */
	if (enc)
		return tdx_accept_memory(start, end);

	return true;
}

static bool tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
					  bool enc)
{
	/*
	 * Only handle shared->private conversion here.
	 * See the comment in tdx_early_init().
	 */
	if (enc)
		return tdx_enc_status_changed(vaddr, numpages, enc);
	return true;
}

static bool tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
					 bool enc)
{
	/*
	 * Only handle private->shared conversion here.
	 * See the comment in tdx_early_init().
	 */
	if (!enc)
		return tdx_enc_status_changed(vaddr, numpages, enc);
	return true;
}

void __init tdx_early_init(void)
{
	u64 cc_mask;
	u32 eax, sig[3];

	cpuid_count(TDX_CPUID_LEAF_ID, 0, &eax, &sig[0], &sig[2], &sig[1]);

	if (memcmp(TDX_IDENT, sig, sizeof(sig)))
		return;

	setup_force_cpu_cap(X86_FEATURE_TDX_GUEST);

	cc_vendor = CC_VENDOR_INTEL;
	tdx_parse_tdinfo(&cc_mask);
	cc_set_mask(cc_mask);

	/* Kernel does not use NOTIFY_ENABLES and does not need random #VEs */
	tdx_module_call(TDX_WR, 0, TDCS_NOTIFY_ENABLES, 0, -1ULL, NULL);

	/*
	 * All bits above GPA width are reserved and kernel treats shared bit
	 * as flag, not as part of physical address.
	 *
	 * Adjust physical mask to only cover valid GPA bits.
	 */
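	/* E.g. (illustrative): cc_mask == BIT_ULL(51) keeps GPA bits 0-50. */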
	physical_mask &= cc_mask - 1;

	/*
	 * The kernel mapping should match the TDX metadata for the page.
	 * load_unaligned_zeropad() can touch memory *adjacent* to that which is
	 * owned by the caller and can catch even _momentary_ mismatches. Bad
	 * things happen on mismatch:
	 *
	 *   - Private mapping => Shared Page  == Guest shutdown
	 *   - Shared mapping  => Private Page == Recoverable #VE
	 *
	 * guest.enc_status_change_prepare() converts the page from
	 * shared=>private before the mapping becomes private.
	 *
	 * guest.enc_status_change_finish() converts the page from
	 * private=>shared after the mapping becomes shared.
	 *
	 * In both cases there is a temporary shared mapping to a private page,
	 * which can result in a #VE. But, there is never a private mapping to
	 * a shared page.
	 */
	x86_platform.guest.enc_status_change_prepare = tdx_enc_status_change_prepare;
	x86_platform.guest.enc_status_change_finish = tdx_enc_status_change_finish;

	x86_platform.guest.enc_cache_flush_required = tdx_cache_flush_required;
	x86_platform.guest.enc_tlb_flush_required = tdx_tlb_flush_required;

	/*
	 * TDX intercepts the RDMSR to read the X2APIC ID in the parallel
	 * bringup low level code. That raises #VE which cannot be handled
	 * there.
	 *
	 * Intel-TDX has a secure RDMSR hypercall, but that needs to be
	 * implemented separately in the low level startup ASM code.
	 * Until that is in place, disable parallel bringup for TDX.
	 */
	x86_cpuinit.parallel_bringup = false;

	pr_info("Guest detected\n");
}