// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021-2022 Intel Corporation */

#undef pr_fmt
#define pr_fmt(fmt)     "tdx: " fmt

#include <linux/cpufeature.h>
#include <linux/export.h>
#include <linux/io.h>
#include <asm/coco.h>
#include <asm/tdx.h>
#include <asm/vmx.h>
#include <asm/ia32.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/pgtable.h>
#include <asm/traps.h>

/* MMIO direction */
#define EPT_READ	0
#define EPT_WRITE	1

/* Port I/O direction */
#define PORT_READ	0
#define PORT_WRITE	1

/* See Exit Qualification for I/O Instructions in VMX documentation */
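/*
 * Exit qualification bit layout (Intel SDM): bits 2:0 hold the access size
 * minus one, bit 3 the direction (1 = IN), bit 4 the string-instruction
 * flag, and bits 31:16 the port number.
 */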
#define VE_IS_IO_IN(e)		((e) & BIT(3))
#define VE_GET_IO_SIZE(e)	(((e) & GENMASK(2, 0)) + 1)
#define VE_GET_PORT_NUM(e)	((e) >> 16)
#define VE_IS_IO_STRING(e)	((e) & BIT(4))

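/* TD ATTRIBUTES bits, reported in RDX by TDG.VP.INFO */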
#define ATTR_DEBUG		BIT(0)
#define ATTR_SEPT_VE_DISABLE	BIT(28)

/* TDX Module call error codes */
#define TDCALL_RETURN_CODE(a)	((a) >> 32)
#define TDCALL_INVALID_OPERAND	0xc0000100

#define TDREPORT_SUBTYPE_0	0

/* Called from __tdx_hypercall() for unrecoverable failure */
noinstr void __tdx_hypercall_failed(void)
{
	instrumentation_begin();
	panic("TDVMCALL failed. TDX module bug?");
}

#ifdef CONFIG_KVM_GUEST
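/*
 * KVM paravirt hypercalls from a TDX guest must go through TDVMCALL rather
 * than a raw VMCALL. The kvm_hypercall*() wrappers in asm/kvm_para.h are
 * expected to dispatch here when X86_FEATURE_TDX_GUEST is set, roughly:
 *
 *	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
 *		return tdx_kvm_hypercall(nr, p1, p2, p3, p4);
 */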
long tdx_kvm_hypercall(unsigned int nr, unsigned long p1, unsigned long p2,
		       unsigned long p3, unsigned long p4)
{
	struct tdx_hypercall_args args = {
		.r10 = nr,
		.r11 = p1,
		.r12 = p2,
		.r13 = p3,
		.r14 = p4,
	};

	return __tdx_hypercall(&args);
}
EXPORT_SYMBOL_GPL(tdx_kvm_hypercall);
#endif

/*
 * Used for TDX guests to make calls directly to the TDX module.  This
 * should only be used for calls that have no legitimate reason to fail
 * or where the kernel cannot survive the call failing.
 */
static inline void tdcall(u64 fn, struct tdx_module_args *args)
{
	if (__tdcall_ret(fn, args))
		panic("TDCALL %lld failed (Buggy TDX module!)\n", fn);
}

/* Read TD-scoped metadata */
static inline u64 tdg_vm_rd(u64 field, u64 *value)
{
	struct tdx_module_args args = {
		.rdx = field,
	};
	u64 ret;

	ret = __tdcall_ret(TDG_VM_RD, &args);
	*value = args.r8;

	return ret;
}

/* Write TD-scoped metadata */
static inline u64 tdg_vm_wr(u64 field, u64 value, u64 mask)
{
	struct tdx_module_args args = {
		.rdx = field,
		.r8 = value,
		.r9 = mask,
	};

	return __tdcall(TDG_VM_WR, &args);
}

/**
 * tdx_mcall_get_report0() - Wrapper to get TDREPORT0 (a.k.a. TDREPORT
 *                           subtype 0) using TDG.MR.REPORT TDCALL.
 * @reportdata: Address of the input buffer which contains user-defined
 *              REPORTDATA to be included into TDREPORT.
 * @tdreport: Address of the output buffer to store TDREPORT.
 *
 * Refer to section titled "TDG.MR.REPORT leaf" in the TDX Module
 * v1.0 specification for more information on TDG.MR.REPORT TDCALL.
 * It is used in the TDX guest driver module to get the TDREPORT0.
 *
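 * Note: per the TDX Module spec, REPORTDATA is 64 bytes and TDREPORT0 is
 * 1024 bytes; both buffers are passed to the TDX module by physical
 * address.
 *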
 * Return 0 on success, -EINVAL for invalid operands, or -EIO on
 * other TDCALL failures.
 */
int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport)
{
	struct tdx_module_args args = {
		.rcx = virt_to_phys(tdreport),
		.rdx = virt_to_phys(reportdata),
		.r8 = TDREPORT_SUBTYPE_0,
	};
	u64 ret;

	ret = __tdcall(TDG_MR_REPORT, &args);
	if (ret) {
		if (TDCALL_RETURN_CODE(ret) == TDCALL_INVALID_OPERAND)
			return -EINVAL;
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tdx_mcall_get_report0);

static void __noreturn tdx_panic(const char *msg)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = TDVMCALL_REPORT_FATAL_ERROR,
		.r12 = 0, /* Error code: 0 is Panic */
	};
	union {
		/* Define register order according to the GHCI */
		struct { u64 r14, r15, rbx, rdi, rsi, r8, r9, rdx; };

		char str[64];
	} message;

	/* VMM assumes '\0' in byte 65, if the message took all 64 bytes */
	strncpy(message.str, msg, 64);

	args.r8  = message.r8;
	args.r9  = message.r9;
	args.r14 = message.r14;
	args.r15 = message.r15;
	args.rdi = message.rdi;
	args.rsi = message.rsi;
	args.rbx = message.rbx;
	args.rdx = message.rdx;

	/*
	 * This hypercall should never return and it is not safe
	 * to keep the guest running. Call it forever if it
	 * happens to return.
	 */
	while (1)
		__tdx_hypercall(&args);
}

/*
 * The kernel cannot handle #VEs when accessing normal kernel memory. Ensure
 * that no #VE will be delivered for accesses to TD-private memory.
 *
 * TDX 1.0 does not allow the guest to disable SEPT #VE on its own. The VMM
 * controls if the guest will receive such #VE with TD attribute
 * ATTR_SEPT_VE_DISABLE.
 *
 * Newer TDX modules allow the guest to control if it wants to receive SEPT
 * violation #VEs.
 *
 * Check if the feature is available and disable SEPT #VE if possible.
 *
 * If the TD is allowed to disable/enable SEPT #VEs, the ATTR_SEPT_VE_DISABLE
 * attribute is no longer reliable. It reflects the initial state of the
 * control for the TD, but it will not be updated if someone (e.g. bootloader)
 * changes it before the kernel starts. Kernel must check TDCS_TD_CTLS bit to
 * determine if SEPT #VEs are enabled or disabled.
 */
static void disable_sept_ve(u64 td_attr)
{
	const char *msg = "TD misconfiguration: SEPT #VE has to be disabled";
	bool debug = td_attr & ATTR_DEBUG;
	u64 config, controls;

	/* Is this TD allowed to disable SEPT #VE */
	tdg_vm_rd(TDCS_CONFIG_FLAGS, &config);
	if (!(config & TDCS_CONFIG_FLEXIBLE_PENDING_VE)) {
		/* No SEPT #VE controls for the guest: check the attribute */
		if (td_attr & ATTR_SEPT_VE_DISABLE)
			return;

		/* Relax SEPT_VE_DISABLE check for debug TD for backtraces */
		if (debug)
			pr_warn("%s\n", msg);
		else
			tdx_panic(msg);
		return;
	}

	/* Check if SEPT #VE has been disabled before us */
	tdg_vm_rd(TDCS_TD_CTLS, &controls);
	if (controls & TD_CTLS_PENDING_VE_DISABLE)
		return;

	/* Keep #VEs enabled for splats in debugging environments */
	if (debug)
		return;

	/* Disable SEPT #VEs */
	tdg_vm_wr(TDCS_TD_CTLS, TD_CTLS_PENDING_VE_DISABLE,
		  TD_CTLS_PENDING_VE_DISABLE);
}

static void tdx_setup(u64 *cc_mask)
{
	struct tdx_module_args args = {};
	unsigned int gpa_width;
	u64 td_attr;

	/*
	 * TDINFO TDX module call is used to get the TD execution environment
	 * information like GPA width, number of available vcpus, debug mode
	 * information, etc. More details about the ABI can be found in TDX
	 * Guest-Host-Communication Interface (GHCI), section 2.4.2 TDCALL
	 * [TDG.VP.INFO].
	 */
	tdcall(TDG_VP_INFO, &args);

	/*
	 * The highest bit of a guest physical address is the "sharing" bit.
	 * Set it for shared pages and clear it for private pages.
	 *
	 * The GPA width that comes out of this call is critical. TDX guests
	 * can not meaningfully run without it.
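	 *
	 * E.g. a 52-bit GPA width yields cc_mask == BIT_ULL(51): GPAs with
	 * bit 51 set are shared and GPAs with it clear are private.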
	 */
	gpa_width = args.rcx & GENMASK(5, 0);
	*cc_mask = BIT_ULL(gpa_width - 1);

	td_attr = args.rdx;

	/* Kernel does not use NOTIFY_ENABLES and does not need random #VEs */
	tdg_vm_wr(TDCS_NOTIFY_ENABLES, 0, -1ULL);

	disable_sept_ve(td_attr);
}

/*
 * The TDX module spec states that #VE may be injected for a limited set of
 * reasons:
 *
 *  - Emulation of the architectural #VE injection on EPT violation;
 *
 *  - As a result of guest TD execution of a disallowed instruction,
 *    a disallowed MSR access, or CPUID virtualization;
 *
 *  - A notification to the guest TD about anomalous behavior;
 *
 * The last one is opt-in and is not used by the kernel.
 *
 * The Intel Software Developer's Manual describes cases when instruction
 * length field can be used in section "Information for VM Exits Due to
 * Instruction Execution".
 *
 * For TDX, it ultimately means GET_VEINFO provides reliable instruction length
 * information if #VE occurred due to instruction execution, but not for EPT
 * violations.
 */
static int ve_instr_len(struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
	case EXIT_REASON_MSR_READ:
	case EXIT_REASON_MSR_WRITE:
	case EXIT_REASON_CPUID:
	case EXIT_REASON_IO_INSTRUCTION:
		/* It is safe to use ve->instr_len for instruction-induced #VEs */
		return ve->instr_len;
	case EXIT_REASON_EPT_VIOLATION:
		/*
		 * For EPT violations, ve->instr_len is not defined. For those,
		 * the kernel must decode instructions manually and should not
		 * be using this function.
		 */
		WARN_ONCE(1, "ve->instr_len is not defined for EPT violations");
		return 0;
	default:
		WARN_ONCE(1, "Unexpected #VE-type: %lld\n", ve->exit_reason);
		return ve->instr_len;
	}
}

static u64 __cpuidle __halt(const bool irq_disabled)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_HLT),
		.r12 = irq_disabled,
	};

	/*
	 * Emulate HLT operation via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section 3.8 TDG.VP.VMCALL<Instruction.HLT>.
	 *
	 * The VMM uses the "IRQ disabled" param to understand IRQ
	 * enabled status (RFLAGS.IF) of the TD guest and to determine
	 * whether or not it should schedule the halted vCPU if an
	 * IRQ becomes pending. E.g. if IRQs are disabled, the VMM
	 * can keep the vCPU in virtual HLT, even if an IRQ is
	 * pending, without hanging/breaking the guest.
	 */
	return __tdx_hypercall(&args);
}

static int handle_halt(struct ve_info *ve)
{
	const bool irq_disabled = irqs_disabled();

	if (__halt(irq_disabled))
		return -EIO;

	return ve_instr_len(ve);
}

void __cpuidle tdx_safe_halt(void)
{
	const bool irq_disabled = false;

	/*
	 * Use WARN_ONCE() to report the failure.
	 */
	if (__halt(irq_disabled))
		WARN_ONCE(1, "HLT instruction emulation failed\n");
}

static int read_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_READ),
		.r12 = regs->cx,
	};

	/*
	 * Emulate the MSR read via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "TDG.VP.VMCALL<Instruction.RDMSR>".
	 */
	if (__tdx_hypercall_ret(&args))
		return -EIO;

	regs->ax = lower_32_bits(args.r11);
	regs->dx = upper_32_bits(args.r11);
	return ve_instr_len(ve);
}

static int write_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_WRITE),
		.r12 = regs->cx,
		.r13 = (u64)regs->dx << 32 | regs->ax,
	};

	/*
	 * Emulate the MSR write via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI) section titled "TDG.VP.VMCALL<Instruction.WRMSR>".
	 */
	if (__tdx_hypercall(&args))
		return -EIO;

	return ve_instr_len(ve);
}

static int handle_cpuid(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_CPUID),
		.r12 = regs->ax,
		.r13 = regs->cx,
	};

	/*
	 * Only allow VMM to control range reserved for hypervisor
	 * communication.
	 *
	 * Return all-zeros for any CPUID outside the range. It matches CPU
	 * behaviour for non-supported leaf.
	 */
	if (regs->ax < 0x40000000 || regs->ax > 0x4FFFFFFF) {
		regs->ax = regs->bx = regs->cx = regs->dx = 0;
		return ve_instr_len(ve);
	}

	/*
	 * Emulate the CPUID instruction via a hypercall. More info about
	 * ABI can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "VP.VMCALL<Instruction.CPUID>".
	 */
	if (__tdx_hypercall_ret(&args))
		return -EIO;

	/*
	 * As per TDX GHCI CPUID ABI, r12-r15 registers contain contents of
	 * EAX, EBX, ECX, EDX registers after the CPUID instruction execution.
	 * So copy the register contents back to pt_regs.
	 */
	regs->ax = args.r12;
	regs->bx = args.r13;
	regs->cx = args.r14;
	regs->dx = args.r15;

	return ve_instr_len(ve);
}

static bool mmio_read(int size, unsigned long addr, unsigned long *val)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_EPT_VIOLATION),
		.r12 = size,
		.r13 = EPT_READ,
		.r14 = addr,
	};

	if (__tdx_hypercall_ret(&args))
		return false;
	*val = args.r11;
	return true;
}

static bool mmio_write(int size, unsigned long addr, unsigned long val)
{
	return !_tdx_hypercall(hcall_func(EXIT_REASON_EPT_VIOLATION), size,
			       EPT_WRITE, addr, val);
}

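/*
 * Emulate a single MMIO instruction that triggered an EPT-violation #VE:
 * decode the faulting instruction from kernel text, forward the access to
 * the VMM via a TDVMCALL (see the GHCI section on MMIO), and return the
 * instruction length so the #VE handler can skip the instruction.
 */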
static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
{
	unsigned long *reg, val, vaddr;
	char buffer[MAX_INSN_SIZE];
	enum insn_mmio_type mmio;
	struct insn insn = {};
	int size, extend_size;
	u8 extend_val = 0;

	/* Only in-kernel MMIO is supported */
	if (WARN_ON_ONCE(user_mode(regs)))
		return -EFAULT;

	if (copy_from_kernel_nofault(buffer, (void *)regs->ip, MAX_INSN_SIZE))
		return -EFAULT;

	if (insn_decode(&insn, buffer, MAX_INSN_SIZE, INSN_MODE_64))
		return -EINVAL;

	mmio = insn_decode_mmio(&insn, &size);
	if (WARN_ON_ONCE(mmio == INSN_MMIO_DECODE_FAILED))
		return -EINVAL;

	if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
		reg = insn_get_modrm_reg_ptr(&insn, regs);
		if (!reg)
			return -EINVAL;
	}

	if (!fault_in_kernel_space(ve->gla)) {
		WARN_ONCE(1, "Access to userspace address is not supported");
		return -EINVAL;
	}

	/*
	 * Reject EPT violation #VEs that split pages.
	 *
	 * MMIO accesses are supposed to be naturally aligned and therefore
	 * never cross page boundaries. Seeing split page accesses indicates
	 * a bug or a load_unaligned_zeropad() that stepped into an MMIO page.
	 *
	 * load_unaligned_zeropad() will recover using exception fixups.
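	 *
	 * E.g. a 4-byte access that starts two bytes before a page boundary
	 * spans two pages and is rejected here.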
	 */
	vaddr = (unsigned long)insn_get_addr_ref(&insn, regs);
	if (vaddr / PAGE_SIZE != (vaddr + size - 1) / PAGE_SIZE)
		return -EFAULT;

	/* Handle writes first */
	switch (mmio) {
	case INSN_MMIO_WRITE:
		memcpy(&val, reg, size);
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case INSN_MMIO_WRITE_IMM:
		val = insn.immediate.value;
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case INSN_MMIO_READ:
	case INSN_MMIO_READ_ZERO_EXTEND:
	case INSN_MMIO_READ_SIGN_EXTEND:
		/* Reads are handled below */
		break;
	case INSN_MMIO_MOVS:
	case INSN_MMIO_DECODE_FAILED:
		/*
		 * MMIO was accessed with an instruction that could not be
		 * decoded or handled properly. It was likely not using io.h
		 * helpers or accessed MMIO accidentally.
		 */
		return -EINVAL;
	default:
		WARN_ONCE(1, "Unknown insn_decode_mmio() decode value?");
		return -EINVAL;
	}

	/* Handle reads */
	if (!mmio_read(size, ve->gpa, &val))
		return -EIO;

	switch (mmio) {
	case INSN_MMIO_READ:
		/* Zero-extend for 32-bit operation */
		extend_size = size == 4 ? sizeof(*reg) : 0;
		break;
	case INSN_MMIO_READ_ZERO_EXTEND:
		/* Zero extend based on operand size */
		extend_size = insn.opnd_bytes;
		break;
	case INSN_MMIO_READ_SIGN_EXTEND:
		/* Sign extend based on operand size */
		extend_size = insn.opnd_bytes;
		if (size == 1 && val & BIT(7))
			extend_val = 0xFF;
		else if (size > 1 && val & BIT(15))
			extend_val = 0xFF;
		break;
	default:
		/* All other cases have to be covered by the first switch() */
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	if (extend_size)
		memset(reg, extend_val, extend_size);
	memcpy(reg, &val, size);
	return insn.length;
}

static bool handle_in(struct pt_regs *regs, int size, int port)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_IO_INSTRUCTION),
		.r12 = size,
		.r13 = PORT_READ,
		.r14 = port,
	};
	u64 mask = GENMASK(BITS_PER_BYTE * size, 0);
	bool success;

	/*
	 * Emulate the I/O read via hypercall. More info about ABI can be found
	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
	 * "TDG.VP.VMCALL<Instruction.IO>".
	 */
	success = !__tdx_hypercall_ret(&args);

	/* Update part of the register affected by the emulated instruction */
	regs->ax &= ~mask;
	if (success)
		regs->ax |= args.r11 & mask;

	return success;
}

static bool handle_out(struct pt_regs *regs, int size, int port)
{
	u64 mask = GENMASK(BITS_PER_BYTE * size, 0);

	/*
	 * Emulate the I/O write via hypercall. More info about ABI can be found
	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
	 * "TDG.VP.VMCALL<Instruction.IO>".
	 */
	return !_tdx_hypercall(hcall_func(EXIT_REASON_IO_INSTRUCTION), size,
			       PORT_WRITE, port, regs->ax & mask);
}

/*
 * Emulate I/O using hypercall.
 *
 * Assumes the IO instruction was using ax, which is enforced
 * by the standard io.h macros.
 *
 * Return the length of the I/O instruction on success or -errno on failure.
 */
static int handle_io(struct pt_regs *regs, struct ve_info *ve)
{
	u32 exit_qual = ve->exit_qual;
	int size, port;
	bool in, ret;

	if (VE_IS_IO_STRING(exit_qual))
		return -EIO;

	in   = VE_IS_IO_IN(exit_qual);
	size = VE_GET_IO_SIZE(exit_qual);
	port = VE_GET_PORT_NUM(exit_qual);

	if (in)
		ret = handle_in(regs, size, port);
	else
		ret = handle_out(regs, size, port);
	if (!ret)
		return -EIO;

	return ve_instr_len(ve);
}

/*
 * Early #VE exception handler. Only handles a subset of port I/O.
 * Intended only for earlyprintk. Returns false on failure.
 */
__init bool tdx_early_handle_ve(struct pt_regs *regs)
{
	struct ve_info ve;
	int insn_len;

	tdx_get_ve_info(&ve);

	if (ve.exit_reason != EXIT_REASON_IO_INSTRUCTION)
		return false;

	insn_len = handle_io(regs, &ve);
	if (insn_len < 0)
		return false;

	regs->ip += insn_len;
	return true;
}

void tdx_get_ve_info(struct ve_info *ve)
{
	struct tdx_module_args args = {};

	/*
	 * Called during #VE handling to retrieve the #VE info from the
	 * TDX module.
	 *
	 * This has to be called early in #VE handling.  A "nested" #VE which
	 * occurs before this will raise a #DF and is not recoverable.
	 *
	 * The call retrieves the #VE info from the TDX module, which also
	 * clears the "#VE valid" flag. This must be done before anything else
	 * because any #VE that occurs while the valid flag is set will lead to
	 * #DF.
	 *
	 * Note, the TDX module treats virtual NMIs as inhibited if the #VE
	 * valid flag is set. It means that NMI=>#VE will not result in a #DF.
	 */
	tdcall(TDG_VP_VEINFO_GET, &args);

	/* Transfer the output parameters */
	ve->exit_reason = args.rcx;
	ve->exit_qual   = args.rdx;
	ve->gla         = args.r8;
	ve->gpa         = args.r9;
	ve->instr_len   = lower_32_bits(args.r10);
	ve->instr_info  = upper_32_bits(args.r10);
}

/*
 * Handle the user initiated #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_user(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}

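/*
 * For TDX, cc_mkenc() clears the shared bit, so a GPA that is left
 * unchanged by it already had the shared bit clear, i.e. it is private.
 */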
static inline bool is_private_gpa(u64 gpa)
{
	return gpa == cc_mkenc(gpa);
}

/*
 * Handle the kernel #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_kernel(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
		return handle_halt(ve);
	case EXIT_REASON_MSR_READ:
		return read_msr(regs, ve);
	case EXIT_REASON_MSR_WRITE:
		return write_msr(regs, ve);
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	case EXIT_REASON_EPT_VIOLATION:
		if (is_private_gpa(ve->gpa))
			panic("Unexpected EPT-violation on private memory.");
		return handle_mmio(regs, ve);
	case EXIT_REASON_IO_INSTRUCTION:
		return handle_io(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}

bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve)
{
	int insn_len;

	if (user_mode(regs))
		insn_len = virt_exception_user(regs, ve);
	else
		insn_len = virt_exception_kernel(regs, ve);
	if (insn_len < 0)
		return false;

	/* After successful #VE handling, move the IP */
	regs->ip += insn_len;

	return true;
}

static bool tdx_tlb_flush_required(bool private)
{
	/*
	 * TDX guest is responsible for flushing TLB on private->shared
	 * transition. VMM is responsible for flushing on shared->private.
	 *
	 * The VMM _can't_ flush private addresses as it can't generate PAs
	 * with the guest's HKID.  Shared memory isn't subject to integrity
	 * checking, i.e. the VMM doesn't need to flush for its own protection.
	 *
	 * There's no need to flush when converting from shared to private,
	 * as flushing is the VMM's responsibility in this case, e.g. it must
	 * flush to avoid integrity failures in the face of a buggy or
	 * malicious guest.
	 */
	return !private;
}

static bool tdx_cache_flush_required(void)
{
	/*
	 * AMD SME/SEV can avoid cache flushing if HW enforces cache coherence.
	 * TDX doesn't have such capability.
	 *
	 * Flush cache unconditionally.
	 */
	return true;
}

/*
 * Inform the VMM of the guest's intent for this physical page: shared with
 * the VMM or private to the guest.  The VMM is expected to change its mapping
 * of the page in response.
 */
static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
{
	phys_addr_t start = __pa(vaddr);
	phys_addr_t end   = __pa(vaddr + numpages * PAGE_SIZE);

	if (!enc) {
		/* Set the shared (decrypted) bits: */
		start |= cc_mkdec(0);
		end   |= cc_mkdec(0);
	}

	/*
	 * Notify the VMM about page mapping conversion. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface (GHCI),
	 * section "TDG.VP.VMCALL<MapGPA>"
	 */
	if (_tdx_hypercall(TDVMCALL_MAP_GPA, start, end - start, 0, 0))
		return false;

	/* shared->private conversion requires memory to be accepted before use */
	if (enc)
		return tdx_accept_memory(start, end);

	return true;
}

static bool tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
					  bool enc)
{
	/*
	 * Only handle shared->private conversion here.
	 * See the comment in tdx_early_init().
	 */
	if (enc)
		return tdx_enc_status_changed(vaddr, numpages, enc);
	return true;
}

static bool tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
					 bool enc)
{
	/*
	 * Only handle private->shared conversion here.
	 * See the comment in tdx_early_init().
	 */
	if (!enc)
		return tdx_enc_status_changed(vaddr, numpages, enc);
	return true;
}

void __init tdx_early_init(void)
{
	u64 cc_mask;
	u32 eax, sig[3];

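	/*
	 * The TDX_IDENT signature is reported in EBX, EDX and ECX, hence the
	 * swapped sig[] indices below.
	 */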
	cpuid_count(TDX_CPUID_LEAF_ID, 0, &eax, &sig[0], &sig[2],  &sig[1]);

	if (memcmp(TDX_IDENT, sig, sizeof(sig)))
		return;

	setup_force_cpu_cap(X86_FEATURE_TDX_GUEST);

	cc_vendor = CC_VENDOR_INTEL;

	/* Configure the TD */
	tdx_setup(&cc_mask);

	cc_set_mask(cc_mask);

	/*
	 * All bits above GPA width are reserved and kernel treats shared bit
	 * as flag, not as part of physical address.
	 *
	 * Adjust physical mask to only cover valid GPA bits.
	 */
	physical_mask &= cc_mask - 1;

	/*
	 * The kernel mapping should match the TDX metadata for the page.
	 * load_unaligned_zeropad() can touch memory *adjacent* to that which is
	 * owned by the caller and can catch even _momentary_ mismatches.  Bad
	 * things happen on mismatch:
	 *
	 *   - Private mapping => Shared Page  == Guest shutdown
	 *   - Shared mapping  => Private Page == Recoverable #VE
	 *
	 * guest.enc_status_change_prepare() converts the page from
	 * shared=>private before the mapping becomes private.
	 *
	 * guest.enc_status_change_finish() converts the page from
	 * private=>shared after the mapping becomes shared.
	 *
	 * In both cases there is a temporary shared mapping to a private page,
	 * which can result in a #VE.  But, there is never a private mapping to
	 * a shared page.
	 */
	x86_platform.guest.enc_status_change_prepare = tdx_enc_status_change_prepare;
	x86_platform.guest.enc_status_change_finish  = tdx_enc_status_change_finish;

	x86_platform.guest.enc_cache_flush_required  = tdx_cache_flush_required;
	x86_platform.guest.enc_tlb_flush_required    = tdx_tlb_flush_required;

	/*
	 * TDX intercepts the RDMSR to read the X2APIC ID in the parallel
	 * bringup low level code. That raises #VE which cannot be handled
	 * there.
	 *
	 * Intel-TDX has a secure RDMSR hypercall, but that needs to be
	 * implemented separately in the low level startup ASM code.
	 * Until that is in place, disable parallel bringup for TDX.
	 */
	x86_cpuinit.parallel_bringup = false;

	pr_info("Guest detected\n");
}
904