// SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V Isolation VM interface with paravisor and hypervisor
 *
 * Author:
 *  Tianyu Lan <Tianyu.Lan@microsoft.com>
 */

#include <linux/bitfield.h>
#include <linux/hyperv.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/svm.h>
#include <asm/sev.h>
#include <asm/io.h>
#include <asm/coco.h>
#include <asm/mem_encrypt.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
#include <asm/mtrr.h>
#include <asm/io_apic.h>
#include <asm/realmode.h>
#include <asm/e820/api.h>
#include <asm/desc.h>

#ifdef CONFIG_AMD_MEM_ENCRYPT

#define GHCB_USAGE_HYPERV_CALL	1

static u8 ap_start_input_arg[PAGE_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
static u8 ap_start_stack[PAGE_SIZE] __aligned(PAGE_SIZE);

union hv_ghcb {
	struct ghcb ghcb;
	struct {
		u64 hypercalldata[509];
		u64 outputgpa;
		union {
			union {
				struct {
					u32 callcode        : 16;
					u32 isfast          : 1;
					u32 reserved1       : 14;
					u32 isnested        : 1;
					u32 countofelements : 12;
					u32 reserved2       : 4;
					u32 repstartindex   : 12;
					u32 reserved3       : 4;
				};
				u64 asuint64;
			} hypercallinput;
			union {
				struct {
					u16 callstatus;
					u16 reserved1;
					u32 elementsprocessed : 12;
					u32 reserved2         : 20;
				};
				u64 asuint64;
			} hypercalloutput;
		};
		u64 reserved2;
	} hypercall;
} __packed __aligned(HV_HYP_PAGE_SIZE);
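
/*
 * The hypercall view of the GHCB page fills exactly one 4 KiB page:
 * 509 * 8 bytes of hypercalldata + 8 (outputgpa) + 8 (input/output
 * union) + 8 (reserved2) = 4096 = HV_HYP_PAGE_SIZE. hv_ghcb_msr_read()
 * enforces this with a BUILD_BUG_ON().
 */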

static DEFINE_PER_CPU(struct sev_es_save_area *, hv_sev_vmsa);

static u16 hv_ghcb_version __ro_after_init;

u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;
	u64 status;

	if (!hv_ghcb_pg)
		return -EFAULT;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return -EFAULT;
	}

	hv_ghcb->ghcb.protocol_version = GHCB_PROTOCOL_MAX;
	hv_ghcb->ghcb.ghcb_usage = GHCB_USAGE_HYPERV_CALL;

	hv_ghcb->hypercall.outputgpa = (u64)output;
	hv_ghcb->hypercall.hypercallinput.asuint64 = 0;
	hv_ghcb->hypercall.hypercallinput.callcode = control;

	if (input_size)
		memcpy(hv_ghcb->hypercall.hypercalldata, input, input_size);

	VMGEXIT();

	hv_ghcb->ghcb.ghcb_usage = 0xffffffff;
	memset(hv_ghcb->ghcb.save.valid_bitmap, 0,
	       sizeof(hv_ghcb->ghcb.save.valid_bitmap));

	status = hv_ghcb->hypercall.hypercalloutput.callstatus;

	local_irq_restore(flags);

	return status;
}

static inline u64 rd_ghcb_msr(void)
{
	return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
}

static inline void wr_ghcb_msr(u64 val)
{
	native_wrmsrl(MSR_AMD64_SEV_ES_GHCB, val);
}
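
/*
 * __rdmsr() and native_wrmsrl() are the raw accessors that bypass the
 * paravirt and tracing layers; the GHCB MSR is accessed directly.
 */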

static enum es_result hv_ghcb_hv_call(struct ghcb *ghcb, u64 exit_code,
				      u64 exit_info_1, u64 exit_info_2)
{
	/* Fill in protocol and format specifiers */
	ghcb->protocol_version = hv_ghcb_version;
	ghcb->ghcb_usage       = GHCB_DEFAULT_USAGE;

	ghcb_set_sw_exit_code(ghcb, exit_code);
	ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
	ghcb_set_sw_exit_info_2(ghcb, exit_info_2);

	VMGEXIT();

	/* A nonzero value in the low 32 bits of sw_exit_info_1 is an error. */
	if (ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0))
		return ES_VMM_ERROR;
	else
		return ES_OK;
}

void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason)
{
	u64 val = GHCB_MSR_TERM_REQ;

	/* Tell the hypervisor what went wrong. */
	val |= GHCB_SEV_TERM_REASON(set, reason);

	/* Request Guest Termination from Hypervisor */
	wr_ghcb_msr(val);
	VMGEXIT();

	while (true)
		asm volatile("hlt\n" : : : "memory");
}

bool hv_ghcb_negotiate_protocol(void)
{
	u64 ghcb_gpa;
	u64 val;

	/* Save ghcb page gpa. */
	ghcb_gpa = rd_ghcb_msr();

	/* Do the GHCB protocol version negotiation */
	wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
	VMGEXIT();
	val = rd_ghcb_msr();

	if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
		return false;

	if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTOCOL_MIN ||
	    GHCB_MSR_PROTO_MIN(val) > GHCB_PROTOCOL_MAX)
		return false;

	hv_ghcb_version = min_t(size_t, GHCB_MSR_PROTO_MAX(val),
				GHCB_PROTOCOL_MAX);

	/* Write ghcb page back after negotiating protocol. */
	wr_ghcb_msr(ghcb_gpa);
	VMGEXIT();

	return true;
}

void hv_ghcb_msr_write(u64 msr, u64 value)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;

	if (!hv_ghcb_pg)
		return;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return;
	}

	ghcb_set_rcx(&hv_ghcb->ghcb, msr);
	ghcb_set_rax(&hv_ghcb->ghcb, lower_32_bits(value));
	ghcb_set_rdx(&hv_ghcb->ghcb, upper_32_bits(value));

	if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 1, 0))
		pr_warn("Failed to write MSR 0x%llx via GHCB.\n", msr);

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(hv_ghcb_msr_write);

void hv_ghcb_msr_read(u64 msr, u64 *value)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;

	/* Check size of union hv_ghcb here. */
	BUILD_BUG_ON(sizeof(union hv_ghcb) != HV_HYP_PAGE_SIZE);

	if (!hv_ghcb_pg)
		return;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return;
	}

	ghcb_set_rcx(&hv_ghcb->ghcb, msr);
	if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 0, 0))
		pr_warn("Failed to read MSR 0x%llx via GHCB.\n", msr);
	else
		*value = (u64)lower_32_bits(hv_ghcb->ghcb.save.rax)
			| ((u64)lower_32_bits(hv_ghcb->ghcb.save.rdx) << 32);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(hv_ghcb_msr_read);
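
/*
 * Usage sketch (hypothetical caller): reading and writing a synthetic
 * MSR through the GHCB:
 *
 *	u64 guest_id;
 *
 *	hv_ghcb_msr_read(HV_X64_MSR_GUEST_OS_ID, &guest_id);
 *	hv_ghcb_msr_write(HV_X64_MSR_GUEST_OS_ID, guest_id);
 */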

#endif /* CONFIG_AMD_MEM_ENCRYPT */

#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
/*
 * hv_mark_gpa_visibility - Set pages visible to the host via hvcall.
 *
 * In an Isolation VM, all guest memory is encrypted from the host, so the
 * guest must make memory visible to the host via hvcall before sharing it
 * with the host.
 */
static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
				  enum hv_mem_host_visibility visibility)
{
	struct hv_gpa_range_for_visibility **input_pcpu, *input;
	u16 pages_processed;
	u64 hv_status;
	unsigned long flags;

	/* no-op if partition isolation is not enabled */
	if (!hv_is_isolation_supported())
		return 0;

	if (count > HV_MAX_MODIFY_GPA_REP_COUNT) {
		pr_err("Hyper-V: GPA count:%d exceeds supported:%lu\n", count,
			HV_MAX_MODIFY_GPA_REP_COUNT);
		return -EINVAL;
	}

	local_irq_save(flags);
	input_pcpu = (struct hv_gpa_range_for_visibility **)
			this_cpu_ptr(hyperv_pcpu_input_arg);
	input = *input_pcpu;
	if (unlikely(!input)) {
		local_irq_restore(flags);
		return -EINVAL;
	}

	input->partition_id = HV_PARTITION_ID_SELF;
	input->host_visibility = visibility;
	input->reserved0 = 0;
	input->reserved1 = 0;
	memcpy((void *)input->gpa_page_list, pfn, count * sizeof(*pfn));
	hv_status = hv_do_rep_hypercall(
			HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY, count,
			0, input, &pages_processed);
	local_irq_restore(flags);

	if (hv_result_success(hv_status))
		return 0;
	else
		return -EFAULT;
}
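
/*
 * Usage sketch (hypothetical page-sized buffer "buf"): sharing a single
 * page with the host and revoking it again:
 *
 *	u64 pfn = virt_to_hvpfn(buf);
 *
 *	hv_mark_gpa_visibility(1, &pfn, VMBUS_PAGE_VISIBLE_READ_WRITE);
 *	...
 *	hv_mark_gpa_visibility(1, &pfn, VMBUS_PAGE_NOT_VISIBLE);
 */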

/*
 * hv_vtom_set_host_visibility - Set the specified memory visible to the host.
 *
 * In an Isolation VM, all guest memory is encrypted from the host, so the
 * guest must make memory visible to the host via hvcall before sharing it
 * with the host. This function is a wrapper around hv_mark_gpa_visibility()
 * that takes a memory base and size.
 */
static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
{
	enum hv_mem_host_visibility visibility = enc ?
			VMBUS_PAGE_NOT_VISIBLE : VMBUS_PAGE_VISIBLE_READ_WRITE;
	u64 *pfn_array;
	int ret = 0;
	bool result = true;
	int i, pfn;

	pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
	if (!pfn_array)
		return false;

	for (i = 0, pfn = 0; i < pagecount; i++) {
		pfn_array[pfn] = virt_to_hvpfn((void *)kbuffer + i * HV_HYP_PAGE_SIZE);
		pfn++;

		/* Flush a full batch, or the final partial batch. */
		if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
			ret = hv_mark_gpa_visibility(pfn, pfn_array,
						     visibility);
			if (ret) {
				result = false;
				goto err_free_pfn_array;
			}
			pfn = 0;
		}
	}

 err_free_pfn_array:
	kfree(pfn_array);
	return result;
}
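
/*
 * hv_vtom_set_host_visibility() is wired up below in hv_vtom_init() as the
 * x86_platform.guest.enc_status_change_finish callback, so it runs whenever
 * the kernel converts a range between private and shared.
 */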

static bool hv_vtom_tlb_flush_required(bool private)
{
	return true;
}

static bool hv_vtom_cache_flush_required(void)
{
	return false;
}

static bool hv_is_private_mmio(u64 addr)
{
	/*
	 * Hyper-V always provides a single IO-APIC in a guest VM.
	 * When a paravisor is used, it is emulated by the paravisor
	 * in the guest context and must be mapped private.
	 */
	if (addr >= HV_IOAPIC_BASE_ADDRESS &&
	    addr < (HV_IOAPIC_BASE_ADDRESS + PAGE_SIZE))
		return true;

	/* Same with a vTPM */
	if (addr >= VTPM_BASE_ADDRESS &&
	    addr < (VTPM_BASE_ADDRESS + PAGE_SIZE))
		return true;

	return false;
}

#endif /* defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST) */

#ifdef CONFIG_AMD_MEM_ENCRYPT

#define hv_populate_vmcb_seg(seg, gdtr_base)			\
do {								\
	if (seg.selector) {					\
		seg.base = 0;					\
		seg.limit = HV_AP_SEGMENT_LIMIT;		\
		seg.attrib = *(u16 *)(gdtr_base + seg.selector + 5);	\
		seg.attrib = (seg.attrib & 0xFF) | ((seg.attrib >> 4) & 0xF00); \
	}							\
} while (0)
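
/*
 * A note on the attrib math above: bytes 5-6 of a GDT descriptor hold the
 * access byte and the flags/limit-high byte. The VMSA segment attrib format
 * (AMD APM Volume 2, VMCB layout) wants the access byte in bits 0-7 and the
 * flags nibble in bits 8-11, so the second statement keeps the low byte and
 * shifts the flags nibble from bits 12-15 down into bits 8-11.
 */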

static int snp_set_vmsa(void *va, bool vmsa)
{
	u64 attrs;

	/*
	 * Running at VMPL0 allows the kernel to change the VMSA bit for a page
	 * using the RMPADJUST instruction. However, for the instruction to
	 * succeed it must target the permissions of a lesser privileged
	 * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
	 * instruction in the AMD64 APM Volume 3).
	 */
	attrs = 1;
	if (vmsa)
		attrs |= RMPADJUST_VMSA_PAGE_BIT;

	return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
}

static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
{
	int err;

	err = snp_set_vmsa(vmsa, false);
	if (err)
		pr_err("clear VMSA page failed (%u), leaking page\n", err);
	else
		free_page((unsigned long)vmsa);
}

int hv_snp_boot_ap(int cpu, unsigned long start_ip)
{
	struct sev_es_save_area *vmsa = (struct sev_es_save_area *)
		__get_free_page(GFP_KERNEL | __GFP_ZERO);
	struct sev_es_save_area *cur_vmsa;
	struct desc_ptr gdtr;
	u64 ret, retry = 5;
	struct hv_enable_vp_vtl *start_vp_input;
	unsigned long flags;

	if (!vmsa)
		return -ENOMEM;

	native_store_gdt(&gdtr);

	vmsa->gdtr.base = gdtr.address;
	vmsa->gdtr.limit = gdtr.size;

	asm volatile("movl %%es, %%eax;" : "=a" (vmsa->es.selector));
	hv_populate_vmcb_seg(vmsa->es, vmsa->gdtr.base);

	asm volatile("movl %%cs, %%eax;" : "=a" (vmsa->cs.selector));
	hv_populate_vmcb_seg(vmsa->cs, vmsa->gdtr.base);

	asm volatile("movl %%ss, %%eax;" : "=a" (vmsa->ss.selector));
	hv_populate_vmcb_seg(vmsa->ss, vmsa->gdtr.base);

	asm volatile("movl %%ds, %%eax;" : "=a" (vmsa->ds.selector));
	hv_populate_vmcb_seg(vmsa->ds, vmsa->gdtr.base);

	vmsa->efer = native_read_msr(MSR_EFER);

	asm volatile("movq %%cr4, %%rax;" : "=a" (vmsa->cr4));
	asm volatile("movq %%cr3, %%rax;" : "=a" (vmsa->cr3));
	asm volatile("movq %%cr0, %%rax;" : "=a" (vmsa->cr0));

	vmsa->xcr0 = 1;
	vmsa->g_pat = HV_AP_INIT_GPAT_DEFAULT;
	vmsa->rip = (u64)secondary_startup_64_no_verify;
	vmsa->rsp = (u64)&ap_start_stack[PAGE_SIZE];

	/*
	 * Set the SNP-specific fields for this VMSA:
	 *   VMPL level
	 *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
	 */
	vmsa->vmpl = 0;
	vmsa->sev_features = sev_status >> 2;

	/* rmpadjust() returns 0 on success, so nonzero means failure. */
	ret = snp_set_vmsa(vmsa, true);
	if (ret) {
		pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret);
		free_page((u64)vmsa);
		return ret;
	}

	local_irq_save(flags);
	start_vp_input = (struct hv_enable_vp_vtl *)ap_start_input_arg;
	memset(start_vp_input, 0, sizeof(*start_vp_input));
	start_vp_input->partition_id = -1;
	start_vp_input->vp_index = cpu;
	start_vp_input->target_vtl.target_vtl = ms_hyperv.vtl;
	*(u64 *)&start_vp_input->vp_context = __pa(vmsa) | 1;

	do {
		ret = hv_do_hypercall(HVCALL_START_VP,
				      start_vp_input, NULL);
	} while (hv_result(ret) == HV_STATUS_TIME_OUT && retry--);

	local_irq_restore(flags);

	if (!hv_result_success(ret)) {
		pr_err("HvCallStartVirtualProcessor failed: %llx\n", ret);
		snp_cleanup_vmsa(vmsa);
		vmsa = NULL;
	}

	cur_vmsa = per_cpu(hv_sev_vmsa, cpu);
	/* Free up any previous VMSA page */
	if (cur_vmsa)
		snp_cleanup_vmsa(cur_vmsa);

	/* Record the current VMSA page */
	per_cpu(hv_sev_vmsa, cpu) = vmsa;

	return ret;
}
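
/*
 * hv_snp_boot_ap() is installed elsewhere (in the ms_hyperv platform setup)
 * as the 64-bit AP wakeup method for fully enlightened SEV-SNP guests, so
 * smpboot invokes it in place of the usual INIT/SIPI sequence; the exact
 * hook point is outside this file.
 */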

#endif /* CONFIG_AMD_MEM_ENCRYPT */

#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)

void __init hv_vtom_init(void)
{
	enum hv_isolation_type type = hv_get_isolation_type();

	switch (type) {
	case HV_ISOLATION_TYPE_VBS:
		fallthrough;
	/*
	 * By design, a VM using vTOM doesn't see the SEV setting,
	 * so SEV initialization is bypassed and sev_status isn't set.
	 * Set it here to indicate a vTOM VM.
	 *
	 * Note: if CONFIG_AMD_MEM_ENCRYPT is not set, sev_status is
	 * defined as 0ULL, to which a value can't be assigned.
	 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
	case HV_ISOLATION_TYPE_SNP:
		sev_status = MSR_AMD64_SNP_VTOM;
		cc_vendor = CC_VENDOR_AMD;
		break;
#endif

	case HV_ISOLATION_TYPE_TDX:
		cc_vendor = CC_VENDOR_INTEL;
		break;

	default:
		panic("hv_vtom_init: unsupported isolation type %d\n", type);
	}

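	/*
	 * shared_gpa_boundary is the vTOM bit: GPAs below it are private
	 * (encrypted), and the alias above it is shared with the host.
	 * Recording it in the coco mask makes shared mappings set the bit,
	 * and trimming physical_mask keeps the bit from being interpreted
	 * as part of the physical address.
	 */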
	cc_set_mask(ms_hyperv.shared_gpa_boundary);
	physical_mask &= ms_hyperv.shared_gpa_boundary - 1;

	x86_platform.hyper.is_private_mmio = hv_is_private_mmio;
	x86_platform.guest.enc_cache_flush_required = hv_vtom_cache_flush_required;
	x86_platform.guest.enc_tlb_flush_required = hv_vtom_tlb_flush_required;
	x86_platform.guest.enc_status_change_finish = hv_vtom_set_host_visibility;

	/* Set WB as the default cache mode. */
	mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK);
}

#endif /* defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST) */

enum hv_isolation_type hv_get_isolation_type(void)
{
	if (!(ms_hyperv.priv_high & HV_ISOLATION))
		return HV_ISOLATION_TYPE_NONE;
	return FIELD_GET(HV_ISOLATION_TYPE, ms_hyperv.isolation_config_b);
}
EXPORT_SYMBOL_GPL(hv_get_isolation_type);

/*
 * hv_is_isolation_supported - Check if the system runs in a Hyper-V
 * isolation VM.
 */
bool hv_is_isolation_supported(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
		return false;

	if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
		return false;

	return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE;
}

DEFINE_STATIC_KEY_FALSE(isolation_type_snp);

/*
 * hv_isolation_type_snp - Check if the system runs in an AMD SEV-SNP based
 * isolation VM.
 */
bool hv_isolation_type_snp(void)
{
	return static_branch_unlikely(&isolation_type_snp);
}

DEFINE_STATIC_KEY_FALSE(isolation_type_en_snp);
/*
 * hv_isolation_type_en_snp - Check if the system runs in an AMD SEV-SNP based
 * isolation enlightened VM.
 */
bool hv_isolation_type_en_snp(void)
{
	return static_branch_unlikely(&isolation_type_en_snp);
}

DEFINE_STATIC_KEY_FALSE(isolation_type_tdx);
/*
 * hv_isolation_type_tdx - Check if the system runs in an Intel TDX based
 * isolated VM.
 */
bool hv_isolation_type_tdx(void)
{
	return static_branch_unlikely(&isolation_type_tdx);
}

#ifdef CONFIG_INTEL_TDX_GUEST

u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
{
	struct tdx_hypercall_args args = { };

	args.r10 = control;
	args.rdx = param1;
	args.r8  = param2;

	(void)__tdx_hypercall_ret(&args);

	return args.r11;
}
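
/*
 * Register convention, as used above: the Hyper-V hypercall control word
 * goes in r10 and the two parameters in rdx and r8; the hypercall status
 * comes back in r11. __tdx_hypercall_ret() performs the TDCALL-based
 * guest-host transition and fills args back in with the output registers.
 */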

#endif
623