xref: /openbmc/linux/arch/x86/hyperv/ivm.c (revision b9b4fe3a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Hyper-V Isolation VM interface with paravisor and hypervisor
4  *
5  * Author:
6  *  Tianyu Lan <Tianyu.Lan@microsoft.com>
7  */
8 
9 #include <linux/bitfield.h>
10 #include <linux/hyperv.h>
11 #include <linux/types.h>
12 #include <linux/slab.h>
13 #include <asm/svm.h>
14 #include <asm/sev.h>
15 #include <asm/io.h>
16 #include <asm/coco.h>
17 #include <asm/mem_encrypt.h>
18 #include <asm/mshyperv.h>
19 #include <asm/hypervisor.h>
20 #include <asm/mtrr.h>
22 #include <asm/io_apic.h>
24 #include <asm/realmode.h>
25 #include <asm/e820/api.h>
26 #include <asm/desc.h>
27 #include <uapi/asm/vmx.h>
28 
29 #ifdef CONFIG_AMD_MEM_ENCRYPT
30 
31 #define GHCB_USAGE_HYPERV_CALL	1
32 
33 static u8 ap_start_input_arg[PAGE_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
34 static u8 ap_start_stack[PAGE_SIZE] __aligned(PAGE_SIZE);
35 
36 union hv_ghcb {
37 	struct ghcb ghcb;
38 	struct {
39 		u64 hypercalldata[509];
40 		u64 outputgpa;
41 		union {
42 			union {
43 				struct {
44 					u32 callcode        : 16;
45 					u32 isfast          : 1;
46 					u32 reserved1       : 14;
47 					u32 isnested        : 1;
48 					u32 countofelements : 12;
49 					u32 reserved2       : 4;
50 					u32 repstartindex   : 12;
51 					u32 reserved3       : 4;
52 				};
53 				u64 asuint64;
54 			} hypercallinput;
55 			union {
56 				struct {
57 					u16 callstatus;
58 					u16 reserved1;
59 					u32 elementsprocessed : 12;
60 					u32 reserved2         : 20;
61 				};
62 				u64 asuint64;
63 			} hypercalloutput;
64 		};
65 		u64 reserved2;
66 	} hypercall;
67 } __packed __aligned(HV_HYP_PAGE_SIZE);
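/*
 * Layout note (derived from the fields above): the hypercall view overlays
 * Hyper-V's hypercall parameter area on the standard GHCB page. The 509
 * u64 data slots, outputgpa, the input/output control words and one
 * reserved u64 add up to exactly HV_HYP_PAGE_SIZE bytes, which
 * hv_ghcb_msr_read() below asserts with a BUILD_BUG_ON().
 */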
68 
69 static DEFINE_PER_CPU(struct sev_es_save_area *, hv_sev_vmsa);
70 
71 static u16 hv_ghcb_version __ro_after_init;
72 
73 u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
74 {
75 	union hv_ghcb *hv_ghcb;
76 	void **ghcb_base;
77 	unsigned long flags;
78 	u64 status;
79 
80 	if (!hv_ghcb_pg)
81 		return -EFAULT;
82 
83 	WARN_ON(in_nmi());
84 
85 	local_irq_save(flags);
86 	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
87 	hv_ghcb = (union hv_ghcb *)*ghcb_base;
88 	if (!hv_ghcb) {
89 		local_irq_restore(flags);
90 		return -EFAULT;
91 	}
92 
93 	hv_ghcb->ghcb.protocol_version = GHCB_PROTOCOL_MAX;
94 	hv_ghcb->ghcb.ghcb_usage = GHCB_USAGE_HYPERV_CALL;
95 
96 	hv_ghcb->hypercall.outputgpa = (u64)output;
97 	hv_ghcb->hypercall.hypercallinput.asuint64 = 0;
98 	hv_ghcb->hypercall.hypercallinput.callcode = control;
99 
100 	if (input_size)
101 		memcpy(hv_ghcb->hypercall.hypercalldata, input, input_size);
102 
103 	VMGEXIT();
104 
105 	hv_ghcb->ghcb.ghcb_usage = 0xffffffff;
106 	memset(hv_ghcb->ghcb.save.valid_bitmap, 0,
107 	       sizeof(hv_ghcb->ghcb.save.valid_bitmap));
108 
109 	status = hv_ghcb->hypercall.hypercalloutput.callstatus;
110 
111 	local_irq_restore(flags);
112 
113 	return status;
114 }
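/*
 * Illustrative sketch only, not a call site in this file: a caller with a
 * message 'msg' in host-accessible memory would issue a hypercall through
 * the shared GHCB page roughly like this:
 *
 *	status = hv_ghcb_hypercall(HVCALL_POST_MESSAGE, msg, NULL,
 *				   sizeof(*msg));
 *	if (status != HV_STATUS_SUCCESS)
 *		pr_err("POST_MESSAGE failed: 0x%llx\n", status);
 *
 * The return value is the completion status taken from
 * hypercalloutput.callstatus above.
 */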
115 
116 static inline u64 rd_ghcb_msr(void)
117 {
118 	return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
119 }
120 
121 static inline void wr_ghcb_msr(u64 val)
122 {
123 	native_wrmsrl(MSR_AMD64_SEV_ES_GHCB, val);
124 }
125 
126 static enum es_result hv_ghcb_hv_call(struct ghcb *ghcb, u64 exit_code,
127 				   u64 exit_info_1, u64 exit_info_2)
128 {
129 	/* Fill in protocol and format specifiers */
130 	ghcb->protocol_version = hv_ghcb_version;
131 	ghcb->ghcb_usage       = GHCB_DEFAULT_USAGE;
132 
133 	ghcb_set_sw_exit_code(ghcb, exit_code);
134 	ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
135 	ghcb_set_sw_exit_info_2(ghcb, exit_info_2);
136 
137 	VMGEXIT();
138 
139 	if (ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0))
140 		return ES_VMM_ERROR;
141 	else
142 		return ES_OK;
143 }
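/*
 * Per the GHCB specification, the hypervisor signals failure of a GHCB
 * request by setting the low 32 bits of SW_EXITINFO1 to a non-zero value,
 * which is what the GENMASK_ULL(31, 0) test in hv_ghcb_hv_call() checks.
 */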
144 
145 void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason)
146 {
147 	u64 val = GHCB_MSR_TERM_REQ;
148 
149 	/* Tell the hypervisor what went wrong. */
150 	val |= GHCB_SEV_TERM_REASON(set, reason);
151 
152 	/* Request Guest Termination from Hypervisor */
153 	wr_ghcb_msr(val);
154 	VMGEXIT();
155 
156 	while (true)
157 		asm volatile("hlt\n" : : : "memory");
158 }
159 
160 bool hv_ghcb_negotiate_protocol(void)
161 {
162 	u64 ghcb_gpa;
163 	u64 val;
164 
165 	/* Save ghcb page gpa. */
166 	ghcb_gpa = rd_ghcb_msr();
167 
168 	/* Do the GHCB protocol version negotiation */
169 	wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
170 	VMGEXIT();
171 	val = rd_ghcb_msr();
172 
173 	if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
174 		return false;
175 
176 	if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTOCOL_MIN ||
177 	    GHCB_MSR_PROTO_MIN(val) > GHCB_PROTOCOL_MAX)
178 		return false;
179 
180 	hv_ghcb_version = min_t(size_t, GHCB_MSR_PROTO_MAX(val),
181 			     GHCB_PROTOCOL_MAX);
182 
183 	/* Write ghcb page back after negotiating protocol. */
184 	wr_ghcb_msr(ghcb_gpa);
185 	VMGEXIT();
186 
187 	return true;
188 }
189 
190 static void hv_ghcb_msr_write(u64 msr, u64 value)
191 {
192 	union hv_ghcb *hv_ghcb;
193 	void **ghcb_base;
194 	unsigned long flags;
195 
196 	if (!hv_ghcb_pg)
197 		return;
198 
199 	WARN_ON(in_nmi());
200 
201 	local_irq_save(flags);
202 	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
203 	hv_ghcb = (union hv_ghcb *)*ghcb_base;
204 	if (!hv_ghcb) {
205 		local_irq_restore(flags);
206 		return;
207 	}
208 
209 	ghcb_set_rcx(&hv_ghcb->ghcb, msr);
210 	ghcb_set_rax(&hv_ghcb->ghcb, lower_32_bits(value));
211 	ghcb_set_rdx(&hv_ghcb->ghcb, upper_32_bits(value));
212 
213 	if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 1, 0))
214 		pr_warn("Failed to write MSR %llx via GHCB\n", msr);
215 
216 	local_irq_restore(flags);
217 }
218 
219 static void hv_ghcb_msr_read(u64 msr, u64 *value)
220 {
221 	union hv_ghcb *hv_ghcb;
222 	void **ghcb_base;
223 	unsigned long flags;
224 
225 	/* Check size of union hv_ghcb here. */
226 	BUILD_BUG_ON(sizeof(union hv_ghcb) != HV_HYP_PAGE_SIZE);
227 
228 	if (!hv_ghcb_pg)
229 		return;
230 
231 	WARN_ON(in_nmi());
232 
233 	local_irq_save(flags);
234 	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
235 	hv_ghcb = (union hv_ghcb *)*ghcb_base;
236 	if (!hv_ghcb) {
237 		local_irq_restore(flags);
238 		return;
239 	}
240 
241 	ghcb_set_rcx(&hv_ghcb->ghcb, msr);
242 	if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 0, 0))
243 		pr_warn("Failed to read MSR %llx via GHCB\n", msr);
244 	else
245 		*value = (u64)lower_32_bits(hv_ghcb->ghcb.save.rax)
246 			| ((u64)lower_32_bits(hv_ghcb->ghcb.save.rdx) << 32);
247 	local_irq_restore(flags);
248 }
249 
250 #else
251 static inline void hv_ghcb_msr_write(u64 msr, u64 value) {}
252 static inline void hv_ghcb_msr_read(u64 msr, u64 *value) {}
253 #endif /* CONFIG_AMD_MEM_ENCRYPT */
254 
255 #ifdef CONFIG_INTEL_TDX_GUEST
256 static void hv_tdx_msr_write(u64 msr, u64 val)
257 {
258 	struct tdx_hypercall_args args = {
259 		.r10 = TDX_HYPERCALL_STANDARD,
260 		.r11 = EXIT_REASON_MSR_WRITE,
261 		.r12 = msr,
262 		.r13 = val,
263 	};
264 
265 	u64 ret = __tdx_hypercall(&args);
266 
267 	WARN_ONCE(ret, "Failed to emulate MSR write: %lld\n", ret);
268 }
269 
270 static void hv_tdx_msr_read(u64 msr, u64 *val)
271 {
272 	struct tdx_hypercall_args args = {
273 		.r10 = TDX_HYPERCALL_STANDARD,
274 		.r11 = EXIT_REASON_MSR_READ,
275 		.r12 = msr,
276 	};
277 
278 	u64 ret = __tdx_hypercall_ret(&args);
279 
280 	if (WARN_ONCE(ret, "Failed to emulate MSR read: %lld\n", ret))
281 		*val = 0;
282 	else
283 		*val = args.r11;
284 }
285 #else
286 static inline void hv_tdx_msr_write(u64 msr, u64 value) {}
287 static inline void hv_tdx_msr_read(u64 msr, u64 *value) {}
288 #endif /* CONFIG_INTEL_TDX_GUEST */
289 
290 #if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
291 void hv_ivm_msr_write(u64 msr, u64 value)
292 {
293 	if (!ms_hyperv.paravisor_present)
294 		return;
295 
296 	if (hv_isolation_type_tdx())
297 		hv_tdx_msr_write(msr, value);
298 	else if (hv_isolation_type_snp())
299 		hv_ghcb_msr_write(msr, value);
300 }
301 
302 void hv_ivm_msr_read(u64 msr, u64 *value)
303 {
304 	if (!ms_hyperv.paravisor_present)
305 		return;
306 
307 	if (hv_isolation_type_tdx())
308 		hv_tdx_msr_read(msr, value);
309 	else if (hv_isolation_type_snp())
310 		hv_ghcb_msr_read(msr, value);
311 }
312 #endif
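/*
 * Hedged usage sketch (illustrative only; the real call sites are the
 * generic Hyper-V MSR accessors): with a paravisor present, reading a
 * synthetic MSR such as HV_X64_MSR_VP_INDEX goes through these wrappers:
 *
 *	u64 vp_index;
 *
 *	hv_ivm_msr_read(HV_X64_MSR_VP_INDEX, &vp_index);
 *
 * On SNP the access is routed through the GHCB page, on TDX through a
 * TDVMCALL; without a paravisor the wrappers do nothing.
 */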
313 
314 #if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
315 /*
316  * hv_mark_gpa_visibility - Set pages visible to host via hvcall.
317  *
318  * In an Isolation VM, all guest memory is encrypted from the host, so the
319  * guest must mark memory visible to the host via this hypercall before
320  * sharing that memory with the host.
321  */
322 static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
323 			   enum hv_mem_host_visibility visibility)
324 {
325 	struct hv_gpa_range_for_visibility **input_pcpu, *input;
326 	u16 pages_processed;
327 	u64 hv_status;
328 	unsigned long flags;
329 
330 	/* no-op if partition isolation is not enabled */
331 	if (!hv_is_isolation_supported())
332 		return 0;
333 
334 	if (count > HV_MAX_MODIFY_GPA_REP_COUNT) {
335 		pr_err("Hyper-V: GPA count:%d exceeds supported:%lu\n", count,
336 			HV_MAX_MODIFY_GPA_REP_COUNT);
337 		return -EINVAL;
338 	}
339 
340 	local_irq_save(flags);
341 	input_pcpu = (struct hv_gpa_range_for_visibility **)
342 			this_cpu_ptr(hyperv_pcpu_input_arg);
343 	input = *input_pcpu;
344 	if (unlikely(!input)) {
345 		local_irq_restore(flags);
346 		return -EINVAL;
347 	}
348 
349 	input->partition_id = HV_PARTITION_ID_SELF;
350 	input->host_visibility = visibility;
351 	input->reserved0 = 0;
352 	input->reserved1 = 0;
353 	memcpy((void *)input->gpa_page_list, pfn, count * sizeof(*pfn));
354 	hv_status = hv_do_rep_hypercall(
355 			HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY, count,
356 			0, input, &pages_processed);
357 	local_irq_restore(flags);
358 
359 	if (hv_result_success(hv_status))
360 		return 0;
361 	else
362 		return -EFAULT;
363 }
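/*
 * The count is capped at HV_MAX_MODIFY_GPA_REP_COUNT so the PFN list fits
 * in the single per-CPU hypercall input page; callers converting larger
 * ranges batch their PFNs, as hv_vtom_set_host_visibility() does below.
 */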
364 
365 /*
366  * hv_vtom_set_host_visibility - Set specified memory visible to host.
367  *
368  * In an Isolation VM, all guest memory is encrypted from the host, so the
369  * guest must mark memory visible to the host before sharing it. This
370  * function is a wrapper around hv_mark_gpa_visibility() that takes a
371  * memory base address and page count.
372  */
373 static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
374 {
375 	enum hv_mem_host_visibility visibility = enc ?
376 			VMBUS_PAGE_NOT_VISIBLE : VMBUS_PAGE_VISIBLE_READ_WRITE;
377 	u64 *pfn_array;
378 	int ret = 0;
379 	bool result = true;
380 	int i, pfn;
381 
382 	pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
383 	if (!pfn_array)
384 		return false;
385 
386 	for (i = 0, pfn = 0; i < pagecount; i++) {
387 		pfn_array[pfn] = virt_to_hvpfn((void *)kbuffer + i * HV_HYP_PAGE_SIZE);
388 		pfn++;
389 
390 		if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
391 			ret = hv_mark_gpa_visibility(pfn, pfn_array,
392 						     visibility);
393 			if (ret) {
394 				result = false;
395 				goto err_free_pfn_array;
396 			}
397 			pfn = 0;
398 		}
399 	}
400 
401  err_free_pfn_array:
402 	kfree(pfn_array);
403 	return result;
404 }
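/*
 * hv_vtom_set_host_visibility() is registered in hv_vtom_init() below as
 * x86_platform.guest.enc_status_change_finish, so (assuming the generic
 * set_memory_encrypted()/set_memory_decrypted() flow is used) encryption
 * status changes on a vTOM guest end up here to flip host visibility.
 */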
405 
406 static bool hv_vtom_tlb_flush_required(bool private)
407 {
408 	return true;
409 }
410 
411 static bool hv_vtom_cache_flush_required(void)
412 {
413 	return false;
414 }
415 
416 static bool hv_is_private_mmio(u64 addr)
417 {
418 	/*
419 	 * Hyper-V always provides a single IO-APIC in a guest VM.
420 	 * When a paravisor is used, it is emulated by the paravisor
421 	 * in the guest context and must be mapped private.
422 	 */
423 	if (addr >= HV_IOAPIC_BASE_ADDRESS &&
424 	    addr < (HV_IOAPIC_BASE_ADDRESS + PAGE_SIZE))
425 		return true;
426 
427 	/* The same applies to a vTPM */
428 	if (addr >= VTPM_BASE_ADDRESS &&
429 	    addr < (VTPM_BASE_ADDRESS + PAGE_SIZE))
430 		return true;
431 
432 	return false;
433 }
434 
435 #endif /* defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST) */
436 
437 #ifdef CONFIG_AMD_MEM_ENCRYPT
438 
439 #define hv_populate_vmcb_seg(seg, gdtr_base)			\
440 do {								\
441 	if (seg.selector) {					\
442 		seg.base = 0;					\
443 		seg.limit = HV_AP_SEGMENT_LIMIT;		\
444 		seg.attrib = *(u16 *)(gdtr_base + seg.selector + 5);	\
445 		seg.attrib = (seg.attrib & 0xFF) | ((seg.attrib >> 4) & 0xF00); \
446 	}							\
447 } while (0)
448 
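/*
 * The attrib computation in hv_populate_vmcb_seg() above reads the GDT
 * descriptor's access-rights byte (offset 5) and limit/flags byte (offset 6)
 * as one u16, then repacks them into the VMSA segment attribute format:
 * bits 0-7 hold type/S/DPL/P and bits 8-11 hold AVL/L/DB/G.
 */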
449 static int snp_set_vmsa(void *va, bool vmsa)
450 {
451 	u64 attrs;
452 
453 	/*
454 	 * Running at VMPL0 allows the kernel to change the VMSA bit for a page
455 	 * using the RMPADJUST instruction. However, for the instruction to
456 	 * succeed it must target the permissions of a lesser privileged
457 	 * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
458 	 * instruction in the AMD64 APM Volume 3).
459 	 */
460 	attrs = 1;
461 	if (vmsa)
462 		attrs |= RMPADJUST_VMSA_PAGE_BIT;
463 
464 	return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
465 }
466 
467 static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
468 {
469 	int err;
470 
471 	err = snp_set_vmsa(vmsa, false);
472 	if (err)
473 		pr_err("clear VMSA page failed (%u), leaking page\n", err);
474 	else
475 		free_page((unsigned long)vmsa);
476 }
477 
478 int hv_snp_boot_ap(int cpu, unsigned long start_ip)
479 {
480 	struct sev_es_save_area *vmsa = (struct sev_es_save_area *)
481 		__get_free_page(GFP_KERNEL | __GFP_ZERO);
482 	struct sev_es_save_area *cur_vmsa;
483 	struct desc_ptr gdtr;
484 	u64 ret, retry = 5;
485 	struct hv_enable_vp_vtl *start_vp_input;
486 	unsigned long flags;
487 
488 	if (!vmsa)
489 		return -ENOMEM;
490 
491 	native_store_gdt(&gdtr);
492 
493 	vmsa->gdtr.base = gdtr.address;
494 	vmsa->gdtr.limit = gdtr.size;
495 
496 	asm volatile("movl %%es, %%eax;" : "=a" (vmsa->es.selector));
497 	hv_populate_vmcb_seg(vmsa->es, vmsa->gdtr.base);
498 
499 	asm volatile("movl %%cs, %%eax;" : "=a" (vmsa->cs.selector));
500 	hv_populate_vmcb_seg(vmsa->cs, vmsa->gdtr.base);
501 
502 	asm volatile("movl %%ss, %%eax;" : "=a" (vmsa->ss.selector));
503 	hv_populate_vmcb_seg(vmsa->ss, vmsa->gdtr.base);
504 
505 	asm volatile("movl %%ds, %%eax;" : "=a" (vmsa->ds.selector));
506 	hv_populate_vmcb_seg(vmsa->ds, vmsa->gdtr.base);
507 
508 	vmsa->efer = native_read_msr(MSR_EFER);
509 
510 	asm volatile("movq %%cr4, %%rax;" : "=a" (vmsa->cr4));
511 	asm volatile("movq %%cr3, %%rax;" : "=a" (vmsa->cr3));
512 	asm volatile("movq %%cr0, %%rax;" : "=a" (vmsa->cr0));
513 
514 	vmsa->xcr0 = 1;
515 	vmsa->g_pat = HV_AP_INIT_GPAT_DEFAULT;
516 	vmsa->rip = (u64)secondary_startup_64_no_verify;
517 	vmsa->rsp = (u64)&ap_start_stack[PAGE_SIZE];
518 
519 	/*
520 	 * Set the SNP-specific fields for this VMSA:
521 	 *   VMPL level
522 	 *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
523 	 */
524 	vmsa->vmpl = 0;
525 	vmsa->sev_features = sev_status >> 2;
526 
527 	ret = snp_set_vmsa(vmsa, true);
528 	if (ret) {
529 		pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret);
530 		free_page((u64)vmsa);
531 		return ret;
532 	}
533 
534 	local_irq_save(flags);
535 	start_vp_input = (struct hv_enable_vp_vtl *)ap_start_input_arg;
536 	memset(start_vp_input, 0, sizeof(*start_vp_input));
537 	start_vp_input->partition_id = -1;
538 	start_vp_input->vp_index = cpu;
539 	start_vp_input->target_vtl.target_vtl = ms_hyperv.vtl;
540 	*(u64 *)&start_vp_input->vp_context = __pa(vmsa) | 1;
541 
542 	do {
543 		ret = hv_do_hypercall(HVCALL_START_VP,
544 				      start_vp_input, NULL);
545 	} while (hv_result(ret) == HV_STATUS_TIME_OUT && retry--);
546 
547 	local_irq_restore(flags);
548 
549 	if (!hv_result_success(ret)) {
550 		pr_err("HvCallStartVirtualProcessor failed: %llx\n", ret);
551 		snp_cleanup_vmsa(vmsa);
552 		vmsa = NULL;
553 	}
554 
555 	cur_vmsa = per_cpu(hv_sev_vmsa, cpu);
556 	/* Free up any previous VMSA page */
557 	if (cur_vmsa)
558 		snp_cleanup_vmsa(cur_vmsa);
559 
560 	/* Record the current VMSA page */
561 	per_cpu(hv_sev_vmsa, cpu) = vmsa;
562 
563 	return ret;
564 }
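/*
 * Note on the static buffers used above: ap_start_input_arg is placed in
 * .bss..decrypted so the HvCallStartVirtualProcessor input lives in memory
 * the hypervisor can read, and ap_start_stack provides the AP's initial
 * stack (vmsa->rsp points at its top) until the startup code switches to
 * the regular idle-task stack.
 */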
565 
566 #endif /* CONFIG_AMD_MEM_ENCRYPT */
567 
568 #if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
569 
570 void __init hv_vtom_init(void)
571 {
572 	enum hv_isolation_type type = hv_get_isolation_type();
573 
574 	switch (type) {
575 	case HV_ISOLATION_TYPE_VBS:
576 		fallthrough;
577 	/*
578 	 * By design, a VM using vTOM doesn't see the SEV setting,
579 	 * so SEV initialization is bypassed and sev_status isn't set.
580 	 * Set it here to indicate a vTOM VM.
581 	 *
582 	 * Note: if CONFIG_AMD_MEM_ENCRYPT is not set, sev_status is
583 	 * defined as 0ULL, to which we can't assign a value.
584 	 */
585 #ifdef CONFIG_AMD_MEM_ENCRYPT
586 	case HV_ISOLATION_TYPE_SNP:
587 		sev_status = MSR_AMD64_SNP_VTOM;
588 		cc_vendor = CC_VENDOR_AMD;
589 		break;
590 #endif
591 
592 	case HV_ISOLATION_TYPE_TDX:
593 		cc_vendor = CC_VENDOR_INTEL;
594 		break;
595 
596 	default:
597 		panic("hv_vtom_init: unsupported isolation type %d\n", type);
598 	}
599 
600 	cc_set_mask(ms_hyperv.shared_gpa_boundary);
601 	physical_mask &= ms_hyperv.shared_gpa_boundary - 1;
602 
603 	x86_platform.hyper.is_private_mmio = hv_is_private_mmio;
604 	x86_platform.guest.enc_cache_flush_required = hv_vtom_cache_flush_required;
605 	x86_platform.guest.enc_tlb_flush_required = hv_vtom_tlb_flush_required;
606 	x86_platform.guest.enc_status_change_finish = hv_vtom_set_host_visibility;
607 
608 	/* Set WB as the default cache mode. */
609 	mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK);
610 }
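/*
 * Worked example with illustrative numbers only: if shared_gpa_boundary is
 * 1ULL << 46, cc_set_mask() above makes bit 46 the "shared" bit that
 * cc_mkdec()/pgprot_decrypted() set for host-visible mappings, while
 * masking physical_mask with (1ULL << 46) - 1 keeps that vTOM bit out of
 * ordinary physical addresses.
 */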
611 
612 #endif /* defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST) */
613 
614 enum hv_isolation_type hv_get_isolation_type(void)
615 {
616 	if (!(ms_hyperv.priv_high & HV_ISOLATION))
617 		return HV_ISOLATION_TYPE_NONE;
618 	return FIELD_GET(HV_ISOLATION_TYPE, ms_hyperv.isolation_config_b);
619 }
620 EXPORT_SYMBOL_GPL(hv_get_isolation_type);
621 
622 /*
623  * hv_is_isolation_supported - Check whether the system runs in a
624  * Hyper-V isolation VM.
625  */
626 bool hv_is_isolation_supported(void)
627 {
628 	if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
629 		return false;
630 
631 	if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
632 		return false;
633 
634 	return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE;
635 }
636 
637 DEFINE_STATIC_KEY_FALSE(isolation_type_snp);
638 
639 /*
640  * hv_isolation_type_snp - Check whether the system runs in an AMD
641  * SEV-SNP based isolation VM.
642  */
643 bool hv_isolation_type_snp(void)
644 {
645 	return static_branch_unlikely(&isolation_type_snp);
646 }
647 
648 DEFINE_STATIC_KEY_FALSE(isolation_type_en_snp);
649 /*
650  * hv_isolation_type_en_snp - Check whether the system runs in an AMD
651  * SEV-SNP based enlightened isolation VM.
652  */
653 bool hv_isolation_type_en_snp(void)
654 {
655 	return static_branch_unlikely(&isolation_type_en_snp);
656 }
657 
658 DEFINE_STATIC_KEY_FALSE(isolation_type_tdx);
659 /*
660  * hv_isolation_type_tdx - Check if the system runs in an Intel TDX based
661  * isolated VM.
662  */
663 bool hv_isolation_type_tdx(void)
664 {
665 	return static_branch_unlikely(&isolation_type_tdx);
666 }
667 
668 #ifdef CONFIG_INTEL_TDX_GUEST
669 
670 u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
671 {
672 	struct tdx_hypercall_args args = { };
673 
674 	args.r10 = control;
675 	args.rdx = param1;
676 	args.r8  = param2;
677 
678 	(void)__tdx_hypercall_ret(&args);
679 
680 	return args.r11;
681 }
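/*
 * TDX counterpart of hv_ghcb_hypercall(): the Hyper-V control word goes in
 * r10 and the two parameters in rdx and r8, with Hyper-V's status returned
 * in r11. (Assumption about callers, which live in mshyperv.h:
 * hv_do_hypercall() routes here for fully enlightened TDX guests.)
 */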
682 
683 #endif
684