// SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V Isolation VM interface with paravisor and hypervisor
 *
 * Author:
 *  Tianyu Lan <Tianyu.Lan@microsoft.com>
 */

#include <linux/bitfield.h>
#include <linux/hyperv.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/svm.h>
#include <asm/sev.h>
#include <asm/io.h>
#include <asm/coco.h>
#include <asm/mem_encrypt.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
#include <asm/mtrr.h>
#include <asm/io_apic.h>
#include <asm/realmode.h>
#include <asm/e820/api.h>
#include <asm/desc.h>
#include <uapi/asm/vmx.h>

#ifdef CONFIG_AMD_MEM_ENCRYPT

#define GHCB_USAGE_HYPERV_CALL	1

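/*
 * Layout of the GHCB page when it carries a Hyper-V hypercall rather
 * than a standard GHCB request (ghcb_usage == GHCB_USAGE_HYPERV_CALL).
 * The 509 data slots plus outputgpa, the input/output control word and
 * the trailing reserved field add up to 512 u64s, i.e. exactly
 * HV_HYP_PAGE_SIZE; hv_ghcb_msr_read() asserts this with a BUILD_BUG_ON().
 */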
union hv_ghcb {
	struct ghcb ghcb;
	struct {
		u64 hypercalldata[509];
		u64 outputgpa;
		union {
			union {
				struct {
					u32 callcode        : 16;
					u32 isfast          : 1;
					u32 reserved1       : 14;
					u32 isnested        : 1;
					u32 countofelements : 12;
					u32 reserved2       : 4;
					u32 repstartindex   : 12;
					u32 reserved3       : 4;
				};
				u64 asuint64;
			} hypercallinput;
			union {
				struct {
					u16 callstatus;
					u16 reserved1;
					u32 elementsprocessed : 12;
					u32 reserved2         : 20;
				};
				u64 asuint64;
			} hypercalloutput;
		};
		u64 reserved2;
	} hypercall;
} __packed __aligned(HV_HYP_PAGE_SIZE);

/* Only used in an SNP VM with the paravisor */
static u16 hv_ghcb_version __ro_after_init;

/* Functions only used in an SNP VM with the paravisor go here. */
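/*
 * hv_ghcb_hypercall - Issue a Hyper-V hypercall through the GHCB page.
 *
 * The hypercall input is copied into the GHCB, ghcb_usage is set to
 * GHCB_USAGE_HYPERV_CALL, and VMGEXIT() hands control to the paravisor,
 * which performs the hypercall on the guest's behalf. The Hyper-V status
 * code is read back from the hypercall output block.
 */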
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;
	u64 status;

	if (!hv_ghcb_pg)
		return -EFAULT;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return -EFAULT;
	}

	hv_ghcb->ghcb.protocol_version = GHCB_PROTOCOL_MAX;
	hv_ghcb->ghcb.ghcb_usage = GHCB_USAGE_HYPERV_CALL;

	hv_ghcb->hypercall.outputgpa = (u64)output;
	hv_ghcb->hypercall.hypercallinput.asuint64 = 0;
	hv_ghcb->hypercall.hypercallinput.callcode = control;

	if (input_size)
		memcpy(hv_ghcb->hypercall.hypercalldata, input, input_size);

	VMGEXIT();

	hv_ghcb->ghcb.ghcb_usage = 0xffffffff;
	memset(hv_ghcb->ghcb.save.valid_bitmap, 0,
	       sizeof(hv_ghcb->ghcb.save.valid_bitmap));

	status = hv_ghcb->hypercall.hypercalloutput.callstatus;

	local_irq_restore(flags);

	return status;
}

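/*
 * Raw accessors for the SEV-ES GHCB MSR (MSR_AMD64_SEV_ES_GHCB), which
 * holds the guest physical address of the GHCB page and also carries the
 * register-based GHCB protocol used during early negotiation.
 */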
static inline u64 rd_ghcb_msr(void)
{
	return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
}

static inline void wr_ghcb_msr(u64 val)
{
	native_wrmsrl(MSR_AMD64_SEV_ES_GHCB, val);
}

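/*
 * Perform a single GHCB exchange: fill in the protocol version and usage,
 * set the software exit code/info fields, and VMGEXIT() to the hypervisor.
 * Per the GHCB specification, a non-zero value in the low 32 bits of
 * sw_exit_info_1 on return indicates a VMM-side error.
 */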
static enum es_result hv_ghcb_hv_call(struct ghcb *ghcb, u64 exit_code,
				   u64 exit_info_1, u64 exit_info_2)
{
	/* Fill in protocol and format specifiers */
	ghcb->protocol_version = hv_ghcb_version;
	ghcb->ghcb_usage       = GHCB_DEFAULT_USAGE;

	ghcb_set_sw_exit_code(ghcb, exit_code);
	ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
	ghcb_set_sw_exit_info_2(ghcb, exit_info_2);

	VMGEXIT();

	if (ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0))
		return ES_VMM_ERROR;
	else
		return ES_OK;
}

void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason)
{
	u64 val = GHCB_MSR_TERM_REQ;

	/* Tell the hypervisor what went wrong. */
	val |= GHCB_SEV_TERM_REASON(set, reason);

	/* Request guest termination from the hypervisor. */
	wr_ghcb_msr(val);
	VMGEXIT();

	while (true)
		asm volatile("hlt\n" : : : "memory");
}

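/*
 * Negotiate the GHCB protocol version over the GHCB MSR: send
 * GHCB_MSR_SEV_INFO_REQ, VMGEXIT(), and read back the hypervisor's
 * supported [min, max] version range. The negotiated version is the
 * lower of the hypervisor's maximum and GHCB_PROTOCOL_MAX. The GHCB
 * page GPA is saved across the exchange and restored afterwards, since
 * the negotiation reuses the same MSR.
 */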
bool hv_ghcb_negotiate_protocol(void)
{
	u64 ghcb_gpa;
	u64 val;

	/* Save ghcb page gpa. */
	ghcb_gpa = rd_ghcb_msr();

	/* Do the GHCB protocol version negotiation */
	wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
	VMGEXIT();
	val = rd_ghcb_msr();

	if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
		return false;

	if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTOCOL_MIN ||
	    GHCB_MSR_PROTO_MIN(val) > GHCB_PROTOCOL_MAX)
		return false;

	hv_ghcb_version = min_t(size_t, GHCB_MSR_PROTO_MAX(val),
			     GHCB_PROTOCOL_MAX);

	/* Write ghcb page back after negotiating protocol. */
	wr_ghcb_msr(ghcb_gpa);
	VMGEXIT();

	return true;
}

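/*
 * MSR access emulation via the GHCB, following the SVM_EXIT_MSR
 * convention: the MSR index goes in RCX and the value in RDX:RAX, as with
 * the native RDMSR/WRMSR instructions; exit_info_1 selects the direction
 * (1 = write, 0 = read).
 */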
static void hv_ghcb_msr_write(u64 msr, u64 value)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;

	if (!hv_ghcb_pg)
		return;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return;
	}

	ghcb_set_rcx(&hv_ghcb->ghcb, msr);
	ghcb_set_rax(&hv_ghcb->ghcb, lower_32_bits(value));
	ghcb_set_rdx(&hv_ghcb->ghcb, upper_32_bits(value));

	if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 1, 0))
		pr_warn("Failed to write MSR 0x%llx via GHCB\n", msr);

	local_irq_restore(flags);
}

static void hv_ghcb_msr_read(u64 msr, u64 *value)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;

	/* Check the size of union hv_ghcb here. */
	BUILD_BUG_ON(sizeof(union hv_ghcb) != HV_HYP_PAGE_SIZE);

	if (!hv_ghcb_pg)
		return;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return;
	}

	ghcb_set_rcx(&hv_ghcb->ghcb, msr);
	if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 0, 0))
		pr_warn("Failed to read MSR 0x%llx via GHCB\n", msr);
	else
		*value = (u64)lower_32_bits(hv_ghcb->ghcb.save.rax)
			| ((u64)lower_32_bits(hv_ghcb->ghcb.save.rdx) << 32);
	local_irq_restore(flags);
}

/* Only used in a fully enlightened SNP VM, i.e. without the paravisor */
static u8 ap_start_input_arg[PAGE_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
static u8 ap_start_stack[PAGE_SIZE] __aligned(PAGE_SIZE);
static DEFINE_PER_CPU(struct sev_es_save_area *, hv_sev_vmsa);

/* Functions only used in an SNP VM without the paravisor go here. */

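/*
 * Build a VMSA segment register from the live GDT: the two bytes at
 * descriptor offset 5 hold the access byte plus the limit/flags nibbles,
 * which are repacked into the 12-bit VMSA attribute format (type/S/DPL/P
 * in bits 0-7, AVL/L/DB/G in bits 8-11).
 */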
#define hv_populate_vmcb_seg(seg, gdtr_base)			\
do {								\
	if (seg.selector) {					\
		seg.base = 0;					\
		seg.limit = HV_AP_SEGMENT_LIMIT;		\
		seg.attrib = *(u16 *)(gdtr_base + seg.selector + 5);	\
		seg.attrib = (seg.attrib & 0xFF) | ((seg.attrib >> 4) & 0xF00); \
	}							\
} while (0)

static int snp_set_vmsa(void *va, bool vmsa)
{
	u64 attrs;

	/*
	 * Running at VMPL0 allows the kernel to change the VMSA bit for a page
	 * using the RMPADJUST instruction. However, for the instruction to
	 * succeed it must target the permissions of a lesser privileged
	 * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
	 * instruction in the AMD64 APM Volume 3).
	 */
	attrs = 1;
	if (vmsa)
		attrs |= RMPADJUST_VMSA_PAGE_BIT;

	return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
}

static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
{
	int err;

	err = snp_set_vmsa(vmsa, false);
	if (err)
		pr_err("clear VMSA page failed (%u), leaking page\n", err);
	else
		free_page((unsigned long)vmsa);
}

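/*
 * hv_snp_boot_ap - Boot an AP in a fully enlightened SNP VM.
 *
 * A fresh VMSA is populated from the boot CPU's current segment, control
 * register and MSR state, marked as a VMSA page via RMPADJUST, and handed
 * to Hyper-V through the HvCallStartVirtualProcessor hypercall (the VMSA
 * GPA, with its low bit set, is passed as the initial VP context). The AP
 * then enters the kernel at secondary_startup_64_no_verify on the shared
 * ap_start_stack.
 */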
int hv_snp_boot_ap(int cpu, unsigned long start_ip)
{
	struct sev_es_save_area *vmsa = (struct sev_es_save_area *)
		__get_free_page(GFP_KERNEL | __GFP_ZERO);
	struct sev_es_save_area *cur_vmsa;
	struct desc_ptr gdtr;
	u64 ret, retry = 5;
	struct hv_enable_vp_vtl *start_vp_input;
	unsigned long flags;

	if (!vmsa)
		return -ENOMEM;

	native_store_gdt(&gdtr);

	vmsa->gdtr.base = gdtr.address;
	vmsa->gdtr.limit = gdtr.size;

	asm volatile("movl %%es, %%eax;" : "=a" (vmsa->es.selector));
	hv_populate_vmcb_seg(vmsa->es, vmsa->gdtr.base);

	asm volatile("movl %%cs, %%eax;" : "=a" (vmsa->cs.selector));
	hv_populate_vmcb_seg(vmsa->cs, vmsa->gdtr.base);

	asm volatile("movl %%ss, %%eax;" : "=a" (vmsa->ss.selector));
	hv_populate_vmcb_seg(vmsa->ss, vmsa->gdtr.base);

	asm volatile("movl %%ds, %%eax;" : "=a" (vmsa->ds.selector));
	hv_populate_vmcb_seg(vmsa->ds, vmsa->gdtr.base);

	vmsa->efer = native_read_msr(MSR_EFER);

	asm volatile("movq %%cr4, %%rax;" : "=a" (vmsa->cr4));
	asm volatile("movq %%cr3, %%rax;" : "=a" (vmsa->cr3));
	asm volatile("movq %%cr0, %%rax;" : "=a" (vmsa->cr0));

	vmsa->xcr0 = 1;
	vmsa->g_pat = HV_AP_INIT_GPAT_DEFAULT;
	vmsa->rip = (u64)secondary_startup_64_no_verify;
	vmsa->rsp = (u64)&ap_start_stack[PAGE_SIZE];

	/*
	 * Set the SNP-specific fields for this VMSA:
	 *   VMPL level
	 *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
	 */
	vmsa->vmpl = 0;
	vmsa->sev_features = sev_status >> 2;

	/* rmpadjust() returns 0 on success, so a non-zero value is an error. */
	ret = snp_set_vmsa(vmsa, true);
	if (ret) {
		pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret);
		free_page((unsigned long)vmsa);
		return ret;
	}

	local_irq_save(flags);
	start_vp_input = (struct hv_enable_vp_vtl *)ap_start_input_arg;
	memset(start_vp_input, 0, sizeof(*start_vp_input));
	start_vp_input->partition_id = -1;
	start_vp_input->vp_index = cpu;
	start_vp_input->target_vtl.target_vtl = ms_hyperv.vtl;
	*(u64 *)&start_vp_input->vp_context = __pa(vmsa) | 1;

	do {
		ret = hv_do_hypercall(HVCALL_START_VP,
				      start_vp_input, NULL);
	} while (hv_result(ret) == HV_STATUS_TIME_OUT && retry--);

	local_irq_restore(flags);

	if (!hv_result_success(ret)) {
		pr_err("HvCallStartVirtualProcessor failed: %llx\n", ret);
		snp_cleanup_vmsa(vmsa);
		vmsa = NULL;
	}

	cur_vmsa = per_cpu(hv_sev_vmsa, cpu);
	/* Free up any previous VMSA page */
	if (cur_vmsa)
		snp_cleanup_vmsa(cur_vmsa);

	/* Record the current VMSA page */
	per_cpu(hv_sev_vmsa, cpu) = vmsa;

	return ret;
}

#else
static inline void hv_ghcb_msr_write(u64 msr, u64 value) {}
static inline void hv_ghcb_msr_read(u64 msr, u64 *value) {}
#endif /* CONFIG_AMD_MEM_ENCRYPT */

#ifdef CONFIG_INTEL_TDX_GUEST
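/*
 * MSR access emulation for a TDX guest: a standard TDVMCALL (r10 ==
 * TDX_HYPERCALL_STANDARD) with EXIT_REASON_MSR_READ/WRITE in r11 asks
 * the host to emulate the MSR access; for a read, the value comes back
 * in r11.
 */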
static void hv_tdx_msr_write(u64 msr, u64 val)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = EXIT_REASON_MSR_WRITE,
		.r12 = msr,
		.r13 = val,
	};

	u64 ret = __tdx_hypercall(&args);

	WARN_ONCE(ret, "Failed to emulate MSR write: %lld\n", ret);
}

static void hv_tdx_msr_read(u64 msr, u64 *val)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = EXIT_REASON_MSR_READ,
		.r12 = msr,
	};

	u64 ret = __tdx_hypercall_ret(&args);

	if (WARN_ONCE(ret, "Failed to emulate MSR read: %lld\n", ret))
		*val = 0;
	else
		*val = args.r11;
}

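/*
 * hv_tdx_hypercall - Issue a Hyper-V hypercall from a TDX guest.
 *
 * Unlike a standard TDVMCALL, the Hyper-V calling convention puts the
 * hypercall control word in r10 and the two parameters in rdx and r8;
 * Hyper-V returns its status in r11.
 */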
u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
{
	struct tdx_hypercall_args args = { };

	args.r10 = control;
	args.rdx = param1;
	args.r8  = param2;

	(void)__tdx_hypercall_ret(&args);

	return args.r11;
}

#else
static inline void hv_tdx_msr_write(u64 msr, u64 value) {}
static inline void hv_tdx_msr_read(u64 msr, u64 *value) {}
#endif /* CONFIG_INTEL_TDX_GUEST */

#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
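/*
 * Dispatchers for synthetic MSR access in an isolation VM. These are only
 * meaningful when a paravisor is present, in which case the access is
 * routed via TDVMCALL (TDX) or the GHCB page (SNP); otherwise they are
 * silent no-ops.
 */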
void hv_ivm_msr_write(u64 msr, u64 value)
{
	if (!ms_hyperv.paravisor_present)
		return;

	if (hv_isolation_type_tdx())
		hv_tdx_msr_write(msr, value);
	else if (hv_isolation_type_snp())
		hv_ghcb_msr_write(msr, value);
}

void hv_ivm_msr_read(u64 msr, u64 *value)
{
	if (!ms_hyperv.paravisor_present)
		return;

	if (hv_isolation_type_tdx())
		hv_tdx_msr_read(msr, value);
	else if (hv_isolation_type_snp())
		hv_ghcb_msr_read(msr, value);
}

/*
 * hv_mark_gpa_visibility - Set pages visible to the host via hvcall.
 *
 * In an Isolation VM, all guest memory is encrypted from the host, and the
 * guest must mark memory visible to the host via hvcall before sharing it.
 */
static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
			   enum hv_mem_host_visibility visibility)
{
	struct hv_gpa_range_for_visibility **input_pcpu, *input;
	u16 pages_processed;
	u64 hv_status;
	unsigned long flags;

	/* no-op if partition isolation is not enabled */
	if (!hv_is_isolation_supported())
		return 0;

	if (count > HV_MAX_MODIFY_GPA_REP_COUNT) {
		pr_err("Hyper-V: GPA count %d exceeds supported maximum %lu\n",
			count, HV_MAX_MODIFY_GPA_REP_COUNT);
		return -EINVAL;
	}

	local_irq_save(flags);
	input_pcpu = (struct hv_gpa_range_for_visibility **)
			this_cpu_ptr(hyperv_pcpu_input_arg);
	input = *input_pcpu;
	if (unlikely(!input)) {
		local_irq_restore(flags);
		return -EINVAL;
	}

	input->partition_id = HV_PARTITION_ID_SELF;
	input->host_visibility = visibility;
	input->reserved0 = 0;
	input->reserved1 = 0;
	memcpy((void *)input->gpa_page_list, pfn, count * sizeof(*pfn));
	hv_status = hv_do_rep_hypercall(
			HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY, count,
			0, input, &pages_processed);
	local_irq_restore(flags);

	if (hv_result_success(hv_status))
		return 0;
	else
		return -EFAULT;
}

/*
 * hv_vtom_set_host_visibility - Set specified memory visible to the host.
 *
 * In an Isolation VM, all guest memory is encrypted from the host, and the
 * guest must mark memory visible to the host via hvcall before sharing it.
 * This function is a wrapper around hv_mark_gpa_visibility() that takes a
 * memory base and page count.
 */
static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
{
	enum hv_mem_host_visibility visibility = enc ?
			VMBUS_PAGE_NOT_VISIBLE : VMBUS_PAGE_VISIBLE_READ_WRITE;
	u64 *pfn_array;
	int ret = 0;
	bool result = true;
	int i, pfn;

	pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
	if (!pfn_array)
		return false;

	for (i = 0, pfn = 0; i < pagecount; i++) {
		pfn_array[pfn] = virt_to_hvpfn((void *)kbuffer + i * HV_HYP_PAGE_SIZE);
		pfn++;

		/* Flush a full batch, or the final partial batch. */
		if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
			ret = hv_mark_gpa_visibility(pfn, pfn_array,
						     visibility);
			if (ret) {
				result = false;
				goto err_free_pfn_array;
			}
			pfn = 0;
		}
	}

 err_free_pfn_array:
	kfree(pfn_array);
	return result;
}

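/*
 * For a vTOM VM, flipping a page between private and shared changes the
 * PTE (the shared alias of the GPA sits above the vTOM boundary), so
 * stale TLB entries must always be flushed. A cache flush is not
 * required; Hyper-V is expected to keep the two aliases coherent.
 */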
static bool hv_vtom_tlb_flush_required(bool private)
{
	return true;
}

static bool hv_vtom_cache_flush_required(void)
{
	return false;
}

static bool hv_is_private_mmio(u64 addr)
{
	/*
	 * Hyper-V always provides a single IO-APIC in a guest VM.
	 * When a paravisor is used, it is emulated by the paravisor
	 * in the guest context and must be mapped private.
	 */
	if (addr >= HV_IOAPIC_BASE_ADDRESS &&
	    addr < (HV_IOAPIC_BASE_ADDRESS + PAGE_SIZE))
		return true;

	/* Same with a vTPM */
	if (addr >= VTPM_BASE_ADDRESS &&
	    addr < (VTPM_BASE_ADDRESS + PAGE_SIZE))
		return true;

	return false;
}

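/*
 * hv_vtom_init - Configure confidential-computing state for a vTOM VM.
 *
 * The shared GPA boundary reported by Hyper-V is installed as the CoCo
 * mask (the address bit that flips a mapping between private and shared),
 * and physical_mask is trimmed so the vTOM bit is not treated as part of
 * the physical address.
 */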
void __init hv_vtom_init(void)
{
	enum hv_isolation_type type = hv_get_isolation_type();

	switch (type) {
	case HV_ISOLATION_TYPE_VBS:
		fallthrough;
	/*
	 * By design, a VM using vTOM doesn't see the SEV setting,
	 * so SEV initialization is bypassed and sev_status isn't set.
	 * Set it here to indicate a vTOM VM.
	 *
	 * Note: if CONFIG_AMD_MEM_ENCRYPT is not set, sev_status is
	 * defined as 0ULL, to which we can't assign a value.
	 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
	case HV_ISOLATION_TYPE_SNP:
		sev_status = MSR_AMD64_SNP_VTOM;
		cc_vendor = CC_VENDOR_AMD;
		break;
#endif

	case HV_ISOLATION_TYPE_TDX:
		cc_vendor = CC_VENDOR_INTEL;
		break;

	default:
		panic("hv_vtom_init: unsupported isolation type %d\n", type);
	}

	cc_set_mask(ms_hyperv.shared_gpa_boundary);
	physical_mask &= ms_hyperv.shared_gpa_boundary - 1;

	x86_platform.hyper.is_private_mmio = hv_is_private_mmio;
	x86_platform.guest.enc_cache_flush_required = hv_vtom_cache_flush_required;
	x86_platform.guest.enc_tlb_flush_required = hv_vtom_tlb_flush_required;
	x86_platform.guest.enc_status_change_finish = hv_vtom_set_host_visibility;

	/* Set WB as the default cache mode. */
	mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK);
}

#endif /* defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST) */

enum hv_isolation_type hv_get_isolation_type(void)
{
	if (!(ms_hyperv.priv_high & HV_ISOLATION))
		return HV_ISOLATION_TYPE_NONE;
	return FIELD_GET(HV_ISOLATION_TYPE, ms_hyperv.isolation_config_b);
}
EXPORT_SYMBOL_GPL(hv_get_isolation_type);

/*
 * hv_is_isolation_supported - Check if the system runs in a Hyper-V
 * isolation VM.
 */
bool hv_is_isolation_supported(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
		return false;

	if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
		return false;

	return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE;
}

DEFINE_STATIC_KEY_FALSE(isolation_type_snp);

/*
 * hv_isolation_type_snp - Check if the system runs in an AMD SEV-SNP based
 * isolation VM.
 */
bool hv_isolation_type_snp(void)
{
	return static_branch_unlikely(&isolation_type_snp);
}

DEFINE_STATIC_KEY_FALSE(isolation_type_tdx);

/*
 * hv_isolation_type_tdx - Check if the system runs in an Intel TDX based
 * isolation VM.
 */
bool hv_isolation_type_tdx(void)
{
	return static_branch_unlikely(&isolation_type_tdx);
}