// SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V Isolation VM interface with paravisor and hypervisor
 *
 * Author:
 *  Tianyu Lan <Tianyu.Lan@microsoft.com>
 */

#include <linux/bitfield.h>
#include <linux/hyperv.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/svm.h>
#include <asm/sev.h>
#include <asm/io.h>
#include <asm/coco.h>
#include <asm/mem_encrypt.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
#include <asm/mtrr.h>
#include <asm/io_apic.h>
#include <asm/realmode.h>
#include <asm/e820/api.h>
#include <asm/desc.h>
#include <uapi/asm/vmx.h>

#ifdef CONFIG_AMD_MEM_ENCRYPT

#define GHCB_USAGE_HYPERV_CALL	1

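/*
 * When ghcb_usage is GHCB_USAGE_HYPERV_CALL, Hyper-V overlays its own
 * hypercall layout on the GHCB page: the hypercall input data, the output
 * GPA, and the hypercall input/output control words, as described by the
 * union below.
 */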
union hv_ghcb {
	struct ghcb ghcb;
	struct {
		u64 hypercalldata[509];
		u64 outputgpa;
		union {
			union {
				struct {
					u32 callcode	: 16;
					u32 isfast	: 1;
					u32 reserved1	: 14;
					u32 isnested	: 1;
					u32 countofelements : 12;
					u32 reserved2	: 4;
					u32 repstartindex : 12;
					u32 reserved3	: 4;
				};
				u64 asuint64;
			} hypercallinput;
			union {
				struct {
					u16 callstatus;
					u16 reserved1;
					u32 elementsprocessed : 12;
					u32 reserved2	: 20;
				};
				u64 asuint64;
			} hypercalloutput;
		};
		u64 reserved2;
	} hypercall;
} __packed __aligned(HV_HYP_PAGE_SIZE);

/* Only used in an SNP VM with the paravisor */
static u16 hv_ghcb_version __ro_after_init;

/* Functions only used in an SNP VM with the paravisor go here. */
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;
	u64 status;

	if (!hv_ghcb_pg)
		return -EFAULT;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return -EFAULT;
	}

	hv_ghcb->ghcb.protocol_version = GHCB_PROTOCOL_MAX;
	hv_ghcb->ghcb.ghcb_usage = GHCB_USAGE_HYPERV_CALL;

	hv_ghcb->hypercall.outputgpa = (u64)output;
	hv_ghcb->hypercall.hypercallinput.asuint64 = 0;
	hv_ghcb->hypercall.hypercallinput.callcode = control;

	if (input_size)
		memcpy(hv_ghcb->hypercall.hypercalldata, input, input_size);

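	/* Exit to the hypervisor; it consumes the hypercall from the GHCB. */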
	VMGEXIT();

	hv_ghcb->ghcb.ghcb_usage = 0xffffffff;
	memset(hv_ghcb->ghcb.save.valid_bitmap, 0,
	       sizeof(hv_ghcb->ghcb.save.valid_bitmap));

	status = hv_ghcb->hypercall.hypercalloutput.callstatus;

	local_irq_restore(flags);

	return status;
}

static inline u64 rd_ghcb_msr(void)
{
	return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
}

static inline void wr_ghcb_msr(u64 val)
{
	native_wrmsrl(MSR_AMD64_SEV_ES_GHCB, val);
}

static enum es_result hv_ghcb_hv_call(struct ghcb *ghcb, u64 exit_code,
				      u64 exit_info_1, u64 exit_info_2)
{
	/* Fill in protocol and format specifiers */
	ghcb->protocol_version = hv_ghcb_version;
	ghcb->ghcb_usage = GHCB_DEFAULT_USAGE;

	ghcb_set_sw_exit_code(ghcb, exit_code);
	ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
	ghcb_set_sw_exit_info_2(ghcb, exit_info_2);

	VMGEXIT();

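	/*
	 * Per the GHCB protocol, the hypervisor reports failure by setting
	 * the low 32 bits of sw_exit_info_1 to a nonzero value.
	 */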
	if (ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0))
		return ES_VMM_ERROR;
	else
		return ES_OK;
}

void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason)
{
	u64 val = GHCB_MSR_TERM_REQ;

	/* Tell the hypervisor what went wrong. */
	val |= GHCB_SEV_TERM_REASON(set, reason);

	/* Request guest termination from the hypervisor. */
	wr_ghcb_msr(val);
	VMGEXIT();

	while (true)
		asm volatile("hlt\n" : : : "memory");
}

bool hv_ghcb_negotiate_protocol(void)
{
	u64 ghcb_gpa;
	u64 val;

	/* Save the GHCB page GPA. */
	ghcb_gpa = rd_ghcb_msr();

	/* Do the GHCB protocol version negotiation */
	wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
	VMGEXIT();
	val = rd_ghcb_msr();

	if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
		return false;

	if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTOCOL_MIN ||
	    GHCB_MSR_PROTO_MIN(val) > GHCB_PROTOCOL_MAX)
		return false;

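	/* Use the highest version supported by both guest and hypervisor. */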
	hv_ghcb_version = min_t(size_t, GHCB_MSR_PROTO_MAX(val),
				GHCB_PROTOCOL_MAX);

	/* Write the GHCB page GPA back after negotiating the protocol. */
	wr_ghcb_msr(ghcb_gpa);
	VMGEXIT();

	return true;
}

static void hv_ghcb_msr_write(u64 msr, u64 value)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;

	if (!hv_ghcb_pg)
		return;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return;
	}

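	/*
	 * The MSR number goes in RCX and the value in RDX:RAX;
	 * exit_info_1 == 1 selects a WRMSR exit (0 would select RDMSR).
	 */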
	ghcb_set_rcx(&hv_ghcb->ghcb, msr);
	ghcb_set_rax(&hv_ghcb->ghcb, lower_32_bits(value));
	ghcb_set_rdx(&hv_ghcb->ghcb, upper_32_bits(value));

	if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 1, 0))
		pr_warn("Failed to write MSR %llx via GHCB.\n", msr);

	local_irq_restore(flags);
}

static void hv_ghcb_msr_read(u64 msr, u64 *value)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;

	/* union hv_ghcb must fit in exactly one Hyper-V page. */
	BUILD_BUG_ON(sizeof(union hv_ghcb) != HV_HYP_PAGE_SIZE);

	if (!hv_ghcb_pg)
		return;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return;
	}

	ghcb_set_rcx(&hv_ghcb->ghcb, msr);
	if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 0, 0))
		pr_warn("Failed to read MSR %llx via GHCB.\n", msr);
	else
		*value = (u64)lower_32_bits(hv_ghcb->ghcb.save.rax)
			| ((u64)lower_32_bits(hv_ghcb->ghcb.save.rdx) << 32);
	local_irq_restore(flags);
}

/* Only used in a fully enlightened SNP VM, i.e. without the paravisor */
static u8 ap_start_input_arg[PAGE_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
static u8 ap_start_stack[PAGE_SIZE] __aligned(PAGE_SIZE);
static DEFINE_PER_CPU(struct sev_es_save_area *, hv_sev_vmsa);

/* Functions only used in an SNP VM without the paravisor go here. */

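/*
 * Build a VMSA segment register from the running CPU's GDT entry: the base
 * is forced to 0 and the limit to HV_AP_SEGMENT_LIMIT, while the attribute
 * bytes (descriptor bytes 5-6) are repacked into the VMSA attrib format,
 * with the flags nibble moved from bits 12-15 down to bits 8-11.
 */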
#define hv_populate_vmcb_seg(seg, gdtr_base)			\
do {								\
	if (seg.selector) {					\
		seg.base = 0;					\
		seg.limit = HV_AP_SEGMENT_LIMIT;		\
		seg.attrib = *(u16 *)(gdtr_base + seg.selector + 5);	\
		seg.attrib = (seg.attrib & 0xFF) | ((seg.attrib >> 4) & 0xF00); \
	}							\
} while (0)

static int snp_set_vmsa(void *va, bool vmsa)
{
	u64 attrs;

	/*
	 * Running at VMPL0 allows the kernel to change the VMSA bit for a page
	 * using the RMPADJUST instruction. However, for the instruction to
	 * succeed it must target the permissions of a lesser privileged
	 * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
	 * instruction in the AMD64 APM Volume 3).
	 */
	attrs = 1;
	if (vmsa)
		attrs |= RMPADJUST_VMSA_PAGE_BIT;

	return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
}

static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
{
	int err;

	err = snp_set_vmsa(vmsa, false);
	if (err)
		pr_err("clear VMSA page failed (%u), leaking page\n", err);
	else
		free_page((unsigned long)vmsa);
}

int hv_snp_boot_ap(int cpu, unsigned long start_ip)
{
	struct sev_es_save_area *vmsa = (struct sev_es_save_area *)
		__get_free_page(GFP_KERNEL | __GFP_ZERO);
	struct sev_es_save_area *cur_vmsa;
	struct desc_ptr gdtr;
	u64 ret, retry = 5;
	struct hv_enable_vp_vtl *start_vp_input;
	unsigned long flags;

	if (!vmsa)
		return -ENOMEM;

	native_store_gdt(&gdtr);

	vmsa->gdtr.base = gdtr.address;
	vmsa->gdtr.limit = gdtr.size;

	asm volatile("movl %%es, %%eax;" : "=a" (vmsa->es.selector));
	hv_populate_vmcb_seg(vmsa->es, vmsa->gdtr.base);

	asm volatile("movl %%cs, %%eax;" : "=a" (vmsa->cs.selector));
	hv_populate_vmcb_seg(vmsa->cs, vmsa->gdtr.base);

	asm volatile("movl %%ss, %%eax;" : "=a" (vmsa->ss.selector));
	hv_populate_vmcb_seg(vmsa->ss, vmsa->gdtr.base);

	asm volatile("movl %%ds, %%eax;" : "=a" (vmsa->ds.selector));
	hv_populate_vmcb_seg(vmsa->ds, vmsa->gdtr.base);

	vmsa->efer = native_read_msr(MSR_EFER);

	asm volatile("movq %%cr4, %%rax;" : "=a" (vmsa->cr4));
	asm volatile("movq %%cr3, %%rax;" : "=a" (vmsa->cr3));
	asm volatile("movq %%cr0, %%rax;" : "=a" (vmsa->cr0));

	vmsa->xcr0 = 1;
	vmsa->g_pat = HV_AP_INIT_GPAT_DEFAULT;
	vmsa->rip = (u64)secondary_startup_64_no_verify;
	vmsa->rsp = (u64)&ap_start_stack[PAGE_SIZE];

	/*
	 * Set the SNP-specific fields for this VMSA:
	 *   VMPL level
	 *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
	 */
	vmsa->vmpl = 0;
	vmsa->sev_features = sev_status >> 2;

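	/* Mark the page as a VMSA; rmpadjust() returns 0 on success. */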
	ret = snp_set_vmsa(vmsa, true);
	if (ret) {
		pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret);
		free_page((u64)vmsa);
		return ret;
	}

	local_irq_save(flags);
	start_vp_input = (struct hv_enable_vp_vtl *)ap_start_input_arg;
	memset(start_vp_input, 0, sizeof(*start_vp_input));
	start_vp_input->partition_id = -1;
	start_vp_input->vp_index = cpu;
	start_vp_input->target_vtl.target_vtl = ms_hyperv.vtl;
	*(u64 *)&start_vp_input->vp_context = __pa(vmsa) | 1;

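	/* Retry a few times if Hyper-V reports a transient timeout. */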
	do {
		ret = hv_do_hypercall(HVCALL_START_VP,
				      start_vp_input, NULL);
	} while (hv_result(ret) == HV_STATUS_TIME_OUT && retry--);

	local_irq_restore(flags);

	if (!hv_result_success(ret)) {
		pr_err("HvCallStartVirtualProcessor failed: %llx\n", ret);
		snp_cleanup_vmsa(vmsa);
		vmsa = NULL;
	}

	cur_vmsa = per_cpu(hv_sev_vmsa, cpu);
	/* Free up any previous VMSA page */
	if (cur_vmsa)
		snp_cleanup_vmsa(cur_vmsa);

	/* Record the current VMSA page */
	per_cpu(hv_sev_vmsa, cpu) = vmsa;

	return ret;
}

#else
static inline void hv_ghcb_msr_write(u64 msr, u64 value) {}
static inline void hv_ghcb_msr_read(u64 msr, u64 *value) {}
#endif /* CONFIG_AMD_MEM_ENCRYPT */

#ifdef CONFIG_INTEL_TDX_GUEST
static void hv_tdx_msr_write(u64 msr, u64 val)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = EXIT_REASON_MSR_WRITE,
		.r12 = msr,
		.r13 = val,
	};

	u64 ret = __tdx_hypercall(&args);

	WARN_ONCE(ret, "Failed to emulate MSR write: %lld\n", ret);
}

static void hv_tdx_msr_read(u64 msr, u64 *val)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = EXIT_REASON_MSR_READ,
		.r12 = msr,
	};

	u64 ret = __tdx_hypercall_ret(&args);

	if (WARN_ONCE(ret, "Failed to emulate MSR read: %lld\n", ret))
		*val = 0;
	else
		*val = args.r11;
}

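/*
 * Hyper-V hypercall via TDX: the Hyper-V control word goes in r10 and the
 * input/output parameters in rdx and r8; Hyper-V returns its status in r11.
 */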
u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
{
	struct tdx_hypercall_args args = { };

	args.r10 = control;
	args.rdx = param1;
	args.r8 = param2;

	(void)__tdx_hypercall_ret(&args);

	return args.r11;
}

#else
static inline void hv_tdx_msr_write(u64 msr, u64 value) {}
static inline void hv_tdx_msr_read(u64 msr, u64 *value) {}
#endif /* CONFIG_INTEL_TDX_GUEST */

#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
void hv_ivm_msr_write(u64 msr, u64 value)
{
	if (!ms_hyperv.paravisor_present)
		return;

	if (hv_isolation_type_tdx())
		hv_tdx_msr_write(msr, value);
	else if (hv_isolation_type_snp())
		hv_ghcb_msr_write(msr, value);
}

void hv_ivm_msr_read(u64 msr, u64 *value)
{
	if (!ms_hyperv.paravisor_present)
		return;

	if (hv_isolation_type_tdx())
		hv_tdx_msr_read(msr, value);
	else if (hv_isolation_type_snp())
		hv_ghcb_msr_read(msr, value);
}

/*
 * hv_mark_gpa_visibility - Set pages visible to host via hvcall.
 *
 * In an Isolation VM, all guest memory is encrypted from the host, so the
 * guest has to make memory visible to the host via this hypercall before
 * sharing it.
 */
static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
				  enum hv_mem_host_visibility visibility)
{
	struct hv_gpa_range_for_visibility *input;
	u16 pages_processed;
	u64 hv_status;
	unsigned long flags;

	/* no-op if partition isolation is not enabled */
	if (!hv_is_isolation_supported())
		return 0;

	if (count > HV_MAX_MODIFY_GPA_REP_COUNT) {
		pr_err("Hyper-V: GPA count:%d exceeds supported:%lu\n", count,
		       HV_MAX_MODIFY_GPA_REP_COUNT);
		return -EINVAL;
	}

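	/* Use the pre-allocated per-CPU hypercall input page. */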
	local_irq_save(flags);
	input = *this_cpu_ptr(hyperv_pcpu_input_arg);

	if (unlikely(!input)) {
		local_irq_restore(flags);
		return -EINVAL;
	}

	input->partition_id = HV_PARTITION_ID_SELF;
	input->host_visibility = visibility;
	input->reserved0 = 0;
	input->reserved1 = 0;
	memcpy((void *)input->gpa_page_list, pfn, count * sizeof(*pfn));
	hv_status = hv_do_rep_hypercall(
			HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY, count,
			0, input, &pages_processed);
	local_irq_restore(flags);

	if (hv_result_success(hv_status))
		return 0;
	else
		return -EFAULT;
}


/*
 * hv_vtom_set_host_visibility - Set specified memory visible to host.
 *
 * In an Isolation VM, all guest memory is encrypted from the host, so the
 * guest has to make memory visible to the host via a hypercall before
 * sharing it. This function is a wrapper around hv_mark_gpa_visibility()
 * that takes a memory base address and page count.
 */
static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
{
	enum hv_mem_host_visibility visibility = enc ?
			VMBUS_PAGE_NOT_VISIBLE : VMBUS_PAGE_VISIBLE_READ_WRITE;
	u64 *pfn_array;
	int ret = 0;
	bool result = true;
	int i, pfn;

	pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
	if (!pfn_array)
		return false;

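	/* Batch the PFNs, flushing each full batch and the final partial one. */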
	for (i = 0, pfn = 0; i < pagecount; i++) {
		pfn_array[pfn] = virt_to_hvpfn((void *)kbuffer + i * HV_HYP_PAGE_SIZE);
		pfn++;

		if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
			ret = hv_mark_gpa_visibility(pfn, pfn_array,
						     visibility);
			if (ret) {
				result = false;
				goto err_free_pfn_array;
			}
			pfn = 0;
		}
	}

 err_free_pfn_array:
	kfree(pfn_array);
	return result;
}

static bool hv_vtom_tlb_flush_required(bool private)
{
	return true;
}

static bool hv_vtom_cache_flush_required(void)
{
	return false;
}

static bool hv_is_private_mmio(u64 addr)
{
	/*
	 * Hyper-V always provides a single IO-APIC in a guest VM.
	 * When a paravisor is used, it is emulated by the paravisor
	 * in the guest context and must be mapped private.
	 */
	if (addr >= HV_IOAPIC_BASE_ADDRESS &&
	    addr < (HV_IOAPIC_BASE_ADDRESS + PAGE_SIZE))
		return true;

	/* Same with a vTPM */
	if (addr >= VTPM_BASE_ADDRESS &&
	    addr < (VTPM_BASE_ADDRESS + PAGE_SIZE))
		return true;

	return false;
}

void __init hv_vtom_init(void)
{
	enum hv_isolation_type type = hv_get_isolation_type();

	switch (type) {
	case HV_ISOLATION_TYPE_VBS:
		fallthrough;
	/*
	 * By design, a VM using vTOM doesn't see the SEV setting,
	 * so SEV initialization is bypassed and sev_status isn't set.
	 * Set it here to indicate a vTOM VM.
	 *
	 * Note: if CONFIG_AMD_MEM_ENCRYPT is not set, sev_status is
	 * defined as 0ULL, to which we can't assign a value.
	 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
	case HV_ISOLATION_TYPE_SNP:
		sev_status = MSR_AMD64_SNP_VTOM;
		cc_vendor = CC_VENDOR_AMD;
		break;
#endif

	case HV_ISOLATION_TYPE_TDX:
		cc_vendor = CC_VENDOR_INTEL;
		break;

	default:
		panic("hv_vtom_init: unsupported isolation type %d\n", type);
	}

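	/*
	 * The shared GPA boundary (vTOM) bit distinguishes host-visible
	 * aliases from private memory: use it as the encryption mask and
	 * keep guest physical addresses below it.
	 */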
	cc_set_mask(ms_hyperv.shared_gpa_boundary);
	physical_mask &= ms_hyperv.shared_gpa_boundary - 1;

	x86_platform.hyper.is_private_mmio = hv_is_private_mmio;
	x86_platform.guest.enc_cache_flush_required = hv_vtom_cache_flush_required;
	x86_platform.guest.enc_tlb_flush_required = hv_vtom_tlb_flush_required;
	x86_platform.guest.enc_status_change_finish = hv_vtom_set_host_visibility;

	/* Set WB as the default cache mode. */
	mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK);
}

#endif /* defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST) */

enum hv_isolation_type hv_get_isolation_type(void)
{
	if (!(ms_hyperv.priv_high & HV_ISOLATION))
		return HV_ISOLATION_TYPE_NONE;
	return FIELD_GET(HV_ISOLATION_TYPE, ms_hyperv.isolation_config_b);
}
EXPORT_SYMBOL_GPL(hv_get_isolation_type);

/*
 * hv_is_isolation_supported - Check if the system runs in a Hyper-V
 * isolation VM.
 */
bool hv_is_isolation_supported(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
		return false;

	if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
		return false;

	return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE;
}

DEFINE_STATIC_KEY_FALSE(isolation_type_snp);

/*
 * hv_isolation_type_snp - Check if the system runs in an AMD SEV-SNP based
 * isolation VM.
 */
bool hv_isolation_type_snp(void)
{
	return static_branch_unlikely(&isolation_type_snp);
}

DEFINE_STATIC_KEY_FALSE(isolation_type_tdx);
/*
 * hv_isolation_type_tdx - Check if the system runs in an Intel TDX based
 * isolation VM.
 */
bool hv_isolation_type_tdx(void)
{
	return static_branch_unlikely(&isolation_type_tdx);
}
