// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 Intel Corporation. */

#include <asm/sgx.h>

#include "cpuid.h"
#include "kvm_cache_regs.h"
#include "nested.h"
#include "sgx.h"
#include "vmx.h"
#include "x86.h"

/* Enable/disable SGX virtualization; read-only module param (perms 0444). */
bool __read_mostly enable_sgx = 1;
module_param_named(sgx, enable_sgx, bool, 0444);

/* Initial value of guest's virtual SGX_LEPUBKEYHASHn MSRs */
static u64 sgx_pubkey_hash[4] __ro_after_init;

/*
 * ENCLS's memory operands use a fixed segment (DS) and a fixed
 * address size based on the mode.  Related prefixes are ignored.
 *
 * Computes the linear address for an ENCLS memory operand at @offset and
 * stores it in @gva.  Checks @alignment, canonicality (64-bit mode) and DS
 * segment validity/limits (32-bit mode).  On any failure a #GP(0) is
 * injected into the guest and -EINVAL is returned; returns 0 on success.
 */
static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset,
			     int size, int alignment, gva_t *gva)
{
	struct kvm_segment s;
	bool fault;

	/* Skip vmcs.GUEST_DS retrieval for 64-bit mode to avoid VMREADs. */
	*gva = offset;
	if (!is_long_mode(vcpu)) {
		vmx_get_segment(vcpu, &s, VCPU_SREG_DS);
		*gva += s.base;
	}

	if (!IS_ALIGNED(*gva, alignment)) {
		fault = true;
	} else if (likely(is_long_mode(vcpu))) {
		fault = is_noncanonical_address(*gva, vcpu);
	} else {
		/*
		 * Outside of 64-bit mode, truncate to 32 bits and require a
		 * usable, writable data segment (type 2 or 3).  The limit
		 * check is skipped only for a flat 4GiB segment (base 0,
		 * limit 0xffffffff).
		 */
		*gva &= 0xffffffff;
		fault = (s.unusable) ||
			(s.type != 2 && s.type != 3) ||
			(*gva > s.limit) ||
			((s.base != 0 || s.limit != 0xffffffff) &&
			(((u64)*gva + size - 1) > s.limit + 1));
	}
	if (fault)
		kvm_inject_gp(vcpu, 0);
	return fault ? -EINVAL : 0;
}

/*
 * Report an unhandleable guest access at @addr (@size bytes) to userspace
 * via an emulation-failure exit, attaching the address/size as exit data.
 */
static void sgx_handle_emulation_failure(struct kvm_vcpu *vcpu, u64 addr,
					 unsigned int size)
{
	uint64_t data[2] = { addr, size };

	__kvm_prepare_emulation_failure_exit(vcpu, data, ARRAY_SIZE(data));
}

/*
 * Copy @size bytes from host userspace address @hva into @data.  On failure
 * the exit to userspace is prepared and -EFAULT is returned; 0 on success.
 */
static int sgx_read_hva(struct kvm_vcpu *vcpu, unsigned long hva, void *data,
			unsigned int size)
{
	if (__copy_from_user(data, (void __user *)hva, size)) {
		sgx_handle_emulation_failure(vcpu, hva, size);
		return -EFAULT;
	}

	return 0;
}

/*
 * Translate guest virtual address @gva to a guest physical address in @gpa,
 * walking the guest page tables with read or write permissions per @write.
 * Injects the resulting #PF into the guest and returns -EFAULT if the
 * translation fails; returns 0 on success.
 */
static int sgx_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t gva, bool write,
			  gpa_t *gpa)
{
	struct x86_exception ex;

	if (write)
		*gpa = kvm_mmu_gva_to_gpa_write(vcpu, gva, &ex);
	else
		*gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, &ex);

	if (*gpa == UNMAPPED_GVA) {
		kvm_inject_emulated_page_fault(vcpu, &ex);
		return -EFAULT;
	}

	return 0;
}
Christopherson static int sgx_gpa_to_hva(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned long *hva) 9170210c04SSean Christopherson { 9270210c04SSean Christopherson *hva = kvm_vcpu_gfn_to_hva(vcpu, PFN_DOWN(gpa)); 9370210c04SSean Christopherson if (kvm_is_error_hva(*hva)) { 9470210c04SSean Christopherson sgx_handle_emulation_failure(vcpu, gpa, 1); 9570210c04SSean Christopherson return -EFAULT; 9670210c04SSean Christopherson } 9770210c04SSean Christopherson 9870210c04SSean Christopherson *hva |= gpa & ~PAGE_MASK; 9970210c04SSean Christopherson 10070210c04SSean Christopherson return 0; 10170210c04SSean Christopherson } 10270210c04SSean Christopherson 10370210c04SSean Christopherson static int sgx_inject_fault(struct kvm_vcpu *vcpu, gva_t gva, int trapnr) 10470210c04SSean Christopherson { 10570210c04SSean Christopherson struct x86_exception ex; 10670210c04SSean Christopherson 10770210c04SSean Christopherson /* 10870210c04SSean Christopherson * A non-EPCM #PF indicates a bad userspace HVA. This *should* check 10970210c04SSean Christopherson * for PFEC.SGX and not assume any #PF on SGX2 originated in the EPC, 11070210c04SSean Christopherson * but the error code isn't (yet) plumbed through the ENCLS helpers. 11170210c04SSean Christopherson */ 11270210c04SSean Christopherson if (trapnr == PF_VECTOR && !boot_cpu_has(X86_FEATURE_SGX2)) { 113*0d7d8449SDavid Edmondson kvm_prepare_emulation_failure_exit(vcpu); 11470210c04SSean Christopherson return 0; 11570210c04SSean Christopherson } 11670210c04SSean Christopherson 11770210c04SSean Christopherson /* 11870210c04SSean Christopherson * If the guest thinks it's running on SGX2 hardware, inject an SGX 11970210c04SSean Christopherson * #PF if the fault matches an EPCM fault signature (#GP on SGX1, 12070210c04SSean Christopherson * #PF on SGX2). The assumption is that EPCM faults are much more 12170210c04SSean Christopherson * likely than a bad userspace address. 
12270210c04SSean Christopherson */ 12370210c04SSean Christopherson if ((trapnr == PF_VECTOR || !boot_cpu_has(X86_FEATURE_SGX2)) && 12470210c04SSean Christopherson guest_cpuid_has(vcpu, X86_FEATURE_SGX2)) { 12570210c04SSean Christopherson memset(&ex, 0, sizeof(ex)); 12670210c04SSean Christopherson ex.vector = PF_VECTOR; 12770210c04SSean Christopherson ex.error_code = PFERR_PRESENT_MASK | PFERR_WRITE_MASK | 12870210c04SSean Christopherson PFERR_SGX_MASK; 12970210c04SSean Christopherson ex.address = gva; 13070210c04SSean Christopherson ex.error_code_valid = true; 13170210c04SSean Christopherson ex.nested_page_fault = false; 13270210c04SSean Christopherson kvm_inject_page_fault(vcpu, &ex); 13370210c04SSean Christopherson } else { 13470210c04SSean Christopherson kvm_inject_gp(vcpu, 0); 13570210c04SSean Christopherson } 13670210c04SSean Christopherson return 1; 13770210c04SSean Christopherson } 13870210c04SSean Christopherson 13970210c04SSean Christopherson static int __handle_encls_ecreate(struct kvm_vcpu *vcpu, 14070210c04SSean Christopherson struct sgx_pageinfo *pageinfo, 14170210c04SSean Christopherson unsigned long secs_hva, 14270210c04SSean Christopherson gva_t secs_gva) 14370210c04SSean Christopherson { 14470210c04SSean Christopherson struct sgx_secs *contents = (struct sgx_secs *)pageinfo->contents; 14570210c04SSean Christopherson struct kvm_cpuid_entry2 *sgx_12_0, *sgx_12_1; 14670210c04SSean Christopherson u64 attributes, xfrm, size; 14770210c04SSean Christopherson u32 miscselect; 14870210c04SSean Christopherson u8 max_size_log2; 14970210c04SSean Christopherson int trapnr, ret; 15070210c04SSean Christopherson 15170210c04SSean Christopherson sgx_12_0 = kvm_find_cpuid_entry(vcpu, 0x12, 0); 15270210c04SSean Christopherson sgx_12_1 = kvm_find_cpuid_entry(vcpu, 0x12, 1); 15370210c04SSean Christopherson if (!sgx_12_0 || !sgx_12_1) { 154*0d7d8449SDavid Edmondson kvm_prepare_emulation_failure_exit(vcpu); 15570210c04SSean Christopherson return 0; 15670210c04SSean 
Christopherson } 15770210c04SSean Christopherson 15870210c04SSean Christopherson miscselect = contents->miscselect; 15970210c04SSean Christopherson attributes = contents->attributes; 16070210c04SSean Christopherson xfrm = contents->xfrm; 16170210c04SSean Christopherson size = contents->size; 16270210c04SSean Christopherson 16370210c04SSean Christopherson /* Enforce restriction of access to the PROVISIONKEY. */ 16470210c04SSean Christopherson if (!vcpu->kvm->arch.sgx_provisioning_allowed && 16570210c04SSean Christopherson (attributes & SGX_ATTR_PROVISIONKEY)) { 16670210c04SSean Christopherson if (sgx_12_1->eax & SGX_ATTR_PROVISIONKEY) 16770210c04SSean Christopherson pr_warn_once("KVM: SGX PROVISIONKEY advertised but not allowed\n"); 16870210c04SSean Christopherson kvm_inject_gp(vcpu, 0); 16970210c04SSean Christopherson return 1; 17070210c04SSean Christopherson } 17170210c04SSean Christopherson 17270210c04SSean Christopherson /* Enforce CPUID restrictions on MISCSELECT, ATTRIBUTES and XFRM. */ 17370210c04SSean Christopherson if ((u32)miscselect & ~sgx_12_0->ebx || 17470210c04SSean Christopherson (u32)attributes & ~sgx_12_1->eax || 17570210c04SSean Christopherson (u32)(attributes >> 32) & ~sgx_12_1->ebx || 17670210c04SSean Christopherson (u32)xfrm & ~sgx_12_1->ecx || 17770210c04SSean Christopherson (u32)(xfrm >> 32) & ~sgx_12_1->edx) { 17870210c04SSean Christopherson kvm_inject_gp(vcpu, 0); 17970210c04SSean Christopherson return 1; 18070210c04SSean Christopherson } 18170210c04SSean Christopherson 18270210c04SSean Christopherson /* Enforce CPUID restriction on max enclave size. */ 18370210c04SSean Christopherson max_size_log2 = (attributes & SGX_ATTR_MODE64BIT) ? 
sgx_12_0->edx >> 8 : 18470210c04SSean Christopherson sgx_12_0->edx; 18570210c04SSean Christopherson if (size >= BIT_ULL(max_size_log2)) 18670210c04SSean Christopherson kvm_inject_gp(vcpu, 0); 18770210c04SSean Christopherson 18870210c04SSean Christopherson /* 18970210c04SSean Christopherson * sgx_virt_ecreate() returns: 19070210c04SSean Christopherson * 1) 0: ECREATE was successful 19170210c04SSean Christopherson * 2) -EFAULT: ECREATE was run but faulted, and trapnr was set to the 19270210c04SSean Christopherson * exception number. 19370210c04SSean Christopherson * 3) -EINVAL: access_ok() on @secs_hva failed. This should never 19470210c04SSean Christopherson * happen as KVM checks host addresses at memslot creation. 19570210c04SSean Christopherson * sgx_virt_ecreate() has already warned in this case. 19670210c04SSean Christopherson */ 19770210c04SSean Christopherson ret = sgx_virt_ecreate(pageinfo, (void __user *)secs_hva, &trapnr); 19870210c04SSean Christopherson if (!ret) 19970210c04SSean Christopherson return kvm_skip_emulated_instruction(vcpu); 20070210c04SSean Christopherson if (ret == -EFAULT) 20170210c04SSean Christopherson return sgx_inject_fault(vcpu, secs_gva, trapnr); 20270210c04SSean Christopherson 20370210c04SSean Christopherson return ret; 20470210c04SSean Christopherson } 20570210c04SSean Christopherson 20670210c04SSean Christopherson static int handle_encls_ecreate(struct kvm_vcpu *vcpu) 20770210c04SSean Christopherson { 20870210c04SSean Christopherson gva_t pageinfo_gva, secs_gva; 20970210c04SSean Christopherson gva_t metadata_gva, contents_gva; 21070210c04SSean Christopherson gpa_t metadata_gpa, contents_gpa, secs_gpa; 21170210c04SSean Christopherson unsigned long metadata_hva, contents_hva, secs_hva; 21270210c04SSean Christopherson struct sgx_pageinfo pageinfo; 21370210c04SSean Christopherson struct sgx_secs *contents; 21470210c04SSean Christopherson struct x86_exception ex; 21570210c04SSean Christopherson int r; 21670210c04SSean Christopherson 
21770210c04SSean Christopherson if (sgx_get_encls_gva(vcpu, kvm_rbx_read(vcpu), 32, 32, &pageinfo_gva) || 21870210c04SSean Christopherson sgx_get_encls_gva(vcpu, kvm_rcx_read(vcpu), 4096, 4096, &secs_gva)) 21970210c04SSean Christopherson return 1; 22070210c04SSean Christopherson 22170210c04SSean Christopherson /* 22270210c04SSean Christopherson * Copy the PAGEINFO to local memory, its pointers need to be 22370210c04SSean Christopherson * translated, i.e. we need to do a deep copy/translate. 22470210c04SSean Christopherson */ 22570210c04SSean Christopherson r = kvm_read_guest_virt(vcpu, pageinfo_gva, &pageinfo, 22670210c04SSean Christopherson sizeof(pageinfo), &ex); 22770210c04SSean Christopherson if (r == X86EMUL_PROPAGATE_FAULT) { 22870210c04SSean Christopherson kvm_inject_emulated_page_fault(vcpu, &ex); 22970210c04SSean Christopherson return 1; 23070210c04SSean Christopherson } else if (r != X86EMUL_CONTINUE) { 23170210c04SSean Christopherson sgx_handle_emulation_failure(vcpu, pageinfo_gva, 23270210c04SSean Christopherson sizeof(pageinfo)); 23370210c04SSean Christopherson return 0; 23470210c04SSean Christopherson } 23570210c04SSean Christopherson 23670210c04SSean Christopherson if (sgx_get_encls_gva(vcpu, pageinfo.metadata, 64, 64, &metadata_gva) || 23770210c04SSean Christopherson sgx_get_encls_gva(vcpu, pageinfo.contents, 4096, 4096, 23870210c04SSean Christopherson &contents_gva)) 23970210c04SSean Christopherson return 1; 24070210c04SSean Christopherson 24170210c04SSean Christopherson /* 24270210c04SSean Christopherson * Translate the SECINFO, SOURCE and SECS pointers from GVA to GPA. 24370210c04SSean Christopherson * Resume the guest on failure to inject a #PF. 
24470210c04SSean Christopherson */ 24570210c04SSean Christopherson if (sgx_gva_to_gpa(vcpu, metadata_gva, false, &metadata_gpa) || 24670210c04SSean Christopherson sgx_gva_to_gpa(vcpu, contents_gva, false, &contents_gpa) || 24770210c04SSean Christopherson sgx_gva_to_gpa(vcpu, secs_gva, true, &secs_gpa)) 24870210c04SSean Christopherson return 1; 24970210c04SSean Christopherson 25070210c04SSean Christopherson /* 25170210c04SSean Christopherson * ...and then to HVA. The order of accesses isn't architectural, i.e. 25270210c04SSean Christopherson * KVM doesn't have to fully process one address at a time. Exit to 25370210c04SSean Christopherson * userspace if a GPA is invalid. 25470210c04SSean Christopherson */ 25570210c04SSean Christopherson if (sgx_gpa_to_hva(vcpu, metadata_gpa, &metadata_hva) || 25670210c04SSean Christopherson sgx_gpa_to_hva(vcpu, contents_gpa, &contents_hva) || 25770210c04SSean Christopherson sgx_gpa_to_hva(vcpu, secs_gpa, &secs_hva)) 25870210c04SSean Christopherson return 0; 25970210c04SSean Christopherson 26070210c04SSean Christopherson /* 26170210c04SSean Christopherson * Copy contents into kernel memory to prevent TOCTOU attack. E.g. the 26270210c04SSean Christopherson * guest could do ECREATE w/ SECS.SGX_ATTR_PROVISIONKEY=0, and 26370210c04SSean Christopherson * simultaneously set SGX_ATTR_PROVISIONKEY to bypass the check to 26470210c04SSean Christopherson * enforce restriction of access to the PROVISIONKEY. 26570210c04SSean Christopherson */ 26670210c04SSean Christopherson contents = (struct sgx_secs *)__get_free_page(GFP_KERNEL_ACCOUNT); 26770210c04SSean Christopherson if (!contents) 26870210c04SSean Christopherson return -ENOMEM; 26970210c04SSean Christopherson 27070210c04SSean Christopherson /* Exit to userspace if copying from a host userspace address fails. 
*/ 27170210c04SSean Christopherson if (sgx_read_hva(vcpu, contents_hva, (void *)contents, PAGE_SIZE)) { 27270210c04SSean Christopherson free_page((unsigned long)contents); 27370210c04SSean Christopherson return 0; 27470210c04SSean Christopherson } 27570210c04SSean Christopherson 27670210c04SSean Christopherson pageinfo.metadata = metadata_hva; 27770210c04SSean Christopherson pageinfo.contents = (u64)contents; 27870210c04SSean Christopherson 27970210c04SSean Christopherson r = __handle_encls_ecreate(vcpu, &pageinfo, secs_hva, secs_gva); 28070210c04SSean Christopherson 28170210c04SSean Christopherson free_page((unsigned long)contents); 28270210c04SSean Christopherson 28370210c04SSean Christopherson return r; 28470210c04SSean Christopherson } 28570210c04SSean Christopherson 286b6f084caSSean Christopherson static int handle_encls_einit(struct kvm_vcpu *vcpu) 287b6f084caSSean Christopherson { 288b6f084caSSean Christopherson unsigned long sig_hva, secs_hva, token_hva, rflags; 289b6f084caSSean Christopherson struct vcpu_vmx *vmx = to_vmx(vcpu); 290b6f084caSSean Christopherson gva_t sig_gva, secs_gva, token_gva; 291b6f084caSSean Christopherson gpa_t sig_gpa, secs_gpa, token_gpa; 292b6f084caSSean Christopherson int ret, trapnr; 293b6f084caSSean Christopherson 294b6f084caSSean Christopherson if (sgx_get_encls_gva(vcpu, kvm_rbx_read(vcpu), 1808, 4096, &sig_gva) || 295b6f084caSSean Christopherson sgx_get_encls_gva(vcpu, kvm_rcx_read(vcpu), 4096, 4096, &secs_gva) || 296b6f084caSSean Christopherson sgx_get_encls_gva(vcpu, kvm_rdx_read(vcpu), 304, 512, &token_gva)) 297b6f084caSSean Christopherson return 1; 298b6f084caSSean Christopherson 299b6f084caSSean Christopherson /* 300b6f084caSSean Christopherson * Translate the SIGSTRUCT, SECS and TOKEN pointers from GVA to GPA. 301b6f084caSSean Christopherson * Resume the guest on failure to inject a #PF. 
302b6f084caSSean Christopherson */ 303b6f084caSSean Christopherson if (sgx_gva_to_gpa(vcpu, sig_gva, false, &sig_gpa) || 304b6f084caSSean Christopherson sgx_gva_to_gpa(vcpu, secs_gva, true, &secs_gpa) || 305b6f084caSSean Christopherson sgx_gva_to_gpa(vcpu, token_gva, false, &token_gpa)) 306b6f084caSSean Christopherson return 1; 307b6f084caSSean Christopherson 308b6f084caSSean Christopherson /* 309b6f084caSSean Christopherson * ...and then to HVA. The order of accesses isn't architectural, i.e. 310b6f084caSSean Christopherson * KVM doesn't have to fully process one address at a time. Exit to 311b6f084caSSean Christopherson * userspace if a GPA is invalid. Note, all structures are aligned and 312b6f084caSSean Christopherson * cannot split pages. 313b6f084caSSean Christopherson */ 314b6f084caSSean Christopherson if (sgx_gpa_to_hva(vcpu, sig_gpa, &sig_hva) || 315b6f084caSSean Christopherson sgx_gpa_to_hva(vcpu, secs_gpa, &secs_hva) || 316b6f084caSSean Christopherson sgx_gpa_to_hva(vcpu, token_gpa, &token_hva)) 317b6f084caSSean Christopherson return 0; 318b6f084caSSean Christopherson 319b6f084caSSean Christopherson ret = sgx_virt_einit((void __user *)sig_hva, (void __user *)token_hva, 320b6f084caSSean Christopherson (void __user *)secs_hva, 321b6f084caSSean Christopherson vmx->msr_ia32_sgxlepubkeyhash, &trapnr); 322b6f084caSSean Christopherson 323b6f084caSSean Christopherson if (ret == -EFAULT) 324b6f084caSSean Christopherson return sgx_inject_fault(vcpu, secs_gva, trapnr); 325b6f084caSSean Christopherson 326b6f084caSSean Christopherson /* 327b6f084caSSean Christopherson * sgx_virt_einit() returns -EINVAL when access_ok() fails on @sig_hva, 328b6f084caSSean Christopherson * @token_hva or @secs_hva. This should never happen as KVM checks host 329b6f084caSSean Christopherson * addresses at memslot creation. sgx_virt_einit() has already warned 330b6f084caSSean Christopherson * in this case, so just return. 
331b6f084caSSean Christopherson */ 332b6f084caSSean Christopherson if (ret < 0) 333b6f084caSSean Christopherson return ret; 334b6f084caSSean Christopherson 335b6f084caSSean Christopherson rflags = vmx_get_rflags(vcpu) & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | 336b6f084caSSean Christopherson X86_EFLAGS_AF | X86_EFLAGS_SF | 337b6f084caSSean Christopherson X86_EFLAGS_OF); 338b6f084caSSean Christopherson if (ret) 339b6f084caSSean Christopherson rflags |= X86_EFLAGS_ZF; 340b6f084caSSean Christopherson else 341b6f084caSSean Christopherson rflags &= ~X86_EFLAGS_ZF; 342b6f084caSSean Christopherson vmx_set_rflags(vcpu, rflags); 343b6f084caSSean Christopherson 344b6f084caSSean Christopherson kvm_rax_write(vcpu, ret); 345b6f084caSSean Christopherson return kvm_skip_emulated_instruction(vcpu); 346b6f084caSSean Christopherson } 347b6f084caSSean Christopherson 3489798adbcSSean Christopherson static inline bool encls_leaf_enabled_in_guest(struct kvm_vcpu *vcpu, u32 leaf) 3499798adbcSSean Christopherson { 3509798adbcSSean Christopherson if (!enable_sgx || !guest_cpuid_has(vcpu, X86_FEATURE_SGX)) 3519798adbcSSean Christopherson return false; 3529798adbcSSean Christopherson 3539798adbcSSean Christopherson if (leaf >= ECREATE && leaf <= ETRACK) 3549798adbcSSean Christopherson return guest_cpuid_has(vcpu, X86_FEATURE_SGX1); 3559798adbcSSean Christopherson 3569798adbcSSean Christopherson if (leaf >= EAUG && leaf <= EMODT) 3579798adbcSSean Christopherson return guest_cpuid_has(vcpu, X86_FEATURE_SGX2); 3589798adbcSSean Christopherson 3599798adbcSSean Christopherson return false; 3609798adbcSSean Christopherson } 3619798adbcSSean Christopherson 3629798adbcSSean Christopherson static inline bool sgx_enabled_in_guest_bios(struct kvm_vcpu *vcpu) 3639798adbcSSean Christopherson { 3649798adbcSSean Christopherson const u64 bits = FEAT_CTL_SGX_ENABLED | FEAT_CTL_LOCKED; 3659798adbcSSean Christopherson 3669798adbcSSean Christopherson return (to_vmx(vcpu)->msr_ia32_feature_control & bits) == bits; 
3679798adbcSSean Christopherson } 3689798adbcSSean Christopherson 3699798adbcSSean Christopherson int handle_encls(struct kvm_vcpu *vcpu) 3709798adbcSSean Christopherson { 3719798adbcSSean Christopherson u32 leaf = (u32)kvm_rax_read(vcpu); 3729798adbcSSean Christopherson 3739798adbcSSean Christopherson if (!encls_leaf_enabled_in_guest(vcpu, leaf)) { 3749798adbcSSean Christopherson kvm_queue_exception(vcpu, UD_VECTOR); 3759798adbcSSean Christopherson } else if (!sgx_enabled_in_guest_bios(vcpu)) { 3769798adbcSSean Christopherson kvm_inject_gp(vcpu, 0); 3779798adbcSSean Christopherson } else { 37870210c04SSean Christopherson if (leaf == ECREATE) 37970210c04SSean Christopherson return handle_encls_ecreate(vcpu); 380b6f084caSSean Christopherson if (leaf == EINIT) 381b6f084caSSean Christopherson return handle_encls_einit(vcpu); 3829798adbcSSean Christopherson WARN(1, "KVM: unexpected exit on ENCLS[%u]", leaf); 3839798adbcSSean Christopherson vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; 3849798adbcSSean Christopherson vcpu->run->hw.hardware_exit_reason = EXIT_REASON_ENCLS; 3859798adbcSSean Christopherson return 0; 3869798adbcSSean Christopherson } 3879798adbcSSean Christopherson return 1; 3889798adbcSSean Christopherson } 3898f102445SSean Christopherson 3908f102445SSean Christopherson void setup_default_sgx_lepubkeyhash(void) 3918f102445SSean Christopherson { 3928f102445SSean Christopherson /* 3938f102445SSean Christopherson * Use Intel's default value for Skylake hardware if Launch Control is 3948f102445SSean Christopherson * not supported, i.e. Intel's hash is hardcoded into silicon, or if 3958f102445SSean Christopherson * Launch Control is supported and enabled, i.e. mimic the reset value 3968f102445SSean Christopherson * and let the guest write the MSRs at will. 
If Launch Control is 3978f102445SSean Christopherson * supported but disabled, then use the current MSR values as the hash 3988f102445SSean Christopherson * MSRs exist but are read-only (locked and not writable). 3998f102445SSean Christopherson */ 4008f102445SSean Christopherson if (!enable_sgx || boot_cpu_has(X86_FEATURE_SGX_LC) || 4018f102445SSean Christopherson rdmsrl_safe(MSR_IA32_SGXLEPUBKEYHASH0, &sgx_pubkey_hash[0])) { 4028f102445SSean Christopherson sgx_pubkey_hash[0] = 0xa6053e051270b7acULL; 4038f102445SSean Christopherson sgx_pubkey_hash[1] = 0x6cfbe8ba8b3b413dULL; 4048f102445SSean Christopherson sgx_pubkey_hash[2] = 0xc4916d99f2b3735dULL; 4058f102445SSean Christopherson sgx_pubkey_hash[3] = 0xd4f8c05909f9bb3bULL; 4068f102445SSean Christopherson } else { 4078f102445SSean Christopherson /* MSR_IA32_SGXLEPUBKEYHASH0 is read above */ 4088f102445SSean Christopherson rdmsrl(MSR_IA32_SGXLEPUBKEYHASH1, sgx_pubkey_hash[1]); 4098f102445SSean Christopherson rdmsrl(MSR_IA32_SGXLEPUBKEYHASH2, sgx_pubkey_hash[2]); 4108f102445SSean Christopherson rdmsrl(MSR_IA32_SGXLEPUBKEYHASH3, sgx_pubkey_hash[3]); 4118f102445SSean Christopherson } 4128f102445SSean Christopherson } 4138f102445SSean Christopherson 4148f102445SSean Christopherson void vcpu_setup_sgx_lepubkeyhash(struct kvm_vcpu *vcpu) 4158f102445SSean Christopherson { 4168f102445SSean Christopherson struct vcpu_vmx *vmx = to_vmx(vcpu); 4178f102445SSean Christopherson 4188f102445SSean Christopherson memcpy(vmx->msr_ia32_sgxlepubkeyhash, sgx_pubkey_hash, 4198f102445SSean Christopherson sizeof(sgx_pubkey_hash)); 4208f102445SSean Christopherson } 42172add915SSean Christopherson 42272add915SSean Christopherson /* 42372add915SSean Christopherson * ECREATE must be intercepted to enforce MISCSELECT, ATTRIBUTES and XFRM 42472add915SSean Christopherson * restrictions if the guest's allowed-1 settings diverge from hardware. 
42572add915SSean Christopherson */ 42672add915SSean Christopherson static bool sgx_intercept_encls_ecreate(struct kvm_vcpu *vcpu) 42772add915SSean Christopherson { 42872add915SSean Christopherson struct kvm_cpuid_entry2 *guest_cpuid; 42972add915SSean Christopherson u32 eax, ebx, ecx, edx; 43072add915SSean Christopherson 43172add915SSean Christopherson if (!vcpu->kvm->arch.sgx_provisioning_allowed) 43272add915SSean Christopherson return true; 43372add915SSean Christopherson 43472add915SSean Christopherson guest_cpuid = kvm_find_cpuid_entry(vcpu, 0x12, 0); 43572add915SSean Christopherson if (!guest_cpuid) 43672add915SSean Christopherson return true; 43772add915SSean Christopherson 43872add915SSean Christopherson cpuid_count(0x12, 0, &eax, &ebx, &ecx, &edx); 43972add915SSean Christopherson if (guest_cpuid->ebx != ebx || guest_cpuid->edx != edx) 44072add915SSean Christopherson return true; 44172add915SSean Christopherson 44272add915SSean Christopherson guest_cpuid = kvm_find_cpuid_entry(vcpu, 0x12, 1); 44372add915SSean Christopherson if (!guest_cpuid) 44472add915SSean Christopherson return true; 44572add915SSean Christopherson 44672add915SSean Christopherson cpuid_count(0x12, 1, &eax, &ebx, &ecx, &edx); 44772add915SSean Christopherson if (guest_cpuid->eax != eax || guest_cpuid->ebx != ebx || 44872add915SSean Christopherson guest_cpuid->ecx != ecx || guest_cpuid->edx != edx) 44972add915SSean Christopherson return true; 45072add915SSean Christopherson 45172add915SSean Christopherson return false; 45272add915SSean Christopherson } 45372add915SSean Christopherson 45472add915SSean Christopherson void vmx_write_encls_bitmap(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 45572add915SSean Christopherson { 45672add915SSean Christopherson /* 45772add915SSean Christopherson * There is no software enable bit for SGX that is virtualized by 45872add915SSean Christopherson * hardware, e.g. 
there's no CR4.SGXE, so when SGX is disabled in the 45972add915SSean Christopherson * guest (either by the host or by the guest's BIOS) but enabled in the 46072add915SSean Christopherson * host, trap all ENCLS leafs and inject #UD/#GP as needed to emulate 46172add915SSean Christopherson * the expected system behavior for ENCLS. 46272add915SSean Christopherson */ 46372add915SSean Christopherson u64 bitmap = -1ull; 46472add915SSean Christopherson 46572add915SSean Christopherson /* Nothing to do if hardware doesn't support SGX */ 46672add915SSean Christopherson if (!cpu_has_vmx_encls_vmexit()) 46772add915SSean Christopherson return; 46872add915SSean Christopherson 46972add915SSean Christopherson if (guest_cpuid_has(vcpu, X86_FEATURE_SGX) && 47072add915SSean Christopherson sgx_enabled_in_guest_bios(vcpu)) { 47172add915SSean Christopherson if (guest_cpuid_has(vcpu, X86_FEATURE_SGX1)) { 47272add915SSean Christopherson bitmap &= ~GENMASK_ULL(ETRACK, ECREATE); 47372add915SSean Christopherson if (sgx_intercept_encls_ecreate(vcpu)) 47472add915SSean Christopherson bitmap |= (1 << ECREATE); 47572add915SSean Christopherson } 47672add915SSean Christopherson 47772add915SSean Christopherson if (guest_cpuid_has(vcpu, X86_FEATURE_SGX2)) 47872add915SSean Christopherson bitmap &= ~GENMASK_ULL(EMODT, EAUG); 47972add915SSean Christopherson 48072add915SSean Christopherson /* 48172add915SSean Christopherson * Trap and execute EINIT if launch control is enabled in the 48272add915SSean Christopherson * host using the guest's values for launch control MSRs, even 48372add915SSean Christopherson * if the guest's values are fixed to hardware default values. 48472add915SSean Christopherson * The MSRs are not loaded/saved on VM-Enter/VM-Exit as writing 48572add915SSean Christopherson * the MSRs is extraordinarily expensive. 
48672add915SSean Christopherson */ 48772add915SSean Christopherson if (boot_cpu_has(X86_FEATURE_SGX_LC)) 48872add915SSean Christopherson bitmap |= (1 << EINIT); 48972add915SSean Christopherson 49072add915SSean Christopherson if (!vmcs12 && is_guest_mode(vcpu)) 49172add915SSean Christopherson vmcs12 = get_vmcs12(vcpu); 49272add915SSean Christopherson if (vmcs12 && nested_cpu_has_encls_exit(vmcs12)) 49372add915SSean Christopherson bitmap |= vmcs12->encls_exiting_bitmap; 49472add915SSean Christopherson } 49572add915SSean Christopherson vmcs_write64(ENCLS_EXITING_BITMAP, bitmap); 49672add915SSean Christopherson } 497