// SPDX-License-Identifier: GPL-2.0
/*  Copyright(c) 2021 Intel Corporation. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/sgx.h>

#include "cpuid.h"
#include "kvm_cache_regs.h"
#include "nested.h"
#include "sgx.h"
#include "vmx.h"
#include "x86.h"

bool __read_mostly enable_sgx = 1;
module_param_named(sgx, enable_sgx, bool, 0444);

/* Initial value of guest's virtual SGX_LEPUBKEYHASHn MSRs */
static u64 sgx_pubkey_hash[4] __ro_after_init;

/*
 * ENCLS's memory operands use a fixed segment (DS) and a fixed
 * address size based on the mode.  Related prefixes are ignored.
 */
static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset,
			     int size, int alignment, gva_t *gva)
{
	struct kvm_segment s;
	bool fault;

	/* Skip vmcs.GUEST_DS retrieval for 64-bit mode to avoid VMREADs. */
	*gva = offset;
	if (!is_long_mode(vcpu)) {
		vmx_get_segment(vcpu, &s, VCPU_SREG_DS);
		*gva += s.base;
	}

	if (!IS_ALIGNED(*gva, alignment)) {
		fault = true;
	} else if (likely(is_long_mode(vcpu))) {
		fault = is_noncanonical_address(*gva, vcpu);
	} else {
		*gva &= 0xffffffff;
		fault = (s.unusable) ||
			(s.type != 2 && s.type != 3) ||
			(*gva > s.limit) ||
			((s.base != 0 || s.limit != 0xffffffff) &&
			(((u64)*gva + size - 1) > s.limit + 1));
	}
	if (fault)
		kvm_inject_gp(vcpu, 0);
	return fault ? -EINVAL : 0;
}

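/*
 * Exit to userspace with an emulation error, passing the faulting address
 * and the size of the access as auxiliary data.
 */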
static void sgx_handle_emulation_failure(struct kvm_vcpu *vcpu, u64 addr,
					 unsigned int size)
{
	uint64_t data[2] = { addr, size };

	__kvm_prepare_emulation_failure_exit(vcpu, data, ARRAY_SIZE(data));
}

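/*
 * Read @size bytes at the host userspace address @hva into @data, exiting
 * to userspace on failure.
 */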
static int sgx_read_hva(struct kvm_vcpu *vcpu, unsigned long hva, void *data,
			unsigned int size)
{
	if (__copy_from_user(data, (void __user *)hva, size)) {
		sgx_handle_emulation_failure(vcpu, hva, size);
		return -EFAULT;
	}

	return 0;
}

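/*
 * Translate a guest virtual address to a guest physical address, injecting
 * an emulated #PF into the guest on failure.
 */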
static int sgx_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t gva, bool write,
			  gpa_t *gpa)
{
	struct x86_exception ex;

	if (write)
		*gpa = kvm_mmu_gva_to_gpa_write(vcpu, gva, &ex);
	else
		*gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, &ex);

	if (*gpa == INVALID_GPA) {
		kvm_inject_emulated_page_fault(vcpu, &ex);
		return -EFAULT;
	}

	return 0;
}

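/*
 * Translate a guest physical address to a host virtual address, preserving
 * the offset within the page.  Exit to userspace if the GPA is invalid.
 */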
static int sgx_gpa_to_hva(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned long *hva)
{
	*hva = kvm_vcpu_gfn_to_hva(vcpu, PFN_DOWN(gpa));
	if (kvm_is_error_hva(*hva)) {
		sgx_handle_emulation_failure(vcpu, gpa, 1);
		return -EFAULT;
	}

	*hva |= gpa & ~PAGE_MASK;

	return 0;
}

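/*
 * Convert a fault encountered while running ENCLS on the guest's behalf
 * into an injected #GP or SGX #PF, or into an exit to userspace when the
 * fault indicates a bad host userspace address.
 */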
static int sgx_inject_fault(struct kvm_vcpu *vcpu, gva_t gva, int trapnr)
{
	struct x86_exception ex;

	/*
	 * A non-EPCM #PF indicates a bad userspace HVA.  This *should* check
	 * for PFEC.SGX and not assume any #PF on SGX2 originated in the EPC,
	 * but the error code isn't (yet) plumbed through the ENCLS helpers.
	 */
	if (trapnr == PF_VECTOR && !boot_cpu_has(X86_FEATURE_SGX2)) {
		kvm_prepare_emulation_failure_exit(vcpu);
		return 0;
	}

	/*
	 * If the guest thinks it's running on SGX2 hardware, inject an SGX
	 * #PF if the fault matches an EPCM fault signature (#GP on SGX1,
	 * #PF on SGX2).  The assumption is that EPCM faults are much more
	 * likely than a bad userspace address.
	 */
	if ((trapnr == PF_VECTOR || !boot_cpu_has(X86_FEATURE_SGX2)) &&
	    guest_cpuid_has(vcpu, X86_FEATURE_SGX2)) {
		memset(&ex, 0, sizeof(ex));
		ex.vector = PF_VECTOR;
		ex.error_code = PFERR_PRESENT_MASK | PFERR_WRITE_MASK |
				PFERR_SGX_MASK;
		ex.address = gva;
		ex.error_code_valid = true;
		ex.nested_page_fault = false;
		kvm_inject_emulated_page_fault(vcpu, &ex);
	} else {
		kvm_inject_gp(vcpu, 0);
	}
	return 1;
}

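/*
 * Run ECREATE against a kernel-local copy of the SECS, enforcing the
 * guest's CPUID-advertised MISCSELECT, ATTRIBUTES, XFRM and max enclave
 * size restrictions beforehand.
 */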
static int __handle_encls_ecreate(struct kvm_vcpu *vcpu,
				  struct sgx_pageinfo *pageinfo,
				  unsigned long secs_hva,
				  gva_t secs_gva)
{
	struct sgx_secs *contents = (struct sgx_secs *)pageinfo->contents;
	struct kvm_cpuid_entry2 *sgx_12_0, *sgx_12_1;
	u64 attributes, xfrm, size;
	u32 miscselect;
	u8 max_size_log2;
	int trapnr, ret;

	sgx_12_0 = kvm_find_cpuid_entry_index(vcpu, 0x12, 0);
	sgx_12_1 = kvm_find_cpuid_entry_index(vcpu, 0x12, 1);
	if (!sgx_12_0 || !sgx_12_1) {
		kvm_prepare_emulation_failure_exit(vcpu);
		return 0;
	}

	miscselect = contents->miscselect;
	attributes = contents->attributes;
	xfrm = contents->xfrm;
	size = contents->size;

	/* Enforce restriction of access to the PROVISIONKEY. */
	if (!vcpu->kvm->arch.sgx_provisioning_allowed &&
	    (attributes & SGX_ATTR_PROVISIONKEY)) {
		if (sgx_12_1->eax & SGX_ATTR_PROVISIONKEY)
			pr_warn_once("SGX PROVISIONKEY advertised but not allowed\n");
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	/* Enforce CPUID restrictions on MISCSELECT, ATTRIBUTES and XFRM. */
	if ((u32)miscselect & ~sgx_12_0->ebx ||
	    (u32)attributes & ~sgx_12_1->eax ||
	    (u32)(attributes >> 32) & ~sgx_12_1->ebx ||
	    (u32)xfrm & ~sgx_12_1->ecx ||
	    (u32)(xfrm >> 32) & ~sgx_12_1->edx) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	/* Enforce CPUID restriction on max enclave size. */
	max_size_log2 = (attributes & SGX_ATTR_MODE64BIT) ? sgx_12_0->edx >> 8 :
							    sgx_12_0->edx;
	if (size >= BIT_ULL(max_size_log2)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	/*
	 * sgx_virt_ecreate() returns:
	 *  1) 0:	ECREATE was successful
	 *  2) -EFAULT:	ECREATE was run but faulted, and trapnr was set to the
	 *		exception number.
	 *  3) -EINVAL:	access_ok() on @secs_hva failed. This should never
	 *		happen as KVM checks host addresses at memslot creation.
	 *		sgx_virt_ecreate() has already warned in this case.
	 */
	ret = sgx_virt_ecreate(pageinfo, (void __user *)secs_hva, &trapnr);
	if (!ret)
		return kvm_skip_emulated_instruction(vcpu);
	if (ret == -EFAULT)
		return sgx_inject_fault(vcpu, secs_gva, trapnr);

	return ret;
}

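/*
 * Emulate an ECREATE VM-exit: translate and validate the operand pointers,
 * snapshot the SECS contents into kernel memory, then defer to
 * __handle_encls_ecreate().
 */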
static int handle_encls_ecreate(struct kvm_vcpu *vcpu)
{
	gva_t pageinfo_gva, secs_gva;
	gva_t metadata_gva, contents_gva;
	gpa_t metadata_gpa, contents_gpa, secs_gpa;
	unsigned long metadata_hva, contents_hva, secs_hva;
	struct sgx_pageinfo pageinfo;
	struct sgx_secs *contents;
	struct x86_exception ex;
	int r;

	if (sgx_get_encls_gva(vcpu, kvm_rbx_read(vcpu), 32, 32, &pageinfo_gva) ||
	    sgx_get_encls_gva(vcpu, kvm_rcx_read(vcpu), 4096, 4096, &secs_gva))
		return 1;

	/*
	 * Copy the PAGEINFO to local memory; its pointers need to be
	 * translated, i.e. we need to do a deep copy/translate.
	 */
	r = kvm_read_guest_virt(vcpu, pageinfo_gva, &pageinfo,
				sizeof(pageinfo), &ex);
	if (r == X86EMUL_PROPAGATE_FAULT) {
		kvm_inject_emulated_page_fault(vcpu, &ex);
		return 1;
	} else if (r != X86EMUL_CONTINUE) {
		sgx_handle_emulation_failure(vcpu, pageinfo_gva,
					     sizeof(pageinfo));
		return 0;
	}

	if (sgx_get_encls_gva(vcpu, pageinfo.metadata, 64, 64, &metadata_gva) ||
	    sgx_get_encls_gva(vcpu, pageinfo.contents, 4096, 4096,
			      &contents_gva))
		return 1;

	/*
	 * Translate the SECINFO, SOURCE and SECS pointers from GVA to GPA.
	 * Resume the guest on failure to inject a #PF.
	 */
	if (sgx_gva_to_gpa(vcpu, metadata_gva, false, &metadata_gpa) ||
	    sgx_gva_to_gpa(vcpu, contents_gva, false, &contents_gpa) ||
	    sgx_gva_to_gpa(vcpu, secs_gva, true, &secs_gpa))
		return 1;

	/*
	 * ...and then to HVA.  The order of accesses isn't architectural, i.e.
	 * KVM doesn't have to fully process one address at a time.  Exit to
	 * userspace if a GPA is invalid.
	 */
	if (sgx_gpa_to_hva(vcpu, metadata_gpa, &metadata_hva) ||
	    sgx_gpa_to_hva(vcpu, contents_gpa, &contents_hva) ||
	    sgx_gpa_to_hva(vcpu, secs_gpa, &secs_hva))
		return 0;

	/*
	 * Copy contents into kernel memory to prevent TOCTOU attack. E.g. the
	 * guest could do ECREATE w/ SECS.SGX_ATTR_PROVISIONKEY=0, and
	 * simultaneously set SGX_ATTR_PROVISIONKEY to bypass the check to
	 * enforce restriction of access to the PROVISIONKEY.
	 */
	contents = (struct sgx_secs *)__get_free_page(GFP_KERNEL_ACCOUNT);
	if (!contents)
		return -ENOMEM;

	/* Exit to userspace if copying from a host userspace address fails. */
	if (sgx_read_hva(vcpu, contents_hva, (void *)contents, PAGE_SIZE)) {
		free_page((unsigned long)contents);
		return 0;
	}

	pageinfo.metadata = metadata_hva;
	pageinfo.contents = (u64)contents;

	r = __handle_encls_ecreate(vcpu, &pageinfo, secs_hva, secs_gva);

	free_page((unsigned long)contents);

	return r;
}

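/*
 * Emulate an EINIT VM-exit: translate and validate the SIGSTRUCT, SECS and
 * EINITTOKEN pointers, run EINIT with the guest's virtual
 * SGX_LEPUBKEYHASHn MSR values, and propagate the result via RAX and
 * RFLAGS.ZF.
 */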
static int handle_encls_einit(struct kvm_vcpu *vcpu)
{
	unsigned long sig_hva, secs_hva, token_hva, rflags;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	gva_t sig_gva, secs_gva, token_gva;
	gpa_t sig_gpa, secs_gpa, token_gpa;
	int ret, trapnr;

	if (sgx_get_encls_gva(vcpu, kvm_rbx_read(vcpu), 1808, 4096, &sig_gva) ||
	    sgx_get_encls_gva(vcpu, kvm_rcx_read(vcpu), 4096, 4096, &secs_gva) ||
	    sgx_get_encls_gva(vcpu, kvm_rdx_read(vcpu), 304, 512, &token_gva))
		return 1;

	/*
	 * Translate the SIGSTRUCT, SECS and TOKEN pointers from GVA to GPA.
	 * Resume the guest on failure to inject a #PF.
	 */
	if (sgx_gva_to_gpa(vcpu, sig_gva, false, &sig_gpa) ||
	    sgx_gva_to_gpa(vcpu, secs_gva, true, &secs_gpa) ||
	    sgx_gva_to_gpa(vcpu, token_gva, false, &token_gpa))
		return 1;

	/*
	 * ...and then to HVA.  The order of accesses isn't architectural, i.e.
	 * KVM doesn't have to fully process one address at a time.  Exit to
	 * userspace if a GPA is invalid.  Note, all structures are aligned and
	 * cannot split pages.
	 */
	if (sgx_gpa_to_hva(vcpu, sig_gpa, &sig_hva) ||
	    sgx_gpa_to_hva(vcpu, secs_gpa, &secs_hva) ||
	    sgx_gpa_to_hva(vcpu, token_gpa, &token_hva))
		return 0;

	ret = sgx_virt_einit((void __user *)sig_hva, (void __user *)token_hva,
			     (void __user *)secs_hva,
			     vmx->msr_ia32_sgxlepubkeyhash, &trapnr);

	if (ret == -EFAULT)
		return sgx_inject_fault(vcpu, secs_gva, trapnr);

	/*
	 * sgx_virt_einit() returns -EINVAL when access_ok() fails on @sig_hva,
	 * @token_hva or @secs_hva. This should never happen as KVM checks host
	 * addresses at memslot creation. sgx_virt_einit() has already warned
	 * in this case, so just return.
	 */
	if (ret < 0)
		return ret;

	rflags = vmx_get_rflags(vcpu) & ~(X86_EFLAGS_CF | X86_EFLAGS_PF |
					  X86_EFLAGS_AF | X86_EFLAGS_SF |
					  X86_EFLAGS_OF);
	if (ret)
		rflags |= X86_EFLAGS_ZF;
	else
		rflags &= ~X86_EFLAGS_ZF;
	vmx_set_rflags(vcpu, rflags);

	kvm_rax_write(vcpu, ret);
	return kvm_skip_emulated_instruction(vcpu);
}

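/*
 * An ENCLS leaf is enabled for the guest iff SGX is enabled and the leaf's
 * feature set, SGX1 (ECREATE..ETRACK) or SGX2 (EAUG..EMODT), is exposed to
 * the guest via CPUID.
 */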
static inline bool encls_leaf_enabled_in_guest(struct kvm_vcpu *vcpu, u32 leaf)
{
	if (!enable_sgx || !guest_cpuid_has(vcpu, X86_FEATURE_SGX))
		return false;

	if (leaf >= ECREATE && leaf <= ETRACK)
		return guest_cpuid_has(vcpu, X86_FEATURE_SGX1);

	if (leaf >= EAUG && leaf <= EMODT)
		return guest_cpuid_has(vcpu, X86_FEATURE_SGX2);

	return false;
}

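/*
 * SGX is enabled by guest firmware iff the guest's IA32_FEATURE_CONTROL has
 * both the SGX-enable and lock bits set.
 */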
static inline bool sgx_enabled_in_guest_bios(struct kvm_vcpu *vcpu)
{
	const u64 bits = FEAT_CTL_SGX_ENABLED | FEAT_CTL_LOCKED;

	return (to_vmx(vcpu)->msr_ia32_feature_control & bits) == bits;
}

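/*
 * Handle an ENCLS VM-exit: inject #UD if the leaf isn't enabled for the
 * guest, #GP if SGX isn't enabled by guest BIOS, else emulate ECREATE and
 * EINIT.  Exits on any other leaf are unexpected.
 */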
int handle_encls(struct kvm_vcpu *vcpu)
{
	u32 leaf = (u32)kvm_rax_read(vcpu);

	if (!encls_leaf_enabled_in_guest(vcpu, leaf)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
	} else if (!sgx_enabled_in_guest_bios(vcpu)) {
		kvm_inject_gp(vcpu, 0);
	} else {
		if (leaf == ECREATE)
			return handle_encls_ecreate(vcpu);
		if (leaf == EINIT)
			return handle_encls_einit(vcpu);
		WARN_ONCE(1, "unexpected exit on ENCLS[%u]", leaf);
		vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
		vcpu->run->hw.hardware_exit_reason = EXIT_REASON_ENCLS;
		return 0;
	}
	return 1;
}

void setup_default_sgx_lepubkeyhash(void)
{
	/*
	 * Use Intel's default value for Skylake hardware if Launch Control is
	 * not supported, i.e. Intel's hash is hardcoded into silicon, or if
	 * Launch Control is supported and enabled, i.e. mimic the reset value
	 * and let the guest write the MSRs at will.  If Launch Control is
	 * supported but disabled, then use the current MSR values as the hash
	 * MSRs exist but are read-only (locked and not writable).
	 */
	if (!enable_sgx || boot_cpu_has(X86_FEATURE_SGX_LC) ||
	    rdmsrl_safe(MSR_IA32_SGXLEPUBKEYHASH0, &sgx_pubkey_hash[0])) {
		sgx_pubkey_hash[0] = 0xa6053e051270b7acULL;
		sgx_pubkey_hash[1] = 0x6cfbe8ba8b3b413dULL;
		sgx_pubkey_hash[2] = 0xc4916d99f2b3735dULL;
		sgx_pubkey_hash[3] = 0xd4f8c05909f9bb3bULL;
	} else {
		/* MSR_IA32_SGXLEPUBKEYHASH0 is read above */
		rdmsrl(MSR_IA32_SGXLEPUBKEYHASH1, sgx_pubkey_hash[1]);
		rdmsrl(MSR_IA32_SGXLEPUBKEYHASH2, sgx_pubkey_hash[2]);
		rdmsrl(MSR_IA32_SGXLEPUBKEYHASH3, sgx_pubkey_hash[3]);
	}
}

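/* Seed the vCPU's virtual SGX_LEPUBKEYHASHn MSRs with the default hash. */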
void vcpu_setup_sgx_lepubkeyhash(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	memcpy(vmx->msr_ia32_sgxlepubkeyhash, sgx_pubkey_hash,
	       sizeof(sgx_pubkey_hash));
}

/*
 * ECREATE must be intercepted to enforce MISCSELECT, ATTRIBUTES and XFRM
 * restrictions if the guest's allowed-1 settings diverge from hardware.
 */
static bool sgx_intercept_encls_ecreate(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *guest_cpuid;
	u32 eax, ebx, ecx, edx;

	if (!vcpu->kvm->arch.sgx_provisioning_allowed)
		return true;

	guest_cpuid = kvm_find_cpuid_entry_index(vcpu, 0x12, 0);
	if (!guest_cpuid)
		return true;

	cpuid_count(0x12, 0, &eax, &ebx, &ecx, &edx);
	if (guest_cpuid->ebx != ebx || guest_cpuid->edx != edx)
		return true;

	guest_cpuid = kvm_find_cpuid_entry_index(vcpu, 0x12, 1);
	if (!guest_cpuid)
		return true;

	cpuid_count(0x12, 1, &eax, &ebx, &ecx, &edx);
	if (guest_cpuid->eax != eax || guest_cpuid->ebx != ebx ||
	    guest_cpuid->ecx != ecx || guest_cpuid->edx != edx)
		return true;

	return false;
}

void vmx_write_encls_bitmap(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
	/*
	 * There is no software enable bit for SGX that is virtualized by
	 * hardware, e.g. there's no CR4.SGXE, so when SGX is disabled in the
	 * guest (either by the host or by the guest's BIOS) but enabled in the
	 * host, trap all ENCLS leafs and inject #UD/#GP as needed to emulate
	 * the expected system behavior for ENCLS.
	 */
	u64 bitmap = -1ull;

	/* Nothing to do if hardware doesn't support SGX */
	if (!cpu_has_vmx_encls_vmexit())
		return;

	if (guest_cpuid_has(vcpu, X86_FEATURE_SGX) &&
	    sgx_enabled_in_guest_bios(vcpu)) {
		if (guest_cpuid_has(vcpu, X86_FEATURE_SGX1)) {
			bitmap &= ~GENMASK_ULL(ETRACK, ECREATE);
			if (sgx_intercept_encls_ecreate(vcpu))
				bitmap |= (1 << ECREATE);
		}

		if (guest_cpuid_has(vcpu, X86_FEATURE_SGX2))
			bitmap &= ~GENMASK_ULL(EMODT, EAUG);

		/*
		 * Trap and execute EINIT if launch control is enabled in the
		 * host using the guest's values for launch control MSRs, even
		 * if the guest's values are fixed to hardware default values.
		 * The MSRs are not loaded/saved on VM-Enter/VM-Exit as writing
		 * the MSRs is extraordinarily expensive.
		 */
		if (boot_cpu_has(X86_FEATURE_SGX_LC))
			bitmap |= (1 << EINIT);

		if (!vmcs12 && is_guest_mode(vcpu))
			vmcs12 = get_vmcs12(vcpu);
		if (vmcs12 && nested_cpu_has_encls_exit(vmcs12))
			bitmap |= vmcs12->encls_exiting_bitmap;
	}
	vmcs_write64(ENCLS_EXITING_BITMAP, bitmap);
}