xref: /openbmc/linux/arch/x86/kvm/svm/sev.c (revision 09de5cd2)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  *
5  * AMD SVM-SEV support
6  *
7  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
8  */
9 
10 #include <linux/kvm_types.h>
11 #include <linux/kvm_host.h>
12 #include <linux/kernel.h>
13 #include <linux/highmem.h>
14 #include <linux/psp-sev.h>
15 #include <linux/pagemap.h>
16 #include <linux/swap.h>
17 #include <linux/misc_cgroup.h>
18 #include <linux/processor.h>
19 #include <linux/trace_events.h>
20 
21 #include <asm/pkru.h>
22 #include <asm/trapnr.h>
23 #include <asm/fpu/xcr.h>
24 
25 #include "x86.h"
26 #include "svm.h"
27 #include "svm_ops.h"
28 #include "cpuid.h"
29 #include "trace.h"
30 
31 #ifndef CONFIG_KVM_AMD_SEV
32 /*
33  * When this config is not defined, the SEV feature is not supported and the
34  * APIs in this file are not used, but the file still gets compiled into the
35  * KVM AMD module.
36  *
37  * We will not have MISC_CG_RES_SEV and MISC_CG_RES_SEV_ES entries in the enum
38  * misc_res_type {} defined in linux/misc_cgroup.h.
39  *
40  * The macros below allow compilation to succeed.
41  */
42 #define MISC_CG_RES_SEV MISC_CG_RES_TYPES
43 #define MISC_CG_RES_SEV_ES MISC_CG_RES_TYPES
44 #endif
45 
46 #ifdef CONFIG_KVM_AMD_SEV
47 /* enable/disable SEV support */
48 static bool sev_enabled = true;
49 module_param_named(sev, sev_enabled, bool, 0444);
50 
51 /* enable/disable SEV-ES support */
52 static bool sev_es_enabled = true;
53 module_param_named(sev_es, sev_es_enabled, bool, 0444);
54 #else
55 #define sev_enabled false
56 #define sev_es_enabled false
57 #endif /* CONFIG_KVM_AMD_SEV */
58 
59 static u8 sev_enc_bit;
60 static DECLARE_RWSEM(sev_deactivate_lock);
61 static DEFINE_MUTEX(sev_bitmap_lock);
62 unsigned int max_sev_asid;
63 static unsigned int min_sev_asid;
64 static unsigned long sev_me_mask;
65 static unsigned int nr_asids;
66 static unsigned long *sev_asid_bitmap;
67 static unsigned long *sev_reclaim_asid_bitmap;
68 
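/*
 * An enc_region tracks one userspace range registered through
 * KVM_MEMORY_ENCRYPT_REG_REGION.  Its pages are pinned by sev_pin_memory()
 * (and accounted in sev->pages_locked) until the region is unregistered or
 * the VM is torn down.
 */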
69 struct enc_region {
70 	struct list_head list;
71 	unsigned long npages;
72 	struct page **pages;
73 	unsigned long uaddr;
74 	unsigned long size;
75 };
76 
77 /* Called with the sev_bitmap_lock held, or on shutdown */
78 static int sev_flush_asids(int min_asid, int max_asid)
79 {
80 	int ret, asid, error = 0;
81 
82 	/* Check if there are any ASIDs to reclaim before performing a flush */
83 	asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid);
84 	if (asid > max_asid)
85 		return -EBUSY;
86 
87 	/*
88 	 * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
89 	 * so it must be guarded.
90 	 */
91 	down_write(&sev_deactivate_lock);
92 
93 	wbinvd_on_all_cpus();
94 	ret = sev_guest_df_flush(&error);
95 
96 	up_write(&sev_deactivate_lock);
97 
98 	if (ret)
99 		pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);
100 
101 	return ret;
102 }
103 
104 static inline bool is_mirroring_enc_context(struct kvm *kvm)
105 {
106 	return !!to_kvm_svm(kvm)->sev_info.enc_context_owner;
107 }
108 
109 /* Must be called with the sev_bitmap_lock held */
110 static bool __sev_recycle_asids(int min_asid, int max_asid)
111 {
112 	if (sev_flush_asids(min_asid, max_asid))
113 		return false;
114 
115 	/* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */
116 	bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
117 		   nr_asids);
118 	bitmap_zero(sev_reclaim_asid_bitmap, nr_asids);
119 
120 	return true;
121 }
122 
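/*
 * SEV and SEV-ES ASIDs are a limited platform resource; every allocation is
 * charged to the current task's misc cgroup (MISC_CG_RES_SEV or
 * MISC_CG_RES_SEV_ES) and uncharged again when the ASID is freed.
 */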
123 static int sev_misc_cg_try_charge(struct kvm_sev_info *sev)
124 {
125 	enum misc_res_type type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
126 	return misc_cg_try_charge(type, sev->misc_cg, 1);
127 }
128 
129 static void sev_misc_cg_uncharge(struct kvm_sev_info *sev)
130 {
131 	enum misc_res_type type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
132 	misc_cg_uncharge(type, sev->misc_cg, 1);
133 }
134 
135 static int sev_asid_new(struct kvm_sev_info *sev)
136 {
137 	int asid, min_asid, max_asid, ret;
138 	bool retry = true;
139 
140 	WARN_ON(sev->misc_cg);
141 	sev->misc_cg = get_current_misc_cg();
142 	ret = sev_misc_cg_try_charge(sev);
143 	if (ret) {
144 		put_misc_cg(sev->misc_cg);
145 		sev->misc_cg = NULL;
146 		return ret;
147 	}
148 
149 	mutex_lock(&sev_bitmap_lock);
150 
151 	/*
152 	 * SEV-enabled guests must use ASIDs from min_sev_asid to max_sev_asid.
153 	 * SEV-ES-enabled guests can use ASIDs from 1 to min_sev_asid - 1.
154 	 */
155 	min_asid = sev->es_active ? 1 : min_sev_asid;
156 	max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
157 again:
158 	asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
159 	if (asid > max_asid) {
160 		if (retry && __sev_recycle_asids(min_asid, max_asid)) {
161 			retry = false;
162 			goto again;
163 		}
164 		mutex_unlock(&sev_bitmap_lock);
165 		ret = -EBUSY;
166 		goto e_uncharge;
167 	}
168 
169 	__set_bit(asid, sev_asid_bitmap);
170 
171 	mutex_unlock(&sev_bitmap_lock);
172 
173 	return asid;
174 e_uncharge:
175 	sev_misc_cg_uncharge(sev);
176 	put_misc_cg(sev->misc_cg);
177 	sev->misc_cg = NULL;
178 	return ret;
179 }
180 
181 static int sev_get_asid(struct kvm *kvm)
182 {
183 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
184 
185 	return sev->asid;
186 }
187 
188 static void sev_asid_free(struct kvm_sev_info *sev)
189 {
190 	struct svm_cpu_data *sd;
191 	int cpu;
192 
193 	mutex_lock(&sev_bitmap_lock);
194 
195 	__set_bit(sev->asid, sev_reclaim_asid_bitmap);
196 
197 	for_each_possible_cpu(cpu) {
198 		sd = per_cpu(svm_data, cpu);
199 		sd->sev_vmcbs[sev->asid] = NULL;
200 	}
201 
202 	mutex_unlock(&sev_bitmap_lock);
203 
204 	sev_misc_cg_uncharge(sev);
205 	put_misc_cg(sev->misc_cg);
206 	sev->misc_cg = NULL;
207 }
208 
209 static void sev_decommission(unsigned int handle)
210 {
211 	struct sev_data_decommission decommission;
212 
213 	if (!handle)
214 		return;
215 
216 	decommission.handle = handle;
217 	sev_guest_decommission(&decommission, NULL);
218 }
219 
220 static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
221 {
222 	struct sev_data_deactivate deactivate;
223 
224 	if (!handle)
225 		return;
226 
227 	deactivate.handle = handle;
228 
229 	/* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
230 	down_read(&sev_deactivate_lock);
231 	sev_guest_deactivate(&deactivate, NULL);
232 	up_read(&sev_deactivate_lock);
233 
234 	sev_decommission(handle);
235 }
236 
237 static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
238 {
239 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
240 	int asid, ret;
241 
242 	if (kvm->created_vcpus)
243 		return -EINVAL;
244 
245 	ret = -EBUSY;
246 	if (unlikely(sev->active))
247 		return ret;
248 
249 	sev->active = true;
250 	sev->es_active = argp->id == KVM_SEV_ES_INIT;
251 	asid = sev_asid_new(sev);
252 	if (asid < 0)
253 		goto e_no_asid;
254 	sev->asid = asid;
255 
256 	ret = sev_platform_init(&argp->error);
257 	if (ret)
258 		goto e_free;
259 
260 	INIT_LIST_HEAD(&sev->regions_list);
261 	INIT_LIST_HEAD(&sev->mirror_vms);
262 
263 	kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_SEV);
264 
265 	return 0;
266 
267 e_free:
268 	sev_asid_free(sev);
269 	sev->asid = 0;
270 e_no_asid:
271 	sev->es_active = false;
272 	sev->active = false;
273 	return ret;
274 }
275 
276 static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
277 {
278 	struct sev_data_activate activate;
279 	int asid = sev_get_asid(kvm);
280 	int ret;
281 
282 	/* activate ASID on the given handle */
283 	activate.handle = handle;
284 	activate.asid   = asid;
285 	ret = sev_guest_activate(&activate, error);
286 
287 	return ret;
288 }
289 
290 static int __sev_issue_cmd(int fd, int id, void *data, int *error)
291 {
292 	struct fd f;
293 	int ret;
294 
295 	f = fdget(fd);
296 	if (!f.file)
297 		return -EBADF;
298 
299 	ret = sev_issue_cmd_external_user(f.file, id, data, error);
300 
301 	fdput(f);
302 	return ret;
303 }
304 
305 static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
306 {
307 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
308 
309 	return __sev_issue_cmd(sev->fd, id, data, error);
310 }
311 
312 static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
313 {
314 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
315 	struct sev_data_launch_start start;
316 	struct kvm_sev_launch_start params;
317 	void *dh_blob, *session_blob;
318 	int *error = &argp->error;
319 	int ret;
320 
321 	if (!sev_guest(kvm))
322 		return -ENOTTY;
323 
324 	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
325 		return -EFAULT;
326 
327 	memset(&start, 0, sizeof(start));
328 
329 	dh_blob = NULL;
330 	if (params.dh_uaddr) {
331 		dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
332 		if (IS_ERR(dh_blob))
333 			return PTR_ERR(dh_blob);
334 
335 		start.dh_cert_address = __sme_set(__pa(dh_blob));
336 		start.dh_cert_len = params.dh_len;
337 	}
338 
339 	session_blob = NULL;
340 	if (params.session_uaddr) {
341 		session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
342 		if (IS_ERR(session_blob)) {
343 			ret = PTR_ERR(session_blob);
344 			goto e_free_dh;
345 		}
346 
347 		start.session_address = __sme_set(__pa(session_blob));
348 		start.session_len = params.session_len;
349 	}
350 
351 	start.handle = params.handle;
352 	start.policy = params.policy;
353 
354 	/* create memory encryption context */
355 	ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, &start, error);
356 	if (ret)
357 		goto e_free_session;
358 
359 	/* Bind ASID to this guest */
360 	ret = sev_bind_asid(kvm, start.handle, error);
361 	if (ret) {
362 		sev_decommission(start.handle);
363 		goto e_free_session;
364 	}
365 
366 	/* return handle to userspace */
367 	params.handle = start.handle;
368 	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
369 		sev_unbind_asid(kvm, start.handle);
370 		ret = -EFAULT;
371 		goto e_free_session;
372 	}
373 
374 	sev->handle = start.handle;
375 	sev->fd = argp->sev_fd;
376 
377 e_free_session:
378 	kfree(session_blob);
379 e_free_dh:
380 	kfree(dh_blob);
381 	return ret;
382 }
383 
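/*
 * Pin the userspace range [uaddr, uaddr + ulen) and return the array of
 * struct page pointers (or an ERR_PTR on failure).  The page count is
 * returned in *n and charged against RLIMIT_MEMLOCK via sev->pages_locked;
 * the caller must hold kvm->lock and later release the pin with
 * sev_unpin_memory().
 */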
384 static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
385 				    unsigned long ulen, unsigned long *n,
386 				    int write)
387 {
388 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
389 	unsigned long npages, size;
390 	int npinned;
391 	unsigned long locked, lock_limit;
392 	struct page **pages;
393 	unsigned long first, last;
394 	int ret;
395 
396 	lockdep_assert_held(&kvm->lock);
397 
398 	if (ulen == 0 || uaddr + ulen < uaddr)
399 		return ERR_PTR(-EINVAL);
400 
401 	/* Calculate number of pages. */
402 	first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
403 	last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
404 	npages = (last - first + 1);
405 
406 	locked = sev->pages_locked + npages;
407 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
408 	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
409 		pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
410 		return ERR_PTR(-ENOMEM);
411 	}
412 
413 	if (WARN_ON_ONCE(npages > INT_MAX))
414 		return ERR_PTR(-EINVAL);
415 
416 	/* Avoid using vmalloc for smaller buffers. */
417 	size = npages * sizeof(struct page *);
418 	if (size > PAGE_SIZE)
419 		pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
420 	else
421 		pages = kmalloc(size, GFP_KERNEL_ACCOUNT);
422 
423 	if (!pages)
424 		return ERR_PTR(-ENOMEM);
425 
426 	/* Pin the user virtual address. */
427 	npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
428 	if (npinned != npages) {
429 		pr_err("SEV: Failure locking %lu pages.\n", npages);
430 		ret = -ENOMEM;
431 		goto err;
432 	}
433 
434 	*n = npages;
435 	sev->pages_locked = locked;
436 
437 	return pages;
438 
439 err:
440 	if (npinned > 0)
441 		unpin_user_pages(pages, npinned);
442 
443 	kvfree(pages);
444 	return ERR_PTR(ret);
445 }
446 
447 static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
448 			     unsigned long npages)
449 {
450 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
451 
452 	unpin_user_pages(pages, npages);
453 	kvfree(pages);
454 	sev->pages_locked -= npages;
455 }
456 
457 static void sev_clflush_pages(struct page *pages[], unsigned long npages)
458 {
459 	uint8_t *page_virtual;
460 	unsigned long i;
461 
462 	if (this_cpu_has(X86_FEATURE_SME_COHERENT) || npages == 0 ||
463 	    pages == NULL)
464 		return;
465 
466 	for (i = 0; i < npages; i++) {
467 		page_virtual = kmap_atomic(pages[i]);
468 		clflush_cache_range(page_virtual, PAGE_SIZE);
469 		kunmap_atomic(page_virtual);
470 		cond_resched();
471 	}
472 }
473 
474 static unsigned long get_num_contig_pages(unsigned long idx,
475 				struct page **inpages, unsigned long npages)
476 {
477 	unsigned long paddr, next_paddr;
478 	unsigned long i = idx + 1, pages = 1;
479 
480 	/* find the number of contiguous pages starting from idx */
481 	paddr = __sme_page_pa(inpages[idx]);
482 	while (i < npages) {
483 		next_paddr = __sme_page_pa(inpages[i++]);
484 		if ((paddr + PAGE_SIZE) == next_paddr) {
485 			pages++;
486 			paddr = next_paddr;
487 			continue;
488 		}
489 		break;
490 	}
491 
492 	return pages;
493 }
494 
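/*
 * LAUNCH_UPDATE_DATA encrypts the pinned guest pages in place, one
 * physically contiguous run at a time.  As an illustration (numbers are
 * made up): uaddr = 0x1003 and len = 0x20 yields offset = 3 into the first
 * page and a single 0x20 byte command, whereas a range whose pages are not
 * physically contiguous is split into one command per contiguous run.
 */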
495 static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
496 {
497 	unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
498 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
499 	struct kvm_sev_launch_update_data params;
500 	struct sev_data_launch_update_data data;
501 	struct page **inpages;
502 	int ret;
503 
504 	if (!sev_guest(kvm))
505 		return -ENOTTY;
506 
507 	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
508 		return -EFAULT;
509 
510 	vaddr = params.uaddr;
511 	size = params.len;
512 	vaddr_end = vaddr + size;
513 
514 	/* Lock the user memory. */
515 	inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
516 	if (IS_ERR(inpages))
517 		return PTR_ERR(inpages);
518 
519 	/*
520 	 * Flush (on non-coherent CPUs) before LAUNCH_UPDATE encrypts pages in
521 	 * place; the cache may contain the data that was written unencrypted.
522 	 */
523 	sev_clflush_pages(inpages, npages);
524 
525 	data.reserved = 0;
526 	data.handle = sev->handle;
527 
528 	for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
529 		int offset, len;
530 
531 		/*
532 		 * If the user buffer is not page-aligned, calculate the offset
533 		 * within the page.
534 		 */
535 		offset = vaddr & (PAGE_SIZE - 1);
536 
537 		/* Calculate the number of pages that can be encrypted in one go. */
538 		pages = get_num_contig_pages(i, inpages, npages);
539 
540 		len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
541 
542 		data.len = len;
543 		data.address = __sme_page_pa(inpages[i]) + offset;
544 		ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, &data, &argp->error);
545 		if (ret)
546 			goto e_unpin;
547 
548 		size -= len;
549 		next_vaddr = vaddr + len;
550 	}
551 
552 e_unpin:
553 	/* content of memory is updated, mark pages dirty */
554 	for (i = 0; i < npages; i++) {
555 		set_page_dirty_lock(inpages[i]);
556 		mark_page_accessed(inpages[i]);
557 	}
558 	/* unlock the user pages */
559 	sev_unpin_memory(kvm, inpages, npages);
560 	return ret;
561 }
562 
563 static int sev_es_sync_vmsa(struct vcpu_svm *svm)
564 {
565 	struct vmcb_save_area *save = &svm->vmcb->save;
566 
567 	/* Check some debug related fields before encrypting the VMSA */
568 	if (svm->vcpu.guest_debug || (save->dr7 & ~DR7_FIXED_1))
569 		return -EINVAL;
570 
571 	/* Sync registers */
572 	save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX];
573 	save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX];
574 	save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
575 	save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX];
576 	save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP];
577 	save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP];
578 	save->rsi = svm->vcpu.arch.regs[VCPU_REGS_RSI];
579 	save->rdi = svm->vcpu.arch.regs[VCPU_REGS_RDI];
580 #ifdef CONFIG_X86_64
581 	save->r8  = svm->vcpu.arch.regs[VCPU_REGS_R8];
582 	save->r9  = svm->vcpu.arch.regs[VCPU_REGS_R9];
583 	save->r10 = svm->vcpu.arch.regs[VCPU_REGS_R10];
584 	save->r11 = svm->vcpu.arch.regs[VCPU_REGS_R11];
585 	save->r12 = svm->vcpu.arch.regs[VCPU_REGS_R12];
586 	save->r13 = svm->vcpu.arch.regs[VCPU_REGS_R13];
587 	save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14];
588 	save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15];
589 #endif
590 	save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP];
591 
592 	/* Sync some non-GPR registers before encrypting */
593 	save->xcr0 = svm->vcpu.arch.xcr0;
594 	save->pkru = svm->vcpu.arch.pkru;
595 	save->xss  = svm->vcpu.arch.ia32_xss;
596 	save->dr6  = svm->vcpu.arch.dr6;
597 
598 	/*
599 	 * SEV-ES will use a VMSA that is pointed to by the VMCB, not
600 	 * the traditional VMSA that is part of the VMCB. Copy the
601 	 * traditional VMSA as it has been built so far (in prep
602 	 * for LAUNCH_UPDATE_VMSA) to be the initial SEV-ES state.
603 	 */
604 	memcpy(svm->sev_es.vmsa, save, sizeof(*save));
605 
606 	return 0;
607 }
608 
609 static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
610 				    int *error)
611 {
612 	struct sev_data_launch_update_vmsa vmsa;
613 	struct vcpu_svm *svm = to_svm(vcpu);
614 	int ret;
615 
616 	/* Perform some pre-encryption checks against the VMSA */
617 	ret = sev_es_sync_vmsa(svm);
618 	if (ret)
619 		return ret;
620 
621 	/*
622 	 * The LAUNCH_UPDATE_VMSA command will perform in-place encryption of
623 	 * the VMSA memory content (i.e it will write the same memory region
624 	 * with the guest's key), so invalidate it first.
625 	 */
626 	clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE);
627 
628 	vmsa.reserved = 0;
629 	vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
630 	vmsa.address = __sme_pa(svm->sev_es.vmsa);
631 	vmsa.len = PAGE_SIZE;
632 	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
633 	if (ret)
634 		return ret;
635 
636 	vcpu->arch.guest_state_protected = true;
637 	return 0;
638 }
639 
640 static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
641 {
642 	struct kvm_vcpu *vcpu;
643 	unsigned long i;
644 	int ret;
645 
646 	if (!sev_es_guest(kvm))
647 		return -ENOTTY;
648 
649 	kvm_for_each_vcpu(i, vcpu, kvm) {
650 		ret = mutex_lock_killable(&vcpu->mutex);
651 		if (ret)
652 			return ret;
653 
654 		ret = __sev_launch_update_vmsa(kvm, vcpu, &argp->error);
655 
656 		mutex_unlock(&vcpu->mutex);
657 		if (ret)
658 			return ret;
659 	}
660 
661 	return 0;
662 }
663 
664 static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
665 {
666 	void __user *measure = (void __user *)(uintptr_t)argp->data;
667 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
668 	struct sev_data_launch_measure data;
669 	struct kvm_sev_launch_measure params;
670 	void __user *p = NULL;
671 	void *blob = NULL;
672 	int ret;
673 
674 	if (!sev_guest(kvm))
675 		return -ENOTTY;
676 
677 	if (copy_from_user(&params, measure, sizeof(params)))
678 		return -EFAULT;
679 
680 	memset(&data, 0, sizeof(data));
681 
682 	/* User wants to query the blob length */
683 	if (!params.len)
684 		goto cmd;
685 
686 	p = (void __user *)(uintptr_t)params.uaddr;
687 	if (p) {
688 		if (params.len > SEV_FW_BLOB_MAX_SIZE)
689 			return -EINVAL;
690 
691 		blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
692 		if (!blob)
693 			return -ENOMEM;
694 
695 		data.address = __psp_pa(blob);
696 		data.len = params.len;
697 	}
698 
699 cmd:
700 	data.handle = sev->handle;
701 	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, &data, &argp->error);
702 
703 	/*
704 	 * If we only queried the blob length, the FW has filled in the expected length.
705 	 */
706 	if (!params.len)
707 		goto done;
708 
709 	if (ret)
710 		goto e_free_blob;
711 
712 	if (blob) {
713 		if (copy_to_user(p, blob, params.len))
714 			ret = -EFAULT;
715 	}
716 
717 done:
718 	params.len = data.len;
719 	if (copy_to_user(measure, &params, sizeof(params)))
720 		ret = -EFAULT;
721 e_free_blob:
722 	kfree(blob);
723 	return ret;
724 }
725 
726 static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
727 {
728 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
729 	struct sev_data_launch_finish data;
730 
731 	if (!sev_guest(kvm))
732 		return -ENOTTY;
733 
734 	data.handle = sev->handle;
735 	return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, &data, &argp->error);
736 }
737 
738 static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
739 {
740 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
741 	struct kvm_sev_guest_status params;
742 	struct sev_data_guest_status data;
743 	int ret;
744 
745 	if (!sev_guest(kvm))
746 		return -ENOTTY;
747 
748 	memset(&data, 0, sizeof(data));
749 
750 	data.handle = sev->handle;
751 	ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, &data, &argp->error);
752 	if (ret)
753 		return ret;
754 
755 	params.policy = data.policy;
756 	params.state = data.state;
757 	params.handle = data.handle;
758 
759 	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
760 		ret = -EFAULT;
761 
762 	return ret;
763 }
764 
765 static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
766 			       unsigned long dst, int size,
767 			       int *error, bool enc)
768 {
769 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
770 	struct sev_data_dbg data;
771 
772 	data.reserved = 0;
773 	data.handle = sev->handle;
774 	data.dst_addr = dst;
775 	data.src_addr = src;
776 	data.len = size;
777 
778 	return sev_issue_cmd(kvm,
779 			     enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
780 			     &data, error);
781 }
782 
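/*
 * The DBG_DECRYPT/DBG_ENCRYPT firmware commands are issued here with
 * 16-byte aligned addresses and lengths (see the IS_ALIGNED() checks in the
 * callers), so unaligned requests get widened.  For example (illustrative
 * values only): src_paddr = 0x1009, sz = 20 becomes src_paddr = 0x1000,
 * sz = 32, with the requested bytes starting at offset 9 in the result.
 */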
783 static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
784 			     unsigned long dst_paddr, int sz, int *err)
785 {
786 	int offset;
787 
788 	/*
789 	 * It's safe to read more than was asked for; the caller should ensure
790 	 * that the destination has enough space.
791 	 */
792 	offset = src_paddr & 15;
793 	src_paddr = round_down(src_paddr, 16);
794 	sz = round_up(sz + offset, 16);
795 
796 	return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
797 }
798 
799 static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
800 				  void __user *dst_uaddr,
801 				  unsigned long dst_paddr,
802 				  int size, int *err)
803 {
804 	struct page *tpage = NULL;
805 	int ret, offset;
806 
807 	/* If the inputs are not 16-byte aligned, use an intermediate buffer */
808 	if (!IS_ALIGNED(dst_paddr, 16) ||
809 	    !IS_ALIGNED(paddr,     16) ||
810 	    !IS_ALIGNED(size,      16)) {
811 		tpage = (void *)alloc_page(GFP_KERNEL);
812 		if (!tpage)
813 			return -ENOMEM;
814 
815 		dst_paddr = __sme_page_pa(tpage);
816 	}
817 
818 	ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
819 	if (ret)
820 		goto e_free;
821 
822 	if (tpage) {
823 		offset = paddr & 15;
824 		if (copy_to_user(dst_uaddr, page_address(tpage) + offset, size))
825 			ret = -EFAULT;
826 	}
827 
828 e_free:
829 	if (tpage)
830 		__free_page(tpage);
831 
832 	return ret;
833 }
834 
835 static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
836 				  void __user *vaddr,
837 				  unsigned long dst_paddr,
838 				  void __user *dst_vaddr,
839 				  int size, int *error)
840 {
841 	struct page *src_tpage = NULL;
842 	struct page *dst_tpage = NULL;
843 	int ret, len = size;
844 
845 	/* If the source buffer is not 16-byte aligned, use an intermediate buffer */
846 	if (!IS_ALIGNED((unsigned long)vaddr, 16)) {
847 		src_tpage = alloc_page(GFP_KERNEL);
848 		if (!src_tpage)
849 			return -ENOMEM;
850 
851 		if (copy_from_user(page_address(src_tpage), vaddr, size)) {
852 			__free_page(src_tpage);
853 			return -EFAULT;
854 		}
855 
856 		paddr = __sme_page_pa(src_tpage);
857 	}
858 
859 	/*
860 	 *  If the destination buffer or length is not aligned, do a read-modify-write:
861 	 *   - decrypt destination in an intermediate buffer
862 	 *   - copy the source buffer in an intermediate buffer
863 	 *   - use the intermediate buffer as source buffer
864 	 */
865 	if (!IS_ALIGNED((unsigned long)dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
866 		int dst_offset;
867 
868 		dst_tpage = alloc_page(GFP_KERNEL);
869 		if (!dst_tpage) {
870 			ret = -ENOMEM;
871 			goto e_free;
872 		}
873 
874 		ret = __sev_dbg_decrypt(kvm, dst_paddr,
875 					__sme_page_pa(dst_tpage), size, error);
876 		if (ret)
877 			goto e_free;
878 
879 		/*
880 		 *  If the source is a kernel buffer, use memcpy(); otherwise use
881 		 *  copy_from_user().
882 		 */
883 		dst_offset = dst_paddr & 15;
884 
885 		if (src_tpage)
886 			memcpy(page_address(dst_tpage) + dst_offset,
887 			       page_address(src_tpage), size);
888 		else {
889 			if (copy_from_user(page_address(dst_tpage) + dst_offset,
890 					   vaddr, size)) {
891 				ret = -EFAULT;
892 				goto e_free;
893 			}
894 		}
895 
896 		paddr = __sme_page_pa(dst_tpage);
897 		dst_paddr = round_down(dst_paddr, 16);
898 		len = round_up(size, 16);
899 	}
900 
901 	ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);
902 
903 e_free:
904 	if (src_tpage)
905 		__free_page(src_tpage);
906 	if (dst_tpage)
907 		__free_page(dst_tpage);
908 	return ret;
909 }
910 
911 static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
912 {
913 	unsigned long vaddr, vaddr_end, next_vaddr;
914 	unsigned long dst_vaddr;
915 	struct page **src_p, **dst_p;
916 	struct kvm_sev_dbg debug;
917 	unsigned long n;
918 	unsigned int size;
919 	int ret;
920 
921 	if (!sev_guest(kvm))
922 		return -ENOTTY;
923 
924 	if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
925 		return -EFAULT;
926 
927 	if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
928 		return -EINVAL;
929 	if (!debug.dst_uaddr)
930 		return -EINVAL;
931 
932 	vaddr = debug.src_uaddr;
933 	size = debug.len;
934 	vaddr_end = vaddr + size;
935 	dst_vaddr = debug.dst_uaddr;
936 
937 	for (; vaddr < vaddr_end; vaddr = next_vaddr) {
938 		int len, s_off, d_off;
939 
940 		/* lock userspace source and destination page */
941 		src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
942 		if (IS_ERR(src_p))
943 			return PTR_ERR(src_p);
944 
945 		dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
946 		if (IS_ERR(dst_p)) {
947 			sev_unpin_memory(kvm, src_p, n);
948 			return PTR_ERR(dst_p);
949 		}
950 
951 		/*
952 		 * Flush (on non-coherent CPUs) before DBG_{DE,EN}CRYPT read or modify
953 		 * the pages; flush the destination too so that future accesses do not
954 		 * see stale data.
955 		 */
956 		sev_clflush_pages(src_p, 1);
957 		sev_clflush_pages(dst_p, 1);
958 
959 		/*
960 		 * Since user buffer may not be page aligned, calculate the
961 		 * offset within the page.
962 		 */
963 		s_off = vaddr & ~PAGE_MASK;
964 		d_off = dst_vaddr & ~PAGE_MASK;
965 		len = min_t(size_t, (PAGE_SIZE - s_off), size);
966 
967 		if (dec)
968 			ret = __sev_dbg_decrypt_user(kvm,
969 						     __sme_page_pa(src_p[0]) + s_off,
970 						     (void __user *)dst_vaddr,
971 						     __sme_page_pa(dst_p[0]) + d_off,
972 						     len, &argp->error);
973 		else
974 			ret = __sev_dbg_encrypt_user(kvm,
975 						     __sme_page_pa(src_p[0]) + s_off,
976 						     (void __user *)vaddr,
977 						     __sme_page_pa(dst_p[0]) + d_off,
978 						     (void __user *)dst_vaddr,
979 						     len, &argp->error);
980 
981 		sev_unpin_memory(kvm, src_p, n);
982 		sev_unpin_memory(kvm, dst_p, n);
983 
984 		if (ret)
985 			goto err;
986 
987 		next_vaddr = vaddr + len;
988 		dst_vaddr = dst_vaddr + len;
989 		size -= len;
990 	}
991 err:
992 	return ret;
993 }
994 
995 static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
996 {
997 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
998 	struct sev_data_launch_secret data;
999 	struct kvm_sev_launch_secret params;
1000 	struct page **pages;
1001 	void *blob, *hdr;
1002 	unsigned long n, i;
1003 	int ret, offset;
1004 
1005 	if (!sev_guest(kvm))
1006 		return -ENOTTY;
1007 
1008 	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
1009 		return -EFAULT;
1010 
1011 	pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
1012 	if (IS_ERR(pages))
1013 		return PTR_ERR(pages);
1014 
1015 	/*
1016 	 * Flush (on non-coherent CPUs) before LAUNCH_SECRET encrypts pages in
1017 	 * place; the cache may contain the data that was written unencrypted.
1018 	 */
1019 	sev_clflush_pages(pages, n);
1020 
1021 	/*
1022 	 * The secret must be copied into a contiguous memory region; verify that
1023 	 * the userspace memory pages are contiguous before issuing the command.
1024 	 */
1025 	if (get_num_contig_pages(0, pages, n) != n) {
1026 		ret = -EINVAL;
1027 		goto e_unpin_memory;
1028 	}
1029 
1030 	memset(&data, 0, sizeof(data));
1031 
1032 	offset = params.guest_uaddr & (PAGE_SIZE - 1);
1033 	data.guest_address = __sme_page_pa(pages[0]) + offset;
1034 	data.guest_len = params.guest_len;
1035 
1036 	blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
1037 	if (IS_ERR(blob)) {
1038 		ret = PTR_ERR(blob);
1039 		goto e_unpin_memory;
1040 	}
1041 
1042 	data.trans_address = __psp_pa(blob);
1043 	data.trans_len = params.trans_len;
1044 
1045 	hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
1046 	if (IS_ERR(hdr)) {
1047 		ret = PTR_ERR(hdr);
1048 		goto e_free_blob;
1049 	}
1050 	data.hdr_address = __psp_pa(hdr);
1051 	data.hdr_len = params.hdr_len;
1052 
1053 	data.handle = sev->handle;
1054 	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, &data, &argp->error);
1055 
1056 	kfree(hdr);
1057 
1058 e_free_blob:
1059 	kfree(blob);
1060 e_unpin_memory:
1061 	/* content of memory is updated, mark pages dirty */
1062 	for (i = 0; i < n; i++) {
1063 		set_page_dirty_lock(pages[i]);
1064 		mark_page_accessed(pages[i]);
1065 	}
1066 	sev_unpin_memory(kvm, pages, n);
1067 	return ret;
1068 }
1069 
1070 static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
1071 {
1072 	void __user *report = (void __user *)(uintptr_t)argp->data;
1073 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1074 	struct sev_data_attestation_report data;
1075 	struct kvm_sev_attestation_report params;
1076 	void __user *p;
1077 	void *blob = NULL;
1078 	int ret;
1079 
1080 	if (!sev_guest(kvm))
1081 		return -ENOTTY;
1082 
1083 	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
1084 		return -EFAULT;
1085 
1086 	memset(&data, 0, sizeof(data));
1087 
1088 	/* User wants to query the blob length */
1089 	if (!params.len)
1090 		goto cmd;
1091 
1092 	p = (void __user *)(uintptr_t)params.uaddr;
1093 	if (p) {
1094 		if (params.len > SEV_FW_BLOB_MAX_SIZE)
1095 			return -EINVAL;
1096 
1097 		blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
1098 		if (!blob)
1099 			return -ENOMEM;
1100 
1101 		data.address = __psp_pa(blob);
1102 		data.len = params.len;
1103 		memcpy(data.mnonce, params.mnonce, sizeof(params.mnonce));
1104 	}
1105 cmd:
1106 	data.handle = sev->handle;
1107 	ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, &data, &argp->error);
1108 	/*
1109 	 * If we only queried the report length, the FW has filled in the expected length.
1110 	 */
1111 	if (!params.len)
1112 		goto done;
1113 
1114 	if (ret)
1115 		goto e_free_blob;
1116 
1117 	if (blob) {
1118 		if (copy_to_user(p, blob, params.len))
1119 			ret = -EFAULT;
1120 	}
1121 
1122 done:
1123 	params.len = data.len;
1124 	if (copy_to_user(report, &params, sizeof(params)))
1125 		ret = -EFAULT;
1126 e_free_blob:
1127 	kfree(blob);
1128 	return ret;
1129 }
1130 
1131 /* Userspace wants to query session length. */
1132 static int
1133 __sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
1134 				      struct kvm_sev_send_start *params)
1135 {
1136 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1137 	struct sev_data_send_start data;
1138 	int ret;
1139 
1140 	memset(&data, 0, sizeof(data));
1141 	data.handle = sev->handle;
1142 	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
1143 
1144 	params->session_len = data.session_len;
1145 	if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
1146 				sizeof(struct kvm_sev_send_start)))
1147 		ret = -EFAULT;
1148 
1149 	return ret;
1150 }
1151 
1152 static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
1153 {
1154 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1155 	struct sev_data_send_start data;
1156 	struct kvm_sev_send_start params;
1157 	void *amd_certs, *session_data;
1158 	void *pdh_cert, *plat_certs;
1159 	int ret;
1160 
1161 	if (!sev_guest(kvm))
1162 		return -ENOTTY;
1163 
1164 	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
1165 				sizeof(struct kvm_sev_send_start)))
1166 		return -EFAULT;
1167 
1168 	/* if session_len is zero, userspace wants to query the session length */
1169 	if (!params.session_len)
1170 		return __sev_send_start_query_session_length(kvm, argp,
1171 				&params);
1172 
1173 	/* some sanity checks */
1174 	if (!params.pdh_cert_uaddr || !params.pdh_cert_len ||
1175 	    !params.session_uaddr || params.session_len > SEV_FW_BLOB_MAX_SIZE)
1176 		return -EINVAL;
1177 
1178 	/* allocate the memory to hold the session data blob */
1179 	session_data = kmalloc(params.session_len, GFP_KERNEL_ACCOUNT);
1180 	if (!session_data)
1181 		return -ENOMEM;
1182 
1183 	/* copy the certificate blobs from userspace */
1184 	pdh_cert = psp_copy_user_blob(params.pdh_cert_uaddr,
1185 				params.pdh_cert_len);
1186 	if (IS_ERR(pdh_cert)) {
1187 		ret = PTR_ERR(pdh_cert);
1188 		goto e_free_session;
1189 	}
1190 
1191 	plat_certs = psp_copy_user_blob(params.plat_certs_uaddr,
1192 				params.plat_certs_len);
1193 	if (IS_ERR(plat_certs)) {
1194 		ret = PTR_ERR(plat_certs);
1195 		goto e_free_pdh;
1196 	}
1197 
1198 	amd_certs = psp_copy_user_blob(params.amd_certs_uaddr,
1199 				params.amd_certs_len);
1200 	if (IS_ERR(amd_certs)) {
1201 		ret = PTR_ERR(amd_certs);
1202 		goto e_free_plat_cert;
1203 	}
1204 
1205 	/* populate the FW SEND_START field with system physical address */
1206 	memset(&data, 0, sizeof(data));
1207 	data.pdh_cert_address = __psp_pa(pdh_cert);
1208 	data.pdh_cert_len = params.pdh_cert_len;
1209 	data.plat_certs_address = __psp_pa(plat_certs);
1210 	data.plat_certs_len = params.plat_certs_len;
1211 	data.amd_certs_address = __psp_pa(amd_certs);
1212 	data.amd_certs_len = params.amd_certs_len;
1213 	data.session_address = __psp_pa(session_data);
1214 	data.session_len = params.session_len;
1215 	data.handle = sev->handle;
1216 
1217 	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
1218 
1219 	if (!ret && copy_to_user((void __user *)(uintptr_t)params.session_uaddr,
1220 			session_data, params.session_len)) {
1221 		ret = -EFAULT;
1222 		goto e_free_amd_cert;
1223 	}
1224 
1225 	params.policy = data.policy;
1226 	params.session_len = data.session_len;
1227 	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params,
1228 				sizeof(struct kvm_sev_send_start)))
1229 		ret = -EFAULT;
1230 
1231 e_free_amd_cert:
1232 	kfree(amd_certs);
1233 e_free_plat_cert:
1234 	kfree(plat_certs);
1235 e_free_pdh:
1236 	kfree(pdh_cert);
1237 e_free_session:
1238 	kfree(session_data);
1239 	return ret;
1240 }
1241 
1242 /* Userspace wants to query either header or trans length. */
1243 static int
1244 __sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
1245 				     struct kvm_sev_send_update_data *params)
1246 {
1247 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1248 	struct sev_data_send_update_data data;
1249 	int ret;
1250 
1251 	memset(&data, 0, sizeof(data));
1252 	data.handle = sev->handle;
1253 	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);
1254 
1255 	params->hdr_len = data.hdr_len;
1256 	params->trans_len = data.trans_len;
1257 
1258 	if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
1259 			 sizeof(struct kvm_sev_send_update_data)))
1260 		ret = -EFAULT;
1261 
1262 	return ret;
1263 }
1264 
1265 static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
1266 {
1267 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1268 	struct sev_data_send_update_data data;
1269 	struct kvm_sev_send_update_data params;
1270 	void *hdr, *trans_data;
1271 	struct page **guest_page;
1272 	unsigned long n;
1273 	int ret, offset;
1274 
1275 	if (!sev_guest(kvm))
1276 		return -ENOTTY;
1277 
1278 	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
1279 			sizeof(struct kvm_sev_send_update_data)))
1280 		return -EFAULT;
1281 
1282 	/* userspace wants to query either header or trans length */
1283 	if (!params.trans_len || !params.hdr_len)
1284 		return __sev_send_update_data_query_lengths(kvm, argp, &params);
1285 
1286 	if (!params.trans_uaddr || !params.guest_uaddr ||
1287 	    !params.guest_len || !params.hdr_uaddr)
1288 		return -EINVAL;
1289 
1290 	/* Check if we are crossing the page boundary */
1291 	offset = params.guest_uaddr & (PAGE_SIZE - 1);
1292 	if ((params.guest_len + offset > PAGE_SIZE))
1293 		return -EINVAL;
1294 
1295 	/* Pin guest memory */
1296 	guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
1297 				    PAGE_SIZE, &n, 0);
1298 	if (IS_ERR(guest_page))
1299 		return PTR_ERR(guest_page);
1300 
1301 	/* allocate memory for header and transport buffer */
1302 	ret = -ENOMEM;
1303 	hdr = kmalloc(params.hdr_len, GFP_KERNEL_ACCOUNT);
1304 	if (!hdr)
1305 		goto e_unpin;
1306 
1307 	trans_data = kmalloc(params.trans_len, GFP_KERNEL_ACCOUNT);
1308 	if (!trans_data)
1309 		goto e_free_hdr;
1310 
1311 	memset(&data, 0, sizeof(data));
1312 	data.hdr_address = __psp_pa(hdr);
1313 	data.hdr_len = params.hdr_len;
1314 	data.trans_address = __psp_pa(trans_data);
1315 	data.trans_len = params.trans_len;
1316 
1317 	/* The SEND_UPDATE_DATA command requires C-bit to be always set. */
1318 	data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
1319 	data.guest_address |= sev_me_mask;
1320 	data.guest_len = params.guest_len;
1321 	data.handle = sev->handle;
1322 
1323 	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);
1324 
1325 	if (ret)
1326 		goto e_free_trans_data;
1327 
1328 	/* copy transport buffer to user space */
1329 	if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr,
1330 			 trans_data, params.trans_len)) {
1331 		ret = -EFAULT;
1332 		goto e_free_trans_data;
1333 	}
1334 
1335 	/* Copy packet header to userspace. */
1336 	if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr,
1337 			 params.hdr_len))
1338 		ret = -EFAULT;
1339 
1340 e_free_trans_data:
1341 	kfree(trans_data);
1342 e_free_hdr:
1343 	kfree(hdr);
1344 e_unpin:
1345 	sev_unpin_memory(kvm, guest_page, n);
1346 
1347 	return ret;
1348 }
1349 
1350 static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
1351 {
1352 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1353 	struct sev_data_send_finish data;
1354 
1355 	if (!sev_guest(kvm))
1356 		return -ENOTTY;
1357 
1358 	data.handle = sev->handle;
1359 	return sev_issue_cmd(kvm, SEV_CMD_SEND_FINISH, &data, &argp->error);
1360 }
1361 
1362 static int sev_send_cancel(struct kvm *kvm, struct kvm_sev_cmd *argp)
1363 {
1364 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1365 	struct sev_data_send_cancel data;
1366 
1367 	if (!sev_guest(kvm))
1368 		return -ENOTTY;
1369 
1370 	data.handle = sev->handle;
1371 	return sev_issue_cmd(kvm, SEV_CMD_SEND_CANCEL, &data, &argp->error);
1372 }
1373 
1374 static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
1375 {
1376 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1377 	struct sev_data_receive_start start;
1378 	struct kvm_sev_receive_start params;
1379 	int *error = &argp->error;
1380 	void *session_data;
1381 	void *pdh_data;
1382 	int ret;
1383 
1384 	if (!sev_guest(kvm))
1385 		return -ENOTTY;
1386 
1387 	/* Get parameters from userspace */
1388 	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
1389 			sizeof(struct kvm_sev_receive_start)))
1390 		return -EFAULT;
1391 
1392 	/* some sanity checks */
1393 	if (!params.pdh_uaddr || !params.pdh_len ||
1394 	    !params.session_uaddr || !params.session_len)
1395 		return -EINVAL;
1396 
1397 	pdh_data = psp_copy_user_blob(params.pdh_uaddr, params.pdh_len);
1398 	if (IS_ERR(pdh_data))
1399 		return PTR_ERR(pdh_data);
1400 
1401 	session_data = psp_copy_user_blob(params.session_uaddr,
1402 			params.session_len);
1403 	if (IS_ERR(session_data)) {
1404 		ret = PTR_ERR(session_data);
1405 		goto e_free_pdh;
1406 	}
1407 
1408 	memset(&start, 0, sizeof(start));
1409 	start.handle = params.handle;
1410 	start.policy = params.policy;
1411 	start.pdh_cert_address = __psp_pa(pdh_data);
1412 	start.pdh_cert_len = params.pdh_len;
1413 	start.session_address = __psp_pa(session_data);
1414 	start.session_len = params.session_len;
1415 
1416 	/* create memory encryption context */
1417 	ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_RECEIVE_START, &start,
1418 				error);
1419 	if (ret)
1420 		goto e_free_session;
1421 
1422 	/* Bind ASID to this guest */
1423 	ret = sev_bind_asid(kvm, start.handle, error);
1424 	if (ret) {
1425 		sev_decommission(start.handle);
1426 		goto e_free_session;
1427 	}
1428 
1429 	params.handle = start.handle;
1430 	if (copy_to_user((void __user *)(uintptr_t)argp->data,
1431 			 &params, sizeof(struct kvm_sev_receive_start))) {
1432 		ret = -EFAULT;
1433 		sev_unbind_asid(kvm, start.handle);
1434 		goto e_free_session;
1435 	}
1436 
1437 	sev->handle = start.handle;
1438 	sev->fd = argp->sev_fd;
1439 
1440 e_free_session:
1441 	kfree(session_data);
1442 e_free_pdh:
1443 	kfree(pdh_data);
1444 
1445 	return ret;
1446 }
1447 
1448 static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
1449 {
1450 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1451 	struct kvm_sev_receive_update_data params;
1452 	struct sev_data_receive_update_data data;
1453 	void *hdr = NULL, *trans = NULL;
1454 	struct page **guest_page;
1455 	unsigned long n;
1456 	int ret, offset;
1457 
1458 	if (!sev_guest(kvm))
1459 		return -EINVAL;
1460 
1461 	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
1462 			sizeof(struct kvm_sev_receive_update_data)))
1463 		return -EFAULT;
1464 
1465 	if (!params.hdr_uaddr || !params.hdr_len ||
1466 	    !params.guest_uaddr || !params.guest_len ||
1467 	    !params.trans_uaddr || !params.trans_len)
1468 		return -EINVAL;
1469 
1470 	/* Check if we are crossing the page boundary */
1471 	offset = params.guest_uaddr & (PAGE_SIZE - 1);
1472 	if ((params.guest_len + offset > PAGE_SIZE))
1473 		return -EINVAL;
1474 
1475 	hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
1476 	if (IS_ERR(hdr))
1477 		return PTR_ERR(hdr);
1478 
1479 	trans = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
1480 	if (IS_ERR(trans)) {
1481 		ret = PTR_ERR(trans);
1482 		goto e_free_hdr;
1483 	}
1484 
1485 	memset(&data, 0, sizeof(data));
1486 	data.hdr_address = __psp_pa(hdr);
1487 	data.hdr_len = params.hdr_len;
1488 	data.trans_address = __psp_pa(trans);
1489 	data.trans_len = params.trans_len;
1490 
1491 	/* Pin guest memory */
1492 	guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
1493 				    PAGE_SIZE, &n, 1);
1494 	if (IS_ERR(guest_page)) {
1495 		ret = PTR_ERR(guest_page);
1496 		goto e_free_trans;
1497 	}
1498 
1499 	/*
1500 	 * Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP
1501 	 * encrypts the written data with the guest's key, and the cache may
1502 	 * contain dirty, unencrypted data.
1503 	 */
1504 	sev_clflush_pages(guest_page, n);
1505 
1506 	/* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */
1507 	data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
1508 	data.guest_address |= sev_me_mask;
1509 	data.guest_len = params.guest_len;
1510 	data.handle = sev->handle;
1511 
1512 	ret = sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_DATA, &data,
1513 				&argp->error);
1514 
1515 	sev_unpin_memory(kvm, guest_page, n);
1516 
1517 e_free_trans:
1518 	kfree(trans);
1519 e_free_hdr:
1520 	kfree(hdr);
1521 
1522 	return ret;
1523 }
1524 
1525 static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
1526 {
1527 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1528 	struct sev_data_receive_finish data;
1529 
1530 	if (!sev_guest(kvm))
1531 		return -ENOTTY;
1532 
1533 	data.handle = sev->handle;
1534 	return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
1535 }
1536 
1537 static bool is_cmd_allowed_from_mirror(u32 cmd_id)
1538 {
1539 	/*
1540 	 * Allow mirror VMs to call KVM_SEV_LAUNCH_UPDATE_VMSA to enable SEV-ES
1541 	 * on active mirror VMs. Also allow the debugging and status commands.
1542 	 */
1543 	if (cmd_id == KVM_SEV_LAUNCH_UPDATE_VMSA ||
1544 	    cmd_id == KVM_SEV_GUEST_STATUS || cmd_id == KVM_SEV_DBG_DECRYPT ||
1545 	    cmd_id == KVM_SEV_DBG_ENCRYPT)
1546 		return true;
1547 
1548 	return false;
1549 }
1550 
1551 static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
1552 {
1553 	struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
1554 	struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;
1555 	int r = -EBUSY;
1556 
1557 	if (dst_kvm == src_kvm)
1558 		return -EINVAL;
1559 
1560 	/*
1561 	 * Bail if these VMs are already involved in a migration to avoid
1562 	 * deadlock between two VMs trying to migrate to/from each other.
1563 	 */
1564 	if (atomic_cmpxchg_acquire(&dst_sev->migration_in_progress, 0, 1))
1565 		return -EBUSY;
1566 
1567 	if (atomic_cmpxchg_acquire(&src_sev->migration_in_progress, 0, 1))
1568 		goto release_dst;
1569 
1570 	r = -EINTR;
1571 	if (mutex_lock_killable(&dst_kvm->lock))
1572 		goto release_src;
1573 	if (mutex_lock_killable_nested(&src_kvm->lock, SINGLE_DEPTH_NESTING))
1574 		goto unlock_dst;
1575 	return 0;
1576 
1577 unlock_dst:
1578 	mutex_unlock(&dst_kvm->lock);
1579 release_src:
1580 	atomic_set_release(&src_sev->migration_in_progress, 0);
1581 release_dst:
1582 	atomic_set_release(&dst_sev->migration_in_progress, 0);
1583 	return r;
1584 }
1585 
1586 static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
1587 {
1588 	struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
1589 	struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;
1590 
1591 	mutex_unlock(&dst_kvm->lock);
1592 	mutex_unlock(&src_kvm->lock);
1593 	atomic_set_release(&dst_sev->migration_in_progress, 0);
1594 	atomic_set_release(&src_sev->migration_in_progress, 0);
1595 }
1596 
1597 
1598 static int sev_lock_vcpus_for_migration(struct kvm *kvm)
1599 {
1600 	struct kvm_vcpu *vcpu;
1601 	unsigned long i, j;
1602 
1603 	kvm_for_each_vcpu(i, vcpu, kvm) {
1604 		if (mutex_lock_killable(&vcpu->mutex))
1605 			goto out_unlock;
1606 	}
1607 
1608 	return 0;
1609 
1610 out_unlock:
1611 	kvm_for_each_vcpu(j, vcpu, kvm) {
1612 		if (i == j)
1613 			break;
1614 
1615 		mutex_unlock(&vcpu->mutex);
1616 	}
1617 	return -EINTR;
1618 }
1619 
1620 static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
1621 {
1622 	struct kvm_vcpu *vcpu;
1623 	unsigned long i;
1624 
1625 	kvm_for_each_vcpu(i, vcpu, kvm) {
1626 		mutex_unlock(&vcpu->mutex);
1627 	}
1628 }
1629 
1630 static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
1631 {
1632 	struct kvm_sev_info *dst = &to_kvm_svm(dst_kvm)->sev_info;
1633 	struct kvm_sev_info *src = &to_kvm_svm(src_kvm)->sev_info;
1634 	struct kvm_sev_info *mirror;
1635 
1636 	dst->active = true;
1637 	dst->asid = src->asid;
1638 	dst->handle = src->handle;
1639 	dst->pages_locked = src->pages_locked;
1640 	dst->enc_context_owner = src->enc_context_owner;
1641 
1642 	src->asid = 0;
1643 	src->active = false;
1644 	src->handle = 0;
1645 	src->pages_locked = 0;
1646 	src->enc_context_owner = NULL;
1647 
1648 	list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list);
1649 
1650 	/*
1651 	 * If this VM has mirrors, "transfer" each mirror's refcount of the
1652 	 * source to the destination (this KVM).  The caller holds a reference
1653 	 * to the source, so there's no danger of use-after-free.
1654 	 */
1655 	list_cut_before(&dst->mirror_vms, &src->mirror_vms, &src->mirror_vms);
1656 	list_for_each_entry(mirror, &dst->mirror_vms, mirror_entry) {
1657 		kvm_get_kvm(dst_kvm);
1658 		kvm_put_kvm(src_kvm);
1659 		mirror->enc_context_owner = dst_kvm;
1660 	}
1661 
1662 	/*
1663 	 * If this VM is a mirror, remove the old mirror from the owners list
1664 	 * and add the new mirror to the list.
1665 	 */
1666 	if (is_mirroring_enc_context(dst_kvm)) {
1667 		struct kvm_sev_info *owner_sev_info =
1668 			&to_kvm_svm(dst->enc_context_owner)->sev_info;
1669 
1670 		list_del(&src->mirror_entry);
1671 		list_add_tail(&dst->mirror_entry, &owner_sev_info->mirror_vms);
1672 	}
1673 }
1674 
1675 static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
1676 {
1677 	unsigned long i;
1678 	struct kvm_vcpu *dst_vcpu, *src_vcpu;
1679 	struct vcpu_svm *dst_svm, *src_svm;
1680 
1681 	if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus))
1682 		return -EINVAL;
1683 
1684 	kvm_for_each_vcpu(i, src_vcpu, src) {
1685 		if (!src_vcpu->arch.guest_state_protected)
1686 			return -EINVAL;
1687 	}
1688 
1689 	kvm_for_each_vcpu(i, src_vcpu, src) {
1690 		src_svm = to_svm(src_vcpu);
1691 		dst_vcpu = kvm_get_vcpu(dst, i);
1692 		dst_svm = to_svm(dst_vcpu);
1693 
1694 		/*
1695 		 * Transfer VMSA and GHCB state to the destination.  Nullify and
1696 		 * clear source fields as appropriate, the state now belongs to
1697 		 * the destination.
1698 		 */
1699 		memcpy(&dst_svm->sev_es, &src_svm->sev_es, sizeof(src_svm->sev_es));
1700 		dst_svm->vmcb->control.ghcb_gpa = src_svm->vmcb->control.ghcb_gpa;
1701 		dst_svm->vmcb->control.vmsa_pa = src_svm->vmcb->control.vmsa_pa;
1702 		dst_vcpu->arch.guest_state_protected = true;
1703 
1704 		memset(&src_svm->sev_es, 0, sizeof(src_svm->sev_es));
1705 		src_svm->vmcb->control.ghcb_gpa = INVALID_PAGE;
1706 		src_svm->vmcb->control.vmsa_pa = INVALID_PAGE;
1707 		src_vcpu->arch.guest_state_protected = false;
1708 	}
1709 	to_kvm_svm(src)->sev_info.es_active = false;
1710 	to_kvm_svm(dst)->sev_info.es_active = true;
1711 
1712 	return 0;
1713 }
1714 
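/*
 * Intra-host migration: move the SEV encryption context (ASID, firmware
 * handle, pinned region list and, for SEV-ES, the per-vCPU VMSA state) from
 * the VM referenced by @source_fd into @kvm.  The source VM is marked dead
 * once the transfer has succeeded.
 */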
1715 int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
1716 {
1717 	struct kvm_sev_info *dst_sev = &to_kvm_svm(kvm)->sev_info;
1718 	struct kvm_sev_info *src_sev, *cg_cleanup_sev;
1719 	struct file *source_kvm_file;
1720 	struct kvm *source_kvm;
1721 	bool charged = false;
1722 	int ret;
1723 
1724 	source_kvm_file = fget(source_fd);
1725 	if (!file_is_kvm(source_kvm_file)) {
1726 		ret = -EBADF;
1727 		goto out_fput;
1728 	}
1729 
1730 	source_kvm = source_kvm_file->private_data;
1731 	ret = sev_lock_two_vms(kvm, source_kvm);
1732 	if (ret)
1733 		goto out_fput;
1734 
1735 	if (sev_guest(kvm) || !sev_guest(source_kvm)) {
1736 		ret = -EINVAL;
1737 		goto out_unlock;
1738 	}
1739 
1740 	src_sev = &to_kvm_svm(source_kvm)->sev_info;
1741 
1742 	dst_sev->misc_cg = get_current_misc_cg();
1743 	cg_cleanup_sev = dst_sev;
1744 	if (dst_sev->misc_cg != src_sev->misc_cg) {
1745 		ret = sev_misc_cg_try_charge(dst_sev);
1746 		if (ret)
1747 			goto out_dst_cgroup;
1748 		charged = true;
1749 	}
1750 
1751 	ret = sev_lock_vcpus_for_migration(kvm);
1752 	if (ret)
1753 		goto out_dst_cgroup;
1754 	ret = sev_lock_vcpus_for_migration(source_kvm);
1755 	if (ret)
1756 		goto out_dst_vcpu;
1757 
1758 	if (sev_es_guest(source_kvm)) {
1759 		ret = sev_es_migrate_from(kvm, source_kvm);
1760 		if (ret)
1761 			goto out_source_vcpu;
1762 	}
1763 
1764 	sev_migrate_from(kvm, source_kvm);
1765 	kvm_vm_dead(source_kvm);
1766 	cg_cleanup_sev = src_sev;
1767 	ret = 0;
1768 
1769 out_source_vcpu:
1770 	sev_unlock_vcpus_for_migration(source_kvm);
1771 out_dst_vcpu:
1772 	sev_unlock_vcpus_for_migration(kvm);
1773 out_dst_cgroup:
1774 	/* Operates on the source on success, on the destination on failure. */
1775 	if (charged)
1776 		sev_misc_cg_uncharge(cg_cleanup_sev);
1777 	put_misc_cg(cg_cleanup_sev->misc_cg);
1778 	cg_cleanup_sev->misc_cg = NULL;
1779 out_unlock:
1780 	sev_unlock_two_vms(kvm, source_kvm);
1781 out_fput:
1782 	if (source_kvm_file)
1783 		fput(source_kvm_file);
1784 	return ret;
1785 }
1786 
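/*
 * Dispatcher for the KVM_MEMORY_ENCRYPT_OP ioctl.  As a rough sketch (not a
 * complete or authoritative example; the fd names are illustrative),
 * userspace drives an SEV launch with a sequence along the lines of:
 *
 *	struct kvm_sev_cmd cmd = { .id = KVM_SEV_INIT, .sev_fd = sev_fd };
 *
 *	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);	// KVM_SEV_INIT
 *	...						// KVM_SEV_LAUNCH_START
 *	...						// KVM_SEV_LAUNCH_UPDATE_DATA
 *	...						// KVM_SEV_LAUNCH_MEASURE
 *	...						// KVM_SEV_LAUNCH_FINISH
 *
 * Each command is handled by the switch below under kvm->lock.
 */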
1787 int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
1788 {
1789 	struct kvm_sev_cmd sev_cmd;
1790 	int r;
1791 
1792 	if (!sev_enabled)
1793 		return -ENOTTY;
1794 
1795 	if (!argp)
1796 		return 0;
1797 
1798 	if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
1799 		return -EFAULT;
1800 
1801 	mutex_lock(&kvm->lock);
1802 
1803 	/* Only the enc_context_owner handles some memory enc operations. */
1804 	if (is_mirroring_enc_context(kvm) &&
1805 	    !is_cmd_allowed_from_mirror(sev_cmd.id)) {
1806 		r = -EINVAL;
1807 		goto out;
1808 	}
1809 
1810 	switch (sev_cmd.id) {
1811 	case KVM_SEV_ES_INIT:
1812 		if (!sev_es_enabled) {
1813 			r = -ENOTTY;
1814 			goto out;
1815 		}
1816 		fallthrough;
1817 	case KVM_SEV_INIT:
1818 		r = sev_guest_init(kvm, &sev_cmd);
1819 		break;
1820 	case KVM_SEV_LAUNCH_START:
1821 		r = sev_launch_start(kvm, &sev_cmd);
1822 		break;
1823 	case KVM_SEV_LAUNCH_UPDATE_DATA:
1824 		r = sev_launch_update_data(kvm, &sev_cmd);
1825 		break;
1826 	case KVM_SEV_LAUNCH_UPDATE_VMSA:
1827 		r = sev_launch_update_vmsa(kvm, &sev_cmd);
1828 		break;
1829 	case KVM_SEV_LAUNCH_MEASURE:
1830 		r = sev_launch_measure(kvm, &sev_cmd);
1831 		break;
1832 	case KVM_SEV_LAUNCH_FINISH:
1833 		r = sev_launch_finish(kvm, &sev_cmd);
1834 		break;
1835 	case KVM_SEV_GUEST_STATUS:
1836 		r = sev_guest_status(kvm, &sev_cmd);
1837 		break;
1838 	case KVM_SEV_DBG_DECRYPT:
1839 		r = sev_dbg_crypt(kvm, &sev_cmd, true);
1840 		break;
1841 	case KVM_SEV_DBG_ENCRYPT:
1842 		r = sev_dbg_crypt(kvm, &sev_cmd, false);
1843 		break;
1844 	case KVM_SEV_LAUNCH_SECRET:
1845 		r = sev_launch_secret(kvm, &sev_cmd);
1846 		break;
1847 	case KVM_SEV_GET_ATTESTATION_REPORT:
1848 		r = sev_get_attestation_report(kvm, &sev_cmd);
1849 		break;
1850 	case KVM_SEV_SEND_START:
1851 		r = sev_send_start(kvm, &sev_cmd);
1852 		break;
1853 	case KVM_SEV_SEND_UPDATE_DATA:
1854 		r = sev_send_update_data(kvm, &sev_cmd);
1855 		break;
1856 	case KVM_SEV_SEND_FINISH:
1857 		r = sev_send_finish(kvm, &sev_cmd);
1858 		break;
1859 	case KVM_SEV_SEND_CANCEL:
1860 		r = sev_send_cancel(kvm, &sev_cmd);
1861 		break;
1862 	case KVM_SEV_RECEIVE_START:
1863 		r = sev_receive_start(kvm, &sev_cmd);
1864 		break;
1865 	case KVM_SEV_RECEIVE_UPDATE_DATA:
1866 		r = sev_receive_update_data(kvm, &sev_cmd);
1867 		break;
1868 	case KVM_SEV_RECEIVE_FINISH:
1869 		r = sev_receive_finish(kvm, &sev_cmd);
1870 		break;
1871 	default:
1872 		r = -EINVAL;
1873 		goto out;
1874 	}
1875 
1876 	if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
1877 		r = -EFAULT;
1878 
1879 out:
1880 	mutex_unlock(&kvm->lock);
1881 	return r;
1882 }
1883 
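/*
 * Handler for KVM_MEMORY_ENCRYPT_REG_REGION: pin the given userspace range
 * and remember it on sev->regions_list so it can be unpinned when the
 * region is unregistered or the VM is destroyed.  Userspace typically
 * registers guest RAM here so that pages holding ciphertext stay pinned.
 */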
1884 int sev_mem_enc_register_region(struct kvm *kvm,
1885 				struct kvm_enc_region *range)
1886 {
1887 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1888 	struct enc_region *region;
1889 	int ret = 0;
1890 
1891 	if (!sev_guest(kvm))
1892 		return -ENOTTY;
1893 
1894 	/* If kvm is mirroring encryption context it isn't responsible for it */
1895 	/* If kvm is mirroring an encryption context, it isn't responsible for it */
1896 		return -EINVAL;
1897 
1898 	if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
1899 		return -EINVAL;
1900 
1901 	region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
1902 	if (!region)
1903 		return -ENOMEM;
1904 
1905 	mutex_lock(&kvm->lock);
1906 	region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
1907 	if (IS_ERR(region->pages)) {
1908 		ret = PTR_ERR(region->pages);
1909 		mutex_unlock(&kvm->lock);
1910 		goto e_free;
1911 	}
1912 
1913 	region->uaddr = range->addr;
1914 	region->size = range->size;
1915 
1916 	list_add_tail(&region->list, &sev->regions_list);
1917 	mutex_unlock(&kvm->lock);
1918 
1919 	/*
1920 	 * The guest may change the memory encryption attribute from C=0 -> C=1
1921 	 * or vice versa for this memory range. Make sure the caches are
1922 	 * flushed so that guest data gets written into memory with the
1923 	 * correct C-bit.
1924 	 */
1925 	sev_clflush_pages(region->pages, region->npages);
1926 
1927 	return ret;
1928 
1929 e_free:
1930 	kfree(region);
1931 	return ret;
1932 }
1933 
1934 static struct enc_region *
1935 find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
1936 {
1937 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1938 	struct list_head *head = &sev->regions_list;
1939 	struct enc_region *i;
1940 
1941 	list_for_each_entry(i, head, list) {
1942 		if (i->uaddr == range->addr &&
1943 		    i->size == range->size)
1944 			return i;
1945 	}
1946 
1947 	return NULL;
1948 }
1949 
1950 static void __unregister_enc_region_locked(struct kvm *kvm,
1951 					   struct enc_region *region)
1952 {
1953 	sev_unpin_memory(kvm, region->pages, region->npages);
1954 	list_del(&region->list);
1955 	kfree(region);
1956 }
1957 
1958 int sev_mem_enc_unregister_region(struct kvm *kvm,
1959 				  struct kvm_enc_region *range)
1960 {
1961 	struct enc_region *region;
1962 	int ret;
1963 
1964 	/* If kvm is mirroring an encryption context, it isn't responsible for it */
1965 	if (is_mirroring_enc_context(kvm))
1966 		return -EINVAL;
1967 
1968 	mutex_lock(&kvm->lock);
1969 
1970 	if (!sev_guest(kvm)) {
1971 		ret = -ENOTTY;
1972 		goto failed;
1973 	}
1974 
1975 	region = find_enc_region(kvm, range);
1976 	if (!region) {
1977 		ret = -EINVAL;
1978 		goto failed;
1979 	}
1980 
1981 	/*
1982 	 * Ensure that all guest tagged cache entries are flushed before
1983 	 * releasing the pages back to the system for use. CLFLUSH will
1984 	 * not do this, so issue a WBINVD.
1985 	 */
1986 	wbinvd_on_all_cpus();
1987 
1988 	__unregister_enc_region_locked(kvm, region);
1989 
1990 	mutex_unlock(&kvm->lock);
1991 	return 0;
1992 
1993 failed:
1994 	mutex_unlock(&kvm->lock);
1995 	return ret;
1996 }
1997 
1998 int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd)
1999 {
2000 	struct file *source_kvm_file;
2001 	struct kvm *source_kvm;
2002 	struct kvm_sev_info *source_sev, *mirror_sev;
2003 	int ret;
2004 
2005 	source_kvm_file = fget(source_fd);
2006 	if (!file_is_kvm(source_kvm_file)) {
2007 		ret = -EBADF;
2008 		goto e_source_fput;
2009 	}
2010 
2011 	source_kvm = source_kvm_file->private_data;
2012 	ret = sev_lock_two_vms(kvm, source_kvm);
2013 	if (ret)
2014 		goto e_source_fput;
2015 
2016 	/*
2017 	 * Mirrors of mirrors should work, but let's not get silly.  Also
2018 	 * disallow out-of-band SEV/SEV-ES init if the target is already an
2019 	 * SEV guest, or if vCPUs have been created.  KVM relies on vCPUs being
2020 	 * created after SEV/SEV-ES initialization, e.g. to init intercepts.
2021 	 */
2022 	if (sev_guest(kvm) || !sev_guest(source_kvm) ||
2023 	    is_mirroring_enc_context(source_kvm) || kvm->created_vcpus) {
2024 		ret = -EINVAL;
2025 		goto e_unlock;
2026 	}
2027 
2028 	/*
2029 	 * The mirror kvm holds an enc_context_owner ref so its ASID can't
2030 	 * disappear until we're done with it.
2031 	 */
2032 	source_sev = &to_kvm_svm(source_kvm)->sev_info;
2033 	kvm_get_kvm(source_kvm);
2034 	mirror_sev = &to_kvm_svm(kvm)->sev_info;
2035 	list_add_tail(&mirror_sev->mirror_entry, &source_sev->mirror_vms);
2036 
2037 	/* Set enc_context_owner and copy its encryption context over */
2038 	mirror_sev->enc_context_owner = source_kvm;
2039 	mirror_sev->active = true;
2040 	mirror_sev->asid = source_sev->asid;
2041 	mirror_sev->fd = source_sev->fd;
2042 	mirror_sev->es_active = source_sev->es_active;
2043 	mirror_sev->handle = source_sev->handle;
2044 	INIT_LIST_HEAD(&mirror_sev->regions_list);
2045 	INIT_LIST_HEAD(&mirror_sev->mirror_vms);
2046 	ret = 0;
2047 
2048 	/*
2049 	 * Do not copy ap_jump_table. The mirror does not share the same KVM
2050 	 * context as the original, and the two may have different memory
2051 	 * views.
2052 	 */
2053 
2054 e_unlock:
2055 	sev_unlock_two_vms(kvm, source_kvm);
2056 e_source_fput:
2057 	if (source_kvm_file)
2058 		fput(source_kvm_file);
2059 	return ret;
2060 }
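
/*
 * Usage sketch (hedged): the mirror path above is driven from userspace by
 * enabling KVM_CAP_VM_COPY_ENC_CONTEXT_FROM on the mirror VM, passing the
 * source VM's fd as the first argument:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap     = KVM_CAP_VM_COPY_ENC_CONTEXT_FROM,
 *		.args[0] = source_vm_fd,
 *	};
 *	ioctl(mirror_vm_fd, KVM_ENABLE_CAP, &cap);
 */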
2061 
2062 void sev_vm_destroy(struct kvm *kvm)
2063 {
2064 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
2065 	struct list_head *head = &sev->regions_list;
2066 	struct list_head *pos, *q;
2067 
2068 	if (!sev_guest(kvm))
2069 		return;
2070 
2071 	WARN_ON(!list_empty(&sev->mirror_vms));
2072 
2073 	/* If this is a mirror_kvm, release the enc_context_owner and skip SEV cleanup */
2074 	if (is_mirroring_enc_context(kvm)) {
2075 		struct kvm *owner_kvm = sev->enc_context_owner;
2076 
2077 		mutex_lock(&owner_kvm->lock);
2078 		list_del(&sev->mirror_entry);
2079 		mutex_unlock(&owner_kvm->lock);
2080 		kvm_put_kvm(owner_kvm);
2081 		return;
2082 	}
2083 
2084 	/*
2085 	 * Ensure that all guest tagged cache entries are flushed before
2086 	 * releasing the pages back to the system for use. CLFLUSH will
2087 	 * not do this, so issue a WBINVD.
2088 	 */
2089 	wbinvd_on_all_cpus();
2090 
2091 	/*
2092 	 * If userspace was terminated before unregistering the memory regions,
2093 	 * then unpin all the registered memory.
2094 	 */
2095 	if (!list_empty(head)) {
2096 		list_for_each_safe(pos, q, head) {
2097 			__unregister_enc_region_locked(kvm,
2098 				list_entry(pos, struct enc_region, list));
2099 			cond_resched();
2100 		}
2101 	}
2102 
2103 	sev_unbind_asid(kvm, sev->handle);
2104 	sev_asid_free(sev);
2105 }
2106 
2107 void __init sev_set_cpu_caps(void)
2108 {
2109 	if (!sev_enabled)
2110 		kvm_cpu_cap_clear(X86_FEATURE_SEV);
2111 	if (!sev_es_enabled)
2112 		kvm_cpu_cap_clear(X86_FEATURE_SEV_ES);
2113 }
2114 
2115 void __init sev_hardware_setup(void)
2116 {
2117 #ifdef CONFIG_KVM_AMD_SEV
2118 	unsigned int eax, ebx, ecx, edx, sev_asid_count, sev_es_asid_count;
2119 	bool sev_es_supported = false;
2120 	bool sev_supported = false;
2121 
2122 	if (!sev_enabled || !npt_enabled)
2123 		goto out;
2124 
2125 	/*
2126 	 * SEV must obviously be supported in hardware.  Sanity check that the
2127 	 * CPU supports decode assists, which is mandatory for SEV guests to
2128 	 * support instruction emulation.
2129 	 */
2130 	if (!boot_cpu_has(X86_FEATURE_SEV) ||
2131 	    WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_DECODEASSISTS)))
2132 		goto out;
2133 
2134 	/* Retrieve SEV CPUID information */
2135 	cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);
2136 
2137 	/* Set encryption bit location for SEV-ES guests */
2138 	sev_enc_bit = ebx & 0x3f;
2139 
2140 	/* Maximum number of encrypted guests supported simultaneously */
2141 	max_sev_asid = ecx;
2142 	if (!max_sev_asid)
2143 		goto out;
2144 
2145 	/* Minimum ASID value that should be used for an SEV guest */
2146 	min_sev_asid = edx;
2147 	sev_me_mask = 1UL << (ebx & 0x3f);
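
	/*
	 * For reference: per the CPUID usage above, leaf 0x8000001f reports
	 * the C-bit position in EBX[5:0], the number of simultaneously
	 * supported encrypted guests (ASIDs) in ECX, and the minimum ASID
	 * usable by a plain SEV guest in EDX.  ASIDs 1 .. min_sev_asid - 1
	 * are therefore reserved for SEV-ES, and min_sev_asid .. max_sev_asid
	 * for SEV, which is how the capacities are computed below.
	 */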
2148 
2149 	/*
2150 	 * Initialize SEV ASID bitmaps. Allocate space for ASID 0 in the bitmap,
2151 	 * even though it's never used, so that the bitmap is indexed by the
2152 	 * actual ASID.
2153 	 */
2154 	nr_asids = max_sev_asid + 1;
2155 	sev_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
2156 	if (!sev_asid_bitmap)
2157 		goto out;
2158 
2159 	sev_reclaim_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
2160 	if (!sev_reclaim_asid_bitmap) {
2161 		bitmap_free(sev_asid_bitmap);
2162 		sev_asid_bitmap = NULL;
2163 		goto out;
2164 	}
2165 
2166 	sev_asid_count = max_sev_asid - min_sev_asid + 1;
2167 	if (misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count))
2168 		goto out;
2169 
2170 	pr_info("SEV supported: %u ASIDs\n", sev_asid_count);
2171 	sev_supported = true;
2172 
2173 	/* SEV-ES support requested? */
2174 	if (!sev_es_enabled)
2175 		goto out;
2176 
2177 	/* Does the CPU support SEV-ES? */
2178 	if (!boot_cpu_has(X86_FEATURE_SEV_ES))
2179 		goto out;
2180 
2181 	/* Has the system been allocated ASIDs for SEV-ES? */
2182 	if (min_sev_asid == 1)
2183 		goto out;
2184 
2185 	sev_es_asid_count = min_sev_asid - 1;
2186 	if (misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count))
2187 		goto out;
2188 
2189 	pr_info("SEV-ES supported: %u ASIDs\n", sev_es_asid_count);
2190 	sev_es_supported = true;
2191 
2192 out:
2193 	sev_enabled = sev_supported;
2194 	sev_es_enabled = sev_es_supported;
2195 #endif
2196 }
2197 
2198 void sev_hardware_unsetup(void)
2199 {
2200 	if (!sev_enabled)
2201 		return;
2202 
2203 	/* No need to take sev_bitmap_lock, all VMs have been destroyed. */
2204 	sev_flush_asids(1, max_sev_asid);
2205 
2206 	bitmap_free(sev_asid_bitmap);
2207 	bitmap_free(sev_reclaim_asid_bitmap);
2208 
2209 	misc_cg_set_capacity(MISC_CG_RES_SEV, 0);
2210 	misc_cg_set_capacity(MISC_CG_RES_SEV_ES, 0);
2211 }
2212 
2213 int sev_cpu_init(struct svm_cpu_data *sd)
2214 {
2215 	if (!sev_enabled)
2216 		return 0;
2217 
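	/*
	 * sev_vmcbs is indexed by ASID and records the last VMCB run with
	 * that ASID on this CPU; pre_sev_run() uses it to decide when a
	 * guest TLB flush is needed.
	 */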
2218 	sd->sev_vmcbs = kcalloc(nr_asids, sizeof(void *), GFP_KERNEL);
2219 	if (!sd->sev_vmcbs)
2220 		return -ENOMEM;
2221 
2222 	return 0;
2223 }
2224 
2225 /*
2226  * Pages used by hardware to hold guest encrypted state must be flushed before
2227  * returning them to the system.
2228  */
2229 static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
2230 {
2231 	int asid = to_kvm_svm(vcpu->kvm)->sev_info.asid;
2232 
2233 	/*
2234 	 * Note!  The address must be a kernel address, as regular page walk
2235 	 * checks are performed by VM_PAGE_FLUSH, i.e. operating on a user
2236 	 * address is non-deterministic and unsafe.  This function deliberately
2237 	 * takes a pointer to deter passing in a user address.
2238 	 */
2239 	unsigned long addr = (unsigned long)va;
2240 
2241 	/*
2242 	 * If the CPU enforces cache coherency for encrypted mappings of the
2243 	 * same physical page, use CLFLUSHOPT instead. NOTE: a cache flush is
2244 	 * still needed in order to work properly with DMA devices.
2245 	 */
2246 	if (boot_cpu_has(X86_FEATURE_SME_COHERENT)) {
2247 		clflush_cache_range(va, PAGE_SIZE);
2248 		return;
2249 	}
2250 
2251 	/*
2252 	 * VM Page Flush takes a host virtual address and a guest ASID.  Fall
2253 	 * back to WBINVD if this faults so as not to make any problems worse
2254 	 * by leaving stale encrypted data in the cache.
2255 	 */
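	/*
	 * Layout note (an assumption from the APM description of
	 * MSR_AMD64_VM_PAGE_FLUSH): the low 12 bits of the written value carry
	 * the ASID and the upper bits the page-aligned virtual address, which
	 * is why the two can simply be OR'ed together below.
	 */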
2256 	if (WARN_ON_ONCE(wrmsrl_safe(MSR_AMD64_VM_PAGE_FLUSH, addr | asid)))
2257 		goto do_wbinvd;
2258 
2259 	return;
2260 
2261 do_wbinvd:
2262 	wbinvd_on_all_cpus();
2263 }
2264 
2265 void sev_guest_memory_reclaimed(struct kvm *kvm)
2266 {
2267 	if (!sev_guest(kvm))
2268 		return;
2269 
2270 	wbinvd_on_all_cpus();
2271 }
2272 
2273 void sev_free_vcpu(struct kvm_vcpu *vcpu)
2274 {
2275 	struct vcpu_svm *svm;
2276 
2277 	if (!sev_es_guest(vcpu->kvm))
2278 		return;
2279 
2280 	svm = to_svm(vcpu);
2281 
2282 	if (vcpu->arch.guest_state_protected)
2283 		sev_flush_encrypted_page(vcpu, svm->sev_es.vmsa);
2284 
2285 	__free_page(virt_to_page(svm->sev_es.vmsa));
2286 
2287 	if (svm->sev_es.ghcb_sa_free)
2288 		kvfree(svm->sev_es.ghcb_sa);
2289 }
2290 
2291 static void dump_ghcb(struct vcpu_svm *svm)
2292 {
2293 	struct ghcb *ghcb = svm->sev_es.ghcb;
2294 	unsigned int nbits;
2295 
2296 	/* Re-use the dump_invalid_vmcb module parameter */
2297 	if (!dump_invalid_vmcb) {
2298 		pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
2299 		return;
2300 	}
2301 
2302 	nbits = sizeof(ghcb->save.valid_bitmap) * 8;
2303 
2304 	pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
2305 	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
2306 	       ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb));
2307 	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
2308 	       ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb));
2309 	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
2310 	       ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb));
2311 	pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
2312 	       ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb));
2313 	pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);
2314 }
2315 
2316 static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
2317 {
2318 	struct kvm_vcpu *vcpu = &svm->vcpu;
2319 	struct ghcb *ghcb = svm->sev_es.ghcb;
2320 
2321 	/*
2322 	 * The GHCB protocol so far allows for the following data
2323 	 * to be returned:
2324 	 *   GPRs RAX, RBX, RCX, RDX
2325 	 *
2326 	 * Copy their values, even if they may not have been written during the
2327 	 * VM-Exit.  It's the guest's responsibility to not consume random data.
2328 	 */
2329 	ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
2330 	ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
2331 	ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
2332 	ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
2333 }
2334 
2335 static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
2336 {
2337 	struct vmcb_control_area *control = &svm->vmcb->control;
2338 	struct kvm_vcpu *vcpu = &svm->vcpu;
2339 	struct ghcb *ghcb = svm->sev_es.ghcb;
2340 	u64 exit_code;
2341 
2342 	/*
2343 	 * The GHCB protocol so far allows for the following data
2344 	 * to be supplied:
2345 	 *   GPRs RAX, RBX, RCX, RDX
2346 	 *   XCR0
2347 	 *   CPL
2348 	 *
2349 	 * VMMCALL allows the guest to provide extra registers. KVM also
2350 	 * expects RSI for hypercalls, so include that, too.
2351 	 *
2352 	 * Copy their values to the appropriate location if supplied.
2353 	 */
2354 	memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
2355 
2356 	vcpu->arch.regs[VCPU_REGS_RAX] = ghcb_get_rax_if_valid(ghcb);
2357 	vcpu->arch.regs[VCPU_REGS_RBX] = ghcb_get_rbx_if_valid(ghcb);
2358 	vcpu->arch.regs[VCPU_REGS_RCX] = ghcb_get_rcx_if_valid(ghcb);
2359 	vcpu->arch.regs[VCPU_REGS_RDX] = ghcb_get_rdx_if_valid(ghcb);
2360 	vcpu->arch.regs[VCPU_REGS_RSI] = ghcb_get_rsi_if_valid(ghcb);
2361 
2362 	svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb);
2363 
2364 	if (ghcb_xcr0_is_valid(ghcb)) {
2365 		vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
2366 		kvm_update_cpuid_runtime(vcpu);
2367 	}
2368 
2369 	/* Copy the GHCB exit information into the VMCB fields */
2370 	exit_code = ghcb_get_sw_exit_code(ghcb);
2371 	control->exit_code = lower_32_bits(exit_code);
2372 	control->exit_code_hi = upper_32_bits(exit_code);
2373 	control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
2374 	control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);
2375 
2376 	/* Clear the valid entries fields */
2377 	memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
2378 }
2379 
2380 static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
2381 {
2382 	struct kvm_vcpu *vcpu;
2383 	struct ghcb *ghcb;
2384 	u64 exit_code;
2385 	u64 reason;
2386 
2387 	ghcb = svm->sev_es.ghcb;
2388 
2389 	/*
2390 	 * Retrieve the exit code now even though it may not be marked valid
2391 	 * as it could help with debugging.
2392 	 */
2393 	exit_code = ghcb_get_sw_exit_code(ghcb);
2394 
2395 	/* Only GHCB Usage code 0 is supported */
2396 	if (ghcb->ghcb_usage) {
2397 		reason = GHCB_ERR_INVALID_USAGE;
2398 		goto vmgexit_err;
2399 	}
2400 
2401 	reason = GHCB_ERR_MISSING_INPUT;
2402 
2403 	if (!ghcb_sw_exit_code_is_valid(ghcb) ||
2404 	    !ghcb_sw_exit_info_1_is_valid(ghcb) ||
2405 	    !ghcb_sw_exit_info_2_is_valid(ghcb))
2406 		goto vmgexit_err;
2407 
2408 	switch (ghcb_get_sw_exit_code(ghcb)) {
2409 	case SVM_EXIT_READ_DR7:
2410 		break;
2411 	case SVM_EXIT_WRITE_DR7:
2412 		if (!ghcb_rax_is_valid(ghcb))
2413 			goto vmgexit_err;
2414 		break;
2415 	case SVM_EXIT_RDTSC:
2416 		break;
2417 	case SVM_EXIT_RDPMC:
2418 		if (!ghcb_rcx_is_valid(ghcb))
2419 			goto vmgexit_err;
2420 		break;
2421 	case SVM_EXIT_CPUID:
2422 		if (!ghcb_rax_is_valid(ghcb) ||
2423 		    !ghcb_rcx_is_valid(ghcb))
2424 			goto vmgexit_err;
2425 		if (ghcb_get_rax(ghcb) == 0xd)
2426 			if (!ghcb_xcr0_is_valid(ghcb))
2427 				goto vmgexit_err;
2428 		break;
2429 	case SVM_EXIT_INVD:
2430 		break;
2431 	case SVM_EXIT_IOIO:
2432 		if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) {
2433 			if (!ghcb_sw_scratch_is_valid(ghcb))
2434 				goto vmgexit_err;
2435 		} else {
2436 			if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK))
2437 				if (!ghcb_rax_is_valid(ghcb))
2438 					goto vmgexit_err;
2439 		}
2440 		break;
2441 	case SVM_EXIT_MSR:
2442 		if (!ghcb_rcx_is_valid(ghcb))
2443 			goto vmgexit_err;
2444 		if (ghcb_get_sw_exit_info_1(ghcb)) {
2445 			if (!ghcb_rax_is_valid(ghcb) ||
2446 			    !ghcb_rdx_is_valid(ghcb))
2447 				goto vmgexit_err;
2448 		}
2449 		break;
2450 	case SVM_EXIT_VMMCALL:
2451 		if (!ghcb_rax_is_valid(ghcb) ||
2452 		    !ghcb_cpl_is_valid(ghcb))
2453 			goto vmgexit_err;
2454 		break;
2455 	case SVM_EXIT_RDTSCP:
2456 		break;
2457 	case SVM_EXIT_WBINVD:
2458 		break;
2459 	case SVM_EXIT_MONITOR:
2460 		if (!ghcb_rax_is_valid(ghcb) ||
2461 		    !ghcb_rcx_is_valid(ghcb) ||
2462 		    !ghcb_rdx_is_valid(ghcb))
2463 			goto vmgexit_err;
2464 		break;
2465 	case SVM_EXIT_MWAIT:
2466 		if (!ghcb_rax_is_valid(ghcb) ||
2467 		    !ghcb_rcx_is_valid(ghcb))
2468 			goto vmgexit_err;
2469 		break;
2470 	case SVM_VMGEXIT_MMIO_READ:
2471 	case SVM_VMGEXIT_MMIO_WRITE:
2472 		if (!ghcb_sw_scratch_is_valid(ghcb))
2473 			goto vmgexit_err;
2474 		break;
2475 	case SVM_VMGEXIT_NMI_COMPLETE:
2476 	case SVM_VMGEXIT_AP_HLT_LOOP:
2477 	case SVM_VMGEXIT_AP_JUMP_TABLE:
2478 	case SVM_VMGEXIT_UNSUPPORTED_EVENT:
2479 		break;
2480 	default:
2481 		reason = GHCB_ERR_INVALID_EVENT;
2482 		goto vmgexit_err;
2483 	}
2484 
2485 	return 0;
2486 
2487 vmgexit_err:
2488 	vcpu = &svm->vcpu;
2489 
2490 	if (reason == GHCB_ERR_INVALID_USAGE) {
2491 		vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
2492 			    ghcb->ghcb_usage);
2493 	} else if (reason == GHCB_ERR_INVALID_EVENT) {
2494 		vcpu_unimpl(vcpu, "vmgexit: exit code %#llx is not valid\n",
2495 			    exit_code);
2496 	} else {
2497 		vcpu_unimpl(vcpu, "vmgexit: exit code %#llx input is not valid\n",
2498 			    exit_code);
2499 		dump_ghcb(svm);
2500 	}
2501 
2502 	/* Clear the valid entries fields */
2503 	memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
2504 
2505 	ghcb_set_sw_exit_info_1(ghcb, 2);
2506 	ghcb_set_sw_exit_info_2(ghcb, reason);
2507 
2508 	/* Resume the guest to "return" the error code. */
2509 	return 1;
2510 }
2511 
2512 void sev_es_unmap_ghcb(struct vcpu_svm *svm)
2513 {
2514 	if (!svm->sev_es.ghcb)
2515 		return;
2516 
2517 	if (svm->sev_es.ghcb_sa_free) {
2518 		/*
2519 		 * The scratch area lives outside the GHCB, so there is a
2520 		 * buffer that, depending on the operation performed, may
2521 		 * need to be synced, then freed.
2522 		 */
2523 		if (svm->sev_es.ghcb_sa_sync) {
2524 			kvm_write_guest(svm->vcpu.kvm,
2525 					ghcb_get_sw_scratch(svm->sev_es.ghcb),
2526 					svm->sev_es.ghcb_sa,
2527 					svm->sev_es.ghcb_sa_len);
2528 			svm->sev_es.ghcb_sa_sync = false;
2529 		}
2530 
2531 		kvfree(svm->sev_es.ghcb_sa);
2532 		svm->sev_es.ghcb_sa = NULL;
2533 		svm->sev_es.ghcb_sa_free = false;
2534 	}
2535 
2536 	trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->sev_es.ghcb);
2537 
2538 	sev_es_sync_to_ghcb(svm);
2539 
2540 	kvm_vcpu_unmap(&svm->vcpu, &svm->sev_es.ghcb_map, true);
2541 	svm->sev_es.ghcb = NULL;
2542 }
2543 
2544 void pre_sev_run(struct vcpu_svm *svm, int cpu)
2545 {
2546 	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2547 	int asid = sev_get_asid(svm->vcpu.kvm);
2548 
2549 	/* Assign the ASID allocated to this SEV guest */
2550 	svm->asid = asid;
2551 
2552 	/*
2553 	 * Flush guest TLB:
2554 	 *
2555 	 * 1) when a different VMCB for the same ASID is to be run on the same host CPU, or
2556 	 * 2) this VMCB was executed on a different host CPU in previous VMRUNs.
2557 	 */
2558 	if (sd->sev_vmcbs[asid] == svm->vmcb &&
2559 	    svm->vcpu.arch.last_vmentry_cpu == cpu)
2560 		return;
2561 
2562 	sd->sev_vmcbs[asid] = svm->vmcb;
2563 	svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
2564 	vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
2565 }
2566 
2567 #define GHCB_SCRATCH_AREA_LIMIT		(16ULL * PAGE_SIZE)
2568 static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
2569 {
2570 	struct vmcb_control_area *control = &svm->vmcb->control;
2571 	struct ghcb *ghcb = svm->sev_es.ghcb;
2572 	u64 ghcb_scratch_beg, ghcb_scratch_end;
2573 	u64 scratch_gpa_beg, scratch_gpa_end;
2574 	void *scratch_va;
2575 
2576 	scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
2577 	if (!scratch_gpa_beg) {
2578 		pr_err("vmgexit: scratch gpa not provided\n");
2579 		goto e_scratch;
2580 	}
2581 
2582 	scratch_gpa_end = scratch_gpa_beg + len;
2583 	if (scratch_gpa_end < scratch_gpa_beg) {
2584 		pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n",
2585 		       len, scratch_gpa_beg);
2586 		goto e_scratch;
2587 	}
2588 
2589 	if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) {
2590 		/* Scratch area begins within GHCB */
2591 		ghcb_scratch_beg = control->ghcb_gpa +
2592 				   offsetof(struct ghcb, shared_buffer);
2593 		ghcb_scratch_end = control->ghcb_gpa +
2594 				   offsetof(struct ghcb, reserved_1);
2595 
2596 		/*
2597 		 * If the scratch area begins within the GHCB, it must be
2598 		 * completely contained in the GHCB shared buffer area.
2599 		 */
2600 		if (scratch_gpa_beg < ghcb_scratch_beg ||
2601 		    scratch_gpa_end > ghcb_scratch_end) {
2602 			pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n",
2603 			       scratch_gpa_beg, scratch_gpa_end);
2604 			goto e_scratch;
2605 		}
2606 
2607 		scratch_va = (void *)svm->sev_es.ghcb;
2608 		scratch_va += (scratch_gpa_beg - control->ghcb_gpa);
2609 	} else {
2610 		/*
2611 		 * The guest memory must be read into a kernel buffer, so
2612 		 * limit the size.
2613 		 */
2614 		if (len > GHCB_SCRATCH_AREA_LIMIT) {
2615 			pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n",
2616 			       len, GHCB_SCRATCH_AREA_LIMIT);
2617 			goto e_scratch;
2618 		}
2619 		scratch_va = kvzalloc(len, GFP_KERNEL_ACCOUNT);
2620 		if (!scratch_va)
2621 			return -ENOMEM;
2622 
2623 		if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
2624 			/* Unable to copy scratch area from guest */
2625 			pr_err("vmgexit: kvm_read_guest for scratch area failed\n");
2626 
2627 			kvfree(scratch_va);
2628 			return -EFAULT;
2629 		}
2630 
2631 		/*
2632 		 * The scratch area is outside the GHCB. The operation will
2633 		 * dictate whether the buffer needs to be synced before running
2634 		 * the vCPU next time (i.e. a read was requested so the data
2635 		 * must be written back to the guest memory).
2636 		 */
2637 		svm->sev_es.ghcb_sa_sync = sync;
2638 		svm->sev_es.ghcb_sa_free = true;
2639 	}
2640 
2641 	svm->sev_es.ghcb_sa = scratch_va;
2642 	svm->sev_es.ghcb_sa_len = len;
2643 
2644 	return 0;
2645 
2646 e_scratch:
2647 	ghcb_set_sw_exit_info_1(ghcb, 2);
2648 	ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);
2649 
2650 	return 1;
2651 }
2652 
2653 static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
2654 			      unsigned int pos)
2655 {
2656 	svm->vmcb->control.ghcb_gpa &= ~(mask << pos);
2657 	svm->vmcb->control.ghcb_gpa |= (value & mask) << pos;
2658 }
2659 
2660 static u64 get_ghcb_msr_bits(struct vcpu_svm *svm, u64 mask, unsigned int pos)
2661 {
2662 	return (svm->vmcb->control.ghcb_gpa >> pos) & mask;
2663 }
2664 
2665 static void set_ghcb_msr(struct vcpu_svm *svm, u64 value)
2666 {
2667 	svm->vmcb->control.ghcb_gpa = value;
2668 }
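
/*
 * Per the GHCB specification, the low 12 bits of the GHCB MSR
 * (GHCB_MSR_INFO_MASK) encode the request/response type and the remaining
 * bits carry type-specific data; the helpers above pack and unpack those
 * fields for the MSR protocol handler below.
 */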
2669 
2670 static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
2671 {
2672 	struct vmcb_control_area *control = &svm->vmcb->control;
2673 	struct kvm_vcpu *vcpu = &svm->vcpu;
2674 	u64 ghcb_info;
2675 	int ret = 1;
2676 
2677 	ghcb_info = control->ghcb_gpa & GHCB_MSR_INFO_MASK;
2678 
2679 	trace_kvm_vmgexit_msr_protocol_enter(svm->vcpu.vcpu_id,
2680 					     control->ghcb_gpa);
2681 
2682 	switch (ghcb_info) {
2683 	case GHCB_MSR_SEV_INFO_REQ:
2684 		set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
2685 						    GHCB_VERSION_MIN,
2686 						    sev_enc_bit));
2687 		break;
2688 	case GHCB_MSR_CPUID_REQ: {
2689 		u64 cpuid_fn, cpuid_reg, cpuid_value;
2690 
2691 		cpuid_fn = get_ghcb_msr_bits(svm,
2692 					     GHCB_MSR_CPUID_FUNC_MASK,
2693 					     GHCB_MSR_CPUID_FUNC_POS);
2694 
2695 		/* Initialize the registers needed by the CPUID intercept */
2696 		vcpu->arch.regs[VCPU_REGS_RAX] = cpuid_fn;
2697 		vcpu->arch.regs[VCPU_REGS_RCX] = 0;
2698 
2699 		ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID);
2700 		if (!ret) {
2701 			/* Error, keep GHCB MSR value as-is */
2702 			break;
2703 		}
2704 
2705 		cpuid_reg = get_ghcb_msr_bits(svm,
2706 					      GHCB_MSR_CPUID_REG_MASK,
2707 					      GHCB_MSR_CPUID_REG_POS);
2708 		if (cpuid_reg == 0)
2709 			cpuid_value = vcpu->arch.regs[VCPU_REGS_RAX];
2710 		else if (cpuid_reg == 1)
2711 			cpuid_value = vcpu->arch.regs[VCPU_REGS_RBX];
2712 		else if (cpuid_reg == 2)
2713 			cpuid_value = vcpu->arch.regs[VCPU_REGS_RCX];
2714 		else
2715 			cpuid_value = vcpu->arch.regs[VCPU_REGS_RDX];
2716 
2717 		set_ghcb_msr_bits(svm, cpuid_value,
2718 				  GHCB_MSR_CPUID_VALUE_MASK,
2719 				  GHCB_MSR_CPUID_VALUE_POS);
2720 
2721 		set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP,
2722 				  GHCB_MSR_INFO_MASK,
2723 				  GHCB_MSR_INFO_POS);
2724 		break;
2725 	}
2726 	case GHCB_MSR_TERM_REQ: {
2727 		u64 reason_set, reason_code;
2728 
2729 		reason_set = get_ghcb_msr_bits(svm,
2730 					       GHCB_MSR_TERM_REASON_SET_MASK,
2731 					       GHCB_MSR_TERM_REASON_SET_POS);
2732 		reason_code = get_ghcb_msr_bits(svm,
2733 						GHCB_MSR_TERM_REASON_MASK,
2734 						GHCB_MSR_TERM_REASON_POS);
2735 		pr_info("SEV-ES guest requested termination: %#llx:%#llx\n",
2736 			reason_set, reason_code);
2737 
2738 		ret = -EINVAL;
2739 		break;
2740 	}
2741 	default:
2742 		/* Error, keep GHCB MSR value as-is */
2743 		break;
2744 	}
2745 
2746 	trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id,
2747 					    control->ghcb_gpa, ret);
2748 
2749 	return ret;
2750 }
2751 
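/*
 * Return values follow the usual KVM exit-handler convention: a positive
 * value resumes the guest, zero exits to userspace, and a negative value is
 * treated as an error.
 */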
2752 int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
2753 {
2754 	struct vcpu_svm *svm = to_svm(vcpu);
2755 	struct vmcb_control_area *control = &svm->vmcb->control;
2756 	u64 ghcb_gpa, exit_code;
2757 	struct ghcb *ghcb;
2758 	int ret;
2759 
2760 	/* Validate the GHCB */
2761 	ghcb_gpa = control->ghcb_gpa;
2762 	if (ghcb_gpa & GHCB_MSR_INFO_MASK)
2763 		return sev_handle_vmgexit_msr_protocol(svm);
2764 
2765 	if (!ghcb_gpa) {
2766 		vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n");
2767 
2768 		/* Without a GHCB, just return right back to the guest */
2769 		return 1;
2770 	}
2771 
2772 	if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) {
2773 		/* Unable to map GHCB from guest */
2774 		vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
2775 			    ghcb_gpa);
2776 
2777 		/* Without a GHCB, just return right back to the guest */
2778 		return 1;
2779 	}
2780 
2781 	svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
2782 	ghcb = svm->sev_es.ghcb_map.hva;
2783 
2784 	trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);
2785 
2786 	exit_code = ghcb_get_sw_exit_code(ghcb);
2787 
2788 	ret = sev_es_validate_vmgexit(svm);
2789 	if (ret)
2790 		return ret;
2791 
2792 	sev_es_sync_from_ghcb(svm);
2793 	ghcb_set_sw_exit_info_1(ghcb, 0);
2794 	ghcb_set_sw_exit_info_2(ghcb, 0);
2795 
2796 	switch (exit_code) {
2797 	case SVM_VMGEXIT_MMIO_READ:
2798 		ret = setup_vmgexit_scratch(svm, true, control->exit_info_2);
2799 		if (ret)
2800 			break;
2801 
2802 		ret = kvm_sev_es_mmio_read(vcpu,
2803 					   control->exit_info_1,
2804 					   control->exit_info_2,
2805 					   svm->sev_es.ghcb_sa);
2806 		break;
2807 	case SVM_VMGEXIT_MMIO_WRITE:
2808 		ret = setup_vmgexit_scratch(svm, false, control->exit_info_2);
2809 		if (ret)
2810 			break;
2811 
2812 		ret = kvm_sev_es_mmio_write(vcpu,
2813 					    control->exit_info_1,
2814 					    control->exit_info_2,
2815 					    svm->sev_es.ghcb_sa);
2816 		break;
2817 	case SVM_VMGEXIT_NMI_COMPLETE:
2818 		ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET);
2819 		break;
2820 	case SVM_VMGEXIT_AP_HLT_LOOP:
2821 		ret = kvm_emulate_ap_reset_hold(vcpu);
2822 		break;
2823 	case SVM_VMGEXIT_AP_JUMP_TABLE: {
2824 		struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
2825 
2826 		switch (control->exit_info_1) {
2827 		case 0:
2828 			/* Set AP jump table address */
2829 			sev->ap_jump_table = control->exit_info_2;
2830 			break;
2831 		case 1:
2832 			/* Get AP jump table address */
2833 			ghcb_set_sw_exit_info_2(ghcb, sev->ap_jump_table);
2834 			break;
2835 		default:
2836 			pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
2837 			       control->exit_info_1);
2838 			ghcb_set_sw_exit_info_1(ghcb, 2);
2839 			ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_INPUT);
2840 		}
2841 
2842 		ret = 1;
2843 		break;
2844 	}
2845 	case SVM_VMGEXIT_UNSUPPORTED_EVENT:
2846 		vcpu_unimpl(vcpu,
2847 			    "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
2848 			    control->exit_info_1, control->exit_info_2);
2849 		ret = -EINVAL;
2850 		break;
2851 	default:
2852 		ret = svm_invoke_exit_handler(vcpu, exit_code);
2853 	}
2854 
2855 	return ret;
2856 }
2857 
2858 int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
2859 {
2860 	int count;
2861 	int bytes;
2862 	int r;
2863 
2864 	if (svm->vmcb->control.exit_info_2 > INT_MAX)
2865 		return -EINVAL;
2866 
2867 	count = svm->vmcb->control.exit_info_2;
2868 	if (unlikely(check_mul_overflow(count, size, &bytes)))
2869 		return -EINVAL;
2870 
2871 	r = setup_vmgexit_scratch(svm, in, bytes);
2872 	if (r)
2873 		return r;
2874 
2875 	return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa,
2876 				    count, in);
2877 }
2878 
2879 void sev_es_init_vmcb(struct vcpu_svm *svm)
2880 {
2881 	struct kvm_vcpu *vcpu = &svm->vcpu;
2882 
2883 	svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
2884 	svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
2885 
2886 	/*
2887 	 * An SEV-ES guest requires a VMSA area that is separate from the
2888 	 * VMCB page. Do not include the encryption mask on the VMSA physical
2889 	 * address since hardware will access it using the guest key.
2890 	 */
2891 	svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa);
2892 
2893 	/* Can't intercept CR register access, HV can't modify CR registers */
2894 	svm_clr_intercept(svm, INTERCEPT_CR0_READ);
2895 	svm_clr_intercept(svm, INTERCEPT_CR4_READ);
2896 	svm_clr_intercept(svm, INTERCEPT_CR8_READ);
2897 	svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
2898 	svm_clr_intercept(svm, INTERCEPT_CR4_WRITE);
2899 	svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);
2900 
2901 	svm_clr_intercept(svm, INTERCEPT_SELECTIVE_CR0);
2902 
2903 	/* Track EFER/CR register changes */
2904 	svm_set_intercept(svm, TRAP_EFER_WRITE);
2905 	svm_set_intercept(svm, TRAP_CR0_WRITE);
2906 	svm_set_intercept(svm, TRAP_CR4_WRITE);
2907 	svm_set_intercept(svm, TRAP_CR8_WRITE);
2908 
2909 	/* No support for enable_vmware_backdoor */
2910 	clr_exception_intercept(svm, GP_VECTOR);
2911 
2912 	/* Can't intercept XSETBV, HV can't modify XCR0 directly */
2913 	svm_clr_intercept(svm, INTERCEPT_XSETBV);
2914 
2915 	/* Clear intercepts on selected MSRs */
2916 	set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
2917 	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
2918 	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
2919 	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
2920 	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
2921 	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
2922 }
2923 
2924 void sev_es_vcpu_reset(struct vcpu_svm *svm)
2925 {
2926 	/*
2927 	 * Set the GHCB MSR value as per the GHCB specification when emulating
2928 	 * vCPU RESET for an SEV-ES guest.
2929 	 */
2930 	set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
2931 					    GHCB_VERSION_MIN,
2932 					    sev_enc_bit));
2933 }
2934 
2935 void sev_es_prepare_switch_to_guest(struct vmcb_save_area *hostsa)
2936 {
2937 	/*
2938 	 * For an SEV-ES guest, hardware will restore the host state on VMEXIT,
2939 	 * of which one step is to perform a VMLOAD.  KVM performs the
2940 	 * corresponding VMSAVE in svm_prepare_guest_switch for both
2941 	 * traditional and SEV-ES guests.
2942 	 */
2943 
2944 	/* XCR0 is restored on VMEXIT, save the current host value */
2945 	hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
2946 
2947 	/* PKRU is restored on VMEXIT, save the current host value */
2948 	hostsa->pkru = read_pkru();
2949 
2950 	/* MSR_IA32_XSS is restored on VMEXIT, save the current host value */
2951 	hostsa->xss = host_xss;
2952 }
2953 
2954 void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
2955 {
2956 	struct vcpu_svm *svm = to_svm(vcpu);
2957 
2958 	/* First SIPI: Use the values as initially set by the VMM */
2959 	if (!svm->sev_es.received_first_sipi) {
2960 		svm->sev_es.received_first_sipi = true;
2961 		return;
2962 	}
2963 
2964 	/*
2965 	 * Subsequent SIPI: Return from an AP Reset Hold VMGEXIT, where
2966 	 * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
2967 	 * non-zero value.
2968 	 */
2969 	if (!svm->sev_es.ghcb)
2970 		return;
2971 
2972 	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);
2973 }
2974