1eaf78265SJoerg Roedel // SPDX-License-Identifier: GPL-2.0-only
2eaf78265SJoerg Roedel /*
3eaf78265SJoerg Roedel * Kernel-based Virtual Machine driver for Linux
4eaf78265SJoerg Roedel *
5eaf78265SJoerg Roedel * AMD SVM-SEV support
6eaf78265SJoerg Roedel *
7eaf78265SJoerg Roedel * Copyright 2010 Red Hat, Inc. and/or its affiliates.
8eaf78265SJoerg Roedel */
98d20bd63SSean Christopherson #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10eaf78265SJoerg Roedel
11eaf78265SJoerg Roedel #include <linux/kvm_types.h>
12eaf78265SJoerg Roedel #include <linux/kvm_host.h>
13eaf78265SJoerg Roedel #include <linux/kernel.h>
14eaf78265SJoerg Roedel #include <linux/highmem.h>
15ae7d45fbSMario Limonciello #include <linux/psp.h>
16eaf78265SJoerg Roedel #include <linux/psp-sev.h>
17b2bce0a5SBorislav Petkov #include <linux/pagemap.h>
18eaf78265SJoerg Roedel #include <linux/swap.h>
197aef27f0SVipin Sharma #include <linux/misc_cgroup.h>
20add5e2f0STom Lendacky #include <linux/processor.h>
21d523ab6bSTom Lendacky #include <linux/trace_events.h>
22eaf78265SJoerg Roedel
23784a4661SDave Hansen #include <asm/pkru.h>
248640ca58STom Lendacky #include <asm/trapnr.h>
25d9d005f3SThomas Gleixner #include <asm/fpu/xcr.h>
26d1f85fbeSAlexey Kardashevskiy #include <asm/debugreg.h>
278640ca58STom Lendacky
280c29397aSSean Christopherson #include "mmu.h"
29eaf78265SJoerg Roedel #include "x86.h"
30eaf78265SJoerg Roedel #include "svm.h"
3135a78319SSean Christopherson #include "svm_ops.h"
32291bd20dSTom Lendacky #include "cpuid.h"
33d523ab6bSTom Lendacky #include "trace.h"
34eaf78265SJoerg Roedel
357aef27f0SVipin Sharma #ifndef CONFIG_KVM_AMD_SEV
367aef27f0SVipin Sharma /*
377aef27f0SVipin Sharma * When this config is not defined, the SEV feature is not supported and the APIs in
387aef27f0SVipin Sharma * this file are not used, but this file still gets compiled into the KVM AMD
397aef27f0SVipin Sharma * module.
407aef27f0SVipin Sharma *
417aef27f0SVipin Sharma * We will not have MISC_CG_RES_SEV and MISC_CG_RES_SEV_ES entries in the enum
427aef27f0SVipin Sharma * misc_res_type {} defined in linux/misc_cgroup.h.
437aef27f0SVipin Sharma *
447aef27f0SVipin Sharma * The macros below allow compilation to succeed.
457aef27f0SVipin Sharma */
467aef27f0SVipin Sharma #define MISC_CG_RES_SEV MISC_CG_RES_TYPES
477aef27f0SVipin Sharma #define MISC_CG_RES_SEV_ES MISC_CG_RES_TYPES
487aef27f0SVipin Sharma #endif
497aef27f0SVipin Sharma
50a479c334SSean Christopherson #ifdef CONFIG_KVM_AMD_SEV
51e8126bdaSSean Christopherson /* enable/disable SEV support */
526c2c7bf5SSean Christopherson static bool sev_enabled = true;
538d364a07SSean Christopherson module_param_named(sev, sev_enabled, bool, 0444);
54e8126bdaSSean Christopherson
55e8126bdaSSean Christopherson /* enable/disable SEV-ES support */
566c2c7bf5SSean Christopherson static bool sev_es_enabled = true;
578d364a07SSean Christopherson module_param_named(sev_es, sev_es_enabled, bool, 0444);
58d1f85fbeSAlexey Kardashevskiy
59d1f85fbeSAlexey Kardashevskiy /* enable/disable SEV-ES DebugSwap support */
6045770363SPaolo Bonzini static bool sev_es_debug_swap_enabled = false;
61d1f85fbeSAlexey Kardashevskiy module_param_named(debug_swap, sev_es_debug_swap_enabled, bool, 0444);
62a479c334SSean Christopherson #else
63a479c334SSean Christopherson #define sev_enabled false
64a479c334SSean Christopherson #define sev_es_enabled false
65d1f85fbeSAlexey Kardashevskiy #define sev_es_debug_swap_enabled false
66a479c334SSean Christopherson #endif /* CONFIG_KVM_AMD_SEV */
67e8126bdaSSean Christopherson
681edc1459STom Lendacky static u8 sev_enc_bit;
69eaf78265SJoerg Roedel static DECLARE_RWSEM(sev_deactivate_lock);
70eaf78265SJoerg Roedel static DEFINE_MUTEX(sev_bitmap_lock);
71eaf78265SJoerg Roedel unsigned int max_sev_asid;
72eaf78265SJoerg Roedel static unsigned int min_sev_asid;
73d3d1af85SBrijesh Singh static unsigned long sev_me_mask;
74bb2baeb2SMingwei Zhang static unsigned int nr_asids;
75eaf78265SJoerg Roedel static unsigned long *sev_asid_bitmap;
76eaf78265SJoerg Roedel static unsigned long *sev_reclaim_asid_bitmap;
77eaf78265SJoerg Roedel
78eaf78265SJoerg Roedel struct enc_region {
79eaf78265SJoerg Roedel struct list_head list;
80eaf78265SJoerg Roedel unsigned long npages;
81eaf78265SJoerg Roedel struct page **pages;
82eaf78265SJoerg Roedel unsigned long uaddr;
83eaf78265SJoerg Roedel unsigned long size;
84eaf78265SJoerg Roedel };
85eaf78265SJoerg Roedel
86469bb32bSSean Christopherson /* Called with the sev_bitmap_lock held, or on shutdown */
8779b79ea2SSean Christopherson static int sev_flush_asids(unsigned int min_asid, unsigned int max_asid)
88eaf78265SJoerg Roedel {
8979b79ea2SSean Christopherson int ret, error = 0;
9079b79ea2SSean Christopherson unsigned int asid;
91469bb32bSSean Christopherson
92469bb32bSSean Christopherson /* Check if there are any ASIDs to reclaim before performing a flush */
93bb2baeb2SMingwei Zhang asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid);
94bb2baeb2SMingwei Zhang if (asid > max_asid)
95469bb32bSSean Christopherson return -EBUSY;
96eaf78265SJoerg Roedel
97eaf78265SJoerg Roedel /*
98eaf78265SJoerg Roedel * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
99eaf78265SJoerg Roedel * so it must be guarded.
100eaf78265SJoerg Roedel */
101eaf78265SJoerg Roedel down_write(&sev_deactivate_lock);
102eaf78265SJoerg Roedel
103eaf78265SJoerg Roedel wbinvd_on_all_cpus();
104eaf78265SJoerg Roedel ret = sev_guest_df_flush(&error);
105eaf78265SJoerg Roedel
106eaf78265SJoerg Roedel up_write(&sev_deactivate_lock);
107eaf78265SJoerg Roedel
108eaf78265SJoerg Roedel if (ret)
109eaf78265SJoerg Roedel pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);
110eaf78265SJoerg Roedel
111eaf78265SJoerg Roedel return ret;
112eaf78265SJoerg Roedel }
113eaf78265SJoerg Roedel
11454526d1fSNathan Tempelman static inline bool is_mirroring_enc_context(struct kvm *kvm)
11554526d1fSNathan Tempelman {
11654526d1fSNathan Tempelman return !!to_kvm_svm(kvm)->sev_info.enc_context_owner;
11754526d1fSNathan Tempelman }
11854526d1fSNathan Tempelman
119eaf78265SJoerg Roedel /* Must be called with the sev_bitmap_lock held */
12079b79ea2SSean Christopherson static bool __sev_recycle_asids(unsigned int min_asid, unsigned int max_asid)
121eaf78265SJoerg Roedel {
122469bb32bSSean Christopherson if (sev_flush_asids(min_asid, max_asid))
123eaf78265SJoerg Roedel return false;
124eaf78265SJoerg Roedel
12580675b3aSTom Lendacky /* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */
126eaf78265SJoerg Roedel bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
127bb2baeb2SMingwei Zhang nr_asids);
128bb2baeb2SMingwei Zhang bitmap_zero(sev_reclaim_asid_bitmap, nr_asids);
129eaf78265SJoerg Roedel
130eaf78265SJoerg Roedel return true;
131eaf78265SJoerg Roedel }
132eaf78265SJoerg Roedel
13391b692a0SPaolo Bonzini static int sev_misc_cg_try_charge(struct kvm_sev_info *sev)
13491b692a0SPaolo Bonzini {
13591b692a0SPaolo Bonzini enum misc_res_type type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
13691b692a0SPaolo Bonzini return misc_cg_try_charge(type, sev->misc_cg, 1);
13791b692a0SPaolo Bonzini }
13891b692a0SPaolo Bonzini
13991b692a0SPaolo Bonzini static void sev_misc_cg_uncharge(struct kvm_sev_info *sev)
14091b692a0SPaolo Bonzini {
14191b692a0SPaolo Bonzini enum misc_res_type type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
14291b692a0SPaolo Bonzini misc_cg_uncharge(type, sev->misc_cg, 1);
14391b692a0SPaolo Bonzini }
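/*
 * Note: the two helpers above account each SEV/SEV-ES guest against the
 * "misc" cgroup controller, charging exactly one ASID of the matching
 * resource type (MISC_CG_RES_SEV or MISC_CG_RES_SEV_ES) per VM.
 */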
14491b692a0SPaolo Bonzini
14580675b3aSTom Lendacky static int sev_asid_new(struct kvm_sev_info *sev)
146eaf78265SJoerg Roedel {
147ab7a6fe9SAshish Kalra /*
148ab7a6fe9SAshish Kalra * SEV-enabled guests must use ASIDs from min_sev_asid to max_sev_asid.
149ab7a6fe9SAshish Kalra * SEV-ES-enabled guests can use ASIDs from 1 to min_sev_asid - 1.
150ab7a6fe9SAshish Kalra * Note: min ASID can end up larger than the max if basic SEV support is
151ab7a6fe9SAshish Kalra * effectively disabled by disallowing use of ASIDs for SEV guests.
152ab7a6fe9SAshish Kalra */
153ab7a6fe9SAshish Kalra unsigned int min_asid = sev->es_active ? 1 : min_sev_asid;
154ab7a6fe9SAshish Kalra unsigned int max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
155ab7a6fe9SAshish Kalra unsigned int asid;
156eaf78265SJoerg Roedel bool retry = true;
15779b79ea2SSean Christopherson int ret;
1587aef27f0SVipin Sharma
159ab7a6fe9SAshish Kalra if (min_asid > max_asid)
160ab7a6fe9SAshish Kalra return -ENOTTY;
161ab7a6fe9SAshish Kalra
1627aef27f0SVipin Sharma WARN_ON(sev->misc_cg);
1637aef27f0SVipin Sharma sev->misc_cg = get_current_misc_cg();
16491b692a0SPaolo Bonzini ret = sev_misc_cg_try_charge(sev);
1657aef27f0SVipin Sharma if (ret) {
1667aef27f0SVipin Sharma put_misc_cg(sev->misc_cg);
1677aef27f0SVipin Sharma sev->misc_cg = NULL;
1687aef27f0SVipin Sharma return ret;
1697aef27f0SVipin Sharma }
170eaf78265SJoerg Roedel
171eaf78265SJoerg Roedel mutex_lock(&sev_bitmap_lock);
172eaf78265SJoerg Roedel
173eaf78265SJoerg Roedel again:
174bb2baeb2SMingwei Zhang asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
175bb2baeb2SMingwei Zhang if (asid > max_asid) {
17680675b3aSTom Lendacky if (retry && __sev_recycle_asids(min_asid, max_asid)) {
177eaf78265SJoerg Roedel retry = false;
178eaf78265SJoerg Roedel goto again;
179eaf78265SJoerg Roedel }
180eaf78265SJoerg Roedel mutex_unlock(&sev_bitmap_lock);
1817aef27f0SVipin Sharma ret = -EBUSY;
1827aef27f0SVipin Sharma goto e_uncharge;
183eaf78265SJoerg Roedel }
184eaf78265SJoerg Roedel
185bb2baeb2SMingwei Zhang __set_bit(asid, sev_asid_bitmap);
186eaf78265SJoerg Roedel
187eaf78265SJoerg Roedel mutex_unlock(&sev_bitmap_lock);
188eaf78265SJoerg Roedel
189bb2baeb2SMingwei Zhang return asid;
1907aef27f0SVipin Sharma e_uncharge:
19191b692a0SPaolo Bonzini sev_misc_cg_uncharge(sev);
1927aef27f0SVipin Sharma put_misc_cg(sev->misc_cg);
1937aef27f0SVipin Sharma sev->misc_cg = NULL;
1947aef27f0SVipin Sharma return ret;
195eaf78265SJoerg Roedel }
196eaf78265SJoerg Roedel
19779b79ea2SSean Christopherson static unsigned int sev_get_asid(struct kvm *kvm)
198eaf78265SJoerg Roedel {
199eaf78265SJoerg Roedel struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
200eaf78265SJoerg Roedel
201eaf78265SJoerg Roedel return sev->asid;
202eaf78265SJoerg Roedel }
203eaf78265SJoerg Roedel
2047aef27f0SVipin Sharma static void sev_asid_free(struct kvm_sev_info *sev)
205eaf78265SJoerg Roedel {
206eaf78265SJoerg Roedel struct svm_cpu_data *sd;
207bb2baeb2SMingwei Zhang int cpu;
208eaf78265SJoerg Roedel
209eaf78265SJoerg Roedel mutex_lock(&sev_bitmap_lock);
210eaf78265SJoerg Roedel
211bb2baeb2SMingwei Zhang __set_bit(sev->asid, sev_reclaim_asid_bitmap);
212eaf78265SJoerg Roedel
213eaf78265SJoerg Roedel for_each_possible_cpu(cpu) {
21473412dfeSPaolo Bonzini sd = per_cpu_ptr(&svm_data, cpu);
215179c6c27SSean Christopherson sd->sev_vmcbs[sev->asid] = NULL;
216eaf78265SJoerg Roedel }
217eaf78265SJoerg Roedel
218eaf78265SJoerg Roedel mutex_unlock(&sev_bitmap_lock);
2197aef27f0SVipin Sharma
22091b692a0SPaolo Bonzini sev_misc_cg_uncharge(sev);
2217aef27f0SVipin Sharma put_misc_cg(sev->misc_cg);
2227aef27f0SVipin Sharma sev->misc_cg = NULL;
223eaf78265SJoerg Roedel }
224eaf78265SJoerg Roedel
225934002cdSAlper Gun static void sev_decommission(unsigned int handle)
226eaf78265SJoerg Roedel {
227238eca82SSean Christopherson struct sev_data_decommission decommission;
228934002cdSAlper Gun
229934002cdSAlper Gun if (!handle)
230934002cdSAlper Gun return;
231934002cdSAlper Gun
232934002cdSAlper Gun decommission.handle = handle;
233934002cdSAlper Gun sev_guest_decommission(&decommission, NULL);
234934002cdSAlper Gun }
235934002cdSAlper Gun
236934002cdSAlper Gun static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
237934002cdSAlper Gun {
238238eca82SSean Christopherson struct sev_data_deactivate deactivate;
239eaf78265SJoerg Roedel
240eaf78265SJoerg Roedel if (!handle)
241eaf78265SJoerg Roedel return;
242eaf78265SJoerg Roedel
243238eca82SSean Christopherson deactivate.handle = handle;
244eaf78265SJoerg Roedel
245eaf78265SJoerg Roedel /* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
246eaf78265SJoerg Roedel down_read(&sev_deactivate_lock);
247238eca82SSean Christopherson sev_guest_deactivate(&deactivate, NULL);
248eaf78265SJoerg Roedel up_read(&sev_deactivate_lock);
249eaf78265SJoerg Roedel
250934002cdSAlper Gun sev_decommission(handle);
251eaf78265SJoerg Roedel }
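/*
 * Roughly, guest teardown follows the ordering required by the SEV firmware:
 * DEACTIVATE detaches the ASID from the firmware handle, a WBINVD + DF_FLUSH
 * must complete before that ASID can be reused, and DECOMMISSION finally
 * destroys the firmware's guest context for the handle.
 */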
252eaf78265SJoerg Roedel
253eaf78265SJoerg Roedel static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
254eaf78265SJoerg Roedel {
255eaf78265SJoerg Roedel struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
256eaf78265SJoerg Roedel int asid, ret;
257eaf78265SJoerg Roedel
2588727906fSSean Christopherson if (kvm->created_vcpus)
2598727906fSSean Christopherson return -EINVAL;
2608727906fSSean Christopherson
261eaf78265SJoerg Roedel ret = -EBUSY;
262eaf78265SJoerg Roedel if (unlikely(sev->active))
263eaf78265SJoerg Roedel return ret;
264eaf78265SJoerg Roedel
265a41fb26eSSean Christopherson sev->active = true;
266a41fb26eSSean Christopherson sev->es_active = argp->id == KVM_SEV_ES_INIT;
26780675b3aSTom Lendacky asid = sev_asid_new(sev);
268eaf78265SJoerg Roedel if (asid < 0)
269fd49e8eeSPaolo Bonzini goto e_no_asid;
2707aef27f0SVipin Sharma sev->asid = asid;
271eaf78265SJoerg Roedel
272eaf78265SJoerg Roedel ret = sev_platform_init(&argp->error);
273eaf78265SJoerg Roedel if (ret)
274eaf78265SJoerg Roedel goto e_free;
275eaf78265SJoerg Roedel
276eaf78265SJoerg Roedel INIT_LIST_HEAD(&sev->regions_list);
277b2125513SPeter Gonda INIT_LIST_HEAD(&sev->mirror_vms);
278eaf78265SJoerg Roedel
279c538dc79SSuravee Suthikulpanit kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_SEV);
280c538dc79SSuravee Suthikulpanit
281eaf78265SJoerg Roedel return 0;
282eaf78265SJoerg Roedel
283eaf78265SJoerg Roedel e_free:
2847aef27f0SVipin Sharma sev_asid_free(sev);
2857aef27f0SVipin Sharma sev->asid = 0;
286fd49e8eeSPaolo Bonzini e_no_asid:
287fd49e8eeSPaolo Bonzini sev->es_active = false;
288a41fb26eSSean Christopherson sev->active = false;
289eaf78265SJoerg Roedel return ret;
290eaf78265SJoerg Roedel }
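/*
 * For reference, a typical SEV launch driven from userspace looks roughly
 * like the following sequence of KVM_MEMORY_ENCRYPT_OP commands (see
 * Documentation/virt/kvm/x86/amd-memory-encryption.rst):
 *
 *   KVM_SEV_INIT (or KVM_SEV_ES_INIT)
 *   KVM_SEV_LAUNCH_START
 *   KVM_SEV_LAUNCH_UPDATE_DATA   (repeated for each region to encrypt)
 *   KVM_SEV_LAUNCH_UPDATE_VMSA   (SEV-ES only, for every vCPU)
 *   KVM_SEV_LAUNCH_MEASURE
 *   KVM_SEV_LAUNCH_FINISH
 */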
291eaf78265SJoerg Roedel
292eaf78265SJoerg Roedel static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
293eaf78265SJoerg Roedel {
29479b79ea2SSean Christopherson unsigned int asid = sev_get_asid(kvm);
295238eca82SSean Christopherson struct sev_data_activate activate;
296eaf78265SJoerg Roedel int ret;
297eaf78265SJoerg Roedel
298eaf78265SJoerg Roedel /* activate ASID on the given handle */
299238eca82SSean Christopherson activate.handle = handle;
300238eca82SSean Christopherson activate.asid = asid;
301238eca82SSean Christopherson ret = sev_guest_activate(&activate, error);
302eaf78265SJoerg Roedel
303eaf78265SJoerg Roedel return ret;
304eaf78265SJoerg Roedel }
305eaf78265SJoerg Roedel
306eaf78265SJoerg Roedel static int __sev_issue_cmd(int fd, int id, void *data, int *error)
307eaf78265SJoerg Roedel {
308eaf78265SJoerg Roedel struct fd f;
309eaf78265SJoerg Roedel int ret;
310eaf78265SJoerg Roedel
311eaf78265SJoerg Roedel f = fdget(fd);
312eaf78265SJoerg Roedel if (!f.file)
313eaf78265SJoerg Roedel return -EBADF;
314eaf78265SJoerg Roedel
315eaf78265SJoerg Roedel ret = sev_issue_cmd_external_user(f.file, id, data, error);
316eaf78265SJoerg Roedel
317eaf78265SJoerg Roedel fdput(f);
318eaf78265SJoerg Roedel return ret;
319eaf78265SJoerg Roedel }
320eaf78265SJoerg Roedel
321eaf78265SJoerg Roedel static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
322eaf78265SJoerg Roedel {
323eaf78265SJoerg Roedel struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
324eaf78265SJoerg Roedel
325eaf78265SJoerg Roedel return __sev_issue_cmd(sev->fd, id, data, error);
326eaf78265SJoerg Roedel }
327eaf78265SJoerg Roedel
328eaf78265SJoerg Roedel static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
329eaf78265SJoerg Roedel {
330eaf78265SJoerg Roedel struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
331238eca82SSean Christopherson struct sev_data_launch_start start;
332eaf78265SJoerg Roedel struct kvm_sev_launch_start params;
333eaf78265SJoerg Roedel void *dh_blob, *session_blob;
334eaf78265SJoerg Roedel int *error = &argp->error;
335eaf78265SJoerg Roedel int ret;
336eaf78265SJoerg Roedel
337eaf78265SJoerg Roedel if (!sev_guest(kvm))
338eaf78265SJoerg Roedel return -ENOTTY;
339eaf78265SJoerg Roedel
340eaf78265SJoerg Roedel if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
341eaf78265SJoerg Roedel return -EFAULT;
342eaf78265SJoerg Roedel
343238eca82SSean Christopherson memset(&start, 0, sizeof(start));
344eaf78265SJoerg Roedel
345eaf78265SJoerg Roedel dh_blob = NULL;
346eaf78265SJoerg Roedel if (params.dh_uaddr) {
347eaf78265SJoerg Roedel dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
348238eca82SSean Christopherson if (IS_ERR(dh_blob))
349238eca82SSean Christopherson return PTR_ERR(dh_blob);
350eaf78265SJoerg Roedel
351238eca82SSean Christopherson start.dh_cert_address = __sme_set(__pa(dh_blob));
352238eca82SSean Christopherson start.dh_cert_len = params.dh_len;
353eaf78265SJoerg Roedel }
354eaf78265SJoerg Roedel
355eaf78265SJoerg Roedel session_blob = NULL;
356eaf78265SJoerg Roedel if (params.session_uaddr) {
357eaf78265SJoerg Roedel session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
358eaf78265SJoerg Roedel if (IS_ERR(session_blob)) {
359eaf78265SJoerg Roedel ret = PTR_ERR(session_blob);
360eaf78265SJoerg Roedel goto e_free_dh;
361eaf78265SJoerg Roedel }
362eaf78265SJoerg Roedel
363238eca82SSean Christopherson start.session_address = __sme_set(__pa(session_blob));
364238eca82SSean Christopherson start.session_len = params.session_len;
365eaf78265SJoerg Roedel }
366eaf78265SJoerg Roedel
367238eca82SSean Christopherson start.handle = params.handle;
368238eca82SSean Christopherson start.policy = params.policy;
369eaf78265SJoerg Roedel
370eaf78265SJoerg Roedel /* create memory encryption context */
371238eca82SSean Christopherson ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, &start, error);
372eaf78265SJoerg Roedel if (ret)
373eaf78265SJoerg Roedel goto e_free_session;
374eaf78265SJoerg Roedel
375eaf78265SJoerg Roedel /* Bind ASID to this guest */
376238eca82SSean Christopherson ret = sev_bind_asid(kvm, start.handle, error);
377934002cdSAlper Gun if (ret) {
378934002cdSAlper Gun sev_decommission(start.handle);
379eaf78265SJoerg Roedel goto e_free_session;
380934002cdSAlper Gun }
381eaf78265SJoerg Roedel
382eaf78265SJoerg Roedel /* return handle to userspace */
383238eca82SSean Christopherson params.handle = start.handle;
384eaf78265SJoerg Roedel if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
385238eca82SSean Christopherson sev_unbind_asid(kvm, start.handle);
386eaf78265SJoerg Roedel ret = -EFAULT;
387eaf78265SJoerg Roedel goto e_free_session;
388eaf78265SJoerg Roedel }
389eaf78265SJoerg Roedel
390238eca82SSean Christopherson sev->handle = start.handle;
391eaf78265SJoerg Roedel sev->fd = argp->sev_fd;
392eaf78265SJoerg Roedel
393eaf78265SJoerg Roedel e_free_session:
394eaf78265SJoerg Roedel kfree(session_blob);
395eaf78265SJoerg Roedel e_free_dh:
396eaf78265SJoerg Roedel kfree(dh_blob);
397eaf78265SJoerg Roedel return ret;
398eaf78265SJoerg Roedel }
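/*
 * Note: on success, start.handle identifies the guest context inside the SEV
 * firmware; it is saved in sev->handle and referenced by every subsequent
 * launch, measure and debug command for this VM.
 */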
399eaf78265SJoerg Roedel
400eaf78265SJoerg Roedel static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
401eaf78265SJoerg Roedel unsigned long ulen, unsigned long *n,
402eaf78265SJoerg Roedel int write)
403eaf78265SJoerg Roedel {
404eaf78265SJoerg Roedel struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
40578824fabSJohn Hubbard unsigned long npages, size;
40678824fabSJohn Hubbard int npinned;
407eaf78265SJoerg Roedel unsigned long locked, lock_limit;
408eaf78265SJoerg Roedel struct page **pages;
409eaf78265SJoerg Roedel unsigned long first, last;
410ff2bd9ffSDan Carpenter int ret;
411eaf78265SJoerg Roedel
41219a23da5SPeter Gonda lockdep_assert_held(&kvm->lock);
41319a23da5SPeter Gonda
414eaf78265SJoerg Roedel if (ulen == 0 || uaddr + ulen < uaddr)
415a8d908b5SPaolo Bonzini return ERR_PTR(-EINVAL);
416eaf78265SJoerg Roedel
417eaf78265SJoerg Roedel /* Calculate number of pages. */
418eaf78265SJoerg Roedel first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
419eaf78265SJoerg Roedel last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
420eaf78265SJoerg Roedel npages = (last - first + 1);
421eaf78265SJoerg Roedel
422eaf78265SJoerg Roedel locked = sev->pages_locked + npages;
423eaf78265SJoerg Roedel lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
424eaf78265SJoerg Roedel if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
425eaf78265SJoerg Roedel pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
426a8d908b5SPaolo Bonzini return ERR_PTR(-ENOMEM);
427eaf78265SJoerg Roedel }
428eaf78265SJoerg Roedel
42978824fabSJohn Hubbard if (WARN_ON_ONCE(npages > INT_MAX))
430a8d908b5SPaolo Bonzini return ERR_PTR(-EINVAL);
43178824fabSJohn Hubbard
432eaf78265SJoerg Roedel /* Avoid using vmalloc for smaller buffers. */
433eaf78265SJoerg Roedel size = npages * sizeof(struct page *);
434eaf78265SJoerg Roedel if (size > PAGE_SIZE)
43588dca4caSChristoph Hellwig pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
436eaf78265SJoerg Roedel else
437eaf78265SJoerg Roedel pages = kmalloc(size, GFP_KERNEL_ACCOUNT);
438eaf78265SJoerg Roedel
439eaf78265SJoerg Roedel if (!pages)
440a8d908b5SPaolo Bonzini return ERR_PTR(-ENOMEM);
441eaf78265SJoerg Roedel
442eaf78265SJoerg Roedel /* Pin the user virtual address. */
443dc42c8aeSJohn Hubbard npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
444eaf78265SJoerg Roedel if (npinned != npages) {
445eaf78265SJoerg Roedel pr_err("SEV: Failure locking %lu pages.\n", npages);
446ff2bd9ffSDan Carpenter ret = -ENOMEM;
447eaf78265SJoerg Roedel goto err;
448eaf78265SJoerg Roedel }
449eaf78265SJoerg Roedel
450eaf78265SJoerg Roedel *n = npages;
451eaf78265SJoerg Roedel sev->pages_locked = locked;
452eaf78265SJoerg Roedel
453eaf78265SJoerg Roedel return pages;
454eaf78265SJoerg Roedel
455eaf78265SJoerg Roedel err:
456ff2bd9ffSDan Carpenter if (npinned > 0)
457dc42c8aeSJohn Hubbard unpin_user_pages(pages, npinned);
458eaf78265SJoerg Roedel
459eaf78265SJoerg Roedel kvfree(pages);
460ff2bd9ffSDan Carpenter return ERR_PTR(ret);
461eaf78265SJoerg Roedel }
462eaf78265SJoerg Roedel
463eaf78265SJoerg Roedel static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
464eaf78265SJoerg Roedel unsigned long npages)
465eaf78265SJoerg Roedel {
466eaf78265SJoerg Roedel struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
467eaf78265SJoerg Roedel
468dc42c8aeSJohn Hubbard unpin_user_pages(pages, npages);
469eaf78265SJoerg Roedel kvfree(pages);
470eaf78265SJoerg Roedel sev->pages_locked -= npages;
471eaf78265SJoerg Roedel }
472eaf78265SJoerg Roedel
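/*
 * Note: flushing is needed because the CPU may hold cache lines for the same
 * physical page under both the host's unencrypted mapping and the guest's
 * key; stale plaintext lines must be written back before the PSP or the
 * guest touches the page with encryption enabled.
 */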
473eaf78265SJoerg Roedel static void sev_clflush_pages(struct page *pages[], unsigned long npages)
474eaf78265SJoerg Roedel {
475eaf78265SJoerg Roedel uint8_t *page_virtual;
476eaf78265SJoerg Roedel unsigned long i;
477eaf78265SJoerg Roedel
478e1ebb2b4SKrish Sadhukhan if (this_cpu_has(X86_FEATURE_SME_COHERENT) || npages == 0 ||
479e1ebb2b4SKrish Sadhukhan pages == NULL)
480eaf78265SJoerg Roedel return;
481eaf78265SJoerg Roedel
482eaf78265SJoerg Roedel for (i = 0; i < npages; i++) {
483a8a12c00SZhao Liu page_virtual = kmap_local_page(pages[i]);
484eaf78265SJoerg Roedel clflush_cache_range(page_virtual, PAGE_SIZE);
485a8a12c00SZhao Liu kunmap_local(page_virtual);
48600c22013SPeter Gonda cond_resched();
487eaf78265SJoerg Roedel }
488eaf78265SJoerg Roedel }
489eaf78265SJoerg Roedel
490eaf78265SJoerg Roedel static unsigned long get_num_contig_pages(unsigned long idx,
491eaf78265SJoerg Roedel struct page **inpages, unsigned long npages)
492eaf78265SJoerg Roedel {
493eaf78265SJoerg Roedel unsigned long paddr, next_paddr;
494eaf78265SJoerg Roedel unsigned long i = idx + 1, pages = 1;
495eaf78265SJoerg Roedel
496eaf78265SJoerg Roedel /* find the number of contiguous pages starting from idx */
497eaf78265SJoerg Roedel paddr = __sme_page_pa(inpages[idx]);
498eaf78265SJoerg Roedel while (i < npages) {
499eaf78265SJoerg Roedel next_paddr = __sme_page_pa(inpages[i++]);
500eaf78265SJoerg Roedel if ((paddr + PAGE_SIZE) == next_paddr) {
501eaf78265SJoerg Roedel pages++;
502eaf78265SJoerg Roedel paddr = next_paddr;
503eaf78265SJoerg Roedel continue;
504eaf78265SJoerg Roedel }
505eaf78265SJoerg Roedel break;
506eaf78265SJoerg Roedel }
507eaf78265SJoerg Roedel
508eaf78265SJoerg Roedel return pages;
509eaf78265SJoerg Roedel }
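/*
 * Example (hypothetical addresses): if inpages[] maps to physical pages at
 * 0x1000, 0x2000, 0x3000 and 0x7000, then get_num_contig_pages(0, inpages, 4)
 * returns 3, while get_num_contig_pages(3, inpages, 4) returns 1.
 */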
510eaf78265SJoerg Roedel
511eaf78265SJoerg Roedel static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
512eaf78265SJoerg Roedel {
513eaf78265SJoerg Roedel unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
514eaf78265SJoerg Roedel struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
515eaf78265SJoerg Roedel struct kvm_sev_launch_update_data params;
516238eca82SSean Christopherson struct sev_data_launch_update_data data;
517eaf78265SJoerg Roedel struct page **inpages;
518eaf78265SJoerg Roedel int ret;
519eaf78265SJoerg Roedel
520eaf78265SJoerg Roedel if (!sev_guest(kvm))
521eaf78265SJoerg Roedel return -ENOTTY;
522eaf78265SJoerg Roedel
523eaf78265SJoerg Roedel if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
524eaf78265SJoerg Roedel return -EFAULT;
525eaf78265SJoerg Roedel
526eaf78265SJoerg Roedel vaddr = params.uaddr;
527eaf78265SJoerg Roedel size = params.len;
528eaf78265SJoerg Roedel vaddr_end = vaddr + size;
529eaf78265SJoerg Roedel
530eaf78265SJoerg Roedel /* Lock the user memory. */
531eaf78265SJoerg Roedel inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
532238eca82SSean Christopherson if (IS_ERR(inpages))
533238eca82SSean Christopherson return PTR_ERR(inpages);
534eaf78265SJoerg Roedel
535eaf78265SJoerg Roedel /*
53614e3dd8dSPaolo Bonzini * Flush (on non-coherent CPUs) before LAUNCH_UPDATE encrypts pages in
53714e3dd8dSPaolo Bonzini * place; the cache may contain the data that was written unencrypted.
538eaf78265SJoerg Roedel */
539eaf78265SJoerg Roedel sev_clflush_pages(inpages, npages);
540eaf78265SJoerg Roedel
541238eca82SSean Christopherson data.reserved = 0;
542238eca82SSean Christopherson data.handle = sev->handle;
543238eca82SSean Christopherson
544eaf78265SJoerg Roedel for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
545eaf78265SJoerg Roedel int offset, len;
546eaf78265SJoerg Roedel
547eaf78265SJoerg Roedel /*
548eaf78265SJoerg Roedel * If the user buffer is not page-aligned, calculate the offset
549eaf78265SJoerg Roedel * within the page.
550eaf78265SJoerg Roedel */
551eaf78265SJoerg Roedel offset = vaddr & (PAGE_SIZE - 1);
552eaf78265SJoerg Roedel
553eaf78265SJoerg Roedel /* Calculate the number of pages that can be encrypted in one go. */
554eaf78265SJoerg Roedel pages = get_num_contig_pages(i, inpages, npages);
555eaf78265SJoerg Roedel
556eaf78265SJoerg Roedel len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
557eaf78265SJoerg Roedel
558238eca82SSean Christopherson data.len = len;
559238eca82SSean Christopherson data.address = __sme_page_pa(inpages[i]) + offset;
560238eca82SSean Christopherson ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, &data, &argp->error);
561eaf78265SJoerg Roedel if (ret)
562eaf78265SJoerg Roedel goto e_unpin;
563eaf78265SJoerg Roedel
564eaf78265SJoerg Roedel size -= len;
565eaf78265SJoerg Roedel next_vaddr = vaddr + len;
566eaf78265SJoerg Roedel }
567eaf78265SJoerg Roedel
568eaf78265SJoerg Roedel e_unpin:
569eaf78265SJoerg Roedel /* content of memory is updated, mark pages dirty */
570eaf78265SJoerg Roedel for (i = 0; i < npages; i++) {
571eaf78265SJoerg Roedel set_page_dirty_lock(inpages[i]);
572eaf78265SJoerg Roedel mark_page_accessed(inpages[i]);
573eaf78265SJoerg Roedel }
574eaf78265SJoerg Roedel /* unlock the user pages */
575eaf78265SJoerg Roedel sev_unpin_memory(kvm, inpages, npages);
576eaf78265SJoerg Roedel return ret;
577eaf78265SJoerg Roedel }
578eaf78265SJoerg Roedel
579ad73109aSTom Lendacky static int sev_es_sync_vmsa(struct vcpu_svm *svm)
580ad73109aSTom Lendacky {
5813dd2775bSTom Lendacky struct sev_es_save_area *save = svm->sev_es.vmsa;
582ad73109aSTom Lendacky
583ad73109aSTom Lendacky /* Check some debug related fields before encrypting the VMSA */
5843dd2775bSTom Lendacky if (svm->vcpu.guest_debug || (svm->vmcb->save.dr7 & ~DR7_FIXED_1))
585ad73109aSTom Lendacky return -EINVAL;
586ad73109aSTom Lendacky
5873dd2775bSTom Lendacky /*
5883dd2775bSTom Lendacky * SEV-ES will use a VMSA that is pointed to by the VMCB, not
5893dd2775bSTom Lendacky * the traditional VMSA that is part of the VMCB. Copy the
5903dd2775bSTom Lendacky * traditional VMSA as it has been built so far (in prep
5913dd2775bSTom Lendacky * for LAUNCH_UPDATE_VMSA) to be the initial SEV-ES state.
5923dd2775bSTom Lendacky */
5933dd2775bSTom Lendacky memcpy(save, &svm->vmcb->save, sizeof(svm->vmcb->save));
5943dd2775bSTom Lendacky
595ad73109aSTom Lendacky /* Sync registers */
596ad73109aSTom Lendacky save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX];
597ad73109aSTom Lendacky save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX];
598ad73109aSTom Lendacky save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
599ad73109aSTom Lendacky save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX];
600ad73109aSTom Lendacky save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP];
601ad73109aSTom Lendacky save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP];
602ad73109aSTom Lendacky save->rsi = svm->vcpu.arch.regs[VCPU_REGS_RSI];
603ad73109aSTom Lendacky save->rdi = svm->vcpu.arch.regs[VCPU_REGS_RDI];
604d45f89f7SPaolo Bonzini #ifdef CONFIG_X86_64
605ad73109aSTom Lendacky save->r8 = svm->vcpu.arch.regs[VCPU_REGS_R8];
606ad73109aSTom Lendacky save->r9 = svm->vcpu.arch.regs[VCPU_REGS_R9];
607ad73109aSTom Lendacky save->r10 = svm->vcpu.arch.regs[VCPU_REGS_R10];
608ad73109aSTom Lendacky save->r11 = svm->vcpu.arch.regs[VCPU_REGS_R11];
609ad73109aSTom Lendacky save->r12 = svm->vcpu.arch.regs[VCPU_REGS_R12];
610ad73109aSTom Lendacky save->r13 = svm->vcpu.arch.regs[VCPU_REGS_R13];
611ad73109aSTom Lendacky save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14];
612ad73109aSTom Lendacky save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15];
613d45f89f7SPaolo Bonzini #endif
614ad73109aSTom Lendacky save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP];
615ad73109aSTom Lendacky
616ad73109aSTom Lendacky /* Sync some non-GPR registers before encrypting */
617ad73109aSTom Lendacky save->xcr0 = svm->vcpu.arch.xcr0;
618ad73109aSTom Lendacky save->pkru = svm->vcpu.arch.pkru;
619ad73109aSTom Lendacky save->xss = svm->vcpu.arch.ia32_xss;
620d0f9f826SSean Christopherson save->dr6 = svm->vcpu.arch.dr6;
621ad73109aSTom Lendacky
62245770363SPaolo Bonzini if (sev_es_debug_swap_enabled) {
623d1f85fbeSAlexey Kardashevskiy save->sev_features |= SVM_SEV_FEAT_DEBUG_SWAP;
62445770363SPaolo Bonzini pr_warn_once("Enabling DebugSwap with KVM_SEV_ES_INIT. "
62545770363SPaolo Bonzini "This will not work starting with Linux 6.10\n");
62645770363SPaolo Bonzini }
627d1f85fbeSAlexey Kardashevskiy
6286fac42f1SJarkko Sakkinen pr_debug("Virtual Machine Save Area (VMSA):\n");
6290bd8bd2fSPeter Gonda print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1, save, sizeof(*save), false);
6306fac42f1SJarkko Sakkinen
631ad73109aSTom Lendacky return 0;
632ad73109aSTom Lendacky }
633ad73109aSTom Lendacky
634bb18a677SPeter Gonda static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
635bb18a677SPeter Gonda int *error)
636ad73109aSTom Lendacky {
637238eca82SSean Christopherson struct sev_data_launch_update_vmsa vmsa;
638c36b16d2SSean Christopherson struct vcpu_svm *svm = to_svm(vcpu);
639bb18a677SPeter Gonda int ret;
640ad73109aSTom Lendacky
6412837dd00SAlexey Kardashevskiy if (vcpu->guest_debug) {
6422837dd00SAlexey Kardashevskiy pr_warn_once("KVM_SET_GUEST_DEBUG for SEV-ES guest is not supported");
6432837dd00SAlexey Kardashevskiy return -EINVAL;
6442837dd00SAlexey Kardashevskiy }
6452837dd00SAlexey Kardashevskiy
646ad73109aSTom Lendacky /* Perform some pre-encryption checks against the VMSA */
647ad73109aSTom Lendacky ret = sev_es_sync_vmsa(svm);
648ad73109aSTom Lendacky if (ret)
649238eca82SSean Christopherson return ret;
650ad73109aSTom Lendacky
651ad73109aSTom Lendacky /*
652bb18a677SPeter Gonda * The LAUNCH_UPDATE_VMSA command will perform in-place encryption of
653bb18a677SPeter Gonda * the VMSA memory content (i.e. it will write the same memory region
654bb18a677SPeter Gonda * with the guest's key), so invalidate it first.
655ad73109aSTom Lendacky */
656b67a4cc3SPeter Gonda clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE);
657ad73109aSTom Lendacky
658bb18a677SPeter Gonda vmsa.reserved = 0;
659bb18a677SPeter Gonda vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
660b67a4cc3SPeter Gonda vmsa.address = __sme_pa(svm->sev_es.vmsa);
661238eca82SSean Christopherson vmsa.len = PAGE_SIZE;
662baa1e5caSPeter Gonda ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
663baa1e5caSPeter Gonda if (ret)
664baa1e5caSPeter Gonda return ret;
665baa1e5caSPeter Gonda
666baa1e5caSPeter Gonda vcpu->arch.guest_state_protected = true;
667*834aa2c3SRavi Bangoria
668*834aa2c3SRavi Bangoria /*
669*834aa2c3SRavi Bangoria * SEV-ES guest mandates LBR Virtualization to be _always_ ON. Enable it
670*834aa2c3SRavi Bangoria * only after setting guest_state_protected because KVM_SET_MSRS allows
671*834aa2c3SRavi Bangoria * dynamic toggling of LBRV (for performance reason) on write access to
672*834aa2c3SRavi Bangoria * MSR_IA32_DEBUGCTLMSR when guest_state_protected is not set.
673*834aa2c3SRavi Bangoria */
674*834aa2c3SRavi Bangoria svm_enable_lbrv(vcpu);
675baa1e5caSPeter Gonda return 0;
676bb18a677SPeter Gonda }
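/*
 * Note: after LAUNCH_UPDATE_VMSA succeeds, the VMSA page is encrypted with
 * the guest's key and included in the launch measurement; KVM can no longer
 * read or modify the vCPU's register state, which is what
 * guest_state_protected reflects.
 */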
677bb18a677SPeter Gonda
678bb18a677SPeter Gonda static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
679bb18a677SPeter Gonda {
680bb18a677SPeter Gonda struct kvm_vcpu *vcpu;
68146808a4cSMarc Zyngier unsigned long i;
68246808a4cSMarc Zyngier int ret;
683bb18a677SPeter Gonda
684bb18a677SPeter Gonda if (!sev_es_guest(kvm))
685bb18a677SPeter Gonda return -ENOTTY;
686bb18a677SPeter Gonda
687bb18a677SPeter Gonda kvm_for_each_vcpu(i, vcpu, kvm) {
688bb18a677SPeter Gonda ret = mutex_lock_killable(&vcpu->mutex);
689ad73109aSTom Lendacky if (ret)
690238eca82SSean Christopherson return ret;
691ad73109aSTom Lendacky
692bb18a677SPeter Gonda ret = __sev_launch_update_vmsa(kvm, vcpu, &argp->error);
693bb18a677SPeter Gonda
694bb18a677SPeter Gonda mutex_unlock(&vcpu->mutex);
695bb18a677SPeter Gonda if (ret)
696bb18a677SPeter Gonda return ret;
697ad73109aSTom Lendacky }
698ad73109aSTom Lendacky
699238eca82SSean Christopherson return 0;
700ad73109aSTom Lendacky }
701ad73109aSTom Lendacky
702eaf78265SJoerg Roedel static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
703eaf78265SJoerg Roedel {
704eaf78265SJoerg Roedel void __user *measure = (void __user *)(uintptr_t)argp->data;
705eaf78265SJoerg Roedel struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
706238eca82SSean Christopherson struct sev_data_launch_measure data;
707eaf78265SJoerg Roedel struct kvm_sev_launch_measure params;
708eaf78265SJoerg Roedel void __user *p = NULL;
709eaf78265SJoerg Roedel void *blob = NULL;
710eaf78265SJoerg Roedel int ret;
711eaf78265SJoerg Roedel
712eaf78265SJoerg Roedel if (!sev_guest(kvm))
713eaf78265SJoerg Roedel return -ENOTTY;
714eaf78265SJoerg Roedel
715eaf78265SJoerg Roedel if (copy_from_user(&params, measure, sizeof(params)))
716eaf78265SJoerg Roedel return -EFAULT;
717eaf78265SJoerg Roedel
718238eca82SSean Christopherson memset(&data, 0, sizeof(data));
719eaf78265SJoerg Roedel
720eaf78265SJoerg Roedel /* User wants to query the blob length */
721eaf78265SJoerg Roedel if (!params.len)
722eaf78265SJoerg Roedel goto cmd;
723eaf78265SJoerg Roedel
724eaf78265SJoerg Roedel p = (void __user *)(uintptr_t)params.uaddr;
725eaf78265SJoerg Roedel if (p) {
726238eca82SSean Christopherson if (params.len > SEV_FW_BLOB_MAX_SIZE)
727238eca82SSean Christopherson return -EINVAL;
728eaf78265SJoerg Roedel
729d22d2474SAshish Kalra blob = kzalloc(params.len, GFP_KERNEL_ACCOUNT);
730eaf78265SJoerg Roedel if (!blob)
731238eca82SSean Christopherson return -ENOMEM;
732eaf78265SJoerg Roedel
733238eca82SSean Christopherson data.address = __psp_pa(blob);
734238eca82SSean Christopherson data.len = params.len;
735eaf78265SJoerg Roedel }
736eaf78265SJoerg Roedel
737eaf78265SJoerg Roedel cmd:
738238eca82SSean Christopherson data.handle = sev->handle;
739238eca82SSean Christopherson ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, &data, &argp->error);
740eaf78265SJoerg Roedel
741eaf78265SJoerg Roedel /*
742eaf78265SJoerg Roedel * If we only queried the blob length, the FW has already responded with the expected data.
743eaf78265SJoerg Roedel */
744eaf78265SJoerg Roedel if (!params.len)
745eaf78265SJoerg Roedel goto done;
746eaf78265SJoerg Roedel
747eaf78265SJoerg Roedel if (ret)
748eaf78265SJoerg Roedel goto e_free_blob;
749eaf78265SJoerg Roedel
750eaf78265SJoerg Roedel if (blob) {
751eaf78265SJoerg Roedel if (copy_to_user(p, blob, params.len))
752eaf78265SJoerg Roedel ret = -EFAULT;
753eaf78265SJoerg Roedel }
754eaf78265SJoerg Roedel
755eaf78265SJoerg Roedel done:
756238eca82SSean Christopherson params.len = data.len;
757eaf78265SJoerg Roedel if (copy_to_user(measure, &params, sizeof(params)))
758eaf78265SJoerg Roedel ret = -EFAULT;
759eaf78265SJoerg Roedel e_free_blob:
760eaf78265SJoerg Roedel kfree(blob);
761eaf78265SJoerg Roedel return ret;
762eaf78265SJoerg Roedel }
763eaf78265SJoerg Roedel
764eaf78265SJoerg Roedel static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
765eaf78265SJoerg Roedel {
766eaf78265SJoerg Roedel struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
767238eca82SSean Christopherson struct sev_data_launch_finish data;
768eaf78265SJoerg Roedel
769eaf78265SJoerg Roedel if (!sev_guest(kvm))
770eaf78265SJoerg Roedel return -ENOTTY;
771eaf78265SJoerg Roedel
772238eca82SSean Christopherson data.handle = sev->handle;
773238eca82SSean Christopherson return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, &data, &argp->error);
774eaf78265SJoerg Roedel }
775eaf78265SJoerg Roedel
776eaf78265SJoerg Roedel static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
777eaf78265SJoerg Roedel {
778eaf78265SJoerg Roedel struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
779eaf78265SJoerg Roedel struct kvm_sev_guest_status params;
780238eca82SSean Christopherson struct sev_data_guest_status data;
781eaf78265SJoerg Roedel int ret;
782eaf78265SJoerg Roedel
783eaf78265SJoerg Roedel if (!sev_guest(kvm))
784eaf78265SJoerg Roedel return -ENOTTY;
785eaf78265SJoerg Roedel
786238eca82SSean Christopherson memset(&data, 0, sizeof(data));
787eaf78265SJoerg Roedel
788238eca82SSean Christopherson data.handle = sev->handle;
789238eca82SSean Christopherson ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, &data, &argp->error);
790eaf78265SJoerg Roedel if (ret)
791238eca82SSean Christopherson return ret;
792eaf78265SJoerg Roedel
793238eca82SSean Christopherson params.policy = data.policy;
794238eca82SSean Christopherson params.state = data.state;
795238eca82SSean Christopherson params.handle = data.handle;
796eaf78265SJoerg Roedel
797eaf78265SJoerg Roedel if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
798eaf78265SJoerg Roedel ret = -EFAULT;
799238eca82SSean Christopherson
800eaf78265SJoerg Roedel return ret;
801eaf78265SJoerg Roedel }
802eaf78265SJoerg Roedel
803eaf78265SJoerg Roedel static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
804eaf78265SJoerg Roedel unsigned long dst, int size,
805eaf78265SJoerg Roedel int *error, bool enc)
806eaf78265SJoerg Roedel {
807eaf78265SJoerg Roedel struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
808238eca82SSean Christopherson struct sev_data_dbg data;
809eaf78265SJoerg Roedel
810238eca82SSean Christopherson data.reserved = 0;
811238eca82SSean Christopherson data.handle = sev->handle;
812238eca82SSean Christopherson data.dst_addr = dst;
813238eca82SSean Christopherson data.src_addr = src;
814238eca82SSean Christopherson data.len = size;
815eaf78265SJoerg Roedel
816238eca82SSean Christopherson return sev_issue_cmd(kvm,
817eaf78265SJoerg Roedel enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
818238eca82SSean Christopherson &data, error);
819eaf78265SJoerg Roedel }
820eaf78265SJoerg Roedel
821eaf78265SJoerg Roedel static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
822eaf78265SJoerg Roedel unsigned long dst_paddr, int sz, int *err)
823eaf78265SJoerg Roedel {
824eaf78265SJoerg Roedel int offset;
825eaf78265SJoerg Roedel
826eaf78265SJoerg Roedel /*
827eaf78265SJoerg Roedel * It's safe to read more than we are asked; the caller should ensure that
828eaf78265SJoerg Roedel * the destination has enough space.
829eaf78265SJoerg Roedel */
830eaf78265SJoerg Roedel offset = src_paddr & 15;
831854c57f0SAshish Kalra src_paddr = round_down(src_paddr, 16);
832eaf78265SJoerg Roedel sz = round_up(sz + offset, 16);
833eaf78265SJoerg Roedel
834eaf78265SJoerg Roedel return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
835eaf78265SJoerg Roedel }
836eaf78265SJoerg Roedel
837eaf78265SJoerg Roedel static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
838368340a3SSean Christopherson void __user *dst_uaddr,
839eaf78265SJoerg Roedel unsigned long dst_paddr,
840eaf78265SJoerg Roedel int size, int *err)
841eaf78265SJoerg Roedel {
842eaf78265SJoerg Roedel struct page *tpage = NULL;
843eaf78265SJoerg Roedel int ret, offset;
844eaf78265SJoerg Roedel
845eaf78265SJoerg Roedel /* if the inputs are not 16-byte aligned then use an intermediate buffer */
846eaf78265SJoerg Roedel if (!IS_ALIGNED(dst_paddr, 16) ||
847eaf78265SJoerg Roedel !IS_ALIGNED(paddr, 16) ||
848eaf78265SJoerg Roedel !IS_ALIGNED(size, 16)) {
849a31b531cSAnish Ghulati tpage = (void *)alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
850eaf78265SJoerg Roedel if (!tpage)
851eaf78265SJoerg Roedel return -ENOMEM;
852eaf78265SJoerg Roedel
853eaf78265SJoerg Roedel dst_paddr = __sme_page_pa(tpage);
854eaf78265SJoerg Roedel }
855eaf78265SJoerg Roedel
856eaf78265SJoerg Roedel ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
857eaf78265SJoerg Roedel if (ret)
858eaf78265SJoerg Roedel goto e_free;
859eaf78265SJoerg Roedel
860eaf78265SJoerg Roedel if (tpage) {
861eaf78265SJoerg Roedel offset = paddr & 15;
862368340a3SSean Christopherson if (copy_to_user(dst_uaddr, page_address(tpage) + offset, size))
863eaf78265SJoerg Roedel ret = -EFAULT;
864eaf78265SJoerg Roedel }
865eaf78265SJoerg Roedel
866eaf78265SJoerg Roedel e_free:
867eaf78265SJoerg Roedel if (tpage)
868eaf78265SJoerg Roedel __free_page(tpage);
869eaf78265SJoerg Roedel
870eaf78265SJoerg Roedel return ret;
871eaf78265SJoerg Roedel }
872eaf78265SJoerg Roedel
873eaf78265SJoerg Roedel static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
874368340a3SSean Christopherson void __user *vaddr,
875eaf78265SJoerg Roedel unsigned long dst_paddr,
876368340a3SSean Christopherson void __user *dst_vaddr,
877eaf78265SJoerg Roedel int size, int *error)
878eaf78265SJoerg Roedel {
879eaf78265SJoerg Roedel struct page *src_tpage = NULL;
880eaf78265SJoerg Roedel struct page *dst_tpage = NULL;
881eaf78265SJoerg Roedel int ret, len = size;
882eaf78265SJoerg Roedel
883eaf78265SJoerg Roedel /* If source buffer is not aligned then use an intermediate buffer */
884368340a3SSean Christopherson if (!IS_ALIGNED((unsigned long)vaddr, 16)) {
885ebdec859SMingwei Zhang src_tpage = alloc_page(GFP_KERNEL_ACCOUNT);
886eaf78265SJoerg Roedel if (!src_tpage)
887eaf78265SJoerg Roedel return -ENOMEM;
888eaf78265SJoerg Roedel
889368340a3SSean Christopherson if (copy_from_user(page_address(src_tpage), vaddr, size)) {
890eaf78265SJoerg Roedel __free_page(src_tpage);
891eaf78265SJoerg Roedel return -EFAULT;
892eaf78265SJoerg Roedel }
893eaf78265SJoerg Roedel
894eaf78265SJoerg Roedel paddr = __sme_page_pa(src_tpage);
895eaf78265SJoerg Roedel }
896eaf78265SJoerg Roedel
897eaf78265SJoerg Roedel /*
898eaf78265SJoerg Roedel * If destination buffer or length is not aligned then do read-modify-write:
899eaf78265SJoerg Roedel * - decrypt destination in an intermediate buffer
900eaf78265SJoerg Roedel * - copy the source buffer in an intermediate buffer
901eaf78265SJoerg Roedel * - use the intermediate buffer as source buffer
902eaf78265SJoerg Roedel */
903368340a3SSean Christopherson if (!IS_ALIGNED((unsigned long)dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
904eaf78265SJoerg Roedel int dst_offset;
905eaf78265SJoerg Roedel
906ebdec859SMingwei Zhang dst_tpage = alloc_page(GFP_KERNEL_ACCOUNT);
907eaf78265SJoerg Roedel if (!dst_tpage) {
908eaf78265SJoerg Roedel ret = -ENOMEM;
909eaf78265SJoerg Roedel goto e_free;
910eaf78265SJoerg Roedel }
911eaf78265SJoerg Roedel
912eaf78265SJoerg Roedel ret = __sev_dbg_decrypt(kvm, dst_paddr,
913eaf78265SJoerg Roedel __sme_page_pa(dst_tpage), size, error);
914eaf78265SJoerg Roedel if (ret)
915eaf78265SJoerg Roedel goto e_free;
916eaf78265SJoerg Roedel
917eaf78265SJoerg Roedel /*
918eaf78265SJoerg Roedel * If source is kernel buffer then use memcpy() otherwise
919eaf78265SJoerg Roedel * copy_from_user().
920eaf78265SJoerg Roedel */
921eaf78265SJoerg Roedel dst_offset = dst_paddr & 15;
922eaf78265SJoerg Roedel
923eaf78265SJoerg Roedel if (src_tpage)
924eaf78265SJoerg Roedel memcpy(page_address(dst_tpage) + dst_offset,
925eaf78265SJoerg Roedel page_address(src_tpage), size);
926eaf78265SJoerg Roedel else {
927eaf78265SJoerg Roedel if (copy_from_user(page_address(dst_tpage) + dst_offset,
928368340a3SSean Christopherson vaddr, size)) {
929eaf78265SJoerg Roedel ret = -EFAULT;
930eaf78265SJoerg Roedel goto e_free;
931eaf78265SJoerg Roedel }
932eaf78265SJoerg Roedel }
933eaf78265SJoerg Roedel
934eaf78265SJoerg Roedel paddr = __sme_page_pa(dst_tpage);
935eaf78265SJoerg Roedel dst_paddr = round_down(dst_paddr, 16);
936eaf78265SJoerg Roedel len = round_up(size, 16);
937eaf78265SJoerg Roedel }
938eaf78265SJoerg Roedel
939eaf78265SJoerg Roedel ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);
940eaf78265SJoerg Roedel
941eaf78265SJoerg Roedel e_free:
942eaf78265SJoerg Roedel if (src_tpage)
943eaf78265SJoerg Roedel __free_page(src_tpage);
944eaf78265SJoerg Roedel if (dst_tpage)
945eaf78265SJoerg Roedel __free_page(dst_tpage);
946eaf78265SJoerg Roedel return ret;
947eaf78265SJoerg Roedel }
948eaf78265SJoerg Roedel
949eaf78265SJoerg Roedel static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
950eaf78265SJoerg Roedel {
951eaf78265SJoerg Roedel unsigned long vaddr, vaddr_end, next_vaddr;
952eaf78265SJoerg Roedel unsigned long dst_vaddr;
953eaf78265SJoerg Roedel struct page **src_p, **dst_p;
954eaf78265SJoerg Roedel struct kvm_sev_dbg debug;
955eaf78265SJoerg Roedel unsigned long n;
956eaf78265SJoerg Roedel unsigned int size;
957eaf78265SJoerg Roedel int ret;
958eaf78265SJoerg Roedel
959eaf78265SJoerg Roedel if (!sev_guest(kvm))
960eaf78265SJoerg Roedel return -ENOTTY;
961eaf78265SJoerg Roedel
962eaf78265SJoerg Roedel if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
963eaf78265SJoerg Roedel return -EFAULT;
964eaf78265SJoerg Roedel
965eaf78265SJoerg Roedel if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
966eaf78265SJoerg Roedel return -EINVAL;
967eaf78265SJoerg Roedel if (!debug.dst_uaddr)
968eaf78265SJoerg Roedel return -EINVAL;
969eaf78265SJoerg Roedel
970eaf78265SJoerg Roedel vaddr = debug.src_uaddr;
971eaf78265SJoerg Roedel size = debug.len;
972eaf78265SJoerg Roedel vaddr_end = vaddr + size;
973eaf78265SJoerg Roedel dst_vaddr = debug.dst_uaddr;
974eaf78265SJoerg Roedel
975eaf78265SJoerg Roedel for (; vaddr < vaddr_end; vaddr = next_vaddr) {
976eaf78265SJoerg Roedel int len, s_off, d_off;
977eaf78265SJoerg Roedel
978eaf78265SJoerg Roedel /* lock userspace source and destination page */
979eaf78265SJoerg Roedel src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
980ff2bd9ffSDan Carpenter if (IS_ERR(src_p))
981ff2bd9ffSDan Carpenter return PTR_ERR(src_p);
982eaf78265SJoerg Roedel
983eaf78265SJoerg Roedel dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
984ff2bd9ffSDan Carpenter if (IS_ERR(dst_p)) {
985eaf78265SJoerg Roedel sev_unpin_memory(kvm, src_p, n);
986ff2bd9ffSDan Carpenter return PTR_ERR(dst_p);
987eaf78265SJoerg Roedel }
988eaf78265SJoerg Roedel
989eaf78265SJoerg Roedel /*
99014e3dd8dSPaolo Bonzini * Flush (on non-coherent CPUs) before DBG_{DE,EN}CRYPT read or modify
99114e3dd8dSPaolo Bonzini * the pages; flush the destination too so that future accesses do not
99214e3dd8dSPaolo Bonzini * see stale data.
993eaf78265SJoerg Roedel */
994eaf78265SJoerg Roedel sev_clflush_pages(src_p, 1);
995eaf78265SJoerg Roedel sev_clflush_pages(dst_p, 1);
996eaf78265SJoerg Roedel
997eaf78265SJoerg Roedel /*
998eaf78265SJoerg Roedel * Since user buffer may not be page aligned, calculate the
999eaf78265SJoerg Roedel * offset within the page.
1000eaf78265SJoerg Roedel */
1001eaf78265SJoerg Roedel s_off = vaddr & ~PAGE_MASK;
1002eaf78265SJoerg Roedel d_off = dst_vaddr & ~PAGE_MASK;
1003eaf78265SJoerg Roedel len = min_t(size_t, (PAGE_SIZE - s_off), size);
1004eaf78265SJoerg Roedel
1005eaf78265SJoerg Roedel if (dec)
1006eaf78265SJoerg Roedel ret = __sev_dbg_decrypt_user(kvm,
1007eaf78265SJoerg Roedel __sme_page_pa(src_p[0]) + s_off,
1008368340a3SSean Christopherson (void __user *)dst_vaddr,
1009eaf78265SJoerg Roedel __sme_page_pa(dst_p[0]) + d_off,
1010eaf78265SJoerg Roedel len, &argp->error);
1011eaf78265SJoerg Roedel else
1012eaf78265SJoerg Roedel ret = __sev_dbg_encrypt_user(kvm,
1013eaf78265SJoerg Roedel __sme_page_pa(src_p[0]) + s_off,
1014368340a3SSean Christopherson (void __user *)vaddr,
1015eaf78265SJoerg Roedel __sme_page_pa(dst_p[0]) + d_off,
1016368340a3SSean Christopherson (void __user *)dst_vaddr,
1017eaf78265SJoerg Roedel len, &argp->error);
1018eaf78265SJoerg Roedel
1019eaf78265SJoerg Roedel sev_unpin_memory(kvm, src_p, n);
1020eaf78265SJoerg Roedel sev_unpin_memory(kvm, dst_p, n);
1021eaf78265SJoerg Roedel
1022eaf78265SJoerg Roedel if (ret)
1023eaf78265SJoerg Roedel goto err;
1024eaf78265SJoerg Roedel
1025eaf78265SJoerg Roedel next_vaddr = vaddr + len;
1026eaf78265SJoerg Roedel dst_vaddr = dst_vaddr + len;
1027eaf78265SJoerg Roedel size -= len;
1028eaf78265SJoerg Roedel }
1029eaf78265SJoerg Roedel err:
1030eaf78265SJoerg Roedel return ret;
1031eaf78265SJoerg Roedel }
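/*
 * Note: the DBG_DECRYPT/DBG_ENCRYPT firmware commands operate on 16-byte
 * aligned regions, which is why the helpers above fall back to bounce pages
 * and round addresses/lengths to 16 bytes for unaligned requests.
 */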
1032eaf78265SJoerg Roedel
1033eaf78265SJoerg Roedel static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
1034eaf78265SJoerg Roedel {
1035eaf78265SJoerg Roedel struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1036238eca82SSean Christopherson struct sev_data_launch_secret data;
1037eaf78265SJoerg Roedel struct kvm_sev_launch_secret params;
1038eaf78265SJoerg Roedel struct page **pages;
1039eaf78265SJoerg Roedel void *blob, *hdr;
104050085beeSCfir Cohen unsigned long n, i;
1041eaf78265SJoerg Roedel int ret, offset;
1042eaf78265SJoerg Roedel
1043eaf78265SJoerg Roedel if (!sev_guest(kvm))
1044eaf78265SJoerg Roedel return -ENOTTY;
1045eaf78265SJoerg Roedel
1046eaf78265SJoerg Roedel if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
1047eaf78265SJoerg Roedel return -EFAULT;
1048eaf78265SJoerg Roedel
1049eaf78265SJoerg Roedel pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
1050a8d908b5SPaolo Bonzini if (IS_ERR(pages))
1051a8d908b5SPaolo Bonzini return PTR_ERR(pages);
1052eaf78265SJoerg Roedel
1053eaf78265SJoerg Roedel /*
105414e3dd8dSPaolo Bonzini * Flush (on non-coherent CPUs) before LAUNCH_SECRET encrypts pages in
105514e3dd8dSPaolo Bonzini * place; the cache may contain the data that was written unencrypted.
105650085beeSCfir Cohen */
105750085beeSCfir Cohen sev_clflush_pages(pages, n);
105850085beeSCfir Cohen
105950085beeSCfir Cohen /*
1060eaf78265SJoerg Roedel * The secret must be copied into a contiguous memory region; verify that
1061eaf78265SJoerg Roedel * the userspace memory pages are contiguous before issuing the command.
1062eaf78265SJoerg Roedel */
1063eaf78265SJoerg Roedel if (get_num_contig_pages(0, pages, n) != n) {
1064eaf78265SJoerg Roedel ret = -EINVAL;
1065eaf78265SJoerg Roedel goto e_unpin_memory;
1066eaf78265SJoerg Roedel }
1067eaf78265SJoerg Roedel
1068238eca82SSean Christopherson memset(&data, 0, sizeof(data));
1069eaf78265SJoerg Roedel
1070eaf78265SJoerg Roedel offset = params.guest_uaddr & (PAGE_SIZE - 1);
1071238eca82SSean Christopherson data.guest_address = __sme_page_pa(pages[0]) + offset;
1072238eca82SSean Christopherson data.guest_len = params.guest_len;
1073eaf78265SJoerg Roedel
1074eaf78265SJoerg Roedel blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
1075eaf78265SJoerg Roedel if (IS_ERR(blob)) {
1076eaf78265SJoerg Roedel ret = PTR_ERR(blob);
1077238eca82SSean Christopherson goto e_unpin_memory;
1078eaf78265SJoerg Roedel }
1079eaf78265SJoerg Roedel
1080238eca82SSean Christopherson data.trans_address = __psp_pa(blob);
1081238eca82SSean Christopherson data.trans_len = params.trans_len;
1082eaf78265SJoerg Roedel
1083eaf78265SJoerg Roedel hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
1084eaf78265SJoerg Roedel if (IS_ERR(hdr)) {
1085eaf78265SJoerg Roedel ret = PTR_ERR(hdr);
1086eaf78265SJoerg Roedel goto e_free_blob;
1087eaf78265SJoerg Roedel }
1088238eca82SSean Christopherson data.hdr_address = __psp_pa(hdr);
1089238eca82SSean Christopherson data.hdr_len = params.hdr_len;
1090eaf78265SJoerg Roedel
1091238eca82SSean Christopherson data.handle = sev->handle;
1092238eca82SSean Christopherson ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, &data, &argp->error);
1093eaf78265SJoerg Roedel
1094eaf78265SJoerg Roedel kfree(hdr);
1095eaf78265SJoerg Roedel
1096eaf78265SJoerg Roedel e_free_blob:
1097eaf78265SJoerg Roedel kfree(blob);
1098eaf78265SJoerg Roedel e_unpin_memory:
109950085beeSCfir Cohen /* content of memory is updated, mark pages dirty */
110050085beeSCfir Cohen for (i = 0; i < n; i++) {
110150085beeSCfir Cohen set_page_dirty_lock(pages[i]);
110250085beeSCfir Cohen mark_page_accessed(pages[i]);
110350085beeSCfir Cohen }
1104eaf78265SJoerg Roedel sev_unpin_memory(kvm, pages, n);
1105eaf78265SJoerg Roedel return ret;
1106eaf78265SJoerg Roedel }
1107eaf78265SJoerg Roedel
11082c07ded0SBrijesh Singh static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
11092c07ded0SBrijesh Singh {
11102c07ded0SBrijesh Singh void __user *report = (void __user *)(uintptr_t)argp->data;
11112c07ded0SBrijesh Singh struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1112238eca82SSean Christopherson struct sev_data_attestation_report data;
11132c07ded0SBrijesh Singh struct kvm_sev_attestation_report params;
11142c07ded0SBrijesh Singh void __user *p;
11152c07ded0SBrijesh Singh void *blob = NULL;
11162c07ded0SBrijesh Singh int ret;
11172c07ded0SBrijesh Singh
11182c07ded0SBrijesh Singh if (!sev_guest(kvm))
11192c07ded0SBrijesh Singh return -ENOTTY;
11202c07ded0SBrijesh Singh
11212c07ded0SBrijesh Singh if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
11222c07ded0SBrijesh Singh return -EFAULT;
11232c07ded0SBrijesh Singh
1124238eca82SSean Christopherson memset(&data, 0, sizeof(data));
11252c07ded0SBrijesh Singh
11262c07ded0SBrijesh Singh /* User wants to query the blob length */
11272c07ded0SBrijesh Singh if (!params.len)
11282c07ded0SBrijesh Singh goto cmd;
11292c07ded0SBrijesh Singh
11302c07ded0SBrijesh Singh p = (void __user *)(uintptr_t)params.uaddr;
11312c07ded0SBrijesh Singh if (p) {
1132238eca82SSean Christopherson if (params.len > SEV_FW_BLOB_MAX_SIZE)
1133238eca82SSean Christopherson return -EINVAL;
11342c07ded0SBrijesh Singh
1135d22d2474SAshish Kalra blob = kzalloc(params.len, GFP_KERNEL_ACCOUNT);
11362c07ded0SBrijesh Singh if (!blob)
1137238eca82SSean Christopherson return -ENOMEM;
11382c07ded0SBrijesh Singh
1139238eca82SSean Christopherson data.address = __psp_pa(blob);
1140238eca82SSean Christopherson data.len = params.len;
1141238eca82SSean Christopherson memcpy(data.mnonce, params.mnonce, sizeof(params.mnonce));
11422c07ded0SBrijesh Singh }
11432c07ded0SBrijesh Singh cmd:
1144238eca82SSean Christopherson data.handle = sev->handle;
1145238eca82SSean Christopherson ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, &data, &argp->error);
11462c07ded0SBrijesh Singh /*
11472c07ded0SBrijesh Singh * If userspace queried only the blob length, the FW has responded with the expected data.
11482c07ded0SBrijesh Singh */
11492c07ded0SBrijesh Singh if (!params.len)
11502c07ded0SBrijesh Singh goto done;
11512c07ded0SBrijesh Singh
11522c07ded0SBrijesh Singh if (ret)
11532c07ded0SBrijesh Singh goto e_free_blob;
11542c07ded0SBrijesh Singh
11552c07ded0SBrijesh Singh if (blob) {
11562c07ded0SBrijesh Singh if (copy_to_user(p, blob, params.len))
11572c07ded0SBrijesh Singh ret = -EFAULT;
11582c07ded0SBrijesh Singh }
11592c07ded0SBrijesh Singh
11602c07ded0SBrijesh Singh done:
1161238eca82SSean Christopherson params.len = data.len;
11622c07ded0SBrijesh Singh if (copy_to_user(report, &params, sizeof(params)))
11632c07ded0SBrijesh Singh ret = -EFAULT;
11642c07ded0SBrijesh Singh e_free_blob:
11652c07ded0SBrijesh Singh kfree(blob);
11662c07ded0SBrijesh Singh return ret;
11672c07ded0SBrijesh Singh }
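/*
 * Illustrative userspace sketch (not part of this file): the attestation
 * report is typically fetched with two KVM_SEV_GET_ATTESTATION_REPORT calls,
 * the first with params.len == 0 to learn the required blob size, the second
 * with an allocated buffer.  sev_ioctl() is a hypothetical helper wrapping
 * the KVM_MEMORY_ENCRYPT_OP ioctl; error handling is omitted.
 *
 *	struct kvm_sev_attestation_report rep = {};
 *	memcpy(rep.mnonce, nonce, sizeof(rep.mnonce));
 *	sev_ioctl(vm_fd, KVM_SEV_GET_ATTESTATION_REPORT, &rep);	// rep.len := required size
 *	rep.uaddr = (__u64)(uintptr_t)malloc(rep.len);
 *	sev_ioctl(vm_fd, KVM_SEV_GET_ATTESTATION_REPORT, &rep);	// fills the report buffer
 */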
11682c07ded0SBrijesh Singh
11694cfdd47dSBrijesh Singh /* Userspace wants to query session length. */
11704cfdd47dSBrijesh Singh static int
11714cfdd47dSBrijesh Singh __sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
11724cfdd47dSBrijesh Singh struct kvm_sev_send_start *params)
11734cfdd47dSBrijesh Singh {
11744cfdd47dSBrijesh Singh struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1175238eca82SSean Christopherson struct sev_data_send_start data;
11764cfdd47dSBrijesh Singh int ret;
11774cfdd47dSBrijesh Singh
11784f13d471SAshish Kalra memset(&data, 0, sizeof(data));
1179238eca82SSean Christopherson data.handle = sev->handle;
1180238eca82SSean Christopherson ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
11814cfdd47dSBrijesh Singh
1182238eca82SSean Christopherson params->session_len = data.session_len;
11834cfdd47dSBrijesh Singh if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
11844cfdd47dSBrijesh Singh sizeof(struct kvm_sev_send_start)))
11854cfdd47dSBrijesh Singh ret = -EFAULT;
11864cfdd47dSBrijesh Singh
11874cfdd47dSBrijesh Singh return ret;
11884cfdd47dSBrijesh Singh }
11894cfdd47dSBrijesh Singh
11904cfdd47dSBrijesh Singh static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
11914cfdd47dSBrijesh Singh {
11924cfdd47dSBrijesh Singh struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1193238eca82SSean Christopherson struct sev_data_send_start data;
11944cfdd47dSBrijesh Singh struct kvm_sev_send_start params;
11954cfdd47dSBrijesh Singh void *amd_certs, *session_data;
11964cfdd47dSBrijesh Singh void *pdh_cert, *plat_certs;
11974cfdd47dSBrijesh Singh int ret;
11984cfdd47dSBrijesh Singh
11994cfdd47dSBrijesh Singh if (!sev_guest(kvm))
12004cfdd47dSBrijesh Singh return -ENOTTY;
12014cfdd47dSBrijesh Singh
12024cfdd47dSBrijesh Singh if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
12034cfdd47dSBrijesh Singh sizeof(struct kvm_sev_send_start)))
12044cfdd47dSBrijesh Singh return -EFAULT;
12054cfdd47dSBrijesh Singh
12064cfdd47dSBrijesh Singh /* if session_len is zero, userspace wants to query the session length */
12074cfdd47dSBrijesh Singh if (!params.session_len)
12084cfdd47dSBrijesh Singh return __sev_send_start_query_session_length(kvm, argp,
12094cfdd47dSBrijesh Singh &params);
12104cfdd47dSBrijesh Singh
12114cfdd47dSBrijesh Singh /* some sanity checks */
12124cfdd47dSBrijesh Singh if (!params.pdh_cert_uaddr || !params.pdh_cert_len ||
12134cfdd47dSBrijesh Singh !params.session_uaddr || params.session_len > SEV_FW_BLOB_MAX_SIZE)
12144cfdd47dSBrijesh Singh return -EINVAL;
12154cfdd47dSBrijesh Singh
12164cfdd47dSBrijesh Singh /* allocate the memory to hold the session data blob */
1217d22d2474SAshish Kalra session_data = kzalloc(params.session_len, GFP_KERNEL_ACCOUNT);
12184cfdd47dSBrijesh Singh if (!session_data)
12194cfdd47dSBrijesh Singh return -ENOMEM;
12204cfdd47dSBrijesh Singh
12214cfdd47dSBrijesh Singh /* copy the certificate blobs from userspace */
12224cfdd47dSBrijesh Singh pdh_cert = psp_copy_user_blob(params.pdh_cert_uaddr,
12234cfdd47dSBrijesh Singh params.pdh_cert_len);
12244cfdd47dSBrijesh Singh if (IS_ERR(pdh_cert)) {
12254cfdd47dSBrijesh Singh ret = PTR_ERR(pdh_cert);
12264cfdd47dSBrijesh Singh goto e_free_session;
12274cfdd47dSBrijesh Singh }
12284cfdd47dSBrijesh Singh
12294cfdd47dSBrijesh Singh plat_certs = psp_copy_user_blob(params.plat_certs_uaddr,
12304cfdd47dSBrijesh Singh params.plat_certs_len);
12314cfdd47dSBrijesh Singh if (IS_ERR(plat_certs)) {
12324cfdd47dSBrijesh Singh ret = PTR_ERR(plat_certs);
12334cfdd47dSBrijesh Singh goto e_free_pdh;
12344cfdd47dSBrijesh Singh }
12354cfdd47dSBrijesh Singh
12364cfdd47dSBrijesh Singh amd_certs = psp_copy_user_blob(params.amd_certs_uaddr,
12374cfdd47dSBrijesh Singh params.amd_certs_len);
12384cfdd47dSBrijesh Singh if (IS_ERR(amd_certs)) {
12394cfdd47dSBrijesh Singh ret = PTR_ERR(amd_certs);
12404cfdd47dSBrijesh Singh goto e_free_plat_cert;
12414cfdd47dSBrijesh Singh }
12424cfdd47dSBrijesh Singh
12434cfdd47dSBrijesh Singh /* populate the FW SEND_START fields with the system physical addresses */
1244238eca82SSean Christopherson memset(&data, 0, sizeof(data));
1245238eca82SSean Christopherson data.pdh_cert_address = __psp_pa(pdh_cert);
1246238eca82SSean Christopherson data.pdh_cert_len = params.pdh_cert_len;
1247238eca82SSean Christopherson data.plat_certs_address = __psp_pa(plat_certs);
1248238eca82SSean Christopherson data.plat_certs_len = params.plat_certs_len;
1249238eca82SSean Christopherson data.amd_certs_address = __psp_pa(amd_certs);
1250238eca82SSean Christopherson data.amd_certs_len = params.amd_certs_len;
1251238eca82SSean Christopherson data.session_address = __psp_pa(session_data);
1252238eca82SSean Christopherson data.session_len = params.session_len;
1253238eca82SSean Christopherson data.handle = sev->handle;
12544cfdd47dSBrijesh Singh
1255238eca82SSean Christopherson ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
12564cfdd47dSBrijesh Singh
12574cfdd47dSBrijesh Singh if (!ret && copy_to_user((void __user *)(uintptr_t)params.session_uaddr,
12584cfdd47dSBrijesh Singh session_data, params.session_len)) {
12594cfdd47dSBrijesh Singh ret = -EFAULT;
1260238eca82SSean Christopherson goto e_free_amd_cert;
12614cfdd47dSBrijesh Singh }
12624cfdd47dSBrijesh Singh
1263238eca82SSean Christopherson params.policy = data.policy;
1264238eca82SSean Christopherson params.session_len = data.session_len;
12654cfdd47dSBrijesh Singh if (copy_to_user((void __user *)(uintptr_t)argp->data, &params,
12664cfdd47dSBrijesh Singh sizeof(struct kvm_sev_send_start)))
12674cfdd47dSBrijesh Singh ret = -EFAULT;
12684cfdd47dSBrijesh Singh
12694cfdd47dSBrijesh Singh e_free_amd_cert:
12704cfdd47dSBrijesh Singh kfree(amd_certs);
12714cfdd47dSBrijesh Singh e_free_plat_cert:
12724cfdd47dSBrijesh Singh kfree(plat_certs);
12734cfdd47dSBrijesh Singh e_free_pdh:
12744cfdd47dSBrijesh Singh kfree(pdh_cert);
12754cfdd47dSBrijesh Singh e_free_session:
12764cfdd47dSBrijesh Singh kfree(session_data);
12774cfdd47dSBrijesh Singh return ret;
12784cfdd47dSBrijesh Singh }
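/*
 * Hedged usage sketch (not from this file): like the attestation path,
 * KVM_SEV_SEND_START is usually issued twice -- once with session_len == 0
 * so the PSP reports the required session blob size, then again with the
 * certificate chain and an allocated session buffer.  The resulting session
 * blob and policy are later handed to KVM_SEV_RECEIVE_START on the
 * destination.  sev_ioctl() is a hypothetical KVM_MEMORY_ENCRYPT_OP wrapper.
 *
 *	struct kvm_sev_send_start s = {};
 *	sev_ioctl(vm_fd, KVM_SEV_SEND_START, &s);		// s.session_len := required size
 *	s.session_uaddr = (__u64)(uintptr_t)malloc(s.session_len);
 *	s.pdh_cert_uaddr = pdh_uaddr;  s.pdh_cert_len = pdh_len;	// destination's PDH cert
 *	sev_ioctl(vm_fd, KVM_SEV_SEND_START, &s);
 */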
12794cfdd47dSBrijesh Singh
1280d3d1af85SBrijesh Singh /* Userspace wants to query either header or trans length. */
1281d3d1af85SBrijesh Singh static int
1282d3d1af85SBrijesh Singh __sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
1283d3d1af85SBrijesh Singh struct kvm_sev_send_update_data *params)
1284d3d1af85SBrijesh Singh {
1285d3d1af85SBrijesh Singh struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1286238eca82SSean Christopherson struct sev_data_send_update_data data;
1287d3d1af85SBrijesh Singh int ret;
1288d3d1af85SBrijesh Singh
12894f13d471SAshish Kalra memset(&data, 0, sizeof(data));
1290238eca82SSean Christopherson data.handle = sev->handle;
1291238eca82SSean Christopherson ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);
1292d3d1af85SBrijesh Singh
1293238eca82SSean Christopherson params->hdr_len = data.hdr_len;
1294238eca82SSean Christopherson params->trans_len = data.trans_len;
1295d3d1af85SBrijesh Singh
1296d3d1af85SBrijesh Singh if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
1297d3d1af85SBrijesh Singh sizeof(struct kvm_sev_send_update_data)))
1298d3d1af85SBrijesh Singh ret = -EFAULT;
1299d3d1af85SBrijesh Singh
1300d3d1af85SBrijesh Singh return ret;
1301d3d1af85SBrijesh Singh }
1302d3d1af85SBrijesh Singh
1303d3d1af85SBrijesh Singh static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
1304d3d1af85SBrijesh Singh {
1305d3d1af85SBrijesh Singh struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1306238eca82SSean Christopherson struct sev_data_send_update_data data;
1307d3d1af85SBrijesh Singh struct kvm_sev_send_update_data params;
1308d3d1af85SBrijesh Singh void *hdr, *trans_data;
1309d3d1af85SBrijesh Singh struct page **guest_page;
1310d3d1af85SBrijesh Singh unsigned long n;
1311d3d1af85SBrijesh Singh int ret, offset;
1312d3d1af85SBrijesh Singh
1313d3d1af85SBrijesh Singh if (!sev_guest(kvm))
1314d3d1af85SBrijesh Singh return -ENOTTY;
1315d3d1af85SBrijesh Singh
1316d3d1af85SBrijesh Singh if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
1317d3d1af85SBrijesh Singh sizeof(struct kvm_sev_send_update_data)))
1318d3d1af85SBrijesh Singh return -EFAULT;
1319d3d1af85SBrijesh Singh
1320d3d1af85SBrijesh Singh /* userspace wants to query either header or trans length */
1321d3d1af85SBrijesh Singh if (!params.trans_len || !params.hdr_len)
1322d3d1af85SBrijesh Singh return __sev_send_update_data_query_lengths(kvm, argp, &params);
1323d3d1af85SBrijesh Singh
1324d3d1af85SBrijesh Singh if (!params.trans_uaddr || !params.guest_uaddr ||
1325d3d1af85SBrijesh Singh !params.guest_len || !params.hdr_uaddr)
1326d3d1af85SBrijesh Singh return -EINVAL;
1327d3d1af85SBrijesh Singh
1328d3d1af85SBrijesh Singh /* Check if we are crossing the page boundary */
1329d3d1af85SBrijesh Singh offset = params.guest_uaddr & (PAGE_SIZE - 1);
1330f94f053aSPeter Gonda if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE)
1331d3d1af85SBrijesh Singh return -EINVAL;
1332d3d1af85SBrijesh Singh
1333d3d1af85SBrijesh Singh /* Pin guest memory */
1334d3d1af85SBrijesh Singh guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
1335d3d1af85SBrijesh Singh PAGE_SIZE, &n, 0);
1336c7a1b2b6SSean Christopherson if (IS_ERR(guest_page))
1337c7a1b2b6SSean Christopherson return PTR_ERR(guest_page);
1338d3d1af85SBrijesh Singh
1339d3d1af85SBrijesh Singh /* allocate memory for header and transport buffer */
1340d3d1af85SBrijesh Singh ret = -ENOMEM;
1341d22d2474SAshish Kalra hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT);
1342d3d1af85SBrijesh Singh if (!hdr)
1343d3d1af85SBrijesh Singh goto e_unpin;
1344d3d1af85SBrijesh Singh
1345d22d2474SAshish Kalra trans_data = kzalloc(params.trans_len, GFP_KERNEL_ACCOUNT);
1346d3d1af85SBrijesh Singh if (!trans_data)
1347d3d1af85SBrijesh Singh goto e_free_hdr;
1348d3d1af85SBrijesh Singh
1349238eca82SSean Christopherson memset(&data, 0, sizeof(data));
1350238eca82SSean Christopherson data.hdr_address = __psp_pa(hdr);
1351238eca82SSean Christopherson data.hdr_len = params.hdr_len;
1352238eca82SSean Christopherson data.trans_address = __psp_pa(trans_data);
1353238eca82SSean Christopherson data.trans_len = params.trans_len;
1354d3d1af85SBrijesh Singh
1355d3d1af85SBrijesh Singh /* The SEND_UPDATE_DATA command requires C-bit to be always set. */
1356238eca82SSean Christopherson data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
1357238eca82SSean Christopherson data.guest_address |= sev_me_mask;
1358238eca82SSean Christopherson data.guest_len = params.guest_len;
1359238eca82SSean Christopherson data.handle = sev->handle;
1360d3d1af85SBrijesh Singh
1361238eca82SSean Christopherson ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);
1362d3d1af85SBrijesh Singh
1363d3d1af85SBrijesh Singh if (ret)
1364238eca82SSean Christopherson goto e_free_trans_data;
1365d3d1af85SBrijesh Singh
1366d3d1af85SBrijesh Singh /* copy transport buffer to user space */
1367d3d1af85SBrijesh Singh if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr,
1368d3d1af85SBrijesh Singh trans_data, params.trans_len)) {
1369d3d1af85SBrijesh Singh ret = -EFAULT;
1370238eca82SSean Christopherson goto e_free_trans_data;
1371d3d1af85SBrijesh Singh }
1372d3d1af85SBrijesh Singh
1373d3d1af85SBrijesh Singh /* Copy packet header to userspace. */
1374b4a69392SSean Christopherson if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr,
1375b4a69392SSean Christopherson params.hdr_len))
1376b4a69392SSean Christopherson ret = -EFAULT;
1377d3d1af85SBrijesh Singh
1378d3d1af85SBrijesh Singh e_free_trans_data:
1379d3d1af85SBrijesh Singh kfree(trans_data);
1380d3d1af85SBrijesh Singh e_free_hdr:
1381d3d1af85SBrijesh Singh kfree(hdr);
1382d3d1af85SBrijesh Singh e_unpin:
1383d3d1af85SBrijesh Singh sev_unpin_memory(kvm, guest_page, n);
1384d3d1af85SBrijesh Singh
1385d3d1af85SBrijesh Singh return ret;
1386d3d1af85SBrijesh Singh }
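/*
 * Hedged usage note: because guest_uaddr/guest_len may not cross a page
 * boundary (checked above), a migration source conceptually walks guest
 * memory one page at a time, roughly as below.  hva_of() is a hypothetical
 * helper mapping a GPA to its backing userspace address, and hdr_len/trans_len
 * are the sizes previously obtained from the length-query call.
 *
 *	for (gpa = start; gpa < end; gpa += PAGE_SIZE) {
 *		struct kvm_sev_send_update_data d = {
 *			.guest_uaddr = hva_of(gpa),
 *			.guest_len   = PAGE_SIZE,
 *			.hdr_uaddr   = (__u64)(uintptr_t)hdr,   .hdr_len   = hdr_len,
 *			.trans_uaddr = (__u64)(uintptr_t)trans, .trans_len = trans_len,
 *		};
 *		sev_ioctl(vm_fd, KVM_SEV_SEND_UPDATE_DATA, &d);
 *		// transmit hdr + trans to the destination host
 *	}
 */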
1387d3d1af85SBrijesh Singh
1388fddecf6aSBrijesh Singh static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
1389fddecf6aSBrijesh Singh {
1390fddecf6aSBrijesh Singh struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1391238eca82SSean Christopherson struct sev_data_send_finish data;
1392fddecf6aSBrijesh Singh
1393fddecf6aSBrijesh Singh if (!sev_guest(kvm))
1394fddecf6aSBrijesh Singh return -ENOTTY;
1395fddecf6aSBrijesh Singh
1396238eca82SSean Christopherson data.handle = sev->handle;
1397238eca82SSean Christopherson return sev_issue_cmd(kvm, SEV_CMD_SEND_FINISH, &data, &argp->error);
1398fddecf6aSBrijesh Singh }
1399fddecf6aSBrijesh Singh
14005569e2e7SSteve Rutherford static int sev_send_cancel(struct kvm *kvm, struct kvm_sev_cmd *argp)
14015569e2e7SSteve Rutherford {
14025569e2e7SSteve Rutherford struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1403238eca82SSean Christopherson struct sev_data_send_cancel data;
14045569e2e7SSteve Rutherford
14055569e2e7SSteve Rutherford if (!sev_guest(kvm))
14065569e2e7SSteve Rutherford return -ENOTTY;
14075569e2e7SSteve Rutherford
1408238eca82SSean Christopherson data.handle = sev->handle;
1409238eca82SSean Christopherson return sev_issue_cmd(kvm, SEV_CMD_SEND_CANCEL, &data, &argp->error);
14105569e2e7SSteve Rutherford }
14115569e2e7SSteve Rutherford
1412af43cbbfSBrijesh Singh static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
1413af43cbbfSBrijesh Singh {
1414af43cbbfSBrijesh Singh struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1415238eca82SSean Christopherson struct sev_data_receive_start start;
1416af43cbbfSBrijesh Singh struct kvm_sev_receive_start params;
1417af43cbbfSBrijesh Singh int *error = &argp->error;
1418af43cbbfSBrijesh Singh void *session_data;
1419af43cbbfSBrijesh Singh void *pdh_data;
1420af43cbbfSBrijesh Singh int ret;
1421af43cbbfSBrijesh Singh
1422af43cbbfSBrijesh Singh if (!sev_guest(kvm))
1423af43cbbfSBrijesh Singh return -ENOTTY;
1424af43cbbfSBrijesh Singh
1425af43cbbfSBrijesh Singh /* Get parameters from userspace */
1426af43cbbfSBrijesh Singh if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
1427af43cbbfSBrijesh Singh sizeof(struct kvm_sev_receive_start)))
1428af43cbbfSBrijesh Singh return -EFAULT;
1429af43cbbfSBrijesh Singh
1430af43cbbfSBrijesh Singh /* some sanity checks */
1431af43cbbfSBrijesh Singh if (!params.pdh_uaddr || !params.pdh_len ||
1432af43cbbfSBrijesh Singh !params.session_uaddr || !params.session_len)
1433af43cbbfSBrijesh Singh return -EINVAL;
1434af43cbbfSBrijesh Singh
1435af43cbbfSBrijesh Singh pdh_data = psp_copy_user_blob(params.pdh_uaddr, params.pdh_len);
1436af43cbbfSBrijesh Singh if (IS_ERR(pdh_data))
1437af43cbbfSBrijesh Singh return PTR_ERR(pdh_data);
1438af43cbbfSBrijesh Singh
1439af43cbbfSBrijesh Singh session_data = psp_copy_user_blob(params.session_uaddr,
1440af43cbbfSBrijesh Singh params.session_len);
1441af43cbbfSBrijesh Singh if (IS_ERR(session_data)) {
1442af43cbbfSBrijesh Singh ret = PTR_ERR(session_data);
1443af43cbbfSBrijesh Singh goto e_free_pdh;
1444af43cbbfSBrijesh Singh }
1445af43cbbfSBrijesh Singh
1446238eca82SSean Christopherson memset(&start, 0, sizeof(start));
1447238eca82SSean Christopherson start.handle = params.handle;
1448238eca82SSean Christopherson start.policy = params.policy;
1449238eca82SSean Christopherson start.pdh_cert_address = __psp_pa(pdh_data);
1450238eca82SSean Christopherson start.pdh_cert_len = params.pdh_len;
1451238eca82SSean Christopherson start.session_address = __psp_pa(session_data);
1452238eca82SSean Christopherson start.session_len = params.session_len;
1453af43cbbfSBrijesh Singh
1454af43cbbfSBrijesh Singh /* create memory encryption context */
1455238eca82SSean Christopherson ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_RECEIVE_START, &start,
1456af43cbbfSBrijesh Singh error);
1457af43cbbfSBrijesh Singh if (ret)
1458238eca82SSean Christopherson goto e_free_session;
1459af43cbbfSBrijesh Singh
1460af43cbbfSBrijesh Singh /* Bind ASID to this guest */
1461238eca82SSean Christopherson ret = sev_bind_asid(kvm, start.handle, error);
1462f1815e0aSMingwei Zhang if (ret) {
1463f1815e0aSMingwei Zhang sev_decommission(start.handle);
1464238eca82SSean Christopherson goto e_free_session;
1465f1815e0aSMingwei Zhang }
1466af43cbbfSBrijesh Singh
1467238eca82SSean Christopherson params.handle = start.handle;
1468af43cbbfSBrijesh Singh if (copy_to_user((void __user *)(uintptr_t)argp->data,
1469af43cbbfSBrijesh Singh &params, sizeof(struct kvm_sev_receive_start))) {
1470af43cbbfSBrijesh Singh ret = -EFAULT;
1471238eca82SSean Christopherson sev_unbind_asid(kvm, start.handle);
1472238eca82SSean Christopherson goto e_free_session;
1473af43cbbfSBrijesh Singh }
1474af43cbbfSBrijesh Singh
1475238eca82SSean Christopherson sev->handle = start.handle;
1476af43cbbfSBrijesh Singh sev->fd = argp->sev_fd;
1477af43cbbfSBrijesh Singh
1478af43cbbfSBrijesh Singh e_free_session:
1479af43cbbfSBrijesh Singh kfree(session_data);
1480af43cbbfSBrijesh Singh e_free_pdh:
1481af43cbbfSBrijesh Singh kfree(pdh_data);
1482af43cbbfSBrijesh Singh
1483af43cbbfSBrijesh Singh return ret;
1484af43cbbfSBrijesh Singh }
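/*
 * Hedged overview: the receive side mirrors the send side.  A typical
 * host-to-host migration sequence, as suggested by the commands in this
 * file, looks roughly like:
 *
 *	source:       KVM_SEV_SEND_START          -> session blob, policy
 *	destination:  KVM_SEV_RECEIVE_START       <- PDH cert, session blob, policy
 *	per page:     KVM_SEV_SEND_UPDATE_DATA    -> hdr + trans
 *	              KVM_SEV_RECEIVE_UPDATE_DATA <- hdr + trans
 *	finally:      KVM_SEV_SEND_FINISH / KVM_SEV_RECEIVE_FINISH
 *
 * How the blobs are transported between hosts is entirely up to userspace.
 */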
1485af43cbbfSBrijesh Singh
148615fb7de1SBrijesh Singh static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
148715fb7de1SBrijesh Singh {
148815fb7de1SBrijesh Singh struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
148915fb7de1SBrijesh Singh struct kvm_sev_receive_update_data params;
1490238eca82SSean Christopherson struct sev_data_receive_update_data data;
149115fb7de1SBrijesh Singh void *hdr = NULL, *trans = NULL;
149215fb7de1SBrijesh Singh struct page **guest_page;
149315fb7de1SBrijesh Singh unsigned long n;
149415fb7de1SBrijesh Singh int ret, offset;
149515fb7de1SBrijesh Singh
149615fb7de1SBrijesh Singh if (!sev_guest(kvm))
149715fb7de1SBrijesh Singh return -EINVAL;
149815fb7de1SBrijesh Singh
149915fb7de1SBrijesh Singh if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
150015fb7de1SBrijesh Singh sizeof(struct kvm_sev_receive_update_data)))
150115fb7de1SBrijesh Singh return -EFAULT;
150215fb7de1SBrijesh Singh
150315fb7de1SBrijesh Singh if (!params.hdr_uaddr || !params.hdr_len ||
150415fb7de1SBrijesh Singh !params.guest_uaddr || !params.guest_len ||
150515fb7de1SBrijesh Singh !params.trans_uaddr || !params.trans_len)
150615fb7de1SBrijesh Singh return -EINVAL;
150715fb7de1SBrijesh Singh
150815fb7de1SBrijesh Singh /* Check if we are crossing the page boundary */
150915fb7de1SBrijesh Singh offset = params.guest_uaddr & (PAGE_SIZE - 1);
1510f94f053aSPeter Gonda if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE)
151115fb7de1SBrijesh Singh return -EINVAL;
151215fb7de1SBrijesh Singh
151315fb7de1SBrijesh Singh hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
151415fb7de1SBrijesh Singh if (IS_ERR(hdr))
151515fb7de1SBrijesh Singh return PTR_ERR(hdr);
151615fb7de1SBrijesh Singh
151715fb7de1SBrijesh Singh trans = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
151815fb7de1SBrijesh Singh if (IS_ERR(trans)) {
151915fb7de1SBrijesh Singh ret = PTR_ERR(trans);
152015fb7de1SBrijesh Singh goto e_free_hdr;
152115fb7de1SBrijesh Singh }
152215fb7de1SBrijesh Singh
1523238eca82SSean Christopherson memset(&data, 0, sizeof(data));
1524238eca82SSean Christopherson data.hdr_address = __psp_pa(hdr);
1525238eca82SSean Christopherson data.hdr_len = params.hdr_len;
1526238eca82SSean Christopherson data.trans_address = __psp_pa(trans);
1527238eca82SSean Christopherson data.trans_len = params.trans_len;
152815fb7de1SBrijesh Singh
152915fb7de1SBrijesh Singh /* Pin guest memory */
153015fb7de1SBrijesh Singh guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
153150c03801SSean Christopherson PAGE_SIZE, &n, 1);
1532c7a1b2b6SSean Christopherson if (IS_ERR(guest_page)) {
1533c7a1b2b6SSean Christopherson ret = PTR_ERR(guest_page);
1534238eca82SSean Christopherson goto e_free_trans;
1535c7a1b2b6SSean Christopherson }
153615fb7de1SBrijesh Singh
1537c8c340a9SMasahiro Kozuka /*
1538c8c340a9SMasahiro Kozuka * Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP
1539c8c340a9SMasahiro Kozuka * encrypts the written data with the guest's key, and the cache may
1540c8c340a9SMasahiro Kozuka * contain dirty, unencrypted data.
1541c8c340a9SMasahiro Kozuka */
1542c8c340a9SMasahiro Kozuka sev_clflush_pages(guest_page, n);
1543c8c340a9SMasahiro Kozuka
154415fb7de1SBrijesh Singh /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */
1545238eca82SSean Christopherson data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
1546238eca82SSean Christopherson data.guest_address |= sev_me_mask;
1547238eca82SSean Christopherson data.guest_len = params.guest_len;
1548238eca82SSean Christopherson data.handle = sev->handle;
154915fb7de1SBrijesh Singh
1550238eca82SSean Christopherson ret = sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_DATA, &data,
155115fb7de1SBrijesh Singh &argp->error);
155215fb7de1SBrijesh Singh
155315fb7de1SBrijesh Singh sev_unpin_memory(kvm, guest_page, n);
155415fb7de1SBrijesh Singh
155515fb7de1SBrijesh Singh e_free_trans:
155615fb7de1SBrijesh Singh kfree(trans);
155715fb7de1SBrijesh Singh e_free_hdr:
155815fb7de1SBrijesh Singh kfree(hdr);
155915fb7de1SBrijesh Singh
156015fb7de1SBrijesh Singh return ret;
156115fb7de1SBrijesh Singh }
156215fb7de1SBrijesh Singh
15636a443defSBrijesh Singh static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
15646a443defSBrijesh Singh {
15656a443defSBrijesh Singh struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1566238eca82SSean Christopherson struct sev_data_receive_finish data;
15676a443defSBrijesh Singh
15686a443defSBrijesh Singh if (!sev_guest(kvm))
15696a443defSBrijesh Singh return -ENOTTY;
15706a443defSBrijesh Singh
1571238eca82SSean Christopherson data.handle = sev->handle;
1572238eca82SSean Christopherson return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
15736a443defSBrijesh Singh }
15746a443defSBrijesh Singh
15758e38e96aSSean Christopherson static bool is_cmd_allowed_from_mirror(u32 cmd_id)
15765b92b6caSPeter Gonda {
15775b92b6caSPeter Gonda /*
15785b92b6caSPeter Gonda * Allow mirror VMs to call KVM_SEV_LAUNCH_UPDATE_VMSA to enable SEV-ES
15795b92b6caSPeter Gonda * for active mirror VMs. Also allow the debugging and status commands.
15805b92b6caSPeter Gonda */
15815b92b6caSPeter Gonda if (cmd_id == KVM_SEV_LAUNCH_UPDATE_VMSA ||
15825b92b6caSPeter Gonda cmd_id == KVM_SEV_GUEST_STATUS || cmd_id == KVM_SEV_DBG_DECRYPT ||
15835b92b6caSPeter Gonda cmd_id == KVM_SEV_DBG_ENCRYPT)
15845b92b6caSPeter Gonda return true;
15855b92b6caSPeter Gonda
15865b92b6caSPeter Gonda return false;
15875b92b6caSPeter Gonda }
15885b92b6caSPeter Gonda
1589501b580cSPaolo Bonzini static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
1590b5663931SPeter Gonda {
1591501b580cSPaolo Bonzini struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
1592501b580cSPaolo Bonzini struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;
1593c9d61dcbSPaolo Bonzini int r = -EBUSY;
1594501b580cSPaolo Bonzini
1595501b580cSPaolo Bonzini if (dst_kvm == src_kvm)
1596501b580cSPaolo Bonzini return -EINVAL;
1597b5663931SPeter Gonda
1598b5663931SPeter Gonda /*
1599501b580cSPaolo Bonzini * Bail if these VMs are already involved in a migration to avoid
1600501b580cSPaolo Bonzini * deadlock between two VMs trying to migrate to/from each other.
1601b5663931SPeter Gonda */
1602501b580cSPaolo Bonzini if (atomic_cmpxchg_acquire(&dst_sev->migration_in_progress, 0, 1))
1603b5663931SPeter Gonda return -EBUSY;
1604b5663931SPeter Gonda
1605c9d61dcbSPaolo Bonzini if (atomic_cmpxchg_acquire(&src_sev->migration_in_progress, 0, 1))
1606c9d61dcbSPaolo Bonzini goto release_dst;
1607b5663931SPeter Gonda
1608c9d61dcbSPaolo Bonzini r = -EINTR;
1609c9d61dcbSPaolo Bonzini if (mutex_lock_killable(&dst_kvm->lock))
1610c9d61dcbSPaolo Bonzini goto release_src;
1611597cb796SWanpeng Li if (mutex_lock_killable_nested(&src_kvm->lock, SINGLE_DEPTH_NESTING))
1612c9d61dcbSPaolo Bonzini goto unlock_dst;
1613b5663931SPeter Gonda return 0;
1614c9d61dcbSPaolo Bonzini
1615c9d61dcbSPaolo Bonzini unlock_dst:
1616c9d61dcbSPaolo Bonzini mutex_unlock(&dst_kvm->lock);
1617c9d61dcbSPaolo Bonzini release_src:
1618c9d61dcbSPaolo Bonzini atomic_set_release(&src_sev->migration_in_progress, 0);
1619c9d61dcbSPaolo Bonzini release_dst:
1620c9d61dcbSPaolo Bonzini atomic_set_release(&dst_sev->migration_in_progress, 0);
1621c9d61dcbSPaolo Bonzini return r;
1622b5663931SPeter Gonda }
1623b5663931SPeter Gonda
1624501b580cSPaolo Bonzini static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
1625b5663931SPeter Gonda {
1626501b580cSPaolo Bonzini struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
1627501b580cSPaolo Bonzini struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;
1628b5663931SPeter Gonda
1629501b580cSPaolo Bonzini mutex_unlock(&dst_kvm->lock);
1630501b580cSPaolo Bonzini mutex_unlock(&src_kvm->lock);
1631501b580cSPaolo Bonzini atomic_set_release(&dst_sev->migration_in_progress, 0);
1632501b580cSPaolo Bonzini atomic_set_release(&src_sev->migration_in_progress, 0);
1633b5663931SPeter Gonda }
1634b5663931SPeter Gonda
16350c2c7c06SPeter Gonda /* vCPU mutex subclasses. */
16360c2c7c06SPeter Gonda enum sev_migration_role {
16370c2c7c06SPeter Gonda SEV_MIGRATION_SOURCE = 0,
16380c2c7c06SPeter Gonda SEV_MIGRATION_TARGET,
16390c2c7c06SPeter Gonda SEV_NR_MIGRATION_ROLES,
16400c2c7c06SPeter Gonda };
1641b5663931SPeter Gonda
16420c2c7c06SPeter Gonda static int sev_lock_vcpus_for_migration(struct kvm *kvm,
16430c2c7c06SPeter Gonda enum sev_migration_role role)
1644b5663931SPeter Gonda {
1645b5663931SPeter Gonda struct kvm_vcpu *vcpu;
164646808a4cSMarc Zyngier unsigned long i, j;
1647b5663931SPeter Gonda
1648b5663931SPeter Gonda kvm_for_each_vcpu(i, vcpu, kvm) {
16490c2c7c06SPeter Gonda if (mutex_lock_killable_nested(&vcpu->mutex, role))
1650b5663931SPeter Gonda goto out_unlock;
16510c2c7c06SPeter Gonda
1652e5380f6dSSean Christopherson #ifdef CONFIG_PROVE_LOCKING
1653e5380f6dSSean Christopherson if (!i)
16540c2c7c06SPeter Gonda /*
16550c2c7c06SPeter Gonda * Reset the role to one that avoids colliding with
16560c2c7c06SPeter Gonda * the role used for the first vcpu mutex.
16570c2c7c06SPeter Gonda */
16580c2c7c06SPeter Gonda role = SEV_NR_MIGRATION_ROLES;
1659e5380f6dSSean Christopherson else
16600c2c7c06SPeter Gonda mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
1661e5380f6dSSean Christopherson #endif
1662b5663931SPeter Gonda }
1663b5663931SPeter Gonda
1664b5663931SPeter Gonda return 0;
1665b5663931SPeter Gonda
1666b5663931SPeter Gonda out_unlock:
16670c2c7c06SPeter Gonda
1668b5663931SPeter Gonda kvm_for_each_vcpu(j, vcpu, kvm) {
1669b5663931SPeter Gonda if (i == j)
1670b5663931SPeter Gonda break;
1671b5663931SPeter Gonda
1672e5380f6dSSean Christopherson #ifdef CONFIG_PROVE_LOCKING
1673e5380f6dSSean Christopherson if (j)
16740c2c7c06SPeter Gonda mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
1675e5380f6dSSean Christopherson #endif
16760c2c7c06SPeter Gonda
1677b5663931SPeter Gonda mutex_unlock(&vcpu->mutex);
1678b5663931SPeter Gonda }
1679b5663931SPeter Gonda return -EINTR;
1680b5663931SPeter Gonda }
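/*
 * Note (assumption, not stated in this file): the mutex_release() above
 * appears to drop lockdep tracking for all but the first vCPU mutex,
 * presumably so that VMs with many vCPUs do not exhaust lockdep's limit on
 * simultaneously held locks, while the source/target role subclasses still
 * let lockdep validate the ordering of the first mutex taken on each side.
 */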
1681b5663931SPeter Gonda
1682b5663931SPeter Gonda static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
1683b5663931SPeter Gonda {
1684b5663931SPeter Gonda struct kvm_vcpu *vcpu;
168546808a4cSMarc Zyngier unsigned long i;
16860c2c7c06SPeter Gonda bool first = true;
1687b5663931SPeter Gonda
1688b5663931SPeter Gonda kvm_for_each_vcpu(i, vcpu, kvm) {
16890c2c7c06SPeter Gonda if (first)
16900c2c7c06SPeter Gonda first = false;
16910c2c7c06SPeter Gonda else
16920c2c7c06SPeter Gonda mutex_acquire(&vcpu->mutex.dep_map,
16930c2c7c06SPeter Gonda SEV_NR_MIGRATION_ROLES, 0, _THIS_IP_);
16940c2c7c06SPeter Gonda
1695b5663931SPeter Gonda mutex_unlock(&vcpu->mutex);
1696b5663931SPeter Gonda }
1697b5663931SPeter Gonda }
1698b5663931SPeter Gonda
1699b2125513SPeter Gonda static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
1700b5663931SPeter Gonda {
1701b2125513SPeter Gonda struct kvm_sev_info *dst = &to_kvm_svm(dst_kvm)->sev_info;
1702b2125513SPeter Gonda struct kvm_sev_info *src = &to_kvm_svm(src_kvm)->sev_info;
17036defa24dSPeter Gonda struct kvm_vcpu *dst_vcpu, *src_vcpu;
17046defa24dSPeter Gonda struct vcpu_svm *dst_svm, *src_svm;
1705b2125513SPeter Gonda struct kvm_sev_info *mirror;
17066defa24dSPeter Gonda unsigned long i;
1707b2125513SPeter Gonda
1708b5663931SPeter Gonda dst->active = true;
1709b5663931SPeter Gonda dst->asid = src->asid;
1710b5663931SPeter Gonda dst->handle = src->handle;
1711b5663931SPeter Gonda dst->pages_locked = src->pages_locked;
1712642525e3SPaolo Bonzini dst->enc_context_owner = src->enc_context_owner;
17136defa24dSPeter Gonda dst->es_active = src->es_active;
1714b5663931SPeter Gonda
1715b5663931SPeter Gonda src->asid = 0;
1716b5663931SPeter Gonda src->active = false;
1717b5663931SPeter Gonda src->handle = 0;
1718b5663931SPeter Gonda src->pages_locked = 0;
1719642525e3SPaolo Bonzini src->enc_context_owner = NULL;
17206defa24dSPeter Gonda src->es_active = false;
1721b5663931SPeter Gonda
17224674164fSPaolo Bonzini list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list);
1723b2125513SPeter Gonda
1724b2125513SPeter Gonda /*
1725b2125513SPeter Gonda * If this VM has mirrors, "transfer" each mirror's refcount of the
1726b2125513SPeter Gonda * source to the destination (this KVM). The caller holds a reference
1727b2125513SPeter Gonda * to the source, so there's no danger of use-after-free.
1728b2125513SPeter Gonda */
1729b2125513SPeter Gonda list_cut_before(&dst->mirror_vms, &src->mirror_vms, &src->mirror_vms);
1730b2125513SPeter Gonda list_for_each_entry(mirror, &dst->mirror_vms, mirror_entry) {
1731b2125513SPeter Gonda kvm_get_kvm(dst_kvm);
1732b2125513SPeter Gonda kvm_put_kvm(src_kvm);
1733b2125513SPeter Gonda mirror->enc_context_owner = dst_kvm;
1734b2125513SPeter Gonda }
1735b2125513SPeter Gonda
1736b2125513SPeter Gonda /*
1737b2125513SPeter Gonda * If this VM is a mirror, remove the old mirror from the owners list
1738b2125513SPeter Gonda * and add the new mirror to the list.
1739b2125513SPeter Gonda */
1740b2125513SPeter Gonda if (is_mirroring_enc_context(dst_kvm)) {
1741b2125513SPeter Gonda struct kvm_sev_info *owner_sev_info =
1742b2125513SPeter Gonda &to_kvm_svm(dst->enc_context_owner)->sev_info;
1743b2125513SPeter Gonda
1744b2125513SPeter Gonda list_del(&src->mirror_entry);
1745b2125513SPeter Gonda list_add_tail(&dst->mirror_entry, &owner_sev_info->mirror_vms);
1746b2125513SPeter Gonda }
1747b5663931SPeter Gonda
17486defa24dSPeter Gonda kvm_for_each_vcpu(i, dst_vcpu, dst_kvm) {
17490b020f5aSPeter Gonda dst_svm = to_svm(dst_vcpu);
17500b020f5aSPeter Gonda
17516defa24dSPeter Gonda sev_init_vmcb(dst_svm);
17526defa24dSPeter Gonda
17536defa24dSPeter Gonda if (!dst->es_active)
17546defa24dSPeter Gonda continue;
17556defa24dSPeter Gonda
17566defa24dSPeter Gonda /*
17576defa24dSPeter Gonda * Note, the source is not required to have the same number of
17586defa24dSPeter Gonda * vCPUs as the destination when migrating a vanilla SEV VM.
17596defa24dSPeter Gonda */
1760f1187ef2SSean Christopherson src_vcpu = kvm_get_vcpu(src_kvm, i);
17616defa24dSPeter Gonda src_svm = to_svm(src_vcpu);
17626defa24dSPeter Gonda
17630b020f5aSPeter Gonda /*
17640b020f5aSPeter Gonda * Transfer VMSA and GHCB state to the destination. Nullify and
17650b020f5aSPeter Gonda * clear source fields as appropriate, the state now belongs to
17660b020f5aSPeter Gonda * the destination.
17670b020f5aSPeter Gonda */
17680b020f5aSPeter Gonda memcpy(&dst_svm->sev_es, &src_svm->sev_es, sizeof(src_svm->sev_es));
17690b020f5aSPeter Gonda dst_svm->vmcb->control.ghcb_gpa = src_svm->vmcb->control.ghcb_gpa;
17700b020f5aSPeter Gonda dst_svm->vmcb->control.vmsa_pa = src_svm->vmcb->control.vmsa_pa;
17710b020f5aSPeter Gonda dst_vcpu->arch.guest_state_protected = true;
17720b020f5aSPeter Gonda
17730b020f5aSPeter Gonda memset(&src_svm->sev_es, 0, sizeof(src_svm->sev_es));
17740b020f5aSPeter Gonda src_svm->vmcb->control.ghcb_gpa = INVALID_PAGE;
17750b020f5aSPeter Gonda src_svm->vmcb->control.vmsa_pa = INVALID_PAGE;
17760b020f5aSPeter Gonda src_vcpu->arch.guest_state_protected = false;
17770b020f5aSPeter Gonda }
17786defa24dSPeter Gonda }
17796defa24dSPeter Gonda
17806defa24dSPeter Gonda static int sev_check_source_vcpus(struct kvm *dst, struct kvm *src)
17816defa24dSPeter Gonda {
17826defa24dSPeter Gonda struct kvm_vcpu *src_vcpu;
17836defa24dSPeter Gonda unsigned long i;
17846defa24dSPeter Gonda
17856defa24dSPeter Gonda if (!sev_es_guest(src))
17866defa24dSPeter Gonda return 0;
17876defa24dSPeter Gonda
17886defa24dSPeter Gonda if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus))
17896defa24dSPeter Gonda return -EINVAL;
17906defa24dSPeter Gonda
17916defa24dSPeter Gonda kvm_for_each_vcpu(i, src_vcpu, src) {
17926defa24dSPeter Gonda if (!src_vcpu->arch.guest_state_protected)
17936defa24dSPeter Gonda return -EINVAL;
17946defa24dSPeter Gonda }
17950b020f5aSPeter Gonda
17960b020f5aSPeter Gonda return 0;
17970b020f5aSPeter Gonda }
17980b020f5aSPeter Gonda
1799559c7c75SSean Christopherson int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
1800b5663931SPeter Gonda {
1801b5663931SPeter Gonda struct kvm_sev_info *dst_sev = &to_kvm_svm(kvm)->sev_info;
1802501cfe06SPaolo Bonzini struct kvm_sev_info *src_sev, *cg_cleanup_sev;
1803d2084fd8SAl Viro struct fd f = fdget(source_fd);
1804b5663931SPeter Gonda struct kvm *source_kvm;
1805501cfe06SPaolo Bonzini bool charged = false;
1806b5663931SPeter Gonda int ret;
1807b5663931SPeter Gonda
1808d2084fd8SAl Viro if (!f.file)
1809d2084fd8SAl Viro return -EBADF;
1810d2084fd8SAl Viro
1811d2084fd8SAl Viro if (!file_is_kvm(f.file)) {
1812b5663931SPeter Gonda ret = -EBADF;
1813b5663931SPeter Gonda goto out_fput;
1814b5663931SPeter Gonda }
1815b5663931SPeter Gonda
1816d2084fd8SAl Viro source_kvm = f.file->private_data;
1817501b580cSPaolo Bonzini ret = sev_lock_two_vms(kvm, source_kvm);
1818b5663931SPeter Gonda if (ret)
1819b5663931SPeter Gonda goto out_fput;
1820b5663931SPeter Gonda
1821501b580cSPaolo Bonzini if (sev_guest(kvm) || !sev_guest(source_kvm)) {
1822b5663931SPeter Gonda ret = -EINVAL;
1823501b580cSPaolo Bonzini goto out_unlock;
1824b5663931SPeter Gonda }
1825b5663931SPeter Gonda
1826b5663931SPeter Gonda src_sev = &to_kvm_svm(source_kvm)->sev_info;
182717d44a96SPaolo Bonzini
1828b5663931SPeter Gonda dst_sev->misc_cg = get_current_misc_cg();
1829501cfe06SPaolo Bonzini cg_cleanup_sev = dst_sev;
1830b5663931SPeter Gonda if (dst_sev->misc_cg != src_sev->misc_cg) {
1831b5663931SPeter Gonda ret = sev_misc_cg_try_charge(dst_sev);
1832b5663931SPeter Gonda if (ret)
1833501cfe06SPaolo Bonzini goto out_dst_cgroup;
1834501cfe06SPaolo Bonzini charged = true;
1835b5663931SPeter Gonda }
1836b5663931SPeter Gonda
18370c2c7c06SPeter Gonda ret = sev_lock_vcpus_for_migration(kvm, SEV_MIGRATION_SOURCE);
1838b5663931SPeter Gonda if (ret)
1839b5663931SPeter Gonda goto out_dst_cgroup;
18400c2c7c06SPeter Gonda ret = sev_lock_vcpus_for_migration(source_kvm, SEV_MIGRATION_TARGET);
1841b5663931SPeter Gonda if (ret)
1842b5663931SPeter Gonda goto out_dst_vcpu;
1843b5663931SPeter Gonda
18446defa24dSPeter Gonda ret = sev_check_source_vcpus(kvm, source_kvm);
18450b020f5aSPeter Gonda if (ret)
18460b020f5aSPeter Gonda goto out_source_vcpu;
1847b2125513SPeter Gonda
1848b2125513SPeter Gonda sev_migrate_from(kvm, source_kvm);
1849b5663931SPeter Gonda kvm_vm_dead(source_kvm);
1850501cfe06SPaolo Bonzini cg_cleanup_sev = src_sev;
1851b5663931SPeter Gonda ret = 0;
1852b5663931SPeter Gonda
18530b020f5aSPeter Gonda out_source_vcpu:
1854b5663931SPeter Gonda sev_unlock_vcpus_for_migration(source_kvm);
1855b5663931SPeter Gonda out_dst_vcpu:
1856b5663931SPeter Gonda sev_unlock_vcpus_for_migration(kvm);
1857b5663931SPeter Gonda out_dst_cgroup:
1858501cfe06SPaolo Bonzini /* Operates on the source on success, on the destination on failure. */
1859501cfe06SPaolo Bonzini if (charged)
1860501cfe06SPaolo Bonzini sev_misc_cg_uncharge(cg_cleanup_sev);
1861501cfe06SPaolo Bonzini put_misc_cg(cg_cleanup_sev->misc_cg);
1862501cfe06SPaolo Bonzini cg_cleanup_sev->misc_cg = NULL;
1863501b580cSPaolo Bonzini out_unlock:
1864501b580cSPaolo Bonzini sev_unlock_two_vms(kvm, source_kvm);
1865b5663931SPeter Gonda out_fput:
1866d2084fd8SAl Viro fdput(f);
1867b5663931SPeter Gonda return ret;
1868b5663931SPeter Gonda }
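/*
 * Illustrative userspace usage (hedged sketch): intra-host migration is
 * triggered by enabling KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM on the destination
 * VM with the source VM's fd as the argument, which KVM is expected to route
 * to this function:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM,
 *		.args[0] = src_vm_fd,
 *	};
 *	ioctl(dst_vm_fd, KVM_ENABLE_CAP, &cap);
 */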
1869b5663931SPeter Gonda
1870559c7c75SSean Christopherson int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
1871eaf78265SJoerg Roedel {
1872eaf78265SJoerg Roedel struct kvm_sev_cmd sev_cmd;
1873eaf78265SJoerg Roedel int r;
1874eaf78265SJoerg Roedel
1875a5c1c5aaSSean Christopherson if (!sev_enabled)
1876eaf78265SJoerg Roedel return -ENOTTY;
1877eaf78265SJoerg Roedel
1878eaf78265SJoerg Roedel if (!argp)
1879eaf78265SJoerg Roedel return 0;
1880eaf78265SJoerg Roedel
1881eaf78265SJoerg Roedel if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
1882eaf78265SJoerg Roedel return -EFAULT;
1883eaf78265SJoerg Roedel
1884eaf78265SJoerg Roedel mutex_lock(&kvm->lock);
1885eaf78265SJoerg Roedel
18865b92b6caSPeter Gonda /* Only the enc_context_owner handles some memory enc operations. */
18875b92b6caSPeter Gonda if (is_mirroring_enc_context(kvm) &&
18888e38e96aSSean Christopherson !is_cmd_allowed_from_mirror(sev_cmd.id)) {
188954526d1fSNathan Tempelman r = -EINVAL;
189054526d1fSNathan Tempelman goto out;
189154526d1fSNathan Tempelman }
189254526d1fSNathan Tempelman
1893eaf78265SJoerg Roedel switch (sev_cmd.id) {
18949fa1521dSSean Christopherson case KVM_SEV_ES_INIT:
18958d364a07SSean Christopherson if (!sev_es_enabled) {
18969fa1521dSSean Christopherson r = -ENOTTY;
18979fa1521dSSean Christopherson goto out;
18989fa1521dSSean Christopherson }
18999fa1521dSSean Christopherson fallthrough;
1900eaf78265SJoerg Roedel case KVM_SEV_INIT:
1901eaf78265SJoerg Roedel r = sev_guest_init(kvm, &sev_cmd);
1902eaf78265SJoerg Roedel break;
1903eaf78265SJoerg Roedel case KVM_SEV_LAUNCH_START:
1904eaf78265SJoerg Roedel r = sev_launch_start(kvm, &sev_cmd);
1905eaf78265SJoerg Roedel break;
1906eaf78265SJoerg Roedel case KVM_SEV_LAUNCH_UPDATE_DATA:
1907eaf78265SJoerg Roedel r = sev_launch_update_data(kvm, &sev_cmd);
1908eaf78265SJoerg Roedel break;
1909ad73109aSTom Lendacky case KVM_SEV_LAUNCH_UPDATE_VMSA:
1910ad73109aSTom Lendacky r = sev_launch_update_vmsa(kvm, &sev_cmd);
1911ad73109aSTom Lendacky break;
1912eaf78265SJoerg Roedel case KVM_SEV_LAUNCH_MEASURE:
1913eaf78265SJoerg Roedel r = sev_launch_measure(kvm, &sev_cmd);
1914eaf78265SJoerg Roedel break;
1915eaf78265SJoerg Roedel case KVM_SEV_LAUNCH_FINISH:
1916eaf78265SJoerg Roedel r = sev_launch_finish(kvm, &sev_cmd);
1917eaf78265SJoerg Roedel break;
1918eaf78265SJoerg Roedel case KVM_SEV_GUEST_STATUS:
1919eaf78265SJoerg Roedel r = sev_guest_status(kvm, &sev_cmd);
1920eaf78265SJoerg Roedel break;
1921eaf78265SJoerg Roedel case KVM_SEV_DBG_DECRYPT:
1922eaf78265SJoerg Roedel r = sev_dbg_crypt(kvm, &sev_cmd, true);
1923eaf78265SJoerg Roedel break;
1924eaf78265SJoerg Roedel case KVM_SEV_DBG_ENCRYPT:
1925eaf78265SJoerg Roedel r = sev_dbg_crypt(kvm, &sev_cmd, false);
1926eaf78265SJoerg Roedel break;
1927eaf78265SJoerg Roedel case KVM_SEV_LAUNCH_SECRET:
1928eaf78265SJoerg Roedel r = sev_launch_secret(kvm, &sev_cmd);
1929eaf78265SJoerg Roedel break;
19302c07ded0SBrijesh Singh case KVM_SEV_GET_ATTESTATION_REPORT:
19312c07ded0SBrijesh Singh r = sev_get_attestation_report(kvm, &sev_cmd);
19322c07ded0SBrijesh Singh break;
19334cfdd47dSBrijesh Singh case KVM_SEV_SEND_START:
19344cfdd47dSBrijesh Singh r = sev_send_start(kvm, &sev_cmd);
19354cfdd47dSBrijesh Singh break;
1936d3d1af85SBrijesh Singh case KVM_SEV_SEND_UPDATE_DATA:
1937d3d1af85SBrijesh Singh r = sev_send_update_data(kvm, &sev_cmd);
1938d3d1af85SBrijesh Singh break;
1939fddecf6aSBrijesh Singh case KVM_SEV_SEND_FINISH:
1940fddecf6aSBrijesh Singh r = sev_send_finish(kvm, &sev_cmd);
1941fddecf6aSBrijesh Singh break;
19425569e2e7SSteve Rutherford case KVM_SEV_SEND_CANCEL:
19435569e2e7SSteve Rutherford r = sev_send_cancel(kvm, &sev_cmd);
19445569e2e7SSteve Rutherford break;
1945af43cbbfSBrijesh Singh case KVM_SEV_RECEIVE_START:
1946af43cbbfSBrijesh Singh r = sev_receive_start(kvm, &sev_cmd);
1947af43cbbfSBrijesh Singh break;
194815fb7de1SBrijesh Singh case KVM_SEV_RECEIVE_UPDATE_DATA:
194915fb7de1SBrijesh Singh r = sev_receive_update_data(kvm, &sev_cmd);
195015fb7de1SBrijesh Singh break;
19516a443defSBrijesh Singh case KVM_SEV_RECEIVE_FINISH:
19526a443defSBrijesh Singh r = sev_receive_finish(kvm, &sev_cmd);
19536a443defSBrijesh Singh break;
1954eaf78265SJoerg Roedel default:
1955eaf78265SJoerg Roedel r = -EINVAL;
1956eaf78265SJoerg Roedel goto out;
1957eaf78265SJoerg Roedel }
1958eaf78265SJoerg Roedel
1959eaf78265SJoerg Roedel if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
1960eaf78265SJoerg Roedel r = -EFAULT;
1961eaf78265SJoerg Roedel
1962eaf78265SJoerg Roedel out:
1963eaf78265SJoerg Roedel mutex_unlock(&kvm->lock);
1964eaf78265SJoerg Roedel return r;
1965eaf78265SJoerg Roedel }
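/*
 * Illustrative sketch (assumptions noted): every KVM_SEV_* command handled
 * above arrives through the KVM_MEMORY_ENCRYPT_OP ioctl on the VM fd,
 * wrapped in a struct kvm_sev_cmd.  A minimal userspace helper might look
 * like the following; sev_ioctl() and sev_dev_fd (an already-open fd for
 * /dev/sev) are hypothetical names:
 *
 *	static int sev_ioctl(int vm_fd, int cmd_id, void *data)
 *	{
 *		struct kvm_sev_cmd cmd = {
 *			.id     = cmd_id,
 *			.data   = (__u64)(uintptr_t)data,
 *			.sev_fd = sev_dev_fd,
 *		};
 *		return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
 *	}
 */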
1966eaf78265SJoerg Roedel
1967559c7c75SSean Christopherson int sev_mem_enc_register_region(struct kvm *kvm,
1968eaf78265SJoerg Roedel struct kvm_enc_region *range)
1969eaf78265SJoerg Roedel {
1970eaf78265SJoerg Roedel struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1971eaf78265SJoerg Roedel struct enc_region *region;
1972eaf78265SJoerg Roedel int ret = 0;
1973eaf78265SJoerg Roedel
1974eaf78265SJoerg Roedel if (!sev_guest(kvm))
1975eaf78265SJoerg Roedel return -ENOTTY;
1976eaf78265SJoerg Roedel
197754526d1fSNathan Tempelman /* If kvm is mirroring an encryption context, it isn't responsible for it */
197854526d1fSNathan Tempelman if (is_mirroring_enc_context(kvm))
197954526d1fSNathan Tempelman return -EINVAL;
198054526d1fSNathan Tempelman
1981eaf78265SJoerg Roedel if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
1982eaf78265SJoerg Roedel return -EINVAL;
1983eaf78265SJoerg Roedel
1984eaf78265SJoerg Roedel region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
1985eaf78265SJoerg Roedel if (!region)
1986eaf78265SJoerg Roedel return -ENOMEM;
1987eaf78265SJoerg Roedel
198819a23da5SPeter Gonda mutex_lock(&kvm->lock);
1989eaf78265SJoerg Roedel region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
1990a8d908b5SPaolo Bonzini if (IS_ERR(region->pages)) {
1991a8d908b5SPaolo Bonzini ret = PTR_ERR(region->pages);
199219a23da5SPeter Gonda mutex_unlock(&kvm->lock);
1993eaf78265SJoerg Roedel goto e_free;
1994eaf78265SJoerg Roedel }
1995eaf78265SJoerg Roedel
199612f8e32aSSean Christopherson /*
199712f8e32aSSean Christopherson * The guest may change the memory encryption attribute from C=0 -> C=1
199812f8e32aSSean Christopherson * or vice versa for this memory range. Make sure caches are
199912f8e32aSSean Christopherson * flushed to ensure that guest data gets written into memory with the
200012f8e32aSSean Christopherson * correct C-bit. Note, this must be done before dropping kvm->lock,
200112f8e32aSSean Christopherson * as region and its array of pages can be freed by a different task
200212f8e32aSSean Christopherson * once kvm->lock is released.
200312f8e32aSSean Christopherson */
200412f8e32aSSean Christopherson sev_clflush_pages(region->pages, region->npages);
200512f8e32aSSean Christopherson
200619a23da5SPeter Gonda region->uaddr = range->addr;
200719a23da5SPeter Gonda region->size = range->size;
200819a23da5SPeter Gonda
200919a23da5SPeter Gonda list_add_tail(&region->list, &sev->regions_list);
201019a23da5SPeter Gonda mutex_unlock(&kvm->lock);
201119a23da5SPeter Gonda
2012eaf78265SJoerg Roedel return ret;
2013eaf78265SJoerg Roedel
2014eaf78265SJoerg Roedel e_free:
2015eaf78265SJoerg Roedel kfree(region);
2016eaf78265SJoerg Roedel return ret;
2017eaf78265SJoerg Roedel }
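/*
 * Illustrative usage (hedged): userspace, e.g. a VMM, registers the memory
 * backing an SEV guest via KVM_MEMORY_ENCRYPT_REG_REGION so the pages are
 * pinned and the caches flushed before encryption.  guest_ram and
 * guest_ram_size are placeholder names:
 *
 *	struct kvm_enc_region range = {
 *		.addr = (__u64)(uintptr_t)guest_ram,
 *		.size = guest_ram_size,
 *	};
 *	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_REG_REGION, &range);
 */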
2018eaf78265SJoerg Roedel
2019eaf78265SJoerg Roedel static struct enc_region *
2020eaf78265SJoerg Roedel find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
2021eaf78265SJoerg Roedel {
2022eaf78265SJoerg Roedel struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
2023eaf78265SJoerg Roedel struct list_head *head = &sev->regions_list;
2024eaf78265SJoerg Roedel struct enc_region *i;
2025eaf78265SJoerg Roedel
2026eaf78265SJoerg Roedel list_for_each_entry(i, head, list) {
2027eaf78265SJoerg Roedel if (i->uaddr == range->addr &&
2028eaf78265SJoerg Roedel i->size == range->size)
2029eaf78265SJoerg Roedel return i;
2030eaf78265SJoerg Roedel }
2031eaf78265SJoerg Roedel
2032eaf78265SJoerg Roedel return NULL;
2033eaf78265SJoerg Roedel }
2034eaf78265SJoerg Roedel
2035eaf78265SJoerg Roedel static void __unregister_enc_region_locked(struct kvm *kvm,
2036eaf78265SJoerg Roedel struct enc_region *region)
2037eaf78265SJoerg Roedel {
2038eaf78265SJoerg Roedel sev_unpin_memory(kvm, region->pages, region->npages);
2039eaf78265SJoerg Roedel list_del(&region->list);
2040eaf78265SJoerg Roedel kfree(region);
2041eaf78265SJoerg Roedel }
2042eaf78265SJoerg Roedel
2043559c7c75SSean Christopherson int sev_mem_enc_unregister_region(struct kvm *kvm,
2044eaf78265SJoerg Roedel struct kvm_enc_region *range)
2045eaf78265SJoerg Roedel {
2046eaf78265SJoerg Roedel struct enc_region *region;
2047eaf78265SJoerg Roedel int ret;
2048eaf78265SJoerg Roedel
204954526d1fSNathan Tempelman /* If kvm is mirroring an encryption context, it isn't responsible for it */
205054526d1fSNathan Tempelman if (is_mirroring_enc_context(kvm))
205154526d1fSNathan Tempelman return -EINVAL;
205254526d1fSNathan Tempelman
2053eaf78265SJoerg Roedel mutex_lock(&kvm->lock);
2054eaf78265SJoerg Roedel
2055eaf78265SJoerg Roedel if (!sev_guest(kvm)) {
2056eaf78265SJoerg Roedel ret = -ENOTTY;
2057eaf78265SJoerg Roedel goto failed;
2058eaf78265SJoerg Roedel }
2059eaf78265SJoerg Roedel
2060eaf78265SJoerg Roedel region = find_enc_region(kvm, range);
2061eaf78265SJoerg Roedel if (!region) {
2062eaf78265SJoerg Roedel ret = -EINVAL;
2063eaf78265SJoerg Roedel goto failed;
2064eaf78265SJoerg Roedel }
2065eaf78265SJoerg Roedel
2066eaf78265SJoerg Roedel /*
2067eaf78265SJoerg Roedel * Ensure that all guest tagged cache entries are flushed before
2068eaf78265SJoerg Roedel * releasing the pages back to the system for use. CLFLUSH will
2069eaf78265SJoerg Roedel * not do this, so issue a WBINVD.
2070eaf78265SJoerg Roedel */
2071eaf78265SJoerg Roedel wbinvd_on_all_cpus();
2072eaf78265SJoerg Roedel
2073eaf78265SJoerg Roedel __unregister_enc_region_locked(kvm, region);
2074eaf78265SJoerg Roedel
2075eaf78265SJoerg Roedel mutex_unlock(&kvm->lock);
2076eaf78265SJoerg Roedel return 0;
2077eaf78265SJoerg Roedel
2078eaf78265SJoerg Roedel failed:
2079eaf78265SJoerg Roedel mutex_unlock(&kvm->lock);
2080eaf78265SJoerg Roedel return ret;
2081eaf78265SJoerg Roedel }
2082eaf78265SJoerg Roedel
2083559c7c75SSean Christopherson int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd)
208454526d1fSNathan Tempelman {
2085d2084fd8SAl Viro struct fd f = fdget(source_fd);
208654526d1fSNathan Tempelman struct kvm *source_kvm;
2087bf42b02bSPaolo Bonzini struct kvm_sev_info *source_sev, *mirror_sev;
208854526d1fSNathan Tempelman int ret;
208954526d1fSNathan Tempelman
2090d2084fd8SAl Viro if (!f.file)
2091d2084fd8SAl Viro return -EBADF;
2092d2084fd8SAl Viro
2093d2084fd8SAl Viro if (!file_is_kvm(f.file)) {
209454526d1fSNathan Tempelman ret = -EBADF;
2095bf42b02bSPaolo Bonzini goto e_source_fput;
209654526d1fSNathan Tempelman }
209754526d1fSNathan Tempelman
2098d2084fd8SAl Viro source_kvm = f.file->private_data;
2099bf42b02bSPaolo Bonzini ret = sev_lock_two_vms(kvm, source_kvm);
2100bf42b02bSPaolo Bonzini if (ret)
2101bf42b02bSPaolo Bonzini goto e_source_fput;
210254526d1fSNathan Tempelman
2103bf42b02bSPaolo Bonzini /*
2104bf42b02bSPaolo Bonzini * Mirrors of mirrors should work, but let's not get silly. Also
2105bf42b02bSPaolo Bonzini * disallow out-of-band SEV/SEV-ES init if the target is already an
2106bf42b02bSPaolo Bonzini * SEV guest, or if vCPUs have been created. KVM relies on vCPUs being
2107bf42b02bSPaolo Bonzini * created after SEV/SEV-ES initialization, e.g. to init intercepts.
2108bf42b02bSPaolo Bonzini */
2109bf42b02bSPaolo Bonzini if (sev_guest(kvm) || !sev_guest(source_kvm) ||
2110bf42b02bSPaolo Bonzini is_mirroring_enc_context(source_kvm) || kvm->created_vcpus) {
211154526d1fSNathan Tempelman ret = -EINVAL;
2112bf42b02bSPaolo Bonzini goto e_unlock;
211354526d1fSNathan Tempelman }
211454526d1fSNathan Tempelman
211554526d1fSNathan Tempelman /*
211654526d1fSNathan Tempelman * The mirror kvm holds an enc_context_owner ref so its asid can't
211754526d1fSNathan Tempelman * disappear until we're done with it
211854526d1fSNathan Tempelman */
2119bf42b02bSPaolo Bonzini source_sev = &to_kvm_svm(source_kvm)->sev_info;
212054526d1fSNathan Tempelman kvm_get_kvm(source_kvm);
2121b2125513SPeter Gonda mirror_sev = &to_kvm_svm(kvm)->sev_info;
2122b2125513SPeter Gonda list_add_tail(&mirror_sev->mirror_entry, &source_sev->mirror_vms);
212354526d1fSNathan Tempelman
212454526d1fSNathan Tempelman /* Set enc_context_owner and copy its encryption context over */
212554526d1fSNathan Tempelman mirror_sev->enc_context_owner = source_kvm;
212654526d1fSNathan Tempelman mirror_sev->active = true;
2127bf42b02bSPaolo Bonzini mirror_sev->asid = source_sev->asid;
2128bf42b02bSPaolo Bonzini mirror_sev->fd = source_sev->fd;
2129bf42b02bSPaolo Bonzini mirror_sev->es_active = source_sev->es_active;
2130bf42b02bSPaolo Bonzini mirror_sev->handle = source_sev->handle;
21312b347a38SPaolo Bonzini INIT_LIST_HEAD(&mirror_sev->regions_list);
2132b2125513SPeter Gonda INIT_LIST_HEAD(&mirror_sev->mirror_vms);
2133bf42b02bSPaolo Bonzini ret = 0;
2134bf42b02bSPaolo Bonzini
2135f43c887cSPeter Gonda /*
2136f43c887cSPeter Gonda * Do not copy ap_jump_table, since the mirror does not share the same
2137f43c887cSPeter Gonda * KVM contexts as the original and they may have different
2138f43c887cSPeter Gonda * memory-views.
2139f43c887cSPeter Gonda */
214054526d1fSNathan Tempelman
2141bf42b02bSPaolo Bonzini e_unlock:
2142bf42b02bSPaolo Bonzini sev_unlock_two_vms(kvm, source_kvm);
2143bf42b02bSPaolo Bonzini e_source_fput:
2144d2084fd8SAl Viro fdput(f);
214554526d1fSNathan Tempelman return ret;
214654526d1fSNathan Tempelman }
214754526d1fSNathan Tempelman
2148eaf78265SJoerg Roedel void sev_vm_destroy(struct kvm *kvm)
2149eaf78265SJoerg Roedel {
2150eaf78265SJoerg Roedel struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
2151eaf78265SJoerg Roedel struct list_head *head = &sev->regions_list;
2152eaf78265SJoerg Roedel struct list_head *pos, *q;
2153eaf78265SJoerg Roedel
2154eaf78265SJoerg Roedel if (!sev_guest(kvm))
2155eaf78265SJoerg Roedel return;
2156eaf78265SJoerg Roedel
2157b2125513SPeter Gonda WARN_ON(!list_empty(&sev->mirror_vms));
2158b2125513SPeter Gonda
215954526d1fSNathan Tempelman /* If this is a mirror VM, release the enc_context_owner and skip SEV cleanup */
216054526d1fSNathan Tempelman if (is_mirroring_enc_context(kvm)) {
216117d44a96SPaolo Bonzini struct kvm *owner_kvm = sev->enc_context_owner;
216217d44a96SPaolo Bonzini
216317d44a96SPaolo Bonzini mutex_lock(&owner_kvm->lock);
2164b2125513SPeter Gonda list_del(&sev->mirror_entry);
216517d44a96SPaolo Bonzini mutex_unlock(&owner_kvm->lock);
216617d44a96SPaolo Bonzini kvm_put_kvm(owner_kvm);
216754526d1fSNathan Tempelman return;
216854526d1fSNathan Tempelman }
216954526d1fSNathan Tempelman
2170eaf78265SJoerg Roedel /*
2171eaf78265SJoerg Roedel * Ensure that all guest tagged cache entries are flushed before
2172eaf78265SJoerg Roedel * releasing the pages back to the system for use. CLFLUSH will
2173eaf78265SJoerg Roedel * not do this, so issue a WBINVD.
2174eaf78265SJoerg Roedel */
2175eaf78265SJoerg Roedel wbinvd_on_all_cpus();
2176eaf78265SJoerg Roedel
2177eaf78265SJoerg Roedel /*
2178eaf78265SJoerg Roedel * If userspace was terminated before unregistering the memory regions,
2179eaf78265SJoerg Roedel * unpin all the registered memory.
2180eaf78265SJoerg Roedel */
2181eaf78265SJoerg Roedel if (!list_empty(head)) {
2182eaf78265SJoerg Roedel list_for_each_safe(pos, q, head) {
2183eaf78265SJoerg Roedel __unregister_enc_region_locked(kvm,
2184eaf78265SJoerg Roedel list_entry(pos, struct enc_region, list));
21857be74942SDavid Rientjes cond_resched();
2186eaf78265SJoerg Roedel }
2187eaf78265SJoerg Roedel }
2188eaf78265SJoerg Roedel
2189eaf78265SJoerg Roedel sev_unbind_asid(kvm, sev->handle);
21907aef27f0SVipin Sharma sev_asid_free(sev);
2191eaf78265SJoerg Roedel }
2192eaf78265SJoerg Roedel
2193d9db0fd6SPaolo Bonzini void __init sev_set_cpu_caps(void)
2194d9db0fd6SPaolo Bonzini {
21958d364a07SSean Christopherson if (!sev_enabled)
2196d9db0fd6SPaolo Bonzini kvm_cpu_cap_clear(X86_FEATURE_SEV);
21978d364a07SSean Christopherson if (!sev_es_enabled)
2198d9db0fd6SPaolo Bonzini kvm_cpu_cap_clear(X86_FEATURE_SEV_ES);
2199d9db0fd6SPaolo Bonzini }
2200d9db0fd6SPaolo Bonzini
2201916391a2STom Lendacky void __init sev_hardware_setup(void)
2202eaf78265SJoerg Roedel {
2203a479c334SSean Christopherson #ifdef CONFIG_KVM_AMD_SEV
22047aef27f0SVipin Sharma unsigned int eax, ebx, ecx, edx, sev_asid_count, sev_es_asid_count;
2205916391a2STom Lendacky bool sev_es_supported = false;
2206916391a2STom Lendacky bool sev_supported = false;
2207916391a2STom Lendacky
220880d0f521SSean Christopherson if (!sev_enabled || !npt_enabled || !nrips)
2209e8126bdaSSean Christopherson goto out;
2210e8126bdaSSean Christopherson
2211c532f290SSean Christopherson /*
2212c532f290SSean Christopherson * SEV must obviously be supported in hardware. Sanity check that the
2213c532f290SSean Christopherson * CPU supports decode assists, which is mandatory for SEV guests to
2214c532f290SSean Christopherson * support instruction emulation.
2215c532f290SSean Christopherson */
2216c532f290SSean Christopherson if (!boot_cpu_has(X86_FEATURE_SEV) ||
2217c532f290SSean Christopherson WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_DECODEASSISTS)))
2218916391a2STom Lendacky goto out;
2219916391a2STom Lendacky
2220916391a2STom Lendacky /* Retrieve SEV CPUID information */
2221916391a2STom Lendacky cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);
2222916391a2STom Lendacky
22231edc1459STom Lendacky /* Set encryption bit location for SEV-ES guests */
22241edc1459STom Lendacky sev_enc_bit = ebx & 0x3f;
22251edc1459STom Lendacky
2226eaf78265SJoerg Roedel /* Maximum number of encrypted guests supported simultaneously */
2227916391a2STom Lendacky max_sev_asid = ecx;
22288cb756b7SSean Christopherson if (!max_sev_asid)
2229916391a2STom Lendacky goto out;
2230eaf78265SJoerg Roedel
2231eaf78265SJoerg Roedel /* Minimum ASID value that should be used for an SEV guest */
2232916391a2STom Lendacky min_sev_asid = edx;
2233d3d1af85SBrijesh Singh sev_me_mask = 1UL << (ebx & 0x3f);
2234eaf78265SJoerg Roedel
2235bb2baeb2SMingwei Zhang /*
2236bb2baeb2SMingwei Zhang * Initialize SEV ASID bitmaps. Allocate space for ASID 0 in the bitmap,
2237bb2baeb2SMingwei Zhang * even though it's never used, so that the bitmap is indexed by the
2238bb2baeb2SMingwei Zhang * actual ASID.
2239bb2baeb2SMingwei Zhang */
2240bb2baeb2SMingwei Zhang nr_asids = max_sev_asid + 1;
2241bb2baeb2SMingwei Zhang sev_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
2242eaf78265SJoerg Roedel if (!sev_asid_bitmap)
2243916391a2STom Lendacky goto out;
2244eaf78265SJoerg Roedel
2245bb2baeb2SMingwei Zhang sev_reclaim_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
2246f31b88b3SSean Christopherson if (!sev_reclaim_asid_bitmap) {
2247f31b88b3SSean Christopherson bitmap_free(sev_asid_bitmap);
2248f31b88b3SSean Christopherson sev_asid_bitmap = NULL;
2249916391a2STom Lendacky goto out;
2250f31b88b3SSean Christopherson }
2251eaf78265SJoerg Roedel
2252ab7a6fe9SAshish Kalra if (min_sev_asid <= max_sev_asid) {
22537aef27f0SVipin Sharma sev_asid_count = max_sev_asid - min_sev_asid + 1;
2254106ed2caSSean Christopherson WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count));
2255ab7a6fe9SAshish Kalra }
2256916391a2STom Lendacky sev_supported = true;
2257eaf78265SJoerg Roedel
2258916391a2STom Lendacky /* SEV-ES support requested? */
22598d364a07SSean Christopherson if (!sev_es_enabled)
2260916391a2STom Lendacky goto out;
2261916391a2STom Lendacky
22620c29397aSSean Christopherson /*
22630c29397aSSean Christopherson * SEV-ES requires MMIO caching as KVM doesn't have access to the guest
22640c29397aSSean Christopherson * instruction stream, i.e. can't emulate in response to a #NPF and
22650c29397aSSean Christopherson * instead relies on #NPF(RSVD) being reflected into the guest as #VC
22660c29397aSSean Christopherson * (the guest can then do a #VMGEXIT to request MMIO emulation).
22670c29397aSSean Christopherson */
22680c29397aSSean Christopherson if (!enable_mmio_caching)
22690c29397aSSean Christopherson goto out;
22700c29397aSSean Christopherson
2271916391a2STom Lendacky /* Does the CPU support SEV-ES? */
2272916391a2STom Lendacky if (!boot_cpu_has(X86_FEATURE_SEV_ES))
2273916391a2STom Lendacky goto out;
2274916391a2STom Lendacky
22752128bae4SRavi Bangoria if (!lbrv) {
22762128bae4SRavi Bangoria WARN_ONCE(!boot_cpu_has(X86_FEATURE_LBRV),
22772128bae4SRavi Bangoria "LBRV must be present for SEV-ES support");
22782128bae4SRavi Bangoria goto out;
22792128bae4SRavi Bangoria }
22802128bae4SRavi Bangoria
2281916391a2STom Lendacky /* Has the system been allocated ASIDs for SEV-ES? */
2282916391a2STom Lendacky if (min_sev_asid == 1)
2283916391a2STom Lendacky goto out;
2284916391a2STom Lendacky
22857aef27f0SVipin Sharma sev_es_asid_count = min_sev_asid - 1;
2286106ed2caSSean Christopherson WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count));
2287916391a2STom Lendacky sev_es_supported = true;
2288916391a2STom Lendacky
2289916391a2STom Lendacky out:
22906d1bc975SAlexander Mikhalitsyn if (boot_cpu_has(X86_FEATURE_SEV))
22916d1bc975SAlexander Mikhalitsyn pr_info("SEV %s (ASIDs %u - %u)\n",
2292ab7a6fe9SAshish Kalra sev_supported ? min_sev_asid <= max_sev_asid ? "enabled" :
2293ab7a6fe9SAshish Kalra "unusable" :
2294ab7a6fe9SAshish Kalra "disabled",
22956d1bc975SAlexander Mikhalitsyn min_sev_asid, max_sev_asid);
22966d1bc975SAlexander Mikhalitsyn if (boot_cpu_has(X86_FEATURE_SEV_ES))
22976d1bc975SAlexander Mikhalitsyn pr_info("SEV-ES %s (ASIDs %u - %u)\n",
22986d1bc975SAlexander Mikhalitsyn sev_es_supported ? "enabled" : "disabled",
22996d1bc975SAlexander Mikhalitsyn min_sev_asid > 1 ? 1 : 0, min_sev_asid - 1);
23006d1bc975SAlexander Mikhalitsyn
23018d364a07SSean Christopherson sev_enabled = sev_supported;
23028d364a07SSean Christopherson sev_es_enabled = sev_es_supported;
2303d1f85fbeSAlexey Kardashevskiy if (!sev_es_enabled || !cpu_feature_enabled(X86_FEATURE_DEBUG_SWAP) ||
2304d1f85fbeSAlexey Kardashevskiy !cpu_feature_enabled(X86_FEATURE_NO_NESTED_DATA_BP))
2305d1f85fbeSAlexey Kardashevskiy sev_es_debug_swap_enabled = false;
2306a479c334SSean Christopherson #endif
2307eaf78265SJoerg Roedel }
2308eaf78265SJoerg Roedel
230923e5092bSSean Christopherson void sev_hardware_unsetup(void)
2310eaf78265SJoerg Roedel {
2311a5c1c5aaSSean Christopherson if (!sev_enabled)
23129ef1530cSPaolo Bonzini return;
23139ef1530cSPaolo Bonzini
2314469bb32bSSean Christopherson /* No need to take sev_bitmap_lock, all VMs have been destroyed. */
2315bb2baeb2SMingwei Zhang sev_flush_asids(1, max_sev_asid);
2316469bb32bSSean Christopherson
2317eaf78265SJoerg Roedel bitmap_free(sev_asid_bitmap);
2318eaf78265SJoerg Roedel bitmap_free(sev_reclaim_asid_bitmap);
2319469bb32bSSean Christopherson
23207aef27f0SVipin Sharma misc_cg_set_capacity(MISC_CG_RES_SEV, 0);
23217aef27f0SVipin Sharma misc_cg_set_capacity(MISC_CG_RES_SEV_ES, 0);
2322eaf78265SJoerg Roedel }
2323eaf78265SJoerg Roedel
2324b95c221cSSean Christopherson int sev_cpu_init(struct svm_cpu_data *sd)
2325b95c221cSSean Christopherson {
2326a5c1c5aaSSean Christopherson if (!sev_enabled)
2327b95c221cSSean Christopherson return 0;
2328b95c221cSSean Christopherson
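	/*
	 * sev_vmcbs is a per-ASID array recording the last VMCB run with each
	 * SEV ASID on this CPU; pre_sev_run() consults it to decide when the
	 * guest TLB must be flushed.
	 */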
2329bb2baeb2SMingwei Zhang sd->sev_vmcbs = kcalloc(nr_asids, sizeof(void *), GFP_KERNEL);
2330b95c221cSSean Christopherson if (!sd->sev_vmcbs)
2331b95c221cSSean Christopherson return -ENOMEM;
2332b95c221cSSean Christopherson
2333b95c221cSSean Christopherson return 0;
2334eaf78265SJoerg Roedel }
2335eaf78265SJoerg Roedel
2336add5e2f0STom Lendacky /*
2337add5e2f0STom Lendacky * Pages used by hardware to hold guest encrypted state must be flushed before
2338add5e2f0STom Lendacky * returning them to the system.
2339add5e2f0STom Lendacky */
23404bbef7e8SSean Christopherson static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
2341add5e2f0STom Lendacky {
234279b79ea2SSean Christopherson unsigned int asid = sev_get_asid(vcpu->kvm);
23434bbef7e8SSean Christopherson
23444bbef7e8SSean Christopherson /*
23454bbef7e8SSean Christopherson * Note! The address must be a kernel address, as regular page walk
23464bbef7e8SSean Christopherson * checks are performed by VM_PAGE_FLUSH, i.e. operating on a user
23474bbef7e8SSean Christopherson * address is non-deterministic and unsafe. This function deliberately
23484bbef7e8SSean Christopherson * takes a pointer to deter passing in a user address.
23494bbef7e8SSean Christopherson */
23504bbef7e8SSean Christopherson unsigned long addr = (unsigned long)va;
23514bbef7e8SSean Christopherson
2352add5e2f0STom Lendacky /*
2353d45829b3SMingwei Zhang * If the CPU enforces cache coherency for encrypted mappings of the
2354d45829b3SMingwei Zhang * same physical page, use CLFLUSHOPT instead. NOTE: a cache flush is
2355d45829b3SMingwei Zhang * still needed in order to work properly with DMA devices.
2356add5e2f0STom Lendacky */
2357d45829b3SMingwei Zhang if (boot_cpu_has(X86_FEATURE_SME_COHERENT)) {
2358d45829b3SMingwei Zhang clflush_cache_range(va, PAGE_SIZE);
2359add5e2f0STom Lendacky return;
2360d45829b3SMingwei Zhang }
2361add5e2f0STom Lendacky
2362add5e2f0STom Lendacky /*
23634bbef7e8SSean Christopherson * VM Page Flush takes a host virtual address and a guest ASID. Fall
23644bbef7e8SSean Christopherson * back to WBINVD if this faults so as not to make any problems worse
23654bbef7e8SSean Christopherson * by leaving stale encrypted data in the cache.
2366add5e2f0STom Lendacky */
23674bbef7e8SSean Christopherson if (WARN_ON_ONCE(wrmsrl_safe(MSR_AMD64_VM_PAGE_FLUSH, addr | asid)))
23684bbef7e8SSean Christopherson goto do_wbinvd;
2369add5e2f0STom Lendacky
2370add5e2f0STom Lendacky return;
2371add5e2f0STom Lendacky
23724bbef7e8SSean Christopherson do_wbinvd:
2373add5e2f0STom Lendacky wbinvd_on_all_cpus();
2374add5e2f0STom Lendacky }
2375add5e2f0STom Lendacky
2376683412ccSMingwei Zhang void sev_guest_memory_reclaimed(struct kvm *kvm)
2377683412ccSMingwei Zhang {
2378683412ccSMingwei Zhang if (!sev_guest(kvm))
2379683412ccSMingwei Zhang return;
2380683412ccSMingwei Zhang
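	/*
	 * As in sev_vm_destroy(), guest-tagged cache entries must be flushed
	 * before the reclaimed pages are reused; CLFLUSH will not do this, so
	 * issue a WBINVD on all CPUs.
	 */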
2381683412ccSMingwei Zhang wbinvd_on_all_cpus();
2382683412ccSMingwei Zhang }
2383683412ccSMingwei Zhang
2384add5e2f0STom Lendacky void sev_free_vcpu(struct kvm_vcpu *vcpu)
2385add5e2f0STom Lendacky {
2386add5e2f0STom Lendacky struct vcpu_svm *svm;
2387add5e2f0STom Lendacky
2388add5e2f0STom Lendacky if (!sev_es_guest(vcpu->kvm))
2389add5e2f0STom Lendacky return;
2390add5e2f0STom Lendacky
2391add5e2f0STom Lendacky svm = to_svm(vcpu);
2392add5e2f0STom Lendacky
2393add5e2f0STom Lendacky if (vcpu->arch.guest_state_protected)
23944bbef7e8SSean Christopherson sev_flush_encrypted_page(vcpu, svm->sev_es.vmsa);
23954bbef7e8SSean Christopherson
2396b67a4cc3SPeter Gonda __free_page(virt_to_page(svm->sev_es.vmsa));
23978f423a80STom Lendacky
2398b67a4cc3SPeter Gonda if (svm->sev_es.ghcb_sa_free)
2399a655276aSSean Christopherson kvfree(svm->sev_es.ghcb_sa);
2400add5e2f0STom Lendacky }
2401add5e2f0STom Lendacky
2402291bd20dSTom Lendacky static void dump_ghcb(struct vcpu_svm *svm)
2403291bd20dSTom Lendacky {
2404b67a4cc3SPeter Gonda struct ghcb *ghcb = svm->sev_es.ghcb;
2405291bd20dSTom Lendacky unsigned int nbits;
2406291bd20dSTom Lendacky
2407291bd20dSTom Lendacky /* Re-use the dump_invalid_vmcb module parameter */
2408291bd20dSTom Lendacky if (!dump_invalid_vmcb) {
2409291bd20dSTom Lendacky pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
2410291bd20dSTom Lendacky return;
2411291bd20dSTom Lendacky }
2412291bd20dSTom Lendacky
2413291bd20dSTom Lendacky nbits = sizeof(ghcb->save.valid_bitmap) * 8;
2414291bd20dSTom Lendacky
2415291bd20dSTom Lendacky pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
2416291bd20dSTom Lendacky pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
2417291bd20dSTom Lendacky ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb));
2418291bd20dSTom Lendacky pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
2419291bd20dSTom Lendacky ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb));
2420291bd20dSTom Lendacky pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
2421291bd20dSTom Lendacky ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb));
2422291bd20dSTom Lendacky pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
2423291bd20dSTom Lendacky ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb));
2424291bd20dSTom Lendacky pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);
2425291bd20dSTom Lendacky }
2426291bd20dSTom Lendacky
2427291bd20dSTom Lendacky static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
2428291bd20dSTom Lendacky {
2429291bd20dSTom Lendacky struct kvm_vcpu *vcpu = &svm->vcpu;
2430b67a4cc3SPeter Gonda struct ghcb *ghcb = svm->sev_es.ghcb;
2431291bd20dSTom Lendacky
2432291bd20dSTom Lendacky /*
2433291bd20dSTom Lendacky * The GHCB protocol so far allows for the following data
2434291bd20dSTom Lendacky * to be returned:
2435291bd20dSTom Lendacky * GPRs RAX, RBX, RCX, RDX
2436291bd20dSTom Lendacky *
243725009140SSean Christopherson * Copy their values, even if they may not have been written during the
243825009140SSean Christopherson * VM-Exit. It's the guest's responsibility to not consume random data.
2439291bd20dSTom Lendacky */
2440291bd20dSTom Lendacky ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
2441291bd20dSTom Lendacky ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
2442291bd20dSTom Lendacky ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
2443291bd20dSTom Lendacky ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
2444291bd20dSTom Lendacky }
2445291bd20dSTom Lendacky
2446291bd20dSTom Lendacky static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
2447291bd20dSTom Lendacky {
2448291bd20dSTom Lendacky struct vmcb_control_area *control = &svm->vmcb->control;
2449291bd20dSTom Lendacky struct kvm_vcpu *vcpu = &svm->vcpu;
2450b67a4cc3SPeter Gonda struct ghcb *ghcb = svm->sev_es.ghcb;
2451291bd20dSTom Lendacky u64 exit_code;
2452291bd20dSTom Lendacky
2453291bd20dSTom Lendacky /*
2454291bd20dSTom Lendacky * The GHCB protocol so far allows for the following data
2455291bd20dSTom Lendacky * to be supplied:
2456291bd20dSTom Lendacky * GPRs RAX, RBX, RCX, RDX
2457291bd20dSTom Lendacky * XCR0
2458291bd20dSTom Lendacky * CPL
2459291bd20dSTom Lendacky *
2460291bd20dSTom Lendacky * VMMCALL allows the guest to provide extra registers. KVM also
2461291bd20dSTom Lendacky * expects RSI for hypercalls, so include that, too.
2462291bd20dSTom Lendacky *
2463291bd20dSTom Lendacky * Copy their values to the appropriate location if supplied.
2464291bd20dSTom Lendacky */
2465291bd20dSTom Lendacky memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
2466291bd20dSTom Lendacky
24674e15a0ddSPaolo Bonzini BUILD_BUG_ON(sizeof(svm->sev_es.valid_bitmap) != sizeof(ghcb->save.valid_bitmap));
24684e15a0ddSPaolo Bonzini memcpy(&svm->sev_es.valid_bitmap, &ghcb->save.valid_bitmap, sizeof(ghcb->save.valid_bitmap));
2469291bd20dSTom Lendacky
24704e15a0ddSPaolo Bonzini vcpu->arch.regs[VCPU_REGS_RAX] = kvm_ghcb_get_rax_if_valid(svm, ghcb);
24714e15a0ddSPaolo Bonzini vcpu->arch.regs[VCPU_REGS_RBX] = kvm_ghcb_get_rbx_if_valid(svm, ghcb);
24724e15a0ddSPaolo Bonzini vcpu->arch.regs[VCPU_REGS_RCX] = kvm_ghcb_get_rcx_if_valid(svm, ghcb);
24734e15a0ddSPaolo Bonzini vcpu->arch.regs[VCPU_REGS_RDX] = kvm_ghcb_get_rdx_if_valid(svm, ghcb);
24744e15a0ddSPaolo Bonzini vcpu->arch.regs[VCPU_REGS_RSI] = kvm_ghcb_get_rsi_if_valid(svm, ghcb);
2475291bd20dSTom Lendacky
24764e15a0ddSPaolo Bonzini svm->vmcb->save.cpl = kvm_ghcb_get_cpl_if_valid(svm, ghcb);
24774e15a0ddSPaolo Bonzini
24784e15a0ddSPaolo Bonzini if (kvm_ghcb_xcr0_is_valid(svm)) {
2479291bd20dSTom Lendacky vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
2480291bd20dSTom Lendacky kvm_update_cpuid_runtime(vcpu);
2481291bd20dSTom Lendacky }
2482291bd20dSTom Lendacky
2483291bd20dSTom Lendacky /* Copy the GHCB exit information into the VMCB fields */
2484291bd20dSTom Lendacky exit_code = ghcb_get_sw_exit_code(ghcb);
2485291bd20dSTom Lendacky control->exit_code = lower_32_bits(exit_code);
2486291bd20dSTom Lendacky control->exit_code_hi = upper_32_bits(exit_code);
2487291bd20dSTom Lendacky control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
2488291bd20dSTom Lendacky control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);
24894e15a0ddSPaolo Bonzini svm->sev_es.sw_scratch = kvm_ghcb_get_sw_scratch_if_valid(svm, ghcb);
2490291bd20dSTom Lendacky
2491291bd20dSTom Lendacky /* Clear the valid entries fields */
2492291bd20dSTom Lendacky memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
2493291bd20dSTom Lendacky }
2494291bd20dSTom Lendacky
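/*
 * Reassemble the 64-bit GHCB exit code from the split exit_code_hi and
 * exit_code fields of the VMCB control area.
 */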
24957588dbceSPaolo Bonzini static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control)
24967588dbceSPaolo Bonzini {
24977588dbceSPaolo Bonzini return (((u64)control->exit_code_hi) << 32) | control->exit_code;
24987588dbceSPaolo Bonzini }
24997588dbceSPaolo Bonzini
2500aa9f5841SSean Christopherson static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
2501291bd20dSTom Lendacky {
25027588dbceSPaolo Bonzini struct vmcb_control_area *control = &svm->vmcb->control;
25037588dbceSPaolo Bonzini struct kvm_vcpu *vcpu = &svm->vcpu;
2504ad5b3532STom Lendacky u64 exit_code;
2505ad5b3532STom Lendacky u64 reason;
2506291bd20dSTom Lendacky
2507291bd20dSTom Lendacky /*
2508ad5b3532STom Lendacky * Retrieve the exit code now even though it may not be marked valid
2509291bd20dSTom Lendacky * as it could help with debugging.
2510291bd20dSTom Lendacky */
25117588dbceSPaolo Bonzini exit_code = kvm_ghcb_get_sw_exit_code(control);
2512291bd20dSTom Lendacky
2513ad5b3532STom Lendacky /* Only GHCB Usage code 0 is supported */
251463dbc67cSPaolo Bonzini if (svm->sev_es.ghcb->ghcb_usage) {
2515ad5b3532STom Lendacky reason = GHCB_ERR_INVALID_USAGE;
2516ad5b3532STom Lendacky goto vmgexit_err;
2517ad5b3532STom Lendacky }
2518ad5b3532STom Lendacky
2519ad5b3532STom Lendacky reason = GHCB_ERR_MISSING_INPUT;
2520ad5b3532STom Lendacky
25214e15a0ddSPaolo Bonzini if (!kvm_ghcb_sw_exit_code_is_valid(svm) ||
25224e15a0ddSPaolo Bonzini !kvm_ghcb_sw_exit_info_1_is_valid(svm) ||
25234e15a0ddSPaolo Bonzini !kvm_ghcb_sw_exit_info_2_is_valid(svm))
2524291bd20dSTom Lendacky goto vmgexit_err;
2525291bd20dSTom Lendacky
25267588dbceSPaolo Bonzini switch (exit_code) {
2527291bd20dSTom Lendacky case SVM_EXIT_READ_DR7:
2528291bd20dSTom Lendacky break;
2529291bd20dSTom Lendacky case SVM_EXIT_WRITE_DR7:
25304e15a0ddSPaolo Bonzini if (!kvm_ghcb_rax_is_valid(svm))
2531291bd20dSTom Lendacky goto vmgexit_err;
2532291bd20dSTom Lendacky break;
2533291bd20dSTom Lendacky case SVM_EXIT_RDTSC:
2534291bd20dSTom Lendacky break;
2535291bd20dSTom Lendacky case SVM_EXIT_RDPMC:
25364e15a0ddSPaolo Bonzini if (!kvm_ghcb_rcx_is_valid(svm))
2537291bd20dSTom Lendacky goto vmgexit_err;
2538291bd20dSTom Lendacky break;
2539291bd20dSTom Lendacky case SVM_EXIT_CPUID:
25404e15a0ddSPaolo Bonzini if (!kvm_ghcb_rax_is_valid(svm) ||
25414e15a0ddSPaolo Bonzini !kvm_ghcb_rcx_is_valid(svm))
2542291bd20dSTom Lendacky goto vmgexit_err;
25437588dbceSPaolo Bonzini if (vcpu->arch.regs[VCPU_REGS_RAX] == 0xd)
25444e15a0ddSPaolo Bonzini if (!kvm_ghcb_xcr0_is_valid(svm))
2545291bd20dSTom Lendacky goto vmgexit_err;
2546291bd20dSTom Lendacky break;
2547291bd20dSTom Lendacky case SVM_EXIT_INVD:
2548291bd20dSTom Lendacky break;
2549291bd20dSTom Lendacky case SVM_EXIT_IOIO:
25507588dbceSPaolo Bonzini if (control->exit_info_1 & SVM_IOIO_STR_MASK) {
25514e15a0ddSPaolo Bonzini if (!kvm_ghcb_sw_scratch_is_valid(svm))
25527ed9abfeSTom Lendacky goto vmgexit_err;
25537ed9abfeSTom Lendacky } else {
25547588dbceSPaolo Bonzini if (!(control->exit_info_1 & SVM_IOIO_TYPE_MASK))
25554e15a0ddSPaolo Bonzini if (!kvm_ghcb_rax_is_valid(svm))
2556291bd20dSTom Lendacky goto vmgexit_err;
25577ed9abfeSTom Lendacky }
2558291bd20dSTom Lendacky break;
2559291bd20dSTom Lendacky case SVM_EXIT_MSR:
25604e15a0ddSPaolo Bonzini if (!kvm_ghcb_rcx_is_valid(svm))
2561291bd20dSTom Lendacky goto vmgexit_err;
25627588dbceSPaolo Bonzini if (control->exit_info_1) {
25634e15a0ddSPaolo Bonzini if (!kvm_ghcb_rax_is_valid(svm) ||
25644e15a0ddSPaolo Bonzini !kvm_ghcb_rdx_is_valid(svm))
2565291bd20dSTom Lendacky goto vmgexit_err;
2566291bd20dSTom Lendacky }
2567291bd20dSTom Lendacky break;
2568291bd20dSTom Lendacky case SVM_EXIT_VMMCALL:
25694e15a0ddSPaolo Bonzini if (!kvm_ghcb_rax_is_valid(svm) ||
25704e15a0ddSPaolo Bonzini !kvm_ghcb_cpl_is_valid(svm))
2571291bd20dSTom Lendacky goto vmgexit_err;
2572291bd20dSTom Lendacky break;
2573291bd20dSTom Lendacky case SVM_EXIT_RDTSCP:
2574291bd20dSTom Lendacky break;
2575291bd20dSTom Lendacky case SVM_EXIT_WBINVD:
2576291bd20dSTom Lendacky break;
2577291bd20dSTom Lendacky case SVM_EXIT_MONITOR:
25784e15a0ddSPaolo Bonzini if (!kvm_ghcb_rax_is_valid(svm) ||
25794e15a0ddSPaolo Bonzini !kvm_ghcb_rcx_is_valid(svm) ||
25804e15a0ddSPaolo Bonzini !kvm_ghcb_rdx_is_valid(svm))
2581291bd20dSTom Lendacky goto vmgexit_err;
2582291bd20dSTom Lendacky break;
2583291bd20dSTom Lendacky case SVM_EXIT_MWAIT:
25844e15a0ddSPaolo Bonzini if (!kvm_ghcb_rax_is_valid(svm) ||
25854e15a0ddSPaolo Bonzini !kvm_ghcb_rcx_is_valid(svm))
2586291bd20dSTom Lendacky goto vmgexit_err;
2587291bd20dSTom Lendacky break;
25888f423a80STom Lendacky case SVM_VMGEXIT_MMIO_READ:
25898f423a80STom Lendacky case SVM_VMGEXIT_MMIO_WRITE:
25904e15a0ddSPaolo Bonzini if (!kvm_ghcb_sw_scratch_is_valid(svm))
25918f423a80STom Lendacky goto vmgexit_err;
25928f423a80STom Lendacky break;
25934444dfe4STom Lendacky case SVM_VMGEXIT_NMI_COMPLETE:
2594647daca2STom Lendacky case SVM_VMGEXIT_AP_HLT_LOOP:
25958640ca58STom Lendacky case SVM_VMGEXIT_AP_JUMP_TABLE:
2596291bd20dSTom Lendacky case SVM_VMGEXIT_UNSUPPORTED_EVENT:
2597291bd20dSTom Lendacky break;
2598291bd20dSTom Lendacky default:
2599ad5b3532STom Lendacky reason = GHCB_ERR_INVALID_EVENT;
2600291bd20dSTom Lendacky goto vmgexit_err;
2601291bd20dSTom Lendacky }
2602291bd20dSTom Lendacky
2603aa9f5841SSean Christopherson return 0;
2604291bd20dSTom Lendacky
2605291bd20dSTom Lendacky vmgexit_err:
2606ad5b3532STom Lendacky if (reason == GHCB_ERR_INVALID_USAGE) {
2607291bd20dSTom Lendacky vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
260863dbc67cSPaolo Bonzini svm->sev_es.ghcb->ghcb_usage);
2609ad5b3532STom Lendacky } else if (reason == GHCB_ERR_INVALID_EVENT) {
2610ad5b3532STom Lendacky vcpu_unimpl(vcpu, "vmgexit: exit code %#llx is not valid\n",
2611ad5b3532STom Lendacky exit_code);
2612291bd20dSTom Lendacky } else {
2613ad5b3532STom Lendacky vcpu_unimpl(vcpu, "vmgexit: exit code %#llx input is not valid\n",
2614291bd20dSTom Lendacky exit_code);
2615291bd20dSTom Lendacky dump_ghcb(svm);
2616291bd20dSTom Lendacky }
2617291bd20dSTom Lendacky
261863dbc67cSPaolo Bonzini ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
261963dbc67cSPaolo Bonzini ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, reason);
2620ad5b3532STom Lendacky
2621aa9f5841SSean Christopherson /* Resume the guest to "return" the error code. */
2622aa9f5841SSean Christopherson return 1;
2623291bd20dSTom Lendacky }
2624291bd20dSTom Lendacky
2625ce7ea0cfSTom Lendacky void sev_es_unmap_ghcb(struct vcpu_svm *svm)
2626291bd20dSTom Lendacky {
2627b67a4cc3SPeter Gonda if (!svm->sev_es.ghcb)
2628291bd20dSTom Lendacky return;
2629291bd20dSTom Lendacky
2630b67a4cc3SPeter Gonda if (svm->sev_es.ghcb_sa_free) {
26318f423a80STom Lendacky /*
26328f423a80STom Lendacky * The scratch area lives outside the GHCB, so there is a
26338f423a80STom Lendacky * buffer that, depending on the operation performed, may
26348f423a80STom Lendacky * need to be synced, then freed.
26358f423a80STom Lendacky */
2636b67a4cc3SPeter Gonda if (svm->sev_es.ghcb_sa_sync) {
26378f423a80STom Lendacky kvm_write_guest(svm->vcpu.kvm,
26384e15a0ddSPaolo Bonzini svm->sev_es.sw_scratch,
2639b67a4cc3SPeter Gonda svm->sev_es.ghcb_sa,
2640b67a4cc3SPeter Gonda svm->sev_es.ghcb_sa_len);
2641b67a4cc3SPeter Gonda svm->sev_es.ghcb_sa_sync = false;
26428f423a80STom Lendacky }
26438f423a80STom Lendacky
2644a655276aSSean Christopherson kvfree(svm->sev_es.ghcb_sa);
2645b67a4cc3SPeter Gonda svm->sev_es.ghcb_sa = NULL;
2646b67a4cc3SPeter Gonda svm->sev_es.ghcb_sa_free = false;
26478f423a80STom Lendacky }
26488f423a80STom Lendacky
2649b67a4cc3SPeter Gonda trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->sev_es.ghcb);
2650d523ab6bSTom Lendacky
2651291bd20dSTom Lendacky sev_es_sync_to_ghcb(svm);
2652291bd20dSTom Lendacky
2653b67a4cc3SPeter Gonda kvm_vcpu_unmap(&svm->vcpu, &svm->sev_es.ghcb_map, true);
2654b67a4cc3SPeter Gonda svm->sev_es.ghcb = NULL;
2655291bd20dSTom Lendacky }
2656291bd20dSTom Lendacky
2657eaf78265SJoerg Roedel void pre_sev_run(struct vcpu_svm *svm, int cpu)
2658eaf78265SJoerg Roedel {
265973412dfeSPaolo Bonzini struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
266079b79ea2SSean Christopherson unsigned int asid = sev_get_asid(svm->vcpu.kvm);
2661eaf78265SJoerg Roedel
2662eaf78265SJoerg Roedel /* Assign the asid allocated with this SEV guest */
2663dee734a7SPaolo Bonzini svm->asid = asid;
2664eaf78265SJoerg Roedel
2665eaf78265SJoerg Roedel /*
2666eaf78265SJoerg Roedel * Flush guest TLB:
2667eaf78265SJoerg Roedel *
2668eaf78265SJoerg Roedel * 1) when a different VMCB for the same ASID is to be run on the same host CPU.
2669eaf78265SJoerg Roedel * 2) or this VMCB was executed on a different host CPU in previous VMRUNs.
2670eaf78265SJoerg Roedel */
2671eaf78265SJoerg Roedel if (sd->sev_vmcbs[asid] == svm->vmcb &&
26728a14fe4fSJim Mattson svm->vcpu.arch.last_vmentry_cpu == cpu)
2673eaf78265SJoerg Roedel return;
2674eaf78265SJoerg Roedel
2675eaf78265SJoerg Roedel sd->sev_vmcbs[asid] = svm->vmcb;
2676eaf78265SJoerg Roedel svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
267706e7852cSJoerg Roedel vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
2678eaf78265SJoerg Roedel }
2679291bd20dSTom Lendacky
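/*
 * Upper bound on the kernel bounce buffer allocated when the guest places
 * the scratch area outside the GHCB page (see setup_vmgexit_scratch()).
 */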
26808f423a80STom Lendacky #define GHCB_SCRATCH_AREA_LIMIT (16ULL * PAGE_SIZE)
2681aa9f5841SSean Christopherson static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
26828f423a80STom Lendacky {
26838f423a80STom Lendacky struct vmcb_control_area *control = &svm->vmcb->control;
26848f423a80STom Lendacky u64 ghcb_scratch_beg, ghcb_scratch_end;
26858f423a80STom Lendacky u64 scratch_gpa_beg, scratch_gpa_end;
26868f423a80STom Lendacky void *scratch_va;
26878f423a80STom Lendacky
26884e15a0ddSPaolo Bonzini scratch_gpa_beg = svm->sev_es.sw_scratch;
26898f423a80STom Lendacky if (!scratch_gpa_beg) {
26908f423a80STom Lendacky pr_err("vmgexit: scratch gpa not provided\n");
2691ad5b3532STom Lendacky goto e_scratch;
26928f423a80STom Lendacky }
26938f423a80STom Lendacky
26948f423a80STom Lendacky scratch_gpa_end = scratch_gpa_beg + len;
26958f423a80STom Lendacky if (scratch_gpa_end < scratch_gpa_beg) {
26968f423a80STom Lendacky pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n",
26978f423a80STom Lendacky len, scratch_gpa_beg);
2698ad5b3532STom Lendacky goto e_scratch;
26998f423a80STom Lendacky }
27008f423a80STom Lendacky
27018f423a80STom Lendacky if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) {
27028f423a80STom Lendacky /* Scratch area begins within GHCB */
27038f423a80STom Lendacky ghcb_scratch_beg = control->ghcb_gpa +
27048f423a80STom Lendacky offsetof(struct ghcb, shared_buffer);
27058f423a80STom Lendacky ghcb_scratch_end = control->ghcb_gpa +
2706d08b4858SCarlos Bilbao offsetof(struct ghcb, reserved_0xff0);
27078f423a80STom Lendacky
27088f423a80STom Lendacky /*
27098f423a80STom Lendacky * If the scratch area begins within the GHCB, it must be
27108f423a80STom Lendacky * completely contained in the GHCB shared buffer area.
27118f423a80STom Lendacky */
27128f423a80STom Lendacky if (scratch_gpa_beg < ghcb_scratch_beg ||
27138f423a80STom Lendacky scratch_gpa_end > ghcb_scratch_end) {
27148f423a80STom Lendacky pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n",
27158f423a80STom Lendacky scratch_gpa_beg, scratch_gpa_end);
2716ad5b3532STom Lendacky goto e_scratch;
27178f423a80STom Lendacky }
27188f423a80STom Lendacky
2719b67a4cc3SPeter Gonda scratch_va = (void *)svm->sev_es.ghcb;
27208f423a80STom Lendacky scratch_va += (scratch_gpa_beg - control->ghcb_gpa);
27218f423a80STom Lendacky } else {
27228f423a80STom Lendacky /*
27238f423a80STom Lendacky * The guest memory must be read into a kernel buffer, so
27248f423a80STom Lendacky * limit the size.
27258f423a80STom Lendacky */
27268f423a80STom Lendacky if (len > GHCB_SCRATCH_AREA_LIMIT) {
27278f423a80STom Lendacky pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n",
27288f423a80STom Lendacky len, GHCB_SCRATCH_AREA_LIMIT);
2729ad5b3532STom Lendacky goto e_scratch;
27308f423a80STom Lendacky }
2731a655276aSSean Christopherson scratch_va = kvzalloc(len, GFP_KERNEL_ACCOUNT);
27328f423a80STom Lendacky if (!scratch_va)
2733aa9f5841SSean Christopherson return -ENOMEM;
27348f423a80STom Lendacky
27358f423a80STom Lendacky if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
27368f423a80STom Lendacky /* Unable to copy scratch area from guest */
27378f423a80STom Lendacky pr_err("vmgexit: kvm_read_guest for scratch area failed\n");
27388f423a80STom Lendacky
2739a655276aSSean Christopherson kvfree(scratch_va);
2740aa9f5841SSean Christopherson return -EFAULT;
27418f423a80STom Lendacky }
27428f423a80STom Lendacky
27438f423a80STom Lendacky /*
27448f423a80STom Lendacky * The scratch area is outside the GHCB. The operation will
27458f423a80STom Lendacky * dictate whether the buffer needs to be synced before running
27468f423a80STom Lendacky * the vCPU next time (i.e. a read was requested so the data
27478f423a80STom Lendacky * must be written back to the guest memory).
27488f423a80STom Lendacky */
2749b67a4cc3SPeter Gonda svm->sev_es.ghcb_sa_sync = sync;
2750b67a4cc3SPeter Gonda svm->sev_es.ghcb_sa_free = true;
27518f423a80STom Lendacky }
27528f423a80STom Lendacky
2753b67a4cc3SPeter Gonda svm->sev_es.ghcb_sa = scratch_va;
2754b67a4cc3SPeter Gonda svm->sev_es.ghcb_sa_len = len;
27558f423a80STom Lendacky
2756aa9f5841SSean Christopherson return 0;
2757ad5b3532STom Lendacky
2758ad5b3532STom Lendacky e_scratch:
275963dbc67cSPaolo Bonzini ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
276063dbc67cSPaolo Bonzini ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);
2761ad5b3532STom Lendacky
2762aa9f5841SSean Christopherson return 1;
27638f423a80STom Lendacky }
27648f423a80STom Lendacky
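/*
 * Helpers for the GHCB MSR protocol: read or update a bitfield within the
 * value exchanged through vmcb->control.ghcb_gpa.
 */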
2765d3694667STom Lendacky static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
2766d3694667STom Lendacky unsigned int pos)
2767d3694667STom Lendacky {
2768d3694667STom Lendacky svm->vmcb->control.ghcb_gpa &= ~(mask << pos);
2769d3694667STom Lendacky svm->vmcb->control.ghcb_gpa |= (value & mask) << pos;
2770d3694667STom Lendacky }
2771d3694667STom Lendacky
2772d3694667STom Lendacky static u64 get_ghcb_msr_bits(struct vcpu_svm *svm, u64 mask, unsigned int pos)
2773d3694667STom Lendacky {
2774d3694667STom Lendacky return (svm->vmcb->control.ghcb_gpa >> pos) & mask;
2775d3694667STom Lendacky }
2776d3694667STom Lendacky
27771edc1459STom Lendacky static void set_ghcb_msr(struct vcpu_svm *svm, u64 value)
27781edc1459STom Lendacky {
27791edc1459STom Lendacky svm->vmcb->control.ghcb_gpa = value;
27801edc1459STom Lendacky }
27811edc1459STom Lendacky
2782291bd20dSTom Lendacky static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
2783291bd20dSTom Lendacky {
27841edc1459STom Lendacky struct vmcb_control_area *control = &svm->vmcb->control;
2785d3694667STom Lendacky struct kvm_vcpu *vcpu = &svm->vcpu;
27861edc1459STom Lendacky u64 ghcb_info;
2787d3694667STom Lendacky int ret = 1;
27881edc1459STom Lendacky
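	/* Extract the GHCB MSR protocol request code from the guest-written value. */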
27891edc1459STom Lendacky ghcb_info = control->ghcb_gpa & GHCB_MSR_INFO_MASK;
27901edc1459STom Lendacky
279159e38b58STom Lendacky trace_kvm_vmgexit_msr_protocol_enter(svm->vcpu.vcpu_id,
279259e38b58STom Lendacky control->ghcb_gpa);
279359e38b58STom Lendacky
27941edc1459STom Lendacky switch (ghcb_info) {
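	/*
	 * Report the supported GHCB protocol version range and the location
	 * of the encryption bit back to the guest.
	 */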
27951edc1459STom Lendacky case GHCB_MSR_SEV_INFO_REQ:
27961edc1459STom Lendacky set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
27971edc1459STom Lendacky GHCB_VERSION_MIN,
27981edc1459STom Lendacky sev_enc_bit));
27991edc1459STom Lendacky break;
2800d3694667STom Lendacky case GHCB_MSR_CPUID_REQ: {
2801d3694667STom Lendacky u64 cpuid_fn, cpuid_reg, cpuid_value;
2802d3694667STom Lendacky
2803d3694667STom Lendacky cpuid_fn = get_ghcb_msr_bits(svm,
2804d3694667STom Lendacky GHCB_MSR_CPUID_FUNC_MASK,
2805d3694667STom Lendacky GHCB_MSR_CPUID_FUNC_POS);
2806d3694667STom Lendacky
2807d3694667STom Lendacky /* Initialize the registers needed by the CPUID intercept */
2808d3694667STom Lendacky vcpu->arch.regs[VCPU_REGS_RAX] = cpuid_fn;
2809d3694667STom Lendacky vcpu->arch.regs[VCPU_REGS_RCX] = 0;
2810d3694667STom Lendacky
281163129754SPaolo Bonzini ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID);
2812d3694667STom Lendacky if (!ret) {
2813ad5b3532STom Lendacky /* Error, keep GHCB MSR value as-is */
2814d3694667STom Lendacky break;
2815291bd20dSTom Lendacky }
2816291bd20dSTom Lendacky
2817d3694667STom Lendacky cpuid_reg = get_ghcb_msr_bits(svm,
2818d3694667STom Lendacky GHCB_MSR_CPUID_REG_MASK,
2819d3694667STom Lendacky GHCB_MSR_CPUID_REG_POS);
2820d3694667STom Lendacky if (cpuid_reg == 0)
2821d3694667STom Lendacky cpuid_value = vcpu->arch.regs[VCPU_REGS_RAX];
2822d3694667STom Lendacky else if (cpuid_reg == 1)
2823d3694667STom Lendacky cpuid_value = vcpu->arch.regs[VCPU_REGS_RBX];
2824d3694667STom Lendacky else if (cpuid_reg == 2)
2825d3694667STom Lendacky cpuid_value = vcpu->arch.regs[VCPU_REGS_RCX];
2826d3694667STom Lendacky else
2827d3694667STom Lendacky cpuid_value = vcpu->arch.regs[VCPU_REGS_RDX];
2828d3694667STom Lendacky
2829d3694667STom Lendacky set_ghcb_msr_bits(svm, cpuid_value,
2830d3694667STom Lendacky GHCB_MSR_CPUID_VALUE_MASK,
2831d3694667STom Lendacky GHCB_MSR_CPUID_VALUE_POS);
2832d3694667STom Lendacky
2833d3694667STom Lendacky set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP,
2834d3694667STom Lendacky GHCB_MSR_INFO_MASK,
2835d3694667STom Lendacky GHCB_MSR_INFO_POS);
2836d3694667STom Lendacky break;
2837d3694667STom Lendacky }
2838e1d71116STom Lendacky case GHCB_MSR_TERM_REQ: {
2839e1d71116STom Lendacky u64 reason_set, reason_code;
2840e1d71116STom Lendacky
2841e1d71116STom Lendacky reason_set = get_ghcb_msr_bits(svm,
2842e1d71116STom Lendacky GHCB_MSR_TERM_REASON_SET_MASK,
2843e1d71116STom Lendacky GHCB_MSR_TERM_REASON_SET_POS);
2844e1d71116STom Lendacky reason_code = get_ghcb_msr_bits(svm,
2845e1d71116STom Lendacky GHCB_MSR_TERM_REASON_MASK,
2846e1d71116STom Lendacky GHCB_MSR_TERM_REASON_POS);
2847e1d71116STom Lendacky pr_info("SEV-ES guest requested termination: %#llx:%#llx\n",
2848e1d71116STom Lendacky reason_set, reason_code);
2849ad5b3532STom Lendacky
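		/* Forward the termination request to userspace as a system event. */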
2850c24a950eSPeter Gonda vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
285171d7c575SPaolo Bonzini vcpu->run->system_event.type = KVM_SYSTEM_EVENT_SEV_TERM;
2852c24a950eSPeter Gonda vcpu->run->system_event.ndata = 1;
285371d7c575SPaolo Bonzini vcpu->run->system_event.data[0] = control->ghcb_gpa;
2854c24a950eSPeter Gonda
2855c24a950eSPeter Gonda return 0;
2856e1d71116STom Lendacky }
2857d3694667STom Lendacky default:
2858ad5b3532STom Lendacky /* Error, keep GHCB MSR value as-is */
2859ad5b3532STom Lendacky break;
2860d3694667STom Lendacky }
2861d3694667STom Lendacky
286259e38b58STom Lendacky trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id,
286359e38b58STom Lendacky control->ghcb_gpa, ret);
286459e38b58STom Lendacky
2865d3694667STom Lendacky return ret;
28661edc1459STom Lendacky }
28671edc1459STom Lendacky
286863129754SPaolo Bonzini int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
2869291bd20dSTom Lendacky {
287063129754SPaolo Bonzini struct vcpu_svm *svm = to_svm(vcpu);
2871291bd20dSTom Lendacky struct vmcb_control_area *control = &svm->vmcb->control;
2872291bd20dSTom Lendacky u64 ghcb_gpa, exit_code;
2873291bd20dSTom Lendacky int ret;
2874291bd20dSTom Lendacky
2875291bd20dSTom Lendacky /* Validate the GHCB */
2876291bd20dSTom Lendacky ghcb_gpa = control->ghcb_gpa;
2877291bd20dSTom Lendacky if (ghcb_gpa & GHCB_MSR_INFO_MASK)
2878291bd20dSTom Lendacky return sev_handle_vmgexit_msr_protocol(svm);
2879291bd20dSTom Lendacky
2880291bd20dSTom Lendacky if (!ghcb_gpa) {
288163129754SPaolo Bonzini vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n");
2882ad5b3532STom Lendacky
2883ad5b3532STom Lendacky /* Without a GHCB, just return right back to the guest */
2884ad5b3532STom Lendacky return 1;
2885291bd20dSTom Lendacky }
2886291bd20dSTom Lendacky
2887b67a4cc3SPeter Gonda if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) {
2888291bd20dSTom Lendacky /* Unable to map GHCB from guest */
288963129754SPaolo Bonzini vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
2890291bd20dSTom Lendacky ghcb_gpa);
2891ad5b3532STom Lendacky
2892ad5b3532STom Lendacky /* Without a GHCB, just return right back to the guest */
2893ad5b3532STom Lendacky return 1;
2894291bd20dSTom Lendacky }
2895291bd20dSTom Lendacky
2896b67a4cc3SPeter Gonda svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
2897291bd20dSTom Lendacky
289863dbc67cSPaolo Bonzini trace_kvm_vmgexit_enter(vcpu->vcpu_id, svm->sev_es.ghcb);
2899d523ab6bSTom Lendacky
29004e15a0ddSPaolo Bonzini sev_es_sync_from_ghcb(svm);
2901aa9f5841SSean Christopherson ret = sev_es_validate_vmgexit(svm);
2902aa9f5841SSean Christopherson if (ret)
2903aa9f5841SSean Christopherson return ret;
2904291bd20dSTom Lendacky
290563dbc67cSPaolo Bonzini ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 0);
290663dbc67cSPaolo Bonzini ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 0);
2907291bd20dSTom Lendacky
29087588dbceSPaolo Bonzini exit_code = kvm_ghcb_get_sw_exit_code(control);
2909291bd20dSTom Lendacky switch (exit_code) {
29108f423a80STom Lendacky case SVM_VMGEXIT_MMIO_READ:
2911aa9f5841SSean Christopherson ret = setup_vmgexit_scratch(svm, true, control->exit_info_2);
2912aa9f5841SSean Christopherson if (ret)
29138f423a80STom Lendacky break;
29148f423a80STom Lendacky
291563129754SPaolo Bonzini ret = kvm_sev_es_mmio_read(vcpu,
29168f423a80STom Lendacky control->exit_info_1,
29178f423a80STom Lendacky control->exit_info_2,
2918b67a4cc3SPeter Gonda svm->sev_es.ghcb_sa);
29198f423a80STom Lendacky break;
29208f423a80STom Lendacky case SVM_VMGEXIT_MMIO_WRITE:
2921aa9f5841SSean Christopherson ret = setup_vmgexit_scratch(svm, false, control->exit_info_2);
2922aa9f5841SSean Christopherson if (ret)
29238f423a80STom Lendacky break;
29248f423a80STom Lendacky
292563129754SPaolo Bonzini ret = kvm_sev_es_mmio_write(vcpu,
29268f423a80STom Lendacky control->exit_info_1,
29278f423a80STom Lendacky control->exit_info_2,
2928b67a4cc3SPeter Gonda svm->sev_es.ghcb_sa);
29298f423a80STom Lendacky break;
29304444dfe4STom Lendacky case SVM_VMGEXIT_NMI_COMPLETE:
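		/*
		 * The guest signals that NMI handling is complete: unmask NMIs
		 * and re-evaluate pending events.
		 */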
2931389fbbecSSean Christopherson ++vcpu->stat.nmi_window_exits;
2932389fbbecSSean Christopherson svm->nmi_masked = false;
2933389fbbecSSean Christopherson kvm_make_request(KVM_REQ_EVENT, vcpu);
2934389fbbecSSean Christopherson ret = 1;
29354444dfe4STom Lendacky break;
2936647daca2STom Lendacky case SVM_VMGEXIT_AP_HLT_LOOP:
293763129754SPaolo Bonzini ret = kvm_emulate_ap_reset_hold(vcpu);
2938647daca2STom Lendacky break;
29398640ca58STom Lendacky case SVM_VMGEXIT_AP_JUMP_TABLE: {
294063129754SPaolo Bonzini struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
29418640ca58STom Lendacky
29428640ca58STom Lendacky switch (control->exit_info_1) {
29438640ca58STom Lendacky case 0:
29448640ca58STom Lendacky /* Set AP jump table address */
29458640ca58STom Lendacky sev->ap_jump_table = control->exit_info_2;
29468640ca58STom Lendacky break;
29478640ca58STom Lendacky case 1:
29488640ca58STom Lendacky /* Get AP jump table address */
294963dbc67cSPaolo Bonzini ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, sev->ap_jump_table);
29508640ca58STom Lendacky break;
29518640ca58STom Lendacky default:
29528640ca58STom Lendacky pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
29538640ca58STom Lendacky control->exit_info_1);
295463dbc67cSPaolo Bonzini ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
295563dbc67cSPaolo Bonzini ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_INPUT);
29568640ca58STom Lendacky }
29578640ca58STom Lendacky
2958aa9f5841SSean Christopherson ret = 1;
29598640ca58STom Lendacky break;
29608640ca58STom Lendacky }
2961291bd20dSTom Lendacky case SVM_VMGEXIT_UNSUPPORTED_EVENT:
296263129754SPaolo Bonzini vcpu_unimpl(vcpu,
2963291bd20dSTom Lendacky "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
2964291bd20dSTom Lendacky control->exit_info_1, control->exit_info_2);
296575236f5fSSean Christopherson ret = -EINVAL;
2966291bd20dSTom Lendacky break;
2967291bd20dSTom Lendacky default:
296863129754SPaolo Bonzini ret = svm_invoke_exit_handler(vcpu, exit_code);
2969291bd20dSTom Lendacky }
2970291bd20dSTom Lendacky
2971291bd20dSTom Lendacky return ret;
2972291bd20dSTom Lendacky }
29737ed9abfeSTom Lendacky
29747ed9abfeSTom Lendacky int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
29757ed9abfeSTom Lendacky {
29769b0971caSPaolo Bonzini int count;
29779b0971caSPaolo Bonzini int bytes;
2978aa9f5841SSean Christopherson int r;
29799b0971caSPaolo Bonzini
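	/*
	 * exit_info_2 holds the repeat count for the string I/O; reject counts
	 * that do not fit in an int or whose total byte size (count * size)
	 * overflows.
	 */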
29809b0971caSPaolo Bonzini if (svm->vmcb->control.exit_info_2 > INT_MAX)
29817ed9abfeSTom Lendacky return -EINVAL;
29827ed9abfeSTom Lendacky
29839b0971caSPaolo Bonzini count = svm->vmcb->control.exit_info_2;
29849b0971caSPaolo Bonzini if (unlikely(check_mul_overflow(count, size, &bytes)))
29859b0971caSPaolo Bonzini return -EINVAL;
29869b0971caSPaolo Bonzini
2987aa9f5841SSean Christopherson r = setup_vmgexit_scratch(svm, in, bytes);
2988aa9f5841SSean Christopherson if (r)
2989aa9f5841SSean Christopherson return r;
29909b0971caSPaolo Bonzini
2991b67a4cc3SPeter Gonda return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa,
29921f058331SPaolo Bonzini count, in);
29937ed9abfeSTom Lendacky }
2994376c6d28STom Lendacky
2995e0096d01STom Lendacky static void sev_es_vcpu_after_set_cpuid(struct vcpu_svm *svm)
2996e0096d01STom Lendacky {
2997e0096d01STom Lendacky struct kvm_vcpu *vcpu = &svm->vcpu;
2998e0096d01STom Lendacky
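	/*
	 * If hardware can virtualize TSC_AUX for SEV-ES guests, stop
	 * intercepting MSR_TSC_AUX when the guest CPUID exposes RDTSCP or
	 * RDPID.
	 */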
2999e0096d01STom Lendacky if (boot_cpu_has(X86_FEATURE_V_TSC_AUX)) {
3000e0096d01STom Lendacky bool v_tsc_aux = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) ||
3001e0096d01STom Lendacky guest_cpuid_has(vcpu, X86_FEATURE_RDPID);
3002e0096d01STom Lendacky
3003e0096d01STom Lendacky set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, v_tsc_aux, v_tsc_aux);
3004e0096d01STom Lendacky }
3005b6e4076cSMichael Roth
3006b6e4076cSMichael Roth /*
3007b6e4076cSMichael Roth * For SEV-ES, accesses to MSR_IA32_XSS should not be intercepted if
3008b6e4076cSMichael Roth * the host/guest supports its use.
3009b6e4076cSMichael Roth *
3010b6e4076cSMichael Roth * guest_can_use() checks a number of requirements on the host/guest to
3011b6e4076cSMichael Roth * ensure that MSR_IA32_XSS is available, but it might report true even
3012b6e4076cSMichael Roth * if X86_FEATURE_XSAVES isn't configured in the guest to ensure host
3013b6e4076cSMichael Roth * MSR_IA32_XSS is always properly restored. For SEV-ES, it is better
3014b6e4076cSMichael Roth * to further check that the guest CPUID actually supports
3015b6e4076cSMichael Roth * X86_FEATURE_XSAVES so that accesses to MSR_IA32_XSS by misbehaved
3016b6e4076cSMichael Roth * guests will still get intercepted and caught in the normal
3017b6e4076cSMichael Roth * kvm_emulate_rdmsr()/kvm_emulated_wrmsr() paths.
3018b6e4076cSMichael Roth */
3019b6e4076cSMichael Roth if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
3020b6e4076cSMichael Roth guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
3021b6e4076cSMichael Roth set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 1, 1);
3022b6e4076cSMichael Roth else
3023b6e4076cSMichael Roth set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 0, 0);
3024e0096d01STom Lendacky }
3025e0096d01STom Lendacky
3026e0096d01STom Lendacky void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm)
3027e0096d01STom Lendacky {
3028e0096d01STom Lendacky struct kvm_vcpu *vcpu = &svm->vcpu;
3029e0096d01STom Lendacky struct kvm_cpuid_entry2 *best;
3030e0096d01STom Lendacky
3031e0096d01STom Lendacky /* For sev guests, the memory encryption bit is not reserved in CR3. */
3032e0096d01STom Lendacky best = kvm_find_cpuid_entry(vcpu, 0x8000001F);
3033e0096d01STom Lendacky if (best)
3034e0096d01STom Lendacky vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f));
3035e0096d01STom Lendacky
3036e0096d01STom Lendacky if (sev_es_guest(svm->vcpu.kvm))
3037e0096d01STom Lendacky sev_es_vcpu_after_set_cpuid(svm);
3038e0096d01STom Lendacky }
3039e0096d01STom Lendacky
30406defa24dSPeter Gonda static void sev_es_init_vmcb(struct vcpu_svm *svm)
3041376c6d28STom Lendacky {
3042c2690b5fSAlexey Kardashevskiy struct vmcb *vmcb = svm->vmcb01.ptr;
3043376c6d28STom Lendacky struct kvm_vcpu *vcpu = &svm->vcpu;
3044376c6d28STom Lendacky
3045376c6d28STom Lendacky svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
3046376c6d28STom Lendacky
3047376c6d28STom Lendacky /*
3048376c6d28STom Lendacky * An SEV-ES guest requires a VMSA area that is separate from the
3049376c6d28STom Lendacky * VMCB page. Do not include the encryption mask on the VMSA physical
30501952e74dSSean Christopherson * address since hardware will access it using the guest key. Note,
30511952e74dSSean Christopherson * the VMSA will be NULL if this vCPU is the destination for intrahost
30521952e74dSSean Christopherson * migration, and will be copied later.
3053376c6d28STom Lendacky */
30541952e74dSSean Christopherson if (svm->sev_es.vmsa)
3055b67a4cc3SPeter Gonda svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa);
3056376c6d28STom Lendacky
3057376c6d28STom Lendacky /* Can't intercept CR register access, HV can't modify CR registers */
3058376c6d28STom Lendacky svm_clr_intercept(svm, INTERCEPT_CR0_READ);
3059376c6d28STom Lendacky svm_clr_intercept(svm, INTERCEPT_CR4_READ);
3060376c6d28STom Lendacky svm_clr_intercept(svm, INTERCEPT_CR8_READ);
3061376c6d28STom Lendacky svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
3062376c6d28STom Lendacky svm_clr_intercept(svm, INTERCEPT_CR4_WRITE);
3063376c6d28STom Lendacky svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);
3064376c6d28STom Lendacky
3065376c6d28STom Lendacky svm_clr_intercept(svm, INTERCEPT_SELECTIVE_CR0);
3066376c6d28STom Lendacky
3067376c6d28STom Lendacky /* Track EFER/CR register changes */
3068376c6d28STom Lendacky svm_set_intercept(svm, TRAP_EFER_WRITE);
3069376c6d28STom Lendacky svm_set_intercept(svm, TRAP_CR0_WRITE);
3070376c6d28STom Lendacky svm_set_intercept(svm, TRAP_CR4_WRITE);
3071376c6d28STom Lendacky svm_set_intercept(svm, TRAP_CR8_WRITE);
3072376c6d28STom Lendacky
3073c2690b5fSAlexey Kardashevskiy vmcb->control.intercepts[INTERCEPT_DR] = 0;
3074d1f85fbeSAlexey Kardashevskiy if (!sev_es_debug_swap_enabled) {
3075c2690b5fSAlexey Kardashevskiy vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
3076c2690b5fSAlexey Kardashevskiy vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
3077c2690b5fSAlexey Kardashevskiy recalc_intercepts(svm);
307890cbf6d9SAlexey Kardashevskiy } else {
307990cbf6d9SAlexey Kardashevskiy /*
308090cbf6d9SAlexey Kardashevskiy * Disable #DB intercept iff DebugSwap is enabled. KVM doesn't
308190cbf6d9SAlexey Kardashevskiy * allow debugging SEV-ES guests, and enables DebugSwap iff
308290cbf6d9SAlexey Kardashevskiy * NO_NESTED_DATA_BP is supported, so there's no reason to
308390cbf6d9SAlexey Kardashevskiy * intercept #DB when DebugSwap is enabled. For simplicity
308490cbf6d9SAlexey Kardashevskiy * with respect to guest debug, intercept #DB for other VMs
308590cbf6d9SAlexey Kardashevskiy * even if NO_NESTED_DATA_BP is supported, i.e. even if the
308690cbf6d9SAlexey Kardashevskiy * guest can't DoS the CPU with infinite #DB vectoring.
308790cbf6d9SAlexey Kardashevskiy */
308890cbf6d9SAlexey Kardashevskiy clr_exception_intercept(svm, DB_VECTOR);
3089d1f85fbeSAlexey Kardashevskiy }
3090376c6d28STom Lendacky
3091376c6d28STom Lendacky /* Can't intercept XSETBV, HV can't modify XCR0 directly */
3092376c6d28STom Lendacky svm_clr_intercept(svm, INTERCEPT_XSETBV);
3093376c6d28STom Lendacky
3094376c6d28STom Lendacky /* Clear intercepts on selected MSRs */
3095376c6d28STom Lendacky set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
3096376c6d28STom Lendacky set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
3097376c6d28STom Lendacky }
3098376c6d28STom Lendacky
30996defa24dSPeter Gonda void sev_init_vmcb(struct vcpu_svm *svm)
31006defa24dSPeter Gonda {
31016defa24dSPeter Gonda svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
31026defa24dSPeter Gonda clr_exception_intercept(svm, UD_VECTOR);
31036defa24dSPeter Gonda
310429de732cSAlexey Kardashevskiy /*
310529de732cSAlexey Kardashevskiy * Don't intercept #GP for SEV guests, e.g. for the VMware backdoor, as
310629de732cSAlexey Kardashevskiy * KVM can't decrypt guest memory to decode the faulting instruction.
310729de732cSAlexey Kardashevskiy */
310829de732cSAlexey Kardashevskiy clr_exception_intercept(svm, GP_VECTOR);
310929de732cSAlexey Kardashevskiy
31106defa24dSPeter Gonda if (sev_es_guest(svm->vcpu.kvm))
31116defa24dSPeter Gonda sev_es_init_vmcb(svm);
31126defa24dSPeter Gonda }
31136defa24dSPeter Gonda
31149ebe530bSSean Christopherson void sev_es_vcpu_reset(struct vcpu_svm *svm)
3115376c6d28STom Lendacky {
3116376c6d28STom Lendacky /*
31179ebe530bSSean Christopherson * Set the GHCB MSR value as per the GHCB specification when emulating
31189ebe530bSSean Christopherson * vCPU RESET for an SEV-ES guest.
3119376c6d28STom Lendacky */
3120376c6d28STom Lendacky set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
3121376c6d28STom Lendacky GHCB_VERSION_MIN,
3122376c6d28STom Lendacky sev_enc_bit));
3123376c6d28STom Lendacky }
312486137773STom Lendacky
31253dd2775bSTom Lendacky void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa)
312686137773STom Lendacky {
312786137773STom Lendacky /*
3128f8d808edSSean Christopherson * All host state for SEV-ES guests is categorized into three swap types
3129f8d808edSSean Christopherson * based on how it is handled by hardware during a world switch:
3130f8d808edSSean Christopherson *
3131f8d808edSSean Christopherson * A: VMRUN: Host state saved in host save area
3132f8d808edSSean Christopherson * VMEXIT: Host state loaded from host save area
3133f8d808edSSean Christopherson *
3134f8d808edSSean Christopherson * B: VMRUN: Host state _NOT_ saved in host save area
3135f8d808edSSean Christopherson * VMEXIT: Host state loaded from host save area
3136f8d808edSSean Christopherson *
3137f8d808edSSean Christopherson * C: VMRUN: Host state _NOT_ saved in host save area
3138f8d808edSSean Christopherson * VMEXIT: Host state initialized to default(reset) values
3139f8d808edSSean Christopherson *
3140f8d808edSSean Christopherson * Manually save type-B state, i.e. state that is loaded by VMEXIT but
3141f8d808edSSean Christopherson * isn't saved by VMRUN, that isn't already saved by VMSAVE (performed
3142f8d808edSSean Christopherson * by common SVM code).
314386137773STom Lendacky */
314486137773STom Lendacky hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
314586137773STom Lendacky hostsa->pkru = read_pkru();
314686137773STom Lendacky hostsa->xss = host_xss;
3147d1f85fbeSAlexey Kardashevskiy
3148d1f85fbeSAlexey Kardashevskiy /*
3149d1f85fbeSAlexey Kardashevskiy * If DebugSwap is enabled, debug registers are loaded but NOT saved by
3150d1f85fbeSAlexey Kardashevskiy * the CPU (Type-B). If DebugSwap is disabled/unsupported, the CPU both
3151d1f85fbeSAlexey Kardashevskiy * saves and loads debug registers (Type-A).
3152d1f85fbeSAlexey Kardashevskiy */
3153d1f85fbeSAlexey Kardashevskiy if (sev_es_debug_swap_enabled) {
3154d1f85fbeSAlexey Kardashevskiy hostsa->dr0 = native_get_debugreg(0);
3155d1f85fbeSAlexey Kardashevskiy hostsa->dr1 = native_get_debugreg(1);
3156d1f85fbeSAlexey Kardashevskiy hostsa->dr2 = native_get_debugreg(2);
3157d1f85fbeSAlexey Kardashevskiy hostsa->dr3 = native_get_debugreg(3);
3158d1f85fbeSAlexey Kardashevskiy hostsa->dr0_addr_mask = amd_get_dr_addr_mask(0);
3159d1f85fbeSAlexey Kardashevskiy hostsa->dr1_addr_mask = amd_get_dr_addr_mask(1);
3160d1f85fbeSAlexey Kardashevskiy hostsa->dr2_addr_mask = amd_get_dr_addr_mask(2);
3161d1f85fbeSAlexey Kardashevskiy hostsa->dr3_addr_mask = amd_get_dr_addr_mask(3);
3162d1f85fbeSAlexey Kardashevskiy }
316386137773STom Lendacky }
316486137773STom Lendacky
3165647daca2STom Lendacky void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
3166647daca2STom Lendacky {
3167647daca2STom Lendacky struct vcpu_svm *svm = to_svm(vcpu);
3168647daca2STom Lendacky
3169647daca2STom Lendacky /* First SIPI: Use the values as initially set by the VMM */
3170b67a4cc3SPeter Gonda if (!svm->sev_es.received_first_sipi) {
3171b67a4cc3SPeter Gonda svm->sev_es.received_first_sipi = true;
3172647daca2STom Lendacky return;
3173647daca2STom Lendacky }
3174647daca2STom Lendacky
3175647daca2STom Lendacky /*
3176647daca2STom Lendacky * Subsequent SIPI: Return from an AP Reset Hold VMGEXIT, where
3177647daca2STom Lendacky * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
3178647daca2STom Lendacky * non-zero value.
3179647daca2STom Lendacky */
3180b67a4cc3SPeter Gonda if (!svm->sev_es.ghcb)
3181a3ba26ecSTom Lendacky return;
3182a3ba26ecSTom Lendacky
3183b67a4cc3SPeter Gonda ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);
3184647daca2STom Lendacky }
3185