xref: /openbmc/qemu/target/i386/sev.c (revision b92b39af4219df4250f121f64d215506909c7404)
1 /*
2  * QEMU SEV support
3  *
4  * Copyright Advanced Micro Devices 2016-2018
5  *
6  * Author:
7  *      Brijesh Singh <brijesh.singh@amd.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2 or later.
10  * See the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 
16 #include <linux/kvm.h>
17 #include <linux/kvm_para.h>
18 #include <linux/psp-sev.h>
19 
20 #include <sys/ioctl.h>
21 
22 #include "qapi/error.h"
23 #include "qom/object_interfaces.h"
24 #include "qemu/base64.h"
25 #include "qemu/module.h"
26 #include "qemu/uuid.h"
27 #include "qemu/error-report.h"
28 #include "crypto/hash.h"
29 #include "exec/target_page.h"
30 #include "system/kvm.h"
31 #include "kvm/kvm_i386.h"
32 #include "sev.h"
33 #include "system/system.h"
34 #include "system/runstate.h"
35 #include "trace.h"
36 #include "migration/blocker.h"
37 #include "qom/object.h"
38 #include "monitor/monitor.h"
39 #include "monitor/hmp-target.h"
40 #include "qapi/qapi-commands-misc-i386.h"
41 #include "confidential-guest.h"
42 #include "hw/i386/pc.h"
43 #include "system/address-spaces.h"
44 #include "hw/i386/e820_memory_layout.h"
45 #include "qemu/queue.h"
46 #include "qemu/cutils.h"
47 
48 OBJECT_DECLARE_TYPE(SevCommonState, SevCommonStateClass, SEV_COMMON)
49 OBJECT_DECLARE_TYPE(SevGuestState, SevCommonStateClass, SEV_GUEST)
50 OBJECT_DECLARE_TYPE(SevSnpGuestState, SevCommonStateClass, SEV_SNP_GUEST)
51 
52 /* hard code sha256 digest size */
53 #define HASH_SIZE 32
54 
55 /* Hard coded GPA that KVM uses for the VMSA */
56 #define KVM_VMSA_GPA 0xFFFFFFFFF000
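/*
 * check_vmsa_supported() below rejects any initial VMSA whose GPA does not
 * match this value, since KVM measures the VMSA at this fixed address
 * regardless of what the caller requests.
 */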
57 
58 /* Convert between SEV-ES VMSA and SegmentCache flags/attributes */
59 #define FLAGS_VMSA_TO_SEGCACHE(flags) \
60     ((((flags) & 0xff00) << 12) | (((flags) & 0xff) << 8))
61 #define FLAGS_SEGCACHE_TO_VMSA(flags) \
62     ((((flags) & 0xff00) >> 8) | (((flags) & 0xf00000) >> 12))
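/*
 * Worked example: a flat 64-bit code segment has VMSA attrib 0x029b
 * (type=0xb, S=1, DPL=0, P=1, L=1).  FLAGS_VMSA_TO_SEGCACHE() moves
 * attrib bits 0-7 to flags bits 8-15 and attrib bits 8-11 to flags
 * bits 20-23, giving SegmentCache flags 0x00209b00, which matches the
 * DESC_* flag layout used elsewhere in QEMU.
 */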
63 
64 typedef struct QEMU_PACKED SevHashTableEntry {
65     QemuUUID guid;
66     uint16_t len;
67     uint8_t hash[HASH_SIZE];
68 } SevHashTableEntry;
69 
70 typedef struct QEMU_PACKED SevHashTable {
71     QemuUUID guid;
72     uint16_t len;
73     SevHashTableEntry cmdline;
74     SevHashTableEntry initrd;
75     SevHashTableEntry kernel;
76 } SevHashTable;
77 
78 /*
79  * Data encrypted by sev_encrypt_flash() must be padded to a multiple of
80  * 16 bytes.
81  */
82 typedef struct QEMU_PACKED PaddedSevHashTable {
83     SevHashTable ht;
84     uint8_t padding[ROUND_UP(sizeof(SevHashTable), 16) - sizeof(SevHashTable)];
85 } PaddedSevHashTable;
86 
87 QEMU_BUILD_BUG_ON(sizeof(PaddedSevHashTable) % 16 != 0);
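/*
 * For reference: each SevHashTableEntry is 50 bytes (16-byte GUID,
 * 2-byte len, 32-byte SHA-256 hash), so SevHashTable is
 * 16 + 2 + 3 * 50 = 168 bytes and PaddedSevHashTable rounds this up
 * to 176 bytes by appending 8 bytes of padding.
 */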
88 
89 #define SEV_INFO_BLOCK_GUID     "00f771de-1a7e-4fcb-890e-68c77e2fb44e"
90 typedef struct __attribute__((__packed__)) SevInfoBlock {
91     /* SEV-ES Reset Vector Address */
92     uint32_t reset_addr;
93 } SevInfoBlock;
94 
95 #define SEV_HASH_TABLE_RV_GUID  "7255371f-3a3b-4b04-927b-1da6efa8d454"
96 typedef struct QEMU_PACKED SevHashTableDescriptor {
97     /* SEV hash table area guest address */
98     uint32_t base;
99     /* SEV hash table area size (in bytes) */
100     uint32_t size;
101 } SevHashTableDescriptor;
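/*
 * OVMF publishes this descriptor in its GUIDed table under
 * SEV_HASH_TABLE_RV_GUID; QEMU uses it to locate the guest-memory area
 * where the kernel, initrd and command-line hashes should be written.
 */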
102 
103 typedef struct SevLaunchVmsa {
104     QTAILQ_ENTRY(SevLaunchVmsa) next;
105 
106     uint16_t cpu_index;
107     uint64_t gpa;
108     struct sev_es_save_area vmsa;
109 } SevLaunchVmsa;
110 
111 struct SevCommonState {
112     X86ConfidentialGuest parent_obj;
113 
114     int kvm_type;
115 
116     /* configuration parameters */
117     char *sev_device;
118     uint32_t cbitpos;
119     uint32_t reduced_phys_bits;
120     bool kernel_hashes;
121     uint64_t sev_features;
122     uint64_t supported_sev_features;
123 
124     /* runtime state */
125     uint8_t api_major;
126     uint8_t api_minor;
127     uint8_t build_id;
128     int sev_fd;
129     SevState state;
130 
131     QTAILQ_HEAD(, SevLaunchVmsa) launch_vmsa;
132 };
133 
134 struct SevCommonStateClass {
135     X86ConfidentialGuestClass parent_class;
136 
137     /* public */
138     bool (*build_kernel_loader_hashes)(SevCommonState *sev_common,
139                                        SevHashTableDescriptor *area,
140                                        SevKernelLoaderContext *ctx,
141                                        Error **errp);
142     int (*launch_start)(SevCommonState *sev_common);
143     void (*launch_finish)(SevCommonState *sev_common);
144     int (*launch_update_data)(SevCommonState *sev_common, hwaddr gpa,
145                               uint8_t *ptr, size_t len, Error **errp);
146     int (*kvm_init)(ConfidentialGuestSupport *cgs, Error **errp);
147 };
148 
149 /**
150  * SevGuestState:
151  *
152  * The SevGuestState object is used for creating and managing a SEV
153  * guest.
154  *
155  * # $QEMU \
156  *         -object sev-guest,id=sev0 \
157  *         -machine ...,memory-encryption=sev0
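 *
 * A fuller invocation typically also pins the C-bit position and the
 * number of reduced physical address bits (the values below are
 * illustrative; query-sev-capabilities reports the host's real ones):
 *
 * # $QEMU \
 *         -object sev-guest,id=sev0,policy=0x1,cbitpos=47,reduced-phys-bits=1 \
 *         -machine ...,memory-encryption=sev0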
158  */
159 struct SevGuestState {
160     SevCommonState parent_obj;
161     gchar *measurement;
162 
163     /* configuration parameters */
164     uint32_t handle;
165     uint32_t policy;
166     char *dh_cert_file;
167     char *session_file;
168     OnOffAuto legacy_vm_type;
169 };
170 
171 struct SevSnpGuestState {
172     SevCommonState parent_obj;
173 
174     /* configuration parameters */
175     char *guest_visible_workarounds;
176     char *id_block_base64;
177     uint8_t *id_block;
178     char *id_auth_base64;
179     uint8_t *id_auth;
180     char *host_data;
181 
182     struct kvm_sev_snp_launch_start kvm_start_conf;
183     struct kvm_sev_snp_launch_finish kvm_finish_conf;
184 
185     uint32_t kernel_hashes_offset;
186     PaddedSevHashTable *kernel_hashes_data;
187 };
188 
189 #define DEFAULT_GUEST_POLICY    0x1 /* disable debug */
190 #define DEFAULT_SEV_DEVICE      "/dev/sev"
191 #define DEFAULT_SEV_SNP_POLICY  0x30000
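/*
 * In the default SNP policy, bit 17 is the reserved must-be-one bit and
 * bit 16 allows SMT, per the SEV-SNP firmware ABI; the legacy SEV policy
 * bit 0 disallows guest debugging.
 */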
192 
193 typedef struct SevLaunchUpdateData {
194     QTAILQ_ENTRY(SevLaunchUpdateData) next;
195     hwaddr gpa;
196     void *hva;
197     size_t len;
198     int type;
199 } SevLaunchUpdateData;
200 
201 static QTAILQ_HEAD(, SevLaunchUpdateData) launch_update;
202 
203 static Error *sev_mig_blocker;
204 
205 static const char *const sev_fw_errlist[] = {
206     [SEV_RET_SUCCESS]                = "",
207     [SEV_RET_INVALID_PLATFORM_STATE] = "Platform state is invalid",
208     [SEV_RET_INVALID_GUEST_STATE]    = "Guest state is invalid",
209     [SEV_RET_INAVLID_CONFIG]         = "Platform configuration is invalid",
210     [SEV_RET_INVALID_LEN]            = "Buffer too small",
211     [SEV_RET_ALREADY_OWNED]          = "Platform is already owned",
212     [SEV_RET_INVALID_CERTIFICATE]    = "Certificate is invalid",
213     [SEV_RET_POLICY_FAILURE]         = "Policy is not allowed",
214     [SEV_RET_INACTIVE]               = "Guest is not active",
215     [SEV_RET_INVALID_ADDRESS]        = "Invalid address",
216     [SEV_RET_BAD_SIGNATURE]          = "Bad signature",
217     [SEV_RET_BAD_MEASUREMENT]        = "Bad measurement",
218     [SEV_RET_ASID_OWNED]             = "ASID is already owned",
219     [SEV_RET_INVALID_ASID]           = "Invalid ASID",
220     [SEV_RET_WBINVD_REQUIRED]        = "WBINVD is required",
221     [SEV_RET_DFFLUSH_REQUIRED]       = "DF_FLUSH is required",
222     [SEV_RET_INVALID_GUEST]          = "Guest handle is invalid",
223     [SEV_RET_INVALID_COMMAND]        = "Invalid command",
224     [SEV_RET_ACTIVE]                 = "Guest is active",
225     [SEV_RET_HWSEV_RET_PLATFORM]     = "Hardware error",
226     [SEV_RET_HWSEV_RET_UNSAFE]       = "Hardware unsafe",
227     [SEV_RET_UNSUPPORTED]            = "Feature not supported",
228     [SEV_RET_INVALID_PARAM]          = "Invalid parameter",
229     [SEV_RET_RESOURCE_LIMIT]         = "Required firmware resource depleted",
230     [SEV_RET_SECURE_DATA_INVALID]    = "Part-specific integrity check failure",
231 };
232 
233 #define SEV_FW_MAX_ERROR      ARRAY_SIZE(sev_fw_errlist)
234 
235 #define SNP_CPUID_FUNCTION_MAXCOUNT 64
236 #define SNP_CPUID_FUNCTION_UNKNOWN 0xFFFFFFFF
237 
238 typedef struct {
239     uint32_t eax_in;
240     uint32_t ecx_in;
241     uint64_t xcr0_in;
242     uint64_t xss_in;
243     uint32_t eax;
244     uint32_t ebx;
245     uint32_t ecx;
246     uint32_t edx;
247     uint64_t reserved;
248 } __attribute__((packed)) SnpCpuidFunc;
249 
250 typedef struct {
251     uint32_t count;
252     uint32_t reserved1;
253     uint64_t reserved2;
254     SnpCpuidFunc entries[SNP_CPUID_FUNCTION_MAXCOUNT];
255 } __attribute__((packed)) SnpCpuidInfo;
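/*
 * These structures mirror the CPUID page layout consumed by the SEV-SNP
 * firmware: a 16-byte header followed by up to 64 entries of 48 bytes
 * each (3088 bytes total), so the table fits in the single 4KiB CPUID
 * page described by the OVMF metadata.
 */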
256 
257 static int
258 sev_ioctl(int fd, int cmd, void *data, int *error)
259 {
260     int r;
261     struct kvm_sev_cmd input;
262 
263     memset(&input, 0x0, sizeof(input));
264 
265     input.id = cmd;
266     input.sev_fd = fd;
267     input.data = (uintptr_t)data;
268 
269     r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_OP, &input);
270 
271     if (error) {
272         *error = input.error;
273     }
274 
275     return r;
276 }
277 
278 static int
279 sev_platform_ioctl(int fd, int cmd, void *data, int *error)
280 {
281     int r;
282     struct sev_issue_cmd arg;
283 
284     arg.cmd = cmd;
285     arg.data = (unsigned long)data;
286     r = ioctl(fd, SEV_ISSUE_CMD, &arg);
287     if (error) {
288         *error = arg.error;
289     }
290 
291     return r;
292 }
293 
294 static const char *
295 fw_error_to_str(int code)
296 {
297     if (code < 0 || code >= SEV_FW_MAX_ERROR) {
298         return "unknown error";
299     }
300 
301     return sev_fw_errlist[code];
302 }
303 
304 static bool
305 sev_check_state(const SevCommonState *sev_common, SevState state)
306 {
307     assert(sev_common);
308     return sev_common->state == state;
309 }
310 
311 static void
312 sev_set_guest_state(SevCommonState *sev_common, SevState new_state)
313 {
314     assert(new_state < SEV_STATE__MAX);
315     assert(sev_common);
316 
317     trace_kvm_sev_change_state(SevState_str(sev_common->state),
318                                SevState_str(new_state));
319     sev_common->state = new_state;
320 }
321 
322 static void
323 sev_ram_block_added(RAMBlockNotifier *n, void *host, size_t size,
324                     size_t max_size)
325 {
326     int r;
327     struct kvm_enc_region range;
328     ram_addr_t offset;
329     MemoryRegion *mr;
330 
331     /*
332      * The RAM device presents a memory region that should be treated
333      * as an IO region and should not be pinned.
334      */
335     mr = memory_region_from_host(host, &offset);
336     if (mr && memory_region_is_ram_device(mr)) {
337         return;
338     }
339 
340     range.addr = (uintptr_t)host;
341     range.size = max_size;
342 
343     trace_kvm_memcrypt_register_region(host, max_size);
344     r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_REG_REGION, &range);
345     if (r) {
346         error_report("%s: failed to register region (%p+%#zx) error '%s'",
347                      __func__, host, max_size, strerror(errno));
348         exit(1);
349     }
350 }
351 
352 static void
353 sev_ram_block_removed(RAMBlockNotifier *n, void *host, size_t size,
354                       size_t max_size)
355 {
356     int r;
357     struct kvm_enc_region range;
358     ram_addr_t offset;
359     MemoryRegion *mr;
360 
361     /*
362      * The RAM device presents a memory region that should be treated
363      * as an IO region and should not have been pinned.
364      */
365     mr = memory_region_from_host(host, &offset);
366     if (mr && memory_region_is_ram_device(mr)) {
367         return;
368     }
369 
370     range.addr = (uintptr_t)host;
371     range.size = max_size;
372 
373     trace_kvm_memcrypt_unregister_region(host, max_size);
374     r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_UNREG_REGION, &range);
375     if (r) {
376         error_report("%s: failed to unregister region (%p+%#zx)",
377                      __func__, host, max_size);
378     }
379 }
380 
381 static struct RAMBlockNotifier sev_ram_notifier = {
382     .ram_block_added = sev_ram_block_added,
383     .ram_block_removed = sev_ram_block_removed,
384 };
385 
386 static void sev_apply_cpu_context(CPUState *cpu)
387 {
388     SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
389     X86CPU *x86;
390     CPUX86State *env;
391     struct SevLaunchVmsa *launch_vmsa;
392 
393     /* See if an initial VMSA has been provided for this CPU */
394     QTAILQ_FOREACH(launch_vmsa, &sev_common->launch_vmsa, next)
395     {
396         if (cpu->cpu_index == launch_vmsa->cpu_index) {
397             x86 = X86_CPU(cpu);
398             env = &x86->env;
399 
400             /*
401              * Ideally we would provide the VMSA directly to KVM, which would
402              * ensure that the initial VMSA measurement calculated during
403              * KVM_SEV_LAUNCH_UPDATE_VMSA reflects exactly what we provide here.
404              * Currently this is not possible, so we need to copy the parts of
405              * the VMSA structure that we currently support into the CPU state.
406              * support into the CPU state.
407              */
408             cpu_load_efer(env, launch_vmsa->vmsa.efer);
409             cpu_x86_update_cr4(env, launch_vmsa->vmsa.cr4);
410             cpu_x86_update_cr0(env, launch_vmsa->vmsa.cr0);
411             cpu_x86_update_cr3(env, launch_vmsa->vmsa.cr3);
412             env->xcr0 = launch_vmsa->vmsa.xcr0;
413             env->pat = launch_vmsa->vmsa.g_pat;
414 
415             cpu_x86_load_seg_cache(
416                 env, R_CS, launch_vmsa->vmsa.cs.selector,
417                 launch_vmsa->vmsa.cs.base, launch_vmsa->vmsa.cs.limit,
418                 FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.cs.attrib));
419             cpu_x86_load_seg_cache(
420                 env, R_DS, launch_vmsa->vmsa.ds.selector,
421                 launch_vmsa->vmsa.ds.base, launch_vmsa->vmsa.ds.limit,
422                 FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.ds.attrib));
423             cpu_x86_load_seg_cache(
424                 env, R_ES, launch_vmsa->vmsa.es.selector,
425                 launch_vmsa->vmsa.es.base, launch_vmsa->vmsa.es.limit,
426                 FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.es.attrib));
427             cpu_x86_load_seg_cache(
428                 env, R_FS, launch_vmsa->vmsa.fs.selector,
429                 launch_vmsa->vmsa.fs.base, launch_vmsa->vmsa.fs.limit,
430                 FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.fs.attrib));
431             cpu_x86_load_seg_cache(
432                 env, R_GS, launch_vmsa->vmsa.gs.selector,
433                 launch_vmsa->vmsa.gs.base, launch_vmsa->vmsa.gs.limit,
434                 FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.gs.attrib));
435             cpu_x86_load_seg_cache(
436                 env, R_SS, launch_vmsa->vmsa.ss.selector,
437                 launch_vmsa->vmsa.ss.base, launch_vmsa->vmsa.ss.limit,
438                 FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.ss.attrib));
439 
440             env->gdt.base = launch_vmsa->vmsa.gdtr.base;
441             env->gdt.limit = launch_vmsa->vmsa.gdtr.limit;
442             env->gdt.flags =
443                 FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.gdtr.attrib);
444             env->idt.base = launch_vmsa->vmsa.idtr.base;
445             env->idt.limit = launch_vmsa->vmsa.idtr.limit;
446             env->idt.flags =
447                 FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.idtr.attrib);
448 
449             cpu_x86_load_seg_cache(
450                 env, R_LDTR, launch_vmsa->vmsa.ldtr.selector,
451                 launch_vmsa->vmsa.ldtr.base, launch_vmsa->vmsa.ldtr.limit,
452                 FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.ldtr.attrib));
453             cpu_x86_load_seg_cache(
454                 env, R_TR, launch_vmsa->vmsa.tr.selector,
455                 launch_vmsa->vmsa.tr.base, launch_vmsa->vmsa.tr.limit,
456                 FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.tr.attrib));
457 
458             env->dr[6] = launch_vmsa->vmsa.dr6;
459             env->dr[7] = launch_vmsa->vmsa.dr7;
460 
461             env->regs[R_EAX] = launch_vmsa->vmsa.rax;
462             env->regs[R_ECX] = launch_vmsa->vmsa.rcx;
463             env->regs[R_EDX] = launch_vmsa->vmsa.rdx;
464             env->regs[R_EBX] = launch_vmsa->vmsa.rbx;
465             env->regs[R_ESP] = launch_vmsa->vmsa.rsp;
466             env->regs[R_EBP] = launch_vmsa->vmsa.rbp;
467             env->regs[R_ESI] = launch_vmsa->vmsa.rsi;
468             env->regs[R_EDI] = launch_vmsa->vmsa.rdi;
469 #ifdef TARGET_X86_64
470             env->regs[R_R8] = launch_vmsa->vmsa.r8;
471             env->regs[R_R9] = launch_vmsa->vmsa.r9;
472             env->regs[R_R10] = launch_vmsa->vmsa.r10;
473             env->regs[R_R11] = launch_vmsa->vmsa.r11;
474             env->regs[R_R12] = launch_vmsa->vmsa.r12;
475             env->regs[R_R13] = launch_vmsa->vmsa.r13;
476             env->regs[R_R14] = launch_vmsa->vmsa.r14;
477             env->regs[R_R15] = launch_vmsa->vmsa.r15;
478 #endif
479             env->eip = launch_vmsa->vmsa.rip;
480             env->eflags = launch_vmsa->vmsa.rflags;
481 
482             cpu_set_fpuc(env, launch_vmsa->vmsa.x87_fcw);
483             env->mxcsr = launch_vmsa->vmsa.mxcsr;
484 
485             break;
486         }
487     }
488 }
489 
490 static int check_sev_features(SevCommonState *sev_common, uint64_t sev_features,
491                               Error **errp)
492 {
493     /*
494      * Ensure SEV_FEATURES is configured for correct SEV hardware and that
495      * the requested features are supported. If SEV-SNP is enabled then
496      * that feature must be enabled, otherwise it must be cleared.
497      */
498     if (sev_snp_enabled() && !(sev_features & SVM_SEV_FEAT_SNP_ACTIVE)) {
499         error_setg(
500             errp,
501             "%s: SEV_SNP is enabled but is not enabled in VMSA sev_features",
502             __func__);
503         return -1;
504     } else if (!sev_snp_enabled() &&
505                (sev_features & SVM_SEV_FEAT_SNP_ACTIVE)) {
506         error_setg(
507             errp,
508             "%s: SEV_SNP is not enabled but is enabled in VMSA sev_features",
509             __func__);
510         return -1;
511     }
512     if (sev_features & ~sev_common->supported_sev_features) {
513         error_setg(errp,
514                    "%s: VMSA contains unsupported sev_features: %lX, "
515                    "supported features: %lX",
516                    __func__, sev_features, sev_common->supported_sev_features);
517         return -1;
518     }
519     return 0;
520 }
521 
522 static int check_vmsa_supported(SevCommonState *sev_common, hwaddr gpa,
523                                 const struct sev_es_save_area *vmsa,
524                                 Error **errp)
525 {
526     struct sev_es_save_area vmsa_check;
527 
528     /*
529      * KVM always populates the VMSA at a fixed GPA which cannot be modified
530      * from userspace. Specifying a different GPA will not prevent the guest
531      * from starting but will cause the launch measurement to be different
532      * from expected. Therefore check that the provided GPA matches the KVM
533      * hardcoded value.
534      */
535     if (gpa != KVM_VMSA_GPA) {
536         error_setg(errp,
537                 "%s: The VMSA GPA must be %lX but is specified as %lX",
538                 __func__, KVM_VMSA_GPA, gpa);
539         return -1;
540     }
541 
542     /*
543      * Clear all supported fields so we can then check the entire structure
544      * is zero.
545      */
546     memcpy(&vmsa_check, vmsa, sizeof(struct sev_es_save_area));
547     memset(&vmsa_check.es, 0, sizeof(vmsa_check.es));
548     memset(&vmsa_check.cs, 0, sizeof(vmsa_check.cs));
549     memset(&vmsa_check.ss, 0, sizeof(vmsa_check.ss));
550     memset(&vmsa_check.ds, 0, sizeof(vmsa_check.ds));
551     memset(&vmsa_check.fs, 0, sizeof(vmsa_check.fs));
552     memset(&vmsa_check.gs, 0, sizeof(vmsa_check.gs));
553     memset(&vmsa_check.gdtr, 0, sizeof(vmsa_check.gdtr));
554     memset(&vmsa_check.idtr, 0, sizeof(vmsa_check.idtr));
555     memset(&vmsa_check.ldtr, 0, sizeof(vmsa_check.ldtr));
556     memset(&vmsa_check.tr, 0, sizeof(vmsa_check.tr));
557     vmsa_check.efer = 0;
558     vmsa_check.cr0 = 0;
559     vmsa_check.cr3 = 0;
560     vmsa_check.cr4 = 0;
561     vmsa_check.xcr0 = 0;
562     vmsa_check.dr6 = 0;
563     vmsa_check.dr7 = 0;
564     vmsa_check.rax = 0;
565     vmsa_check.rcx = 0;
566     vmsa_check.rdx = 0;
567     vmsa_check.rbx = 0;
568     vmsa_check.rsp = 0;
569     vmsa_check.rbp = 0;
570     vmsa_check.rsi = 0;
571     vmsa_check.rdi = 0;
572     vmsa_check.r8 = 0;
573     vmsa_check.r9 = 0;
574     vmsa_check.r10 = 0;
575     vmsa_check.r11 = 0;
576     vmsa_check.r12 = 0;
577     vmsa_check.r13 = 0;
578     vmsa_check.r14 = 0;
579     vmsa_check.r15 = 0;
580     vmsa_check.rip = 0;
581     vmsa_check.rflags = 0;
582 
583     vmsa_check.g_pat = 0;
584     vmsa_check.xcr0 = 0;
585 
586     vmsa_check.x87_fcw = 0;
587     vmsa_check.mxcsr = 0;
588 
589     if (check_sev_features(sev_common, vmsa_check.sev_features, errp) < 0) {
590         return -1;
591     }
592     vmsa_check.sev_features = 0;
593 
594     if (!buffer_is_zero(&vmsa_check, sizeof(vmsa_check))) {
595         error_setg(errp,
596                 "%s: The VMSA contains fields that are not "
597                 "synchronized with KVM. Continuing would result in "
598                 "either unpredictable guest behavior, or a "
599                 "mismatched launch measurement.",
600                 __func__);
601         return -1;
602     }
603     return 0;
604 }
605 
606 static int sev_set_cpu_context(uint16_t cpu_index, const void *ctx,
607                                uint32_t ctx_len, hwaddr gpa, Error **errp)
608 {
609     SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
610     SevLaunchVmsa *launch_vmsa;
611     CPUState *cpu;
612     bool exists = false;
613 
614     /*
615      * Setting the CPU context is only supported for SEV-ES and SEV-SNP. The
616      * context buffer will contain a sev_es_save_area from the Linux kernel
617      * which is defined by "Table B-4. VMSA Layout, State Save Area for SEV-ES"
618      * in the AMD64 APM, Volume 2.
619      */
620 
621     if (!sev_es_enabled()) {
622         error_setg(errp, "SEV: unable to set CPU context: Not supported");
623         return -1;
624     }
625 
626     if (ctx_len < sizeof(struct sev_es_save_area)) {
627         error_setg(errp, "SEV: unable to set CPU context: "
628                      "Invalid context provided");
629         return -1;
630     }
631 
632     cpu = qemu_get_cpu(cpu_index);
633     if (!cpu) {
634         error_setg(errp, "SEV: unable to set CPU context for out of bounds "
635                      "CPU index %d", cpu_index);
636         return -1;
637     }
638 
639     /*
640      * If the context of this VP has already been set then replace it with the
641      * new context.
642      */
643     QTAILQ_FOREACH(launch_vmsa, &sev_common->launch_vmsa, next)
644     {
645         if (cpu_index == launch_vmsa->cpu_index) {
646             launch_vmsa->gpa = gpa;
647             memcpy(&launch_vmsa->vmsa, ctx, sizeof(launch_vmsa->vmsa));
648             exists = true;
649             break;
650         }
651     }
652 
653     if (!exists) {
654         /* New VP context */
655         launch_vmsa = g_new0(SevLaunchVmsa, 1);
656         memcpy(&launch_vmsa->vmsa, ctx, sizeof(launch_vmsa->vmsa));
657         launch_vmsa->cpu_index = cpu_index;
658         launch_vmsa->gpa = gpa;
659         QTAILQ_INSERT_TAIL(&sev_common->launch_vmsa, launch_vmsa, next);
660     }
661 
662     /* Synchronise the VMSA with the current CPU state */
663     sev_apply_cpu_context(cpu);
664 
665     return 0;
666 }
667 
668 bool
669 sev_enabled(void)
670 {
671     ConfidentialGuestSupport *cgs = MACHINE(qdev_get_machine())->cgs;
672 
673     return !!object_dynamic_cast(OBJECT(cgs), TYPE_SEV_COMMON);
674 }
675 
676 bool
677 sev_snp_enabled(void)
678 {
679     ConfidentialGuestSupport *cgs = MACHINE(qdev_get_machine())->cgs;
680 
681     return !!object_dynamic_cast(OBJECT(cgs), TYPE_SEV_SNP_GUEST);
682 }
683 
684 bool
685 sev_es_enabled(void)
686 {
687     ConfidentialGuestSupport *cgs = MACHINE(qdev_get_machine())->cgs;
688 
689     return sev_snp_enabled() ||
690             (sev_enabled() && SEV_GUEST(cgs)->policy & SEV_POLICY_ES);
691 }
692 
693 uint32_t
694 sev_get_cbit_position(void)
695 {
696     SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
697 
698     return sev_common ? sev_common->cbitpos : 0;
699 }
700 
701 uint32_t
702 sev_get_reduced_phys_bits(void)
703 {
704     SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
705 
706     return sev_common ? sev_common->reduced_phys_bits : 0;
707 }
708 
709 static SevInfo *sev_get_info(void)
710 {
711     SevInfo *info;
712     SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
713 
714     info = g_new0(SevInfo, 1);
715     info->enabled = sev_enabled();
716 
717     if (info->enabled) {
718         info->api_major = sev_common->api_major;
719         info->api_minor = sev_common->api_minor;
720         info->build_id = sev_common->build_id;
721         info->state = sev_common->state;
722 
723         if (sev_snp_enabled()) {
724             info->sev_type = SEV_GUEST_TYPE_SEV_SNP;
725             info->u.sev_snp.snp_policy =
726                 object_property_get_uint(OBJECT(sev_common), "policy", NULL);
727         } else {
728             info->sev_type = SEV_GUEST_TYPE_SEV;
729             info->u.sev.handle = SEV_GUEST(sev_common)->handle;
730             info->u.sev.policy =
731                 (uint32_t)object_property_get_uint(OBJECT(sev_common),
732                                                    "policy", NULL);
733         }
734     }
735 
736     return info;
737 }
738 
739 SevInfo *qmp_query_sev(Error **errp)
740 {
741     SevInfo *info;
742 
743     info = sev_get_info();
744     if (!info) {
745         error_setg(errp, "SEV feature is not available");
746         return NULL;
747     }
748 
749     return info;
750 }
751 
752 void hmp_info_sev(Monitor *mon, const QDict *qdict)
753 {
754     SevInfo *info = sev_get_info();
755 
756     if (!info || !info->enabled) {
757         monitor_printf(mon, "SEV is not enabled\n");
758         goto out;
759     }
760 
761     monitor_printf(mon, "SEV type: %s\n", SevGuestType_str(info->sev_type));
762     monitor_printf(mon, "state: %s\n", SevState_str(info->state));
763     monitor_printf(mon, "build: %d\n", info->build_id);
764     monitor_printf(mon, "api version: %d.%d\n", info->api_major,
765                    info->api_minor);
766 
767     if (sev_snp_enabled()) {
768         monitor_printf(mon, "debug: %s\n",
769                        info->u.sev_snp.snp_policy & SEV_SNP_POLICY_DBG ? "on"
770                                                                        : "off");
771         monitor_printf(mon, "SMT allowed: %s\n",
772                        info->u.sev_snp.snp_policy & SEV_SNP_POLICY_SMT ? "on"
773                                                                        : "off");
774     } else {
775         monitor_printf(mon, "handle: %d\n", info->u.sev.handle);
776         monitor_printf(mon, "debug: %s\n",
777                        info->u.sev.policy & SEV_POLICY_NODBG ? "off" : "on");
778         monitor_printf(mon, "key-sharing: %s\n",
779                        info->u.sev.policy & SEV_POLICY_NOKS ? "off" : "on");
780     }
781 
782 out:
783     qapi_free_SevInfo(info);
784 }
785 
786 static int
787 sev_get_pdh_info(int fd, guchar **pdh, size_t *pdh_len, guchar **cert_chain,
788                  size_t *cert_chain_len, Error **errp)
789 {
790     guchar *pdh_data = NULL;
791     guchar *cert_chain_data = NULL;
792     struct sev_user_data_pdh_cert_export export = {};
793     int err, r;
794 
795     /* query the certificate length */
796     r = sev_platform_ioctl(fd, SEV_PDH_CERT_EXPORT, &export, &err);
797     if (r < 0) {
798         if (err != SEV_RET_INVALID_LEN) {
799             error_setg(errp, "SEV: Failed to export PDH cert"
800                              " ret=%d fw_err=%d (%s)",
801                        r, err, fw_error_to_str(err));
802             return 1;
803         }
804     }
805 
806     pdh_data = g_new(guchar, export.pdh_cert_len);
807     cert_chain_data = g_new(guchar, export.cert_chain_len);
808     export.pdh_cert_address = (unsigned long)pdh_data;
809     export.cert_chain_address = (unsigned long)cert_chain_data;
810 
811     r = sev_platform_ioctl(fd, SEV_PDH_CERT_EXPORT, &export, &err);
812     if (r < 0) {
813         error_setg(errp, "SEV: Failed to export PDH cert ret=%d fw_err=%d (%s)",
814                    r, err, fw_error_to_str(err));
815         goto e_free;
816     }
817 
818     *pdh = pdh_data;
819     *pdh_len = export.pdh_cert_len;
820     *cert_chain = cert_chain_data;
821     *cert_chain_len = export.cert_chain_len;
822     return 0;
823 
824 e_free:
825     g_free(pdh_data);
826     g_free(cert_chain_data);
827     return 1;
828 }
829 
830 static int sev_get_cpu0_id(int fd, guchar **id, size_t *id_len, Error **errp)
831 {
832     guchar *id_data;
833     struct sev_user_data_get_id2 get_id2 = {};
834     int err, r;
835 
836     /* query the ID length */
837     r = sev_platform_ioctl(fd, SEV_GET_ID2, &get_id2, &err);
838     if (r < 0 && err != SEV_RET_INVALID_LEN) {
839         error_setg(errp, "SEV: Failed to get ID ret=%d fw_err=%d (%s)",
840                    r, err, fw_error_to_str(err));
841         return 1;
842     }
843 
844     id_data = g_new(guchar, get_id2.length);
845     get_id2.address = (unsigned long)id_data;
846 
847     r = sev_platform_ioctl(fd, SEV_GET_ID2, &get_id2, &err);
848     if (r < 0) {
849         error_setg(errp, "SEV: Failed to get ID ret=%d fw_err=%d (%s)",
850                    r, err, fw_error_to_str(err));
851         goto err;
852     }
853 
854     *id = id_data;
855     *id_len = get_id2.length;
856     return 0;
857 
858 err:
859     g_free(id_data);
860     return 1;
861 }
862 
863 static SevCapability *sev_get_capabilities(Error **errp)
864 {
865     SevCapability *cap = NULL;
866     guchar *pdh_data = NULL;
867     guchar *cert_chain_data = NULL;
868     guchar *cpu0_id_data = NULL;
869     size_t pdh_len = 0, cert_chain_len = 0, cpu0_id_len = 0;
870     uint32_t ebx;
871     int fd;
872     SevCommonState *sev_common;
873     char *sev_device;
874 
875     if (!kvm_enabled()) {
876         error_setg(errp, "KVM not enabled");
877         return NULL;
878     }
879     if (kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_OP, NULL) < 0) {
880         error_setg(errp, "SEV is not enabled in KVM");
881         return NULL;
882     }
883 
884     sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
885     if (sev_common) {
886         sev_device = object_property_get_str(OBJECT(sev_common), "sev-device",
887                                              &error_abort);
888     } else {
889         sev_device = g_strdup(DEFAULT_SEV_DEVICE);
890     }
891 
892     fd = open(sev_device, O_RDWR);
893     if (fd < 0) {
894         error_setg_errno(errp, errno, "SEV: Failed to open %s",
895                          sev_device);
896         g_free(sev_device);
897         return NULL;
898     }
899     g_free(sev_device);
900 
901     if (sev_get_pdh_info(fd, &pdh_data, &pdh_len,
902                          &cert_chain_data, &cert_chain_len, errp)) {
903         goto out;
904     }
905 
906     if (sev_get_cpu0_id(fd, &cpu0_id_data, &cpu0_id_len, errp)) {
907         goto out;
908     }
909 
910     cap = g_new0(SevCapability, 1);
911     cap->pdh = g_base64_encode(pdh_data, pdh_len);
912     cap->cert_chain = g_base64_encode(cert_chain_data, cert_chain_len);
913     cap->cpu0_id = g_base64_encode(cpu0_id_data, cpu0_id_len);
914 
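    /*
     * CPUID Fn8000_001F[EBX] bits 5:0 report the position of the C-bit
     * in a guest page table entry on SEV-capable hosts.
     */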
915     host_cpuid(0x8000001F, 0, NULL, &ebx, NULL, NULL);
916     cap->cbitpos = ebx & 0x3f;
917 
918     /*
919      * When the SEV feature is enabled, we lose one bit in guest physical
920      * addressing.
921      */
922     cap->reduced_phys_bits = 1;
923 
924 out:
925     g_free(cpu0_id_data);
926     g_free(pdh_data);
927     g_free(cert_chain_data);
928     close(fd);
929     return cap;
930 }
931 
932 SevCapability *qmp_query_sev_capabilities(Error **errp)
933 {
934     return sev_get_capabilities(errp);
935 }
936 
937 static OvmfSevMetadata *ovmf_sev_metadata_table;
938 
939 #define OVMF_SEV_META_DATA_GUID "dc886566-984a-4798-A75e-5585a7bf67cc"
940 typedef struct __attribute__((__packed__)) OvmfSevMetadataOffset {
941     uint32_t offset;
942 } OvmfSevMetadataOffset;
943 
944 OvmfSevMetadata *pc_system_get_ovmf_sev_metadata_ptr(void)
945 {
946     return ovmf_sev_metadata_table;
947 }
948 
949 void pc_system_parse_sev_metadata(uint8_t *flash_ptr, size_t flash_size)
950 {
951     OvmfSevMetadata     *metadata;
952     OvmfSevMetadataOffset  *data;
953 
954     if (!pc_system_ovmf_table_find(OVMF_SEV_META_DATA_GUID, (uint8_t **)&data,
955                                    NULL)) {
956         return;
957     }
958 
959     metadata = (OvmfSevMetadata *)(flash_ptr + flash_size - data->offset);
960     if (memcmp(metadata->signature, "ASEV", 4) != 0 ||
961         metadata->len < sizeof(OvmfSevMetadata) ||
962         metadata->len > flash_size - data->offset) {
963         return;
964     }
965 
966     ovmf_sev_metadata_table = g_memdup2(metadata, metadata->len);
967 }
968 
969 static SevAttestationReport *sev_get_attestation_report(const char *mnonce,
970                                                         Error **errp)
971 {
972     struct kvm_sev_attestation_report input = {};
973     SevAttestationReport *report = NULL;
974     SevCommonState *sev_common;
975     g_autofree guchar *data = NULL;
976     g_autofree guchar *buf = NULL;
977     gsize len;
978     int err = 0, ret;
979 
980     if (!sev_enabled()) {
981         error_setg(errp, "SEV is not enabled");
982         return NULL;
983     }
984 
985     /* let's decode the mnonce string */
986     buf = g_base64_decode(mnonce, &len);
987     if (!buf) {
988         error_setg(errp, "SEV: failed to decode mnonce input");
989         return NULL;
990     }
991 
992     /* verify the input mnonce length */
993     if (len != sizeof(input.mnonce)) {
994         error_setg(errp, "SEV: mnonce must be %zu bytes (got %" G_GSIZE_FORMAT ")",
995                 sizeof(input.mnonce), len);
996         return NULL;
997     }
998 
999     sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
1000 
1001     /* Query the report length */
1002     ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_GET_ATTESTATION_REPORT,
1003             &input, &err);
1004     if (ret < 0) {
1005         if (err != SEV_RET_INVALID_LEN) {
1006             error_setg(errp, "SEV: Failed to query the attestation report"
1007                              " length ret=%d fw_err=%d (%s)",
1008                        ret, err, fw_error_to_str(err));
1009             return NULL;
1010         }
1011     }
1012 
1013     data = g_malloc(input.len);
1014     input.uaddr = (unsigned long)data;
1015     memcpy(input.mnonce, buf, sizeof(input.mnonce));
1016 
1017     /* Query the report */
1018     ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_GET_ATTESTATION_REPORT,
1019             &input, &err);
1020     if (ret) {
1021         error_setg_errno(errp, errno, "SEV: Failed to get attestation report"
1022                 " ret=%d fw_err=%d (%s)", ret, err, fw_error_to_str(err));
1023         return NULL;
1024     }
1025 
1026     report = g_new0(SevAttestationReport, 1);
1027     report->data = g_base64_encode(data, input.len);
1028 
1029     trace_kvm_sev_attestation_report(mnonce, report->data);
1030 
1031     return report;
1032 }
1033 
1034 SevAttestationReport *qmp_query_sev_attestation_report(const char *mnonce,
1035                                                        Error **errp)
1036 {
1037     return sev_get_attestation_report(mnonce, errp);
1038 }
1039 
1040 static int
1041 sev_read_file_base64(const char *filename, guchar **data, gsize *len)
1042 {
1043     gsize sz;
1044     g_autofree gchar *base64 = NULL;
1045     GError *error = NULL;
1046 
1047     if (!g_file_get_contents(filename, &base64, &sz, &error)) {
1048         error_report("SEV: Failed to read '%s' (%s)", filename, error->message);
1049         g_error_free(error);
1050         return -1;
1051     }
1052 
1053     *data = g_base64_decode(base64, len);
1054     return 0;
1055 }
1056 
1057 static int
1058 sev_snp_launch_start(SevCommonState *sev_common)
1059 {
1060     int fw_error, rc;
1061     SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(sev_common);
1062     struct kvm_sev_snp_launch_start *start = &sev_snp_guest->kvm_start_conf;
1063 
1064     trace_kvm_sev_snp_launch_start(start->policy,
1065                                    sev_snp_guest->guest_visible_workarounds);
1066 
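    /*
     * SNP guests use the KVM_HC_MAP_GPA_RANGE hypercall to request
     * shared<->private page-state conversions, so QEMU must opt in to
     * the hypercall before the launch can proceed.
     */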
1067     if (!kvm_enable_hypercall(BIT_ULL(KVM_HC_MAP_GPA_RANGE))) {
1068         return 1;
1069     }
1070 
1071     rc = sev_ioctl(sev_common->sev_fd, KVM_SEV_SNP_LAUNCH_START,
1072                    start, &fw_error);
1073     if (rc < 0) {
1074         error_report("%s: SNP_LAUNCH_START ret=%d fw_error=%d '%s'",
1075                 __func__, rc, fw_error, fw_error_to_str(fw_error));
1076         return 1;
1077     }
1078 
1079     QTAILQ_INIT(&launch_update);
1080 
1081     sev_set_guest_state(sev_common, SEV_STATE_LAUNCH_UPDATE);
1082 
1083     return 0;
1084 }
1085 
1086 static int
1087 sev_launch_start(SevCommonState *sev_common)
1088 {
1089     gsize sz;
1090     int ret = 1;
1091     int fw_error, rc;
1092     SevGuestState *sev_guest = SEV_GUEST(sev_common);
1093     struct kvm_sev_launch_start start = {
1094         .handle = sev_guest->handle, .policy = sev_guest->policy
1095     };
1096     guchar *session = NULL, *dh_cert = NULL;
1097 
1098     if (sev_guest->session_file) {
1099         if (sev_read_file_base64(sev_guest->session_file, &session, &sz) < 0) {
1100             goto out;
1101         }
1102         start.session_uaddr = (unsigned long)session;
1103         start.session_len = sz;
1104     }
1105 
1106     if (sev_guest->dh_cert_file) {
1107         if (sev_read_file_base64(sev_guest->dh_cert_file, &dh_cert, &sz) < 0) {
1108             goto out;
1109         }
1110         start.dh_uaddr = (unsigned long)dh_cert;
1111         start.dh_len = sz;
1112     }
1113 
1114     trace_kvm_sev_launch_start(start.policy, session, dh_cert);
1115     rc = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_START, &start, &fw_error);
1116     if (rc < 0) {
1117         error_report("%s: LAUNCH_START ret=%d fw_error=%d '%s'",
1118                 __func__, rc, fw_error, fw_error_to_str(fw_error));
1119         goto out;
1120     }
1121 
1122     sev_set_guest_state(sev_common, SEV_STATE_LAUNCH_UPDATE);
1123     sev_guest->handle = start.handle;
1124     ret = 0;
1125 
1126 out:
1127     g_free(session);
1128     g_free(dh_cert);
1129     return ret;
1130 }
1131 
1132 static void
1133 sev_snp_cpuid_report_mismatches(SnpCpuidInfo *old,
1134                                 SnpCpuidInfo *new)
1135 {
1136     size_t i;
1137 
1138     if (old->count != new->count) {
1139         error_report("SEV-SNP: CPUID validation failed due to count mismatch, "
1140                      "provided: %d, expected: %d", old->count, new->count);
1141         return;
1142     }
1143 
1144     for (i = 0; i < old->count; i++) {
1145         SnpCpuidFunc *old_func, *new_func;
1146 
1147         old_func = &old->entries[i];
1148         new_func = &new->entries[i];
1149 
1150         if (memcmp(old_func, new_func, sizeof(SnpCpuidFunc))) {
1151             error_report("SEV-SNP: CPUID validation failed for function 0x%x, index: 0x%x, "
1152                          "provided: eax:0x%08x, ebx: 0x%08x, ecx: 0x%08x, edx: 0x%08x, "
1153                          "expected: eax:0x%08x, ebx: 0x%08x, ecx: 0x%08x, edx: 0x%08x",
1154                          old_func->eax_in, old_func->ecx_in,
1155                          old_func->eax, old_func->ebx, old_func->ecx, old_func->edx,
1156                          new_func->eax, new_func->ebx, new_func->ecx, new_func->edx);
1157         }
1158     }
1159 }
1160 
1161 static const char *
1162 snp_page_type_to_str(int type)
1163 {
1164     switch (type) {
1165     case KVM_SEV_SNP_PAGE_TYPE_NORMAL: return "Normal";
1166     case KVM_SEV_SNP_PAGE_TYPE_ZERO: return "Zero";
1167     case KVM_SEV_SNP_PAGE_TYPE_UNMEASURED: return "Unmeasured";
1168     case KVM_SEV_SNP_PAGE_TYPE_SECRETS: return "Secrets";
1169     case KVM_SEV_SNP_PAGE_TYPE_CPUID: return "Cpuid";
1170     default: return "unknown";
1171     }
1172 }
1173 
1174 static int
1175 sev_snp_launch_update(SevSnpGuestState *sev_snp_guest,
1176                       SevLaunchUpdateData *data)
1177 {
1178     int ret, fw_error;
1179     SnpCpuidInfo snp_cpuid_info;
1180     struct kvm_sev_snp_launch_update update = {0};
1181 
1182     if (!data->hva || !data->len) {
1183         error_report("SNP_LAUNCH_UPDATE called with invalid address"
1184                      "/ length: %p / %zx",
1185                      data->hva, data->len);
1186         return 1;
1187     }
1188 
1189     if (data->type == KVM_SEV_SNP_PAGE_TYPE_CPUID) {
1190         /* Save a copy for comparison in case the LAUNCH_UPDATE fails */
1191         memcpy(&snp_cpuid_info, data->hva, sizeof(snp_cpuid_info));
1192     }
1193 
1194     update.uaddr = (__u64)(unsigned long)data->hva;
1195     update.gfn_start = data->gpa >> TARGET_PAGE_BITS;
1196     update.len = data->len;
1197     update.type = data->type;
1198 
1199     /*
1200      * KVM_SEV_SNP_LAUNCH_UPDATE requires that GPA ranges have the private
1201      * memory attribute set in advance.
1202      */
1203     ret = kvm_set_memory_attributes_private(data->gpa, data->len);
1204     if (ret) {
1205         error_report("SEV-SNP: failed to configure initial"
1206                      "private guest memory");
1207         goto out;
1208     }
1209 
1210     while (update.len || ret == -EAGAIN) {
1211         trace_kvm_sev_snp_launch_update(update.uaddr, update.gfn_start <<
1212                                         TARGET_PAGE_BITS, update.len,
1213                                         snp_page_type_to_str(update.type));
1214 
1215         ret = sev_ioctl(SEV_COMMON(sev_snp_guest)->sev_fd,
1216                         KVM_SEV_SNP_LAUNCH_UPDATE,
1217                         &update, &fw_error);
1218         if (ret && ret != -EAGAIN) {
1219             error_report("SNP_LAUNCH_UPDATE ret=%d fw_error=%d '%s'",
1220                          ret, fw_error, fw_error_to_str(fw_error));
1221 
1222             if (data->type == KVM_SEV_SNP_PAGE_TYPE_CPUID) {
1223                 sev_snp_cpuid_report_mismatches(&snp_cpuid_info, data->hva);
1224                 error_report("SEV-SNP: failed update CPUID page");
1225             }
1226             break;
1227         }
1228     }
1229 
1230 out:
1231     if (!ret && update.gfn_start << TARGET_PAGE_BITS != data->gpa + data->len) {
1232         error_report("SEV-SNP: expected update of GPA range %"
1233                      HWADDR_PRIx "-%" HWADDR_PRIx ", "
1234                      "got GPA range %" HWADDR_PRIx "-%llx",
1235                      data->gpa, data->gpa + data->len, data->gpa,
1236                      update.gfn_start << TARGET_PAGE_BITS);
1237         ret = -EIO;
1238     }
1239 
1240     return ret;
1241 }
1242 
1243 static uint32_t
1244 sev_snp_adjust_cpuid_features(X86ConfidentialGuest *cg, uint32_t feature, uint32_t index,
1245                             int reg, uint32_t value)
1246 {
1247     switch (feature) {
1248     case 1:
1249         if (reg == R_ECX) {
1250             return value & ~CPUID_EXT_TSC_DEADLINE_TIMER;
1251         }
1252         break;
1253     case 7:
1254         if (index == 0 && reg == R_EBX) {
1255             return value & ~CPUID_7_0_EBX_TSC_ADJUST;
1256         }
1257         if (index == 0 && reg == R_EDX) {
1258             return value & ~(CPUID_7_0_EDX_SPEC_CTRL |
1259                              CPUID_7_0_EDX_STIBP |
1260                              CPUID_7_0_EDX_FLUSH_L1D |
1261                              CPUID_7_0_EDX_ARCH_CAPABILITIES |
1262                              CPUID_7_0_EDX_CORE_CAPABILITY |
1263                              CPUID_7_0_EDX_SPEC_CTRL_SSBD);
1264         }
1265         break;
1266     case 0x80000008:
1267         if (reg == R_EBX) {
1268             return value & ~CPUID_8000_0008_EBX_VIRT_SSBD;
1269         }
1270         break;
1271     }
1272     return value;
1273 }
1274 
1275 static int sev_launch_update_data(SevCommonState *sev_common, hwaddr gpa,
1276                                   uint8_t *addr, size_t len, Error **errp)
1277 {
1278     int ret, fw_error;
1279     struct kvm_sev_launch_update_data update;
1280 
1281     if (!addr || !len) {
1282         return 1;
1283     }
1284 
1285     update.uaddr = (uintptr_t)addr;
1286     update.len = len;
1287     trace_kvm_sev_launch_update_data(addr, len);
1288     ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_UPDATE_DATA,
1289                     &update, &fw_error);
1290     if (ret) {
1291         error_setg(errp, "%s: LAUNCH_UPDATE ret=%d fw_error=%d '%s'", __func__,
1292                    ret, fw_error, fw_error_to_str(fw_error));
1293     }
1294 
1295     return ret;
1296 }
1297 
1298 static int
1299 sev_launch_update_vmsa(SevGuestState *sev_guest)
1300 {
1301     int ret, fw_error;
1302     CPUState *cpu;
1303 
1304     /*
1305      * The initial CPU state is measured as part of KVM_SEV_LAUNCH_UPDATE_VMSA.
1306      * Synchronise the CPU state to any provided launch VMSA structures.
1307      */
1308     CPU_FOREACH(cpu) {
1309         sev_apply_cpu_context(cpu);
1310     }
1311 
1312 
1313     ret = sev_ioctl(SEV_COMMON(sev_guest)->sev_fd, KVM_SEV_LAUNCH_UPDATE_VMSA,
1314                     NULL, &fw_error);
1315     if (ret) {
1316         error_report("%s: LAUNCH_UPDATE_VMSA ret=%d fw_error=%d '%s'",
1317                 __func__, ret, fw_error, fw_error_to_str(fw_error));
1318     }
1319 
1320     return ret;
1321 }
1322 
1323 static void
1324 sev_launch_get_measure(Notifier *notifier, void *unused)
1325 {
1326     SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
1327     SevGuestState *sev_guest = SEV_GUEST(sev_common);
1328     int ret, error;
1329     g_autofree guchar *data = NULL;
1330     struct kvm_sev_launch_measure measurement = {};
1331 
1332     if (!sev_check_state(sev_common, SEV_STATE_LAUNCH_UPDATE)) {
1333         return;
1334     }
1335 
1336     if (sev_es_enabled()) {
1337         /* measure all the VM save areas before getting launch_measure */
1338         ret = sev_launch_update_vmsa(sev_guest);
1339         if (ret) {
1340             exit(1);
1341         }
1342         kvm_mark_guest_state_protected();
1343     }
1344 
1345     /* query the measurement blob length */
1346     ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_MEASURE,
1347                     &measurement, &error);
1348     if (!measurement.len) {
1349         error_report("%s: LAUNCH_MEASURE ret=%d fw_error=%d '%s'",
1350                      __func__, ret, error, fw_error_to_str(error));
1351         return;
1352     }
1353 
1354     data = g_new0(guchar, measurement.len);
1355     measurement.uaddr = (unsigned long)data;
1356 
1357     /* get the measurement blob */
1358     ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_MEASURE,
1359                     &measurement, &error);
1360     if (ret) {
1361         error_report("%s: LAUNCH_MEASURE ret=%d fw_error=%d '%s'",
1362                      __func__, ret, error, fw_error_to_str(error));
1363         return;
1364     }
1365 
1366     sev_set_guest_state(sev_common, SEV_STATE_LAUNCH_SECRET);
1367 
1368     /* encode the measurement value and emit the event */
1369     sev_guest->measurement = g_base64_encode(data, measurement.len);
1370     trace_kvm_sev_launch_measurement(sev_guest->measurement);
1371 }
1372 
1373 static char *sev_get_launch_measurement(void)
1374 {
1375     ConfidentialGuestSupport *cgs = MACHINE(qdev_get_machine())->cgs;
1376     SevGuestState *sev_guest =
1377         (SevGuestState *)object_dynamic_cast(OBJECT(cgs), TYPE_SEV_GUEST);
1378 
1379     if (sev_guest &&
1380         SEV_COMMON(sev_guest)->state >= SEV_STATE_LAUNCH_SECRET) {
1381         return g_strdup(sev_guest->measurement);
1382     }
1383 
1384     return NULL;
1385 }
1386 
1387 SevLaunchMeasureInfo *qmp_query_sev_launch_measure(Error **errp)
1388 {
1389     char *data;
1390     SevLaunchMeasureInfo *info;
1391 
1392     data = sev_get_launch_measurement();
1393     if (!data) {
1394         error_setg(errp, "SEV launch measurement is not available");
1395         return NULL;
1396     }
1397 
1398     info = g_malloc0(sizeof(*info));
1399     info->data = data;
1400 
1401     return info;
1402 }
1403 
1404 static Notifier sev_machine_done_notify = {
1405     .notify = sev_launch_get_measure,
1406 };
1407 
1408 static void
1409 sev_launch_finish(SevCommonState *sev_common)
1410 {
1411     int ret, error;
1412 
1413     trace_kvm_sev_launch_finish();
1414     ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_FINISH, 0,
1415                     &error);
1416     if (ret) {
1417         error_report("%s: LAUNCH_FINISH ret=%d fw_error=%d '%s'",
1418                      __func__, ret, error, fw_error_to_str(error));
1419         exit(1);
1420     }
1421 
1422     sev_set_guest_state(sev_common, SEV_STATE_RUNNING);
1423 
1424     /* add migration blocker */
1425     error_setg(&sev_mig_blocker,
1426                "SEV: Migration is not implemented");
1427     migrate_add_blocker(&sev_mig_blocker, &error_fatal);
1428 }
1429 
1430 static int snp_launch_update_data(uint64_t gpa, void *hva, size_t len,
1431                                   int type, Error **errp)
1432 {
1433     SevLaunchUpdateData *data;
1434 
1435     data = g_new0(SevLaunchUpdateData, 1);
1436     data->gpa = gpa;
1437     data->hva = hva;
1438     data->len = len;
1439     data->type = type;
1440 
1441     QTAILQ_INSERT_TAIL(&launch_update, data, next);
1442 
1443     return 0;
1444 }
1445 
1446 static int sev_snp_launch_update_data(SevCommonState *sev_common, hwaddr gpa,
1447                                       uint8_t *ptr, size_t len, Error **errp)
1448 {
1449     return snp_launch_update_data(gpa, ptr, len,
1450                                      KVM_SEV_SNP_PAGE_TYPE_NORMAL, errp);
1451 }
1452 
1453 static int
1454 sev_snp_cpuid_info_fill(SnpCpuidInfo *snp_cpuid_info,
1455                         const KvmCpuidInfo *kvm_cpuid_info, Error **errp)
1456 {
1457     size_t i;
1458 
1459     if (kvm_cpuid_info->cpuid.nent > SNP_CPUID_FUNCTION_MAXCOUNT) {
1460         error_setg(errp, "SEV-SNP: CPUID entry count (%d) exceeds max (%d)",
1461                      kvm_cpuid_info->cpuid.nent, SNP_CPUID_FUNCTION_MAXCOUNT);
1462         return -1;
1463     }
1464 
1465     memset(snp_cpuid_info, 0, sizeof(*snp_cpuid_info));
1466 
1467     for (i = 0; i < kvm_cpuid_info->cpuid.nent; i++) {
1468         const struct kvm_cpuid_entry2 *kvm_cpuid_entry;
1469         SnpCpuidFunc *snp_cpuid_entry;
1470 
1471         kvm_cpuid_entry = &kvm_cpuid_info->entries[i];
1472         snp_cpuid_entry = &snp_cpuid_info->entries[i];
1473 
1474         snp_cpuid_entry->eax_in = kvm_cpuid_entry->function;
1475         if (kvm_cpuid_entry->flags == KVM_CPUID_FLAG_SIGNIFCANT_INDEX) {
1476             snp_cpuid_entry->ecx_in = kvm_cpuid_entry->index;
1477         }
1478         snp_cpuid_entry->eax = kvm_cpuid_entry->eax;
1479         snp_cpuid_entry->ebx = kvm_cpuid_entry->ebx;
1480         snp_cpuid_entry->ecx = kvm_cpuid_entry->ecx;
1481         snp_cpuid_entry->edx = kvm_cpuid_entry->edx;
1482 
1483         /*
1484          * Guest kernels will calculate EBX themselves using the 0xD
1485          * subfunctions corresponding to the individual XSAVE areas, so only
1486          * encode the base XSAVE size in the initial leaves, corresponding
1487          * to the initial XCR0=1 state.
1488          */
1489         if (snp_cpuid_entry->eax_in == 0xD &&
1490             (snp_cpuid_entry->ecx_in == 0x0 || snp_cpuid_entry->ecx_in == 0x1)) {
1491             snp_cpuid_entry->ebx = 0x240;
1492             snp_cpuid_entry->xcr0_in = 1;
1493             snp_cpuid_entry->xss_in = 0;
1494         }
1495     }
1496 
1497     snp_cpuid_info->count = i;
1498 
1499     return 0;
1500 }
1501 
1502 static int snp_launch_update_cpuid(uint32_t cpuid_addr, void *hva,
1503                                    size_t cpuid_len, Error **errp)
1504 {
1505     KvmCpuidInfo kvm_cpuid_info = {0};
1506     SnpCpuidInfo snp_cpuid_info;
1507     CPUState *cs = first_cpu;
1508     int ret;
1509     uint32_t i = 0;
1510 
1511     assert(sizeof(snp_cpuid_info) <= cpuid_len);
1512 
1513     /* get the cpuid list from KVM */
1514     do {
1515         kvm_cpuid_info.cpuid.nent = ++i;
1516         ret = kvm_vcpu_ioctl(cs, KVM_GET_CPUID2, &kvm_cpuid_info);
1517     } while (ret == -E2BIG);
1518 
1519     if (ret) {
1520         error_setg(errp, "SEV-SNP: unable to query CPUID values for CPU: '%s'",
1521                    strerror(-ret));
1522         return -1;
1523     }
1524 
1525     ret = sev_snp_cpuid_info_fill(&snp_cpuid_info, &kvm_cpuid_info, errp);
1526     if (ret < 0) {
1527         return -1;
1528     }
1529 
1530     memcpy(hva, &snp_cpuid_info, sizeof(snp_cpuid_info));
1531 
1532     return snp_launch_update_data(cpuid_addr, hva, cpuid_len,
1533                                   KVM_SEV_SNP_PAGE_TYPE_CPUID, errp);
1534 }
1535 
1536 static int snp_launch_update_kernel_hashes(SevSnpGuestState *sev_snp,
1537                                            uint32_t addr, void *hva,
1538                                            uint32_t len, Error **errp)
1539 {
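    /*
     * With kernel-hashes=on the page is populated with the hashes table and
     * measured as a NORMAL page; otherwise the area is simply added as a
     * ZERO page.
     */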
1540     int type = KVM_SEV_SNP_PAGE_TYPE_ZERO;
1541     if (sev_snp->parent_obj.kernel_hashes) {
1542         assert(sev_snp->kernel_hashes_data);
1543         assert((sev_snp->kernel_hashes_offset +
1544                 sizeof(*sev_snp->kernel_hashes_data)) <= len);
1545         memset(hva, 0, len);
1546         memcpy(hva + sev_snp->kernel_hashes_offset, sev_snp->kernel_hashes_data,
1547                sizeof(*sev_snp->kernel_hashes_data));
1548         type = KVM_SEV_SNP_PAGE_TYPE_NORMAL;
1549     }
1550     return snp_launch_update_data(addr, hva, len, type, errp);
1551 }
1552 
1553 static int
1554 snp_metadata_desc_to_page_type(int desc_type)
1555 {
1556     switch (desc_type) {
1557     /* Add the unmeasured prevalidated pages as a zero page */
1558     case SEV_DESC_TYPE_SNP_SEC_MEM: return KVM_SEV_SNP_PAGE_TYPE_ZERO;
1559     case SEV_DESC_TYPE_SNP_SECRETS: return KVM_SEV_SNP_PAGE_TYPE_SECRETS;
1560     case SEV_DESC_TYPE_CPUID: return KVM_SEV_SNP_PAGE_TYPE_CPUID;
1561     default:
1562          return KVM_SEV_SNP_PAGE_TYPE_ZERO;
1563     }
1564 }
1565 
1566 static void
1567 snp_populate_metadata_pages(SevSnpGuestState *sev_snp,
1568                             OvmfSevMetadata *metadata)
1569 {
1570     OvmfSevMetadataDesc *desc;
1571     int type, ret, i;
1572     void *hva;
1573     MemoryRegion *mr = NULL;
1574 
1575     for (i = 0; i < metadata->num_desc; i++) {
1576         desc = &metadata->descs[i];
1577 
1578         type = snp_metadata_desc_to_page_type(desc->type);
1579 
1580         hva = gpa2hva(&mr, desc->base, desc->len, NULL);
1581         if (!hva) {
1582             error_report("%s: Failed to get HVA for GPA 0x%x sz 0x%x",
1583                          __func__, desc->base, desc->len);
1584             exit(1);
1585         }
1586 
1587         if (type == KVM_SEV_SNP_PAGE_TYPE_CPUID) {
1588             ret = snp_launch_update_cpuid(desc->base, hva, desc->len,
1589                                           &error_fatal);
1590         } else if (desc->type == SEV_DESC_TYPE_SNP_KERNEL_HASHES) {
1591             ret = snp_launch_update_kernel_hashes(sev_snp, desc->base, hva,
1592                                                   desc->len, &error_fatal);
1593         } else {
1594             ret = snp_launch_update_data(desc->base, hva, desc->len, type,
1595                                          &error_fatal);
1596         }
1597 
1598         if (ret) {
1599             error_report("%s: Failed to add metadata page gpa 0x%x+%x type %d",
1600                          __func__, desc->base, desc->len, desc->type);
1601             exit(1);
1602         }
1603     }
1604 }
1605 
1606 static void
1607 sev_snp_launch_finish(SevCommonState *sev_common)
1608 {
1609     int ret, error;
1610     Error *local_err = NULL;
1611     OvmfSevMetadata *metadata;
1612     SevLaunchUpdateData *data;
1613     SevSnpGuestState *sev_snp = SEV_SNP_GUEST(sev_common);
1614     struct kvm_sev_snp_launch_finish *finish = &sev_snp->kvm_finish_conf;
1615 
1616     /*
1617      * Populate all the metadata pages if not using an IGVM file. In the case
1618      * where an IGVM file is provided it will be used to configure the metadata
1619      * pages directly.
1620      */
1621     if (!X86_MACHINE(qdev_get_machine())->igvm) {
1622         /*
1623          * To boot the SNP guest, the hypervisor is required to populate the
1624          * CPUID and Secrets page before finalizing the launch flow. The
1625          * location of the secrets and CPUID page is available through the
1626          * OVMF metadata GUID.
1627          */
1628         metadata = pc_system_get_ovmf_sev_metadata_ptr();
1629         if (metadata == NULL) {
1630             error_report("%s: Failed to locate SEV metadata header", __func__);
1631             exit(1);
1632         }
1633 
1634         /* Populate all the metadata pages */
1635         snp_populate_metadata_pages(sev_snp, metadata);
1636     }
1637 
1638     QTAILQ_FOREACH(data, &launch_update, next) {
1639         ret = sev_snp_launch_update(sev_snp, data);
1640         if (ret) {
1641             exit(1);
1642         }
1643     }
1644 
1645     trace_kvm_sev_snp_launch_finish(sev_snp->id_block_base64, sev_snp->id_auth_base64,
1646                                     sev_snp->host_data);
1647     ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_SNP_LAUNCH_FINISH,
1648                     finish, &error);
1649     if (ret) {
1650         error_report("SNP_LAUNCH_FINISH ret=%d fw_error=%d '%s'",
1651                      ret, error, fw_error_to_str(error));
1652         exit(1);
1653     }
1654 
1655     kvm_mark_guest_state_protected();
1656     sev_set_guest_state(sev_common, SEV_STATE_RUNNING);
1657 
1658     /* add migration blocker */
1659     error_setg(&sev_mig_blocker,
1660                "SEV-SNP: Migration is not implemented");
1661     ret = migrate_add_blocker(&sev_mig_blocker, &local_err);
1662     if (local_err) {
1663         error_report_err(local_err);
1664         error_free(sev_mig_blocker);
1665         exit(1);
1666     }
1667 }
1668 
1669 
1670 static void
1671 sev_vm_state_change(void *opaque, bool running, RunState state)
1672 {
1673     SevCommonState *sev_common = opaque;
1674     SevCommonStateClass *klass = SEV_COMMON_GET_CLASS(opaque);
1675 
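    /*
     * The launch flow is finalized on the first transition to the running
     * state; once the guest is in SEV_STATE_RUNNING, subsequent resumes are
     * no-ops here.
     */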
1676     if (running) {
1677         if (!sev_check_state(sev_common, SEV_STATE_RUNNING)) {
1678             klass->launch_finish(sev_common);
1679         }
1680     }
1681 }
1682 
1683 /*
1684  * This helper is to examine sev-guest properties and determine if any options
1685  * have been set which rely on the newer KVM_SEV_INIT2 interface and associated
1686  * KVM VM types.
1687  */
1688 static bool sev_init2_required(SevGuestState *sev_guest)
1689 {
1690     /* Currently no KVM_SEV_INIT2-specific options are exposed via QEMU */
1691     return false;
1692 }
1693 
1694 static int sev_kvm_type(X86ConfidentialGuest *cg)
1695 {
1696     SevCommonState *sev_common = SEV_COMMON(cg);
1697     SevGuestState *sev_guest = SEV_GUEST(sev_common);
1698     int kvm_type;
1699 
1700     if (sev_common->kvm_type != -1) {
1701         goto out;
1702     }
1703 
1704     /* These are the only cases where legacy VM types can be used. */
1705     if (sev_guest->legacy_vm_type == ON_OFF_AUTO_ON ||
1706         (sev_guest->legacy_vm_type == ON_OFF_AUTO_AUTO &&
1707          !sev_init2_required(sev_guest))) {
1708         sev_common->kvm_type = KVM_X86_DEFAULT_VM;
1709         goto out;
1710     }
1711 
1712     /*
1713      * Newer VM types are required, either explicitly via legacy-vm-type=on, or
1714      * implicitly via legacy-vm-type=auto along with additional sev-guest
1715      * properties that require the newer VM types.
1716      */
1717     kvm_type = (sev_guest->policy & SEV_POLICY_ES) ?
1718                 KVM_X86_SEV_ES_VM : KVM_X86_SEV_VM;
1719     if (!kvm_is_vm_type_supported(kvm_type)) {
1720         if (sev_guest->legacy_vm_type == ON_OFF_AUTO_AUTO) {
1721             error_report("SEV: host kernel does not support requested %s VM type, which is required "
1722                          "for the set of options specified. To allow use of the legacy "
1723                          "KVM_X86_DEFAULT_VM VM type, please disable any options that are not "
1724                          "compatible with the legacy VM type, or upgrade your kernel.",
1725                          kvm_type == KVM_X86_SEV_VM ? "KVM_X86_SEV_VM" : "KVM_X86_SEV_ES_VM");
1726         } else {
1727             error_report("SEV: host kernel does not support requested %s VM type. To allow use of "
1728                          "the legacy KVM_X86_DEFAULT_VM VM type, the 'legacy-vm-type' argument "
1729                          "must be set to 'on' or 'auto' for the sev-guest object.",
1730                          kvm_type == KVM_X86_SEV_VM ? "KVM_X86_SEV_VM" : "KVM_X86_SEV_ES_VM");
1731         }
1732 
1733         return -1;
1734     }
1735 
1736     sev_common->kvm_type = kvm_type;
1737 out:
1738     return sev_common->kvm_type;
1739 }
1740 
1741 static int sev_snp_kvm_type(X86ConfidentialGuest *cg)
1742 {
1743     return KVM_X86_SNP_VM;
1744 }
1745 
1746 static int sev_init_supported_features(ConfidentialGuestSupport *cgs,
1747                                        SevCommonState *sev_common, Error **errp)
1748 {
1749     X86ConfidentialGuestClass *x86_klass =
1750                                X86_CONFIDENTIAL_GUEST_GET_CLASS(cgs);
1751     /*
1752      * Older kernels do not support query or setting of sev_features. In this
1753      * case the set of supported features must be zero to match the settings
1754      * in the kernel.
1755      */
1756     if (x86_klass->kvm_type(X86_CONFIDENTIAL_GUEST(sev_common)) ==
1757         KVM_X86_DEFAULT_VM) {
1758         sev_common->supported_sev_features = 0;
1759         return 0;
1760     }
1761 
1762     /* Query KVM for the supported set of sev_features */
1763     struct kvm_device_attr attr = {
1764         .group = KVM_X86_GRP_SEV,
1765         .attr = KVM_X86_SEV_VMSA_FEATURES,
1766         .addr = (unsigned long)&sev_common->supported_sev_features,
1767     };
1768     if (kvm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr) < 0) {
1769         error_setg(errp, "%s: failed to query supported sev_features",
1770                    __func__);
1771         return -1;
1772     }
1773     if (sev_snp_enabled()) {
1774         sev_common->supported_sev_features |= SVM_SEV_FEAT_SNP_ACTIVE;
1775     }
1776     return 0;
1777 }
1778 
1779 static int sev_common_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
1780 {
1781     char *devname;
1782     int ret, fw_error, cmd;
1783     uint32_t ebx;
1784     uint32_t host_cbitpos;
1785     struct sev_user_data_status status = {};
1786     SevCommonState *sev_common = SEV_COMMON(cgs);
1787     SevCommonStateClass *klass = SEV_COMMON_GET_CLASS(cgs);
1788     X86ConfidentialGuestClass *x86_klass =
1789                                X86_CONFIDENTIAL_GUEST_GET_CLASS(cgs);
1790 
1791     sev_common->state = SEV_STATE_UNINIT;
1792 
1793     host_cpuid(0x8000001F, 0, NULL, &ebx, NULL, NULL);
1794     host_cbitpos = ebx & 0x3f;
1795 
1796     /*
1797      * The cbitpos value will be placed in bit positions 5:0 of the EBX
1798      * register of CPUID 0x8000001F. No need to verify the range as the
1799      * comparison against the host value accomplishes that.
1800      */
1801     if (host_cbitpos != sev_common->cbitpos) {
1802         error_setg(errp, "%s: cbitpos check failed, host '%d' requested '%d'",
1803                    __func__, host_cbitpos, sev_common->cbitpos);
1804         return -1;
1805     }
1806 
1807     /*
1808      * The reduced-phys-bits value will be placed in bit positions 11:6 of
1809      * the EBX register of CPUID 0x8000001F, so verify the supplied value
1810      * is in the range of 1 to 63.
1811      */
1812     if (sev_common->reduced_phys_bits < 1 ||
1813         sev_common->reduced_phys_bits > 63) {
1814         error_setg(errp, "%s: reduced_phys_bits check failed,"
1815                    " it should be in the range of 1 to 63, requested '%d'",
1816                    __func__, sev_common->reduced_phys_bits);
1817         return -1;
1818     }
1819 
1820     devname = object_property_get_str(OBJECT(sev_common), "sev-device", NULL);
1821     sev_common->sev_fd = open(devname, O_RDWR);
1822     if (sev_common->sev_fd < 0) {
1823         error_setg(errp, "%s: Failed to open %s '%s'", __func__,
1824                    devname, strerror(errno));
1825         g_free(devname);
1826         return -1;
1827     }
1828     g_free(devname);
1829 
1830     ret = sev_platform_ioctl(sev_common->sev_fd, SEV_PLATFORM_STATUS, &status,
1831                              &fw_error);
1832     if (ret) {
1833         error_setg(errp, "%s: failed to get platform status ret=%d "
1834                    "fw_error='%d: %s'", __func__, ret, fw_error,
1835                    fw_error_to_str(fw_error));
1836         return -1;
1837     }
1838     sev_common->build_id = status.build;
1839     sev_common->api_major = status.api_major;
1840     sev_common->api_minor = status.api_minor;
1841 
1842     if (sev_es_enabled()) {
1843         if (!kvm_kernel_irqchip_allowed()) {
1844             error_setg(errp, "%s: SEV-ES guests require in-kernel irqchip"
1845                        "support", __func__);
1846             return -1;
1847         }
1848     }
1849 
1850     if (sev_es_enabled() && !sev_snp_enabled()) {
1851         if (!(status.flags & SEV_STATUS_FLAGS_CONFIG_ES)) {
1852             error_setg(errp, "%s: guest policy requires SEV-ES, but "
1853                          "host SEV-ES support unavailable",
1854                          __func__);
1855             return -1;
1856         }
1857     }
1858 
1859     if (sev_init_supported_features(cgs, sev_common, errp) < 0) {
1860         return -1;
1861     }
1862 
1863     trace_kvm_sev_init();
1864     switch (x86_klass->kvm_type(X86_CONFIDENTIAL_GUEST(sev_common))) {
1865     case KVM_X86_DEFAULT_VM:
1866         cmd = sev_es_enabled() ? KVM_SEV_ES_INIT : KVM_SEV_INIT;
1867 
1868         ret = sev_ioctl(sev_common->sev_fd, cmd, NULL, &fw_error);
1869         break;
1870     case KVM_X86_SEV_VM:
1871     case KVM_X86_SEV_ES_VM:
1872     case KVM_X86_SNP_VM: {
1873         struct kvm_sev_init args = { 0 };
1874         MachineState *machine = MACHINE(qdev_get_machine());
1875         X86MachineState *x86machine = X86_MACHINE(qdev_get_machine());
1876 
1877         /*
1878          * If configuration is provided via an IGVM file then the IGVM file
1879          * might contain configuration of the initial vcpu context. For SEV
1880          * the vcpu context includes the sev_features which should be applied
1881          * to the vcpu.
1882          *
1883          * KVM does not synchronize sev_features from CPU state. Instead it
1884          * requires sev_features to be provided as part of this initialization
1885          * call which is subsequently automatically applied to the VMSA of
1886          * each vcpu.
1887          *
1888          * The IGVM file is normally processed after initialization. Therefore
1889          * we need to pre-process it here to extract sev_features in order to
1890          * provide it to KVM_SEV_INIT2. Each cgs_* function that is called by
1891          * the IGVM processor detects this pre-process by observing the state
1892          * as SEV_STATE_UNINIT.
1893          */
1894         if (x86machine->igvm) {
1895             if (IGVM_CFG_GET_CLASS(x86machine->igvm)
1896                     ->process(x86machine->igvm, machine->cgs, true, errp) ==
1897                 -1) {
1898                 return -1;
1899             }
1900             /*
1901              * KVM maintains a bitmask of allowed sev_features. This does not
1902              * include SVM_SEV_FEAT_SNP_ACTIVE which is set accordingly by KVM
1903              * itself. Therefore we need to clear this flag.
1904              */
1905             args.vmsa_features = sev_common->sev_features &
1906                                  ~SVM_SEV_FEAT_SNP_ACTIVE;
1907         }
1908 
1909         ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_INIT2, &args, &fw_error);
1910         break;
1911     }
1912     default:
1913         error_setg(errp, "%s: host kernel does not support the requested SEV configuration.",
1914                    __func__);
1915         return -1;
1916     }
1917 
1918     if (ret) {
1919         error_setg(errp, "%s: failed to initialize ret=%d fw_error=%d '%s'",
1920                    __func__, ret, fw_error, fw_error_to_str(fw_error));
1921         return -1;
1922     }
1923 
1924     ret = klass->launch_start(sev_common);
1925 
1926     if (ret) {
1927         error_setg(errp, "%s: failed to create encryption context", __func__);
1928         return -1;
1929     }
1930 
1931     if (klass->kvm_init && klass->kvm_init(cgs, errp)) {
1932         return -1;
1933     }
1934 
1935     qemu_add_vm_change_state_handler(sev_vm_state_change, sev_common);
1936 
1937     cgs->ready = true;
1938 
1939     return 0;
1940 }
1941 
1942 static int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
1943 {
1944     int ret;
1945 
1946     /*
1947      * SEV/SEV-ES rely on pinned memory to back guest RAM so discarding
1948      * isn't actually possible. With SNP, only guest_memfd pages are used
1949      * for private guest memory, so discarding of shared memory is still
1950      * possible..
1951      */
1952     ret = ram_block_discard_disable(true);
1953     if (ret) {
1954         error_setg(errp, "%s: cannot disable RAM discard", __func__);
1955         return -1;
1956     }
1957 
1958     /*
1959      * SEV uses these notifiers to register/pin pages prior to guest use,
1960      * but SNP relies on guest_memfd for private pages, which has its
1961      * own internal mechanisms for registering/pinning private memory.
1962      */
1963     ram_block_notifier_add(&sev_ram_notifier);
1964 
1965     /*
1966      * The machine done notify event is used for SEV guests to get the
1967      * measurement of the encrypted images. When SEV-SNP is enabled, the
1968      * measurement is part of the guest attestation process where it can
1969      * be collected without any reliance on the VMM. So skip registering
1970      * the notifier for SNP in favor of using guest attestation instead.
1971      */
1972     qemu_add_machine_init_done_notifier(&sev_machine_done_notify);
1973 
1974     return 0;
1975 }
1976 
1977 static int sev_snp_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
1978 {
1979     MachineState *ms = MACHINE(qdev_get_machine());
1980     X86MachineState *x86ms = X86_MACHINE(ms);
1981 
1982     if (x86ms->smm == ON_OFF_AUTO_AUTO) {
1983         x86ms->smm = ON_OFF_AUTO_OFF;
1984     } else if (x86ms->smm == ON_OFF_AUTO_ON) {
1985         error_setg(errp, "SEV-SNP does not support SMM.");
1986         return -1;
1987     }
1988 
1989     return 0;
1990 }
1991 
1992 int
1993 sev_encrypt_flash(hwaddr gpa, uint8_t *ptr, uint64_t len, Error **errp)
1994 {
1995     SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
1996     SevCommonStateClass *klass;
1997 
1998     if (!sev_common) {
1999         return 0;
2000     }
2001     klass = SEV_COMMON_GET_CLASS(sev_common);
2002 
2003     /* if SEV is in update state then encrypt the data else do nothing */
2004     if (sev_check_state(sev_common, SEV_STATE_LAUNCH_UPDATE)) {
2005         int ret;
2006 
2007         ret = klass->launch_update_data(sev_common, gpa, ptr, len, errp);
2008         if (ret < 0) {
2009             return ret;
2010         }
2011     }
2012 
2013     return 0;
2014 }
2015 
2016 int sev_inject_launch_secret(const char *packet_hdr, const char *secret,
2017                              uint64_t gpa, Error **errp)
2018 {
2019     ERRP_GUARD();
2020     struct kvm_sev_launch_secret input;
2021     g_autofree guchar *data = NULL, *hdr = NULL;
2022     int error, ret = 1;
2023     void *hva;
2024     gsize hdr_sz = 0, data_sz = 0;
2025     MemoryRegion *mr = NULL;
2026     SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
2027 
2028     if (!sev_common) {
2029         error_setg(errp, "SEV not enabled for guest");
2030         return 1;
2031     }
2032 
2033     /* secret can be injected only in this state */
2034     if (!sev_check_state(sev_common, SEV_STATE_LAUNCH_SECRET)) {
2035         error_setg(errp, "SEV: Not in correct state. (LSECRET) %x",
2036                    sev_common->state);
2037         return 1;
2038     }
2039 
2040     hdr = g_base64_decode(packet_hdr, &hdr_sz);
2041     if (!hdr || !hdr_sz) {
2042         error_setg(errp, "SEV: Failed to decode sequence header");
2043         return 1;
2044     }
2045 
2046     data = g_base64_decode(secret, &data_sz);
2047     if (!data || !data_sz) {
2048         error_setg(errp, "SEV: Failed to decode data");
2049         return 1;
2050     }
2051 
2052     hva = gpa2hva(&mr, gpa, data_sz, errp);
2053     if (!hva) {
2054         error_prepend(errp, "SEV: Failed to calculate guest address: ");
2055         return 1;
2056     }
2057 
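    /*
     * hdr and data stay in QEMU's address space and are handed to the PSP as
     * user-space addresses; guest_uaddr is the host mapping of the guest page
     * that receives the re-encrypted secret.
     */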
2058     input.hdr_uaddr = (uint64_t)(unsigned long)hdr;
2059     input.hdr_len = hdr_sz;
2060 
2061     input.trans_uaddr = (uint64_t)(unsigned long)data;
2062     input.trans_len = data_sz;
2063 
2064     input.guest_uaddr = (uint64_t)(unsigned long)hva;
2065     input.guest_len = data_sz;
2066 
2067     trace_kvm_sev_launch_secret(gpa, input.guest_uaddr,
2068                                 input.trans_uaddr, input.trans_len);
2069 
2070     ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_SECRET,
2071                     &input, &error);
2072     if (ret) {
2073         error_setg(errp, "SEV: failed to inject secret ret=%d fw_error=%d '%s'",
2074                      ret, error, fw_error_to_str(error));
2075         return ret;
2076     }
2077 
2078     return 0;
2079 }
2080 
2081 #define SEV_SECRET_GUID "4c2eb361-7d9b-4cc3-8081-127c90d3d294"
2082 struct sev_secret_area {
2083     uint32_t base;
2084     uint32_t size;
2085 };
2086 
2087 void qmp_sev_inject_launch_secret(const char *packet_hdr,
2088                                   const char *secret,
2089                                   bool has_gpa, uint64_t gpa,
2090                                   Error **errp)
2091 {
2092     if (!sev_enabled()) {
2093         error_setg(errp, "SEV not enabled for guest");
2094         return;
2095     }
2096     if (!has_gpa) {
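        /*
         * No GPA supplied: fall back to the launch-secret area that OVMF
         * advertises via the SEV_SECRET_GUID entry in its GUID table.
         */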
2097         uint8_t *data;
2098         struct sev_secret_area *area;
2099 
2100         if (!pc_system_ovmf_table_find(SEV_SECRET_GUID, &data, NULL)) {
2101             error_setg(errp, "SEV: no secret area found in OVMF,"
2102                        " gpa must be specified.");
2103             return;
2104         }
2105         area = (struct sev_secret_area *)data;
2106         gpa = area->base;
2107     }
2108 
2109     sev_inject_launch_secret(packet_hdr, secret, gpa, errp);
2110 }
2111 
2112 static int
2113 sev_es_parse_reset_block(SevInfoBlock *info, uint32_t *addr)
2114 {
2115     if (!info->reset_addr) {
2116         error_report("SEV-ES reset address is zero");
2117         return 1;
2118     }
2119 
2120     *addr = info->reset_addr;
2121 
2122     return 0;
2123 }
2124 
2125 static int
2126 sev_es_find_reset_vector(void *flash_ptr, uint64_t flash_size,
2127                          uint32_t *addr)
2128 {
2129     QemuUUID info_guid, *guid;
2130     SevInfoBlock *info;
2131     uint8_t *data;
2132     uint16_t *len;
2133 
2134     /*
2135      * Initialize the address to zero. An address of zero with a successful
2136      * return code indicates that SEV-ES is not active.
2137      */
2138     *addr = 0;
2139 
2140     /*
2141      * Extract the AP reset vector for SEV-ES guests by locating the SEV GUID.
2142      * The SEV GUID is located on its own (original implementation) or within
2143      * the Firmware GUID Table (new implementation), either of which are
2144      * located 32 bytes from the end of the flash.
2145      *
2146      * Check the Firmware GUID Table first.
2147      */
2148     if (pc_system_ovmf_table_find(SEV_INFO_BLOCK_GUID, &data, NULL)) {
2149         return sev_es_parse_reset_block((SevInfoBlock *)data, addr);
2150     }
2151 
2152     /*
2153      * SEV info block not found in the Firmware GUID Table (or there isn't
2154      * a Firmware GUID Table), fall back to the original implementation.
2155      */
2156     data = flash_ptr + flash_size - 0x20;
2157 
2158     qemu_uuid_parse(SEV_INFO_BLOCK_GUID, &info_guid);
2159     info_guid = qemu_uuid_bswap(info_guid); /* GUIDs are LE */
2160 
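    /*
     * Walk the footer backwards from 32 bytes before the end of flash: the
     * 16-byte GUID sits immediately below 'data', a 16-bit length sits below
     * the GUID, and that length counts back from 'data' to the start of the
     * SevInfoBlock.
     */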
2161     guid = (QemuUUID *)(data - sizeof(info_guid));
2162     if (!qemu_uuid_is_equal(guid, &info_guid)) {
2163         error_report("SEV information block/Firmware GUID Table block not found in pflash rom");
2164         return 1;
2165     }
2166 
2167     len = (uint16_t *)((uint8_t *)guid - sizeof(*len));
2168     info = (SevInfoBlock *)(data - le16_to_cpu(*len));
2169 
2170     return sev_es_parse_reset_block(info, addr);
2171 }
2172 
2173 
2174 static void seg_to_vmsa(const SegmentCache *cpu_seg, struct vmcb_seg *vmsa_seg)
2175 {
2176     vmsa_seg->selector = cpu_seg->selector;
2177     vmsa_seg->base = cpu_seg->base;
2178     vmsa_seg->limit = cpu_seg->limit;
2179     vmsa_seg->attrib = FLAGS_SEGCACHE_TO_VMSA(cpu_seg->flags);
2180 }
2181 
2182 static void initialize_vmsa(const CPUState *cpu, struct sev_es_save_area *vmsa)
2183 {
2184     const X86CPU *x86 = X86_CPU(cpu);
2185     const CPUX86State *env = &x86->env;
2186 
2187     /*
2188      * Initialize the SEV-ES save area from the current state of
2189      * the CPU. The entire state does not need to be copied, only the state
2190      * that is copied back to the CPUState in sev_apply_cpu_context.
2191      */
2192     memset(vmsa, 0, sizeof(struct sev_es_save_area));
2193     vmsa->efer = env->efer;
2194     vmsa->cr0 = env->cr[0];
2195     vmsa->cr3 = env->cr[3];
2196     vmsa->cr4 = env->cr[4];
2197     vmsa->xcr0 = env->xcr0;
2198     vmsa->g_pat = env->pat;
2199 
2200     seg_to_vmsa(&env->segs[R_CS], &vmsa->cs);
2201     seg_to_vmsa(&env->segs[R_DS], &vmsa->ds);
2202     seg_to_vmsa(&env->segs[R_ES], &vmsa->es);
2203     seg_to_vmsa(&env->segs[R_FS], &vmsa->fs);
2204     seg_to_vmsa(&env->segs[R_GS], &vmsa->gs);
2205     seg_to_vmsa(&env->segs[R_SS], &vmsa->ss);
2206 
2207     seg_to_vmsa(&env->gdt, &vmsa->gdtr);
2208     seg_to_vmsa(&env->idt, &vmsa->idtr);
2209     seg_to_vmsa(&env->ldt, &vmsa->ldtr);
2210     seg_to_vmsa(&env->tr, &vmsa->tr);
2211 
2212     vmsa->dr6 = env->dr[6];
2213     vmsa->dr7 = env->dr[7];
2214 
2215     vmsa->rax = env->regs[R_EAX];
2216     vmsa->rcx = env->regs[R_ECX];
2217     vmsa->rdx = env->regs[R_EDX];
2218     vmsa->rbx = env->regs[R_EBX];
2219     vmsa->rsp = env->regs[R_ESP];
2220     vmsa->rbp = env->regs[R_EBP];
2221     vmsa->rsi = env->regs[R_ESI];
2222     vmsa->rdi = env->regs[R_EDI];
2223 
2224 #ifdef TARGET_X86_64
2225     vmsa->r8 = env->regs[R_R8];
2226     vmsa->r9 = env->regs[R_R9];
2227     vmsa->r10 = env->regs[R_R10];
2228     vmsa->r11 = env->regs[R_R11];
2229     vmsa->r12 = env->regs[R_R12];
2230     vmsa->r13 = env->regs[R_R13];
2231     vmsa->r14 = env->regs[R_R14];
2232     vmsa->r15 = env->regs[R_R15];
2233 #endif
2234 
2235     vmsa->rip = env->eip;
2236     vmsa->rflags = env->eflags;
2237 }
2238 
2239 static void sev_es_set_ap_context(uint32_t reset_addr)
2240 {
2241     CPUState *cpu;
2242     struct sev_es_save_area vmsa;
2243     SegmentCache cs;
2244 
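    /*
     * Build a real-mode style CS for the AP reset state: the segment base is
     * the reset vector rounded down to a 64KiB boundary and RIP (set below)
     * gets the low 16 bits, so CS:IP resolves to the 32-bit address
     * published by OVMF.
     */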
2245     cs.selector = 0xf000;
2246     cs.base = reset_addr & 0xffff0000;
2247     cs.limit = 0xffff;
2248     cs.flags = DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK |
2249                DESC_A_MASK;
2250 
2251     CPU_FOREACH(cpu) {
2252         if (cpu->cpu_index == 0) {
2253             /* Do not update the BSP reset state */
2254             continue;
2255         }
2256         initialize_vmsa(cpu, &vmsa);
2257         seg_to_vmsa(&cs, &vmsa.cs);
2258         vmsa.rip = reset_addr & 0x0000ffff;
2259         sev_set_cpu_context(cpu->cpu_index, &vmsa,
2260                             sizeof(struct sev_es_save_area),
2261                             0, &error_fatal);
2262     }
2263 }
2264 
2265 void sev_es_set_reset_vector(CPUState *cpu)
2266 {
2267     if (sev_enabled()) {
2268         sev_apply_cpu_context(cpu);
2269     }
2270 }
2271 
2272 int sev_es_save_reset_vector(void *flash_ptr, uint64_t flash_size)
2273 {
2274     uint32_t addr;
2275     int ret;
2276 
2277     if (!sev_es_enabled()) {
2278         return 0;
2279     }
2280 
2281     addr = 0;
2282     ret = sev_es_find_reset_vector(flash_ptr, flash_size,
2283                                    &addr);
2284     if (ret) {
2285         return ret;
2286     }
2287 
2288     /*
2289      * The reset vector is saved into a CPU context for each AP but not for
2290      * the BSP. This is applied during guest startup or when the CPU is reset.
2291      */
2292     if (addr) {
2293         sev_es_set_ap_context(addr);
2294     }
2295 
2296     return 0;
2297 }
2298 
2299 static const QemuUUID sev_hash_table_header_guid = {
2300     .data = UUID_LE(0x9438d606, 0x4f22, 0x4cc9, 0xb4, 0x79, 0xa7, 0x93,
2301                     0xd4, 0x11, 0xfd, 0x21)
2302 };
2303 
2304 static const QemuUUID sev_kernel_entry_guid = {
2305     .data = UUID_LE(0x4de79437, 0xabd2, 0x427f, 0xb8, 0x35, 0xd5, 0xb1,
2306                     0x72, 0xd2, 0x04, 0x5b)
2307 };
2308 static const QemuUUID sev_initrd_entry_guid = {
2309     .data = UUID_LE(0x44baf731, 0x3a2f, 0x4bd7, 0x9a, 0xf1, 0x41, 0xe2,
2310                     0x91, 0x69, 0x78, 0x1d)
2311 };
2312 static const QemuUUID sev_cmdline_entry_guid = {
2313     .data = UUID_LE(0x97d02dd8, 0xbd20, 0x4c94, 0xaa, 0x78, 0xe7, 0x71,
2314                     0x4d, 0x36, 0xab, 0x2a)
2315 };
2316 
2317 static bool build_kernel_loader_hashes(PaddedSevHashTable *padded_ht,
2318                                        SevKernelLoaderContext *ctx,
2319                                        Error **errp)
2320 {
2321     SevHashTable *ht;
2322     uint8_t cmdline_hash[HASH_SIZE];
2323     uint8_t initrd_hash[HASH_SIZE];
2324     uint8_t kernel_hash[HASH_SIZE];
2325     uint8_t *hashp;
2326     size_t hash_len = HASH_SIZE;
2327 
2328     /*
2329      * Calculate hash of kernel command-line with the terminating null byte. If
2330      * the user doesn't supply a command-line via -append, the 1-byte "\0" will
2331      * be used.
2332      */
2333     hashp = cmdline_hash;
2334     if (qcrypto_hash_bytes(QCRYPTO_HASH_ALGO_SHA256, ctx->cmdline_data,
2335                            ctx->cmdline_size, &hashp, &hash_len, errp) < 0) {
2336         return false;
2337     }
2338     assert(hash_len == HASH_SIZE);
2339 
2340     /*
2341      * Calculate hash of initrd. If the user doesn't supply an initrd via
2342      * -initrd, an empty buffer will be used (ctx->initrd_size == 0).
2343      */
2344     hashp = initrd_hash;
2345     if (qcrypto_hash_bytes(QCRYPTO_HASH_ALGO_SHA256, ctx->initrd_data,
2346                            ctx->initrd_size, &hashp, &hash_len, errp) < 0) {
2347         return false;
2348     }
2349     assert(hash_len == HASH_SIZE);
2350 
2351     /* Calculate hash of the kernel */
2352     hashp = kernel_hash;
2353     struct iovec iov[2] = {
2354         { .iov_base = ctx->setup_data, .iov_len = ctx->setup_size },
2355         { .iov_base = ctx->kernel_data, .iov_len = ctx->kernel_size }
2356     };
2357     if (qcrypto_hash_bytesv(QCRYPTO_HASH_ALGO_SHA256, iov, ARRAY_SIZE(iov),
2358                             &hashp, &hash_len, errp) < 0) {
2359         return false;
2360     }
2361     assert(hash_len == HASH_SIZE);
2362 
2363     ht = &padded_ht->ht;
2364 
2365     ht->guid = sev_hash_table_header_guid;
2366     ht->len = sizeof(*ht);
2367 
2368     ht->cmdline.guid = sev_cmdline_entry_guid;
2369     ht->cmdline.len = sizeof(ht->cmdline);
2370     memcpy(ht->cmdline.hash, cmdline_hash, sizeof(ht->cmdline.hash));
2371 
2372     ht->initrd.guid = sev_initrd_entry_guid;
2373     ht->initrd.len = sizeof(ht->initrd);
2374     memcpy(ht->initrd.hash, initrd_hash, sizeof(ht->initrd.hash));
2375 
2376     ht->kernel.guid = sev_kernel_entry_guid;
2377     ht->kernel.len = sizeof(ht->kernel);
2378     memcpy(ht->kernel.hash, kernel_hash, sizeof(ht->kernel.hash));
2379 
2380     /* zero the excess data so the measurement can be reliably calculated */
2381     memset(padded_ht->padding, 0, sizeof(padded_ht->padding));
2382 
2383     return true;
2384 }
2385 
2386 static bool sev_snp_build_kernel_loader_hashes(SevCommonState *sev_common,
2387                                                SevHashTableDescriptor *area,
2388                                                SevKernelLoaderContext *ctx,
2389                                                Error **errp)
2390 {
2391     /*
2392      * SNP: Populate the hashes table in an area that later in
2393      * snp_launch_update_kernel_hashes() will be copied to the guest memory
2394      * and encrypted.
2395      */
2396     SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(sev_common);
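    /*
     * Remember the offset of the hashes table within its page; the whole
     * page is later populated and encrypted by
     * snp_launch_update_kernel_hashes().
     */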
2397     sev_snp_guest->kernel_hashes_offset = area->base & ~TARGET_PAGE_MASK;
2398     sev_snp_guest->kernel_hashes_data = g_new0(PaddedSevHashTable, 1);
2399     return build_kernel_loader_hashes(sev_snp_guest->kernel_hashes_data, ctx, errp);
2400 }
2401 
2402 static bool sev_build_kernel_loader_hashes(SevCommonState *sev_common,
2403                                            SevHashTableDescriptor *area,
2404                                            SevKernelLoaderContext *ctx,
2405                                            Error **errp)
2406 {
2407     PaddedSevHashTable *padded_ht;
2408     hwaddr mapped_len = sizeof(*padded_ht);
2409     MemTxAttrs attrs = { 0 };
2410     bool ret = true;
2411 
2412     /*
2413      * Populate the hashes table in the guest's memory at the OVMF-designated
2414      * area for the SEV hashes table
2415      */
2416     padded_ht = address_space_map(&address_space_memory, area->base,
2417                                   &mapped_len, true, attrs);
2418     if (!padded_ht || mapped_len != sizeof(*padded_ht)) {
2419         error_setg(errp, "SEV: cannot map hashes table guest memory area");
2420         return false;
2421     }
2422 
2423     if (build_kernel_loader_hashes(padded_ht, ctx, errp)) {
2424         if (sev_encrypt_flash(area->base, (uint8_t *)padded_ht,
2425                               sizeof(*padded_ht), errp) < 0) {
2426             ret = false;
2427         }
2428     } else {
2429         ret = false;
2430     }
2431 
2432     address_space_unmap(&address_space_memory, padded_ht,
2433                         mapped_len, true, mapped_len);
2434 
2435     return ret;
2436 }
2437 
2438 /*
2439  * Add the hashes of the linux kernel/initrd/cmdline to an encrypted guest page
2440  * which is included in SEV's initial memory measurement.
2441  */
2442 bool sev_add_kernel_loader_hashes(SevKernelLoaderContext *ctx, Error **errp)
2443 {
2444     uint8_t *data;
2445     SevHashTableDescriptor *area;
2446     SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
2447     SevCommonStateClass *klass = SEV_COMMON_GET_CLASS(sev_common);
2448 
2449     /*
2450      * Only add the kernel hashes if the sev-guest configuration explicitly
2451      * stated kernel-hashes=on.
2452      */
2453     if (!sev_common->kernel_hashes) {
2454         return false;
2455     }
2456 
2457     if (!pc_system_ovmf_table_find(SEV_HASH_TABLE_RV_GUID, &data, NULL)) {
2458         error_setg(errp, "SEV: kernel specified but guest firmware "
2459                          "has no hashes table GUID");
2460         return false;
2461     }
2462 
2463     area = (SevHashTableDescriptor *)data;
2464     if (!area->base || area->size < sizeof(PaddedSevHashTable)) {
2465         error_setg(errp, "SEV: guest firmware hashes table area is invalid "
2466                          "(base=0x%x size=0x%x)", area->base, area->size);
2467         return false;
2468     }
2469 
2470     return klass->build_kernel_loader_hashes(sev_common, area, ctx, errp);
2471 }
2472 
2473 static char *
2474 sev_common_get_sev_device(Object *obj, Error **errp)
2475 {
2476     return g_strdup(SEV_COMMON(obj)->sev_device);
2477 }
2478 
2479 static void
2480 sev_common_set_sev_device(Object *obj, const char *value, Error **errp)
2481 {
2482     SEV_COMMON(obj)->sev_device = g_strdup(value);
2483 }
2484 
2485 static bool sev_common_get_kernel_hashes(Object *obj, Error **errp)
2486 {
2487     return SEV_COMMON(obj)->kernel_hashes;
2488 }
2489 
2490 static void sev_common_set_kernel_hashes(Object *obj, bool value, Error **errp)
2491 {
2492     SEV_COMMON(obj)->kernel_hashes = value;
2493 }
2494 
2495 static bool cgs_check_support(ConfidentialGuestPlatformType platform,
2496                              uint16_t platform_version, uint8_t highest_vtl,
2497                              uint64_t shared_gpa_boundary)
2498 {
2499     return (((platform == CGS_PLATFORM_SEV_SNP) && sev_snp_enabled()) ||
2500             ((platform == CGS_PLATFORM_SEV_ES) && sev_es_enabled()) ||
2501             ((platform == CGS_PLATFORM_SEV) && sev_enabled()));
2502 }
2503 
2504 static int cgs_set_guest_state(hwaddr gpa, uint8_t *ptr, uint64_t len,
2505                                ConfidentialGuestPageType memory_type,
2506                                uint16_t cpu_index, Error **errp)
2507 {
2508     SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
2509     SevCommonStateClass *klass = SEV_COMMON_GET_CLASS(sev_common);
2510 
2511     if (sev_common->state == SEV_STATE_UNINIT) {
2512         /* Pre-processing of IGVM file called from sev_common_kvm_init() */
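        /*
         * At this stage only the BSP's initial VMSA is of interest, purely
         * to capture sev_features for KVM_SEV_INIT2; the IGVM contents are
         * processed again once SEV has been initialized.
         */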
2513         if ((cpu_index == 0) && (memory_type == CGS_PAGE_TYPE_VMSA)) {
2514             const struct sev_es_save_area *sa =
2515                 (const struct sev_es_save_area *)ptr;
2516             if (len < sizeof(*sa)) {
2517                 error_setg(errp, "%s: invalid VMSA length encountered",
2518                            __func__);
2519                 return -1;
2520             }
2521             if (check_sev_features(sev_common, sa->sev_features, errp) < 0) {
2522                 return -1;
2523             }
2524             sev_common->sev_features = sa->sev_features;
2525         }
2526         return 0;
2527     }
2528 
2529     if (!sev_enabled()) {
2530         error_setg(errp, "%s: attempt to configure guest memory, but SEV "
2531                      "is not enabled", __func__);
2532         return -1;
2533     }
2534 
2535     switch (memory_type) {
2536     case CGS_PAGE_TYPE_NORMAL:
2537     case CGS_PAGE_TYPE_ZERO:
2538         return klass->launch_update_data(sev_common, gpa, ptr, len, errp);
2539 
2540     case CGS_PAGE_TYPE_VMSA:
2541         if (!sev_es_enabled()) {
2542             error_setg(errp,
2543                        "%s: attempt to configure initial VMSA, but SEV-ES "
2544                        "is not supported",
2545                        __func__);
2546             return -1;
2547         }
2548         if (check_vmsa_supported(sev_common, gpa,
2549                                  (const struct sev_es_save_area *)ptr,
2550                                  errp) < 0) {
2551             return -1;
2552         }
2553         return sev_set_cpu_context(cpu_index, ptr, len, gpa, errp);
2554 
2555     case CGS_PAGE_TYPE_UNMEASURED:
2556         if (sev_snp_enabled()) {
2557             return snp_launch_update_data(
2558                 gpa, ptr, len, KVM_SEV_SNP_PAGE_TYPE_UNMEASURED, errp);
2559         }
2560         /* No action required if not SEV-SNP */
2561         return 0;
2562 
2563     case CGS_PAGE_TYPE_SECRETS:
2564         if (!sev_snp_enabled()) {
2565             error_setg(errp,
2566                        "%s: attempt to configure secrets page, but SEV-SNP "
2567                        "is not supported",
2568                        __func__);
2569             return -1;
2570         }
2571         return snp_launch_update_data(gpa, ptr, len,
2572                                       KVM_SEV_SNP_PAGE_TYPE_SECRETS, errp);
2573 
2574     case CGS_PAGE_TYPE_REQUIRED_MEMORY:
2575         if (kvm_convert_memory(gpa, len, true) < 0) {
2576             error_setg(
2577                 errp,
2578                 "%s: failed to configure required memory. gpa: %lX, type: %d",
2579                 __func__, gpa, memory_type);
2580             return -1;
2581         }
2582         return 0;
2583 
2584     case CGS_PAGE_TYPE_CPUID:
2585         if (!sev_snp_enabled()) {
2586             error_setg(errp,
2587                        "%s: attempt to configure CPUID page, but SEV-SNP "
2588                        "is not supported",
2589                        __func__);
2590             return -1;
2591         }
2592         return snp_launch_update_cpuid(gpa, ptr, len, errp);
2593     }
2594     error_setg(errp, "%s: failed to update guest. gpa: %lX, type: %d", __func__,
2595                gpa, memory_type);
2596     return -1;
2597 }
2598 
2599 static int cgs_get_mem_map_entry(int index,
2600                                  ConfidentialGuestMemoryMapEntry *entry,
2601                                  Error **errp)
2602 {
2603     struct e820_entry *table;
2604     int num_entries;
2605 
2606     SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
2607     if (sev_common->state == SEV_STATE_UNINIT) {
2608         /* Pre-processing of IGVM file called from sev_common_kvm_init() */
2609         return 1;
2610     }
2611 
2612     num_entries = e820_get_table(&table);
2613     if ((index < 0) || (index >= num_entries)) {
2614         return 1;
2615     }
2616     entry->gpa = table[index].address;
2617     entry->size = table[index].length;
2618     switch (table[index].type) {
2619     case E820_RAM:
2620         entry->type = CGS_MEM_RAM;
2621         break;
2622     case E820_RESERVED:
2623         entry->type = CGS_MEM_RESERVED;
2624         break;
2625     case E820_ACPI:
2626         entry->type = CGS_MEM_ACPI;
2627         break;
2628     case E820_NVS:
2629         entry->type = CGS_MEM_NVS;
2630         break;
2631     case E820_UNUSABLE:
2632         entry->type = CGS_MEM_UNUSABLE;
2633         break;
2634     }
2635     return 0;
2636 }
2637 
2638 static int cgs_set_guest_policy(ConfidentialGuestPolicyType policy_type,
2639                                 uint64_t policy, void *policy_data1,
2640                                 uint32_t policy_data1_size, void *policy_data2,
2641                                 uint32_t policy_data2_size, Error **errp)
2642 {
2643     SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
2644     if (sev_common->state == SEV_STATE_UNINIT) {
2645         /* Pre-processing of IGVM file called from sev_common_kvm_init() */
2646         return 0;
2647     }
2648 
2649     if (policy_type != GUEST_POLICY_SEV) {
2650         error_setg(errp, "%s: Invalid guest policy type provided for SEV: %d",
2651                    __func__, policy_type);
2652         return -1;
2653     }
2654     /*
2655      * SEV-SNP handles policy differently. The policy flags are defined in
2656      * kvm_start_conf.policy and an ID block and ID auth can be provided.
2657      */
2658     if (sev_snp_enabled()) {
2659         SevSnpGuestState *sev_snp_guest =
2660             SEV_SNP_GUEST(MACHINE(qdev_get_machine())->cgs);
2661         struct kvm_sev_snp_launch_finish *finish =
2662             &sev_snp_guest->kvm_finish_conf;
2663 
2664         /*
2665          * The policy consists of flags in 'policy' and optionally an ID block
2666          * and ID auth in policy_data1 and policy_data2 respectively. The ID
2667          * block and auth are optional so clear any previous ID block and auth
2668          * and set them if provided, but always set the policy flags.
2669          */
2670         g_free(sev_snp_guest->id_block);
2671         g_free((guchar *)finish->id_block_uaddr);
2672         g_free(sev_snp_guest->id_auth);
2673         g_free((guchar *)finish->id_auth_uaddr);
2674         sev_snp_guest->id_block = NULL;
2675         finish->id_block_uaddr = 0;
2676         sev_snp_guest->id_auth = NULL;
2677         finish->id_auth_uaddr = 0;
2678 
2679         if (policy_data1_size > 0) {
2680             struct sev_snp_id_authentication *id_auth =
2681                 (struct sev_snp_id_authentication *)policy_data2;
2682 
2683             if (policy_data1_size != KVM_SEV_SNP_ID_BLOCK_SIZE) {
2684                 error_setg(errp, "%s: Invalid SEV-SNP ID block: incorrect size",
2685                            __func__);
2686                 return -1;
2687             }
2688             if (policy_data2_size != KVM_SEV_SNP_ID_AUTH_SIZE) {
2689                 error_setg(errp,
2690                            "%s: Invalid SEV-SNP ID auth block: incorrect size",
2691                            __func__);
2692                 return -1;
2693             }
2694             assert(policy_data1 != NULL);
2695             assert(policy_data2 != NULL);
2696 
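            /*
             * KVM takes user-space pointers for the ID block and ID auth, so
             * duplicate the caller's buffers and store the pointers directly
             * in the uaddr fields (they are freed via those same fields
             * above).
             */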
2697             finish->id_block_uaddr =
2698                 (__u64)g_memdup2(policy_data1, KVM_SEV_SNP_ID_BLOCK_SIZE);
2699             finish->id_auth_uaddr =
2700                 (__u64)g_memdup2(policy_data2, KVM_SEV_SNP_ID_AUTH_SIZE);
2701 
2702             /*
2703              * Check if an author key has been provided and use that to flag
2704              * whether the author key is enabled. The first byte of the author
2705              * key must be non-zero to indicate the key type, which will
2706              * currently always be 2.
2707              */
2708             sev_snp_guest->kvm_finish_conf.auth_key_en =
2709                 id_auth->author_key[0] ? 1 : 0;
2710             finish->id_block_en = 1;
2711         }
2712         sev_snp_guest->kvm_start_conf.policy = policy;
2713     } else {
2714         SevGuestState *sev_guest = SEV_GUEST(MACHINE(qdev_get_machine())->cgs);
2715         /* Only the policy flags are supported for SEV and SEV-ES */
2716         if ((policy_data1_size > 0) || (policy_data2_size > 0) || !sev_guest) {
2717             error_setg(errp, "%s: An ID block/ID auth block has been provided "
2718                              "but SEV-SNP is not enabled", __func__);
2719             return -1;
2720         }
2721         sev_guest->policy = policy;
2722     }
2723     return 0;
2724 }
2725 
2726 static void
2727 sev_common_class_init(ObjectClass *oc, const void *data)
2728 {
2729     ConfidentialGuestSupportClass *klass = CONFIDENTIAL_GUEST_SUPPORT_CLASS(oc);
2730 
2731     klass->kvm_init = sev_common_kvm_init;
2732 
2733     object_class_property_add_str(oc, "sev-device",
2734                                   sev_common_get_sev_device,
2735                                   sev_common_set_sev_device);
2736     object_class_property_set_description(oc, "sev-device",
2737             "SEV device to use");
2738     object_class_property_add_bool(oc, "kernel-hashes",
2739                                    sev_common_get_kernel_hashes,
2740                                    sev_common_set_kernel_hashes);
2741     object_class_property_set_description(oc, "kernel-hashes",
2742             "add kernel hashes to guest firmware for measured Linux boot");
2743 }
2744 
2745 static void
2746 sev_common_instance_init(Object *obj)
2747 {
2748     SevCommonState *sev_common = SEV_COMMON(obj);
2749     ConfidentialGuestSupportClass *cgs =
2750         CONFIDENTIAL_GUEST_SUPPORT_GET_CLASS(obj);
2751 
2752     sev_common->kvm_type = -1;
2753 
2754     sev_common->sev_device = g_strdup(DEFAULT_SEV_DEVICE);
2755 
2756     object_property_add_uint32_ptr(obj, "cbitpos", &sev_common->cbitpos,
2757                                    OBJ_PROP_FLAG_READWRITE);
2758     object_property_add_uint32_ptr(obj, "reduced-phys-bits",
2759                                    &sev_common->reduced_phys_bits,
2760                                    OBJ_PROP_FLAG_READWRITE);
2761     cgs->check_support = cgs_check_support;
2762     cgs->set_guest_state = cgs_set_guest_state;
2763     cgs->get_mem_map_entry = cgs_get_mem_map_entry;
2764     cgs->set_guest_policy = cgs_set_guest_policy;
2765 
2766     QTAILQ_INIT(&sev_common->launch_vmsa);
2767 }
2768 
2769 /* sev guest info common to sev/sev-es/sev-snp */
2770 static const TypeInfo sev_common_info = {
2771     .parent = TYPE_X86_CONFIDENTIAL_GUEST,
2772     .name = TYPE_SEV_COMMON,
2773     .instance_size = sizeof(SevCommonState),
2774     .instance_init = sev_common_instance_init,
2775     .class_size = sizeof(SevCommonStateClass),
2776     .class_init = sev_common_class_init,
2777     .abstract = true,
2778     .interfaces = (const InterfaceInfo[]) {
2779         { TYPE_USER_CREATABLE },
2780         { }
2781     }
2782 };
2783 
2784 static char *
2785 sev_guest_get_dh_cert_file(Object *obj, Error **errp)
2786 {
2787     return g_strdup(SEV_GUEST(obj)->dh_cert_file);
2788 }
2789 
2790 static void
2791 sev_guest_set_dh_cert_file(Object *obj, const char *value, Error **errp)
2792 {
2793     SEV_GUEST(obj)->dh_cert_file = g_strdup(value);
2794 }
2795 
2796 static char *
2797 sev_guest_get_session_file(Object *obj, Error **errp)
2798 {
2799     SevGuestState *sev_guest = SEV_GUEST(obj);
2800 
2801     return sev_guest->session_file ? g_strdup(sev_guest->session_file) : NULL;
2802 }
2803 
2804 static void
2805 sev_guest_set_session_file(Object *obj, const char *value, Error **errp)
2806 {
2807     SEV_GUEST(obj)->session_file = g_strdup(value);
2808 }
2809 
2810 static void sev_guest_get_legacy_vm_type(Object *obj, Visitor *v,
2811                                          const char *name, void *opaque,
2812                                          Error **errp)
2813 {
2814     SevGuestState *sev_guest = SEV_GUEST(obj);
2815     OnOffAuto legacy_vm_type = sev_guest->legacy_vm_type;
2816 
2817     visit_type_OnOffAuto(v, name, &legacy_vm_type, errp);
2818 }
2819 
2820 static void sev_guest_set_legacy_vm_type(Object *obj, Visitor *v,
2821                                          const char *name, void *opaque,
2822                                          Error **errp)
2823 {
2824     SevGuestState *sev_guest = SEV_GUEST(obj);
2825 
2826     visit_type_OnOffAuto(v, name, &sev_guest->legacy_vm_type, errp);
2827 }
2828 
2829 static void
2830 sev_guest_class_init(ObjectClass *oc, const void *data)
2831 {
2832     SevCommonStateClass *klass = SEV_COMMON_CLASS(oc);
2833     X86ConfidentialGuestClass *x86_klass = X86_CONFIDENTIAL_GUEST_CLASS(oc);
2834 
2835     klass->build_kernel_loader_hashes = sev_build_kernel_loader_hashes;
2836     klass->launch_start = sev_launch_start;
2837     klass->launch_finish = sev_launch_finish;
2838     klass->launch_update_data = sev_launch_update_data;
2839     klass->kvm_init = sev_kvm_init;
2840     x86_klass->kvm_type = sev_kvm_type;
2841 
2842     object_class_property_add_str(oc, "dh-cert-file",
2843                                   sev_guest_get_dh_cert_file,
2844                                   sev_guest_set_dh_cert_file);
2845     object_class_property_set_description(oc, "dh-cert-file",
2846             "guest owners DH certificate (encoded with base64)");
2847     object_class_property_add_str(oc, "session-file",
2848                                   sev_guest_get_session_file,
2849                                   sev_guest_set_session_file);
2850     object_class_property_set_description(oc, "session-file",
2851             "guest owners session parameters (encoded with base64)");
2852     object_class_property_add(oc, "legacy-vm-type", "OnOffAuto",
2853                               sev_guest_get_legacy_vm_type,
2854                               sev_guest_set_legacy_vm_type, NULL, NULL);
2855     object_class_property_set_description(oc, "legacy-vm-type",
2856             "use legacy VM type to maintain measurement compatibility with older QEMU or kernel versions.");
2857 }
2858 
2859 static void
2860 sev_guest_instance_init(Object *obj)
2861 {
2862     SevGuestState *sev_guest = SEV_GUEST(obj);
2863 
2864     sev_guest->policy = DEFAULT_GUEST_POLICY;
2865     object_property_add_uint32_ptr(obj, "handle", &sev_guest->handle,
2866                                    OBJ_PROP_FLAG_READWRITE);
2867     object_property_add_uint32_ptr(obj, "policy", &sev_guest->policy,
2868                                    OBJ_PROP_FLAG_READWRITE);
2869     object_apply_compat_props(obj);
2870 
2871     sev_guest->legacy_vm_type = ON_OFF_AUTO_AUTO;
2872 }
2873 
2874 /* guest info specific sev/sev-es */
2875 static const TypeInfo sev_guest_info = {
2876     .parent = TYPE_SEV_COMMON,
2877     .name = TYPE_SEV_GUEST,
2878     .instance_size = sizeof(SevGuestState),
2879     .instance_init = sev_guest_instance_init,
2880     .class_init = sev_guest_class_init,
2881 };
2882 
2883 static void
2884 sev_snp_guest_get_policy(Object *obj, Visitor *v, const char *name,
2885                          void *opaque, Error **errp)
2886 {
2887     visit_type_uint64(v, name,
2888                       (uint64_t *)&SEV_SNP_GUEST(obj)->kvm_start_conf.policy,
2889                       errp);
2890 }
2891 
2892 static void
2893 sev_snp_guest_set_policy(Object *obj, Visitor *v, const char *name,
2894                          void *opaque, Error **errp)
2895 {
2896     visit_type_uint64(v, name,
2897                       (uint64_t *)&SEV_SNP_GUEST(obj)->kvm_start_conf.policy,
2898                       errp);
2899 }
2900 
2901 static char *
2902 sev_snp_guest_get_guest_visible_workarounds(Object *obj, Error **errp)
2903 {
2904     return g_strdup(SEV_SNP_GUEST(obj)->guest_visible_workarounds);
2905 }
2906 
2907 static void
2908 sev_snp_guest_set_guest_visible_workarounds(Object *obj, const char *value,
2909                                             Error **errp)
2910 {
2911     SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
2912     struct kvm_sev_snp_launch_start *start = &sev_snp_guest->kvm_start_conf;
2913     g_autofree guchar *blob = NULL;
2914     gsize len;
2915 
2916     g_free(sev_snp_guest->guest_visible_workarounds);
2917 
2918     /* store the base64 str so we don't need to re-encode in getter */
2919     sev_snp_guest->guest_visible_workarounds = g_strdup(value);
2920 
2921     blob = qbase64_decode(sev_snp_guest->guest_visible_workarounds,
2922                           -1, &len, errp);
2923     if (!blob) {
2924         return;
2925     }
2926 
2927     if (len != sizeof(start->gosvw)) {
2928         error_setg(errp, "parameter length of %" G_GSIZE_FORMAT
2929                    " not equal to %zu",
2930                    len, sizeof(start->gosvw));
2931         return;
2932     }
2933 
2934     memcpy(start->gosvw, blob, len);
2935 }
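/*
 * A minimal sketch (not part of this file's logic) of producing a value
 * for the "guest-visible-workarounds" property: the base64 string must
 * decode to exactly sizeof(start->gosvw) bytes (16 in current kernel
 * headers), e.g. from a raw binary file:
 *
 *   base64 -w0 gosvw.bin
 */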
2936 
2937 static char *
2938 sev_snp_guest_get_id_block(Object *obj, Error **errp)
2939 {
2940     SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
2941 
2942     return g_strdup(sev_snp_guest->id_block_base64);
2943 }
2944 
2945 static void
2946 sev_snp_guest_set_id_block(Object *obj, const char *value, Error **errp)
2947 {
2948     SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
2949     struct kvm_sev_snp_launch_finish *finish = &sev_snp_guest->kvm_finish_conf;
2950     gsize len;
2951 
2952     finish->id_block_en = 0;
2953     g_free(sev_snp_guest->id_block);
2954     g_free(sev_snp_guest->id_block_base64);
2955 
2956     /* store the base64 str so we don't need to re-encode in getter */
2957     sev_snp_guest->id_block_base64 = g_strdup(value);
2958     sev_snp_guest->id_block =
2959         qbase64_decode(sev_snp_guest->id_block_base64, -1, &len, errp);
2960 
2961     if (!sev_snp_guest->id_block) {
2962         return;
2963     }
2964 
2965     if (len != KVM_SEV_SNP_ID_BLOCK_SIZE) {
2966         error_setg(errp, "parameter length of %" G_GSIZE_FORMAT
2967                    " not equal to %u",
2968                    len, KVM_SEV_SNP_ID_BLOCK_SIZE);
2969         return;
2970     }
2971 
2972     finish->id_block_en = 1;
2973     finish->id_block_uaddr = (uintptr_t)sev_snp_guest->id_block;
2974 }
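/*
 * Illustrative only (file name is a placeholder): "id-block" expects the
 * base64 encoding of a raw ID block of exactly KVM_SEV_SNP_ID_BLOCK_SIZE
 * bytes (96 in current kernel headers), typically generated and signed by
 * an external tool and then encoded with:
 *
 *   base64 -w0 id_block.bin
 */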
2975 
2976 static char *
2977 sev_snp_guest_get_id_auth(Object *obj, Error **errp)
2978 {
2979     SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
2980 
2981     return g_strdup(sev_snp_guest->id_auth_base64);
2982 }
2983 
2984 static void
2985 sev_snp_guest_set_id_auth(Object *obj, const char *value, Error **errp)
2986 {
2987     SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
2988     struct kvm_sev_snp_launch_finish *finish = &sev_snp_guest->kvm_finish_conf;
2989     gsize len;
2990 
2991     finish->id_auth_uaddr = 0;
2992     g_free(sev_snp_guest->id_auth);
2993     g_free(sev_snp_guest->id_auth_base64);
2994 
2995     /* store the base64 str so we don't need to re-encode in getter */
2996     sev_snp_guest->id_auth_base64 = g_strdup(value);
2997     sev_snp_guest->id_auth =
2998         qbase64_decode(sev_snp_guest->id_auth_base64, -1, &len, errp);
2999 
3000     if (!sev_snp_guest->id_auth) {
3001         return;
3002     }
3003 
3004     if (len > KVM_SEV_SNP_ID_AUTH_SIZE) {
3005         error_setg(errp, "id-auth parameter length of %" G_GSIZE_FORMAT
3006                    " exceeds max of %u",
3007                    len, KVM_SEV_SNP_ID_AUTH_SIZE);
3008         return;
3009     }
3010 
3011     finish->id_auth_uaddr = (uintptr_t)sev_snp_guest->id_auth;
3012 }
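/*
 * Illustrative only (file name is a placeholder): "id-auth" pairs with
 * "id-block" above and carries the ID authentication information
 * structure; its base64 value may decode to at most
 * KVM_SEV_SNP_ID_AUTH_SIZE bytes (4096 in current kernel headers):
 *
 *   base64 -w0 id_auth.bin
 */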
3013 
3014 static bool
3015 sev_snp_guest_get_author_key_enabled(Object *obj, Error **errp)
3016 {
3017     SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
3018 
3019     return !!sev_snp_guest->kvm_finish_conf.auth_key_en;
3020 }
3021 
3022 static void
3023 sev_snp_guest_set_author_key_enabled(Object *obj, bool value, Error **errp)
3024 {
3025     SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
3026 
3027     sev_snp_guest->kvm_finish_conf.auth_key_en = value;
3028 }
3029 
3030 static bool
3031 sev_snp_guest_get_vcek_disabled(Object *obj, Error **errp)
3032 {
3033     SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
3034 
3035     return !!sev_snp_guest->kvm_finish_conf.vcek_disabled;
3036 }
3037 
3038 static void
3039 sev_snp_guest_set_vcek_disabled(Object *obj, bool value, Error **errp)
3040 {
3041     SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
3042 
3043     sev_snp_guest->kvm_finish_conf.vcek_disabled = value;
3044 }
3045 
3046 static char *
3047 sev_snp_guest_get_host_data(Object *obj, Error **errp)
3048 {
3049     SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
3050 
3051     return g_strdup(sev_snp_guest->host_data);
3052 }
3053 
3054 static void
3055 sev_snp_guest_set_host_data(Object *obj, const char *value, Error **errp)
3056 {
3057     SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
3058     struct kvm_sev_snp_launch_finish *finish = &sev_snp_guest->kvm_finish_conf;
3059     g_autofree guchar *blob = NULL;
3060     gsize len;
3061 
3062     g_free(sev_snp_guest->host_data);
3063 
3064     /* store the base64 str so we don't need to re-encode in getter */
3065     sev_snp_guest->host_data = g_strdup(value);
3066 
3067     blob = qbase64_decode(sev_snp_guest->host_data, -1, &len, errp);
3068 
3069     if (!blob) {
3070         return;
3071     }
3072 
3073     if (len != sizeof(finish->host_data)) {
3074         error_setg(errp, "parameter length of %" G_GSIZE_FORMAT
3075                    " not equal to %zu",
3076                    len, sizeof(finish->host_data));
3077         return;
3078     }
3079 
3080     memcpy(finish->host_data, blob, len);
3081 }
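/*
 * Minimal sketch (placeholder file names): "host-data" must decode to
 * exactly sizeof(finish->host_data) bytes (32 in current kernel headers);
 * for example, an opaque 32-byte value can be produced and encoded with:
 *
 *   head -c 32 /dev/urandom > host_data.bin && base64 -w0 host_data.bin
 */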
3082 
3083 static void
3084 sev_snp_guest_class_init(ObjectClass *oc, const void *data)
3085 {
3086     SevCommonStateClass *klass = SEV_COMMON_CLASS(oc);
3087     X86ConfidentialGuestClass *x86_klass = X86_CONFIDENTIAL_GUEST_CLASS(oc);
3088 
3089     klass->build_kernel_loader_hashes = sev_snp_build_kernel_loader_hashes;
3090     klass->launch_start = sev_snp_launch_start;
3091     klass->launch_finish = sev_snp_launch_finish;
3092     klass->launch_update_data = sev_snp_launch_update_data;
3093     klass->kvm_init = sev_snp_kvm_init;
3094     x86_klass->adjust_cpuid_features = sev_snp_adjust_cpuid_features;
3095     x86_klass->kvm_type = sev_snp_kvm_type;
3096 
3097     object_class_property_add(oc, "policy", "uint64",
3098                               sev_snp_guest_get_policy,
3099                               sev_snp_guest_set_policy, NULL, NULL);
3100     object_class_property_add_str(oc, "guest-visible-workarounds",
3101                                   sev_snp_guest_get_guest_visible_workarounds,
3102                                   sev_snp_guest_set_guest_visible_workarounds);
3103     object_class_property_add_str(oc, "id-block",
3104                                   sev_snp_guest_get_id_block,
3105                                   sev_snp_guest_set_id_block);
3106     object_class_property_add_str(oc, "id-auth",
3107                                   sev_snp_guest_get_id_auth,
3108                                   sev_snp_guest_set_id_auth);
3109     object_class_property_add_bool(oc, "author-key-enabled",
3110                                    sev_snp_guest_get_author_key_enabled,
3111                                    sev_snp_guest_set_author_key_enabled);
3112     object_class_property_add_bool(oc, "vcek-disabled",
3113                                    sev_snp_guest_get_vcek_disabled,
3114                                    sev_snp_guest_set_vcek_disabled);
3115     object_class_property_add_str(oc, "host-data",
3116                                   sev_snp_guest_get_host_data,
3117                                   sev_snp_guest_set_host_data);
3118 }
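/*
 * Illustrative usage only (all values below are placeholders): the SNP
 * properties registered above are set in the same way as for sev-guest,
 * e.g.
 *
 *   -object sev-snp-guest,id=snp0,policy=0x30000,\
 *           id-block=<base64>,id-auth=<base64>,host-data=<base64> \
 *   -machine ...,confidential-guest-support=snp0
 */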
3119 
3120 static void
3121 sev_snp_guest_instance_init(Object *obj)
3122 {
3123     ConfidentialGuestSupport *cgs = CONFIDENTIAL_GUEST_SUPPORT(obj);
3124     SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
3125 
3126     cgs->require_guest_memfd = true;
3127 
3128     /* default init/start/finish params for kvm */
3129     sev_snp_guest->kvm_start_conf.policy = DEFAULT_SEV_SNP_POLICY;
3130 }
3131 
3132 /* guest info specific to sev-snp */
3133 static const TypeInfo sev_snp_guest_info = {
3134     .parent = TYPE_SEV_COMMON,
3135     .name = TYPE_SEV_SNP_GUEST,
3136     .instance_size = sizeof(SevSnpGuestState),
3137     .class_init = sev_snp_guest_class_init,
3138     .instance_init = sev_snp_guest_instance_init,
3139 };
3140 
3141 static void
3142 sev_register_types(void)
3143 {
3144     type_register_static(&sev_common_info);
3145     type_register_static(&sev_guest_info);
3146     type_register_static(&sev_snp_guest_info);
3147 }
3148 
3149 type_init(sev_register_types);
3150