xref: /openbmc/qemu/target/i386/sev.c (revision 596c330b19cf00384ec14d0bff25758ed204b49d)
1 /*
2  * QEMU SEV support
3  *
4  * Copyright Advanced Micro Devices 2016-2018
5  *
6  * Author:
7  *      Brijesh Singh <brijesh.singh@amd.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2 or later.
10  * See the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 
16 #include <linux/kvm.h>
17 #include <linux/kvm_para.h>
18 #include <linux/psp-sev.h>
19 
20 #include <sys/ioctl.h>
21 
22 #include "qapi/error.h"
23 #include "qom/object_interfaces.h"
24 #include "qemu/base64.h"
25 #include "qemu/module.h"
26 #include "qemu/uuid.h"
27 #include "qemu/error-report.h"
28 #include "crypto/hash.h"
29 #include "exec/target_page.h"
30 #include "system/kvm.h"
31 #include "kvm/kvm_i386.h"
32 #include "sev.h"
33 #include "system/system.h"
34 #include "system/runstate.h"
35 #include "trace.h"
36 #include "migration/blocker.h"
37 #include "qom/object.h"
38 #include "monitor/monitor.h"
39 #include "monitor/hmp-target.h"
40 #include "qapi/qapi-commands-misc-i386.h"
41 #include "confidential-guest.h"
42 #include "hw/i386/pc.h"
43 #include "system/address-spaces.h"
44 #include "hw/i386/e820_memory_layout.h"
45 #include "qemu/queue.h"
46 #include "qemu/cutils.h"
47 
48 OBJECT_DECLARE_TYPE(SevCommonState, SevCommonStateClass, SEV_COMMON)
49 OBJECT_DECLARE_TYPE(SevGuestState, SevCommonStateClass, SEV_GUEST)
50 OBJECT_DECLARE_TYPE(SevSnpGuestState, SevCommonStateClass, SEV_SNP_GUEST)
51 
52 /* hard-coded SHA-256 digest size */
53 #define HASH_SIZE 32
54 
55 /* Hard-coded GPA that KVM uses for the VMSA */
56 #define KVM_VMSA_GPA 0xFFFFFFFFF000
57 
58 /* Convert between SEV-ES VMSA and SegmentCache flags/attributes */
59 #define FLAGS_VMSA_TO_SEGCACHE(flags) \
60     ((((flags) & 0xff00) << 12) | (((flags) & 0xff) << 8))
61 #define FLAGS_SEGCACHE_TO_VMSA(flags) \
62     ((((flags) & 0xff00) >> 8) | (((flags) & 0xf00000) >> 12))
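
/*
 * Worked example (illustrative, not from the original source): a 64-bit code
 * segment is encoded in the VMSA as attrib 0x029b (type=0xb, S=1, DPL=0, P=1,
 * L=1).  FLAGS_VMSA_TO_SEGCACHE(0x029b) yields 0x209b00, i.e. the type in
 * bits 8-11 plus DESC_S_MASK, DESC_P_MASK and DESC_L_MASK, which is the
 * layout cpu_x86_load_seg_cache() expects; FLAGS_SEGCACHE_TO_VMSA() is the
 * inverse transformation.
 */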
63 
64 typedef struct QEMU_PACKED SevHashTableEntry {
65     QemuUUID guid;
66     uint16_t len;
67     uint8_t hash[HASH_SIZE];
68 } SevHashTableEntry;
69 
70 typedef struct QEMU_PACKED SevHashTable {
71     QemuUUID guid;
72     uint16_t len;
73     SevHashTableEntry cmdline;
74     SevHashTableEntry initrd;
75     SevHashTableEntry kernel;
76 } SevHashTable;
77 
78 /*
79  * Data encrypted by sev_encrypt_flash() must be padded to a multiple of
80  * 16 bytes.
81  */
82 typedef struct QEMU_PACKED PaddedSevHashTable {
83     SevHashTable ht;
84     uint8_t padding[ROUND_UP(sizeof(SevHashTable), 16) - sizeof(SevHashTable)];
85 } PaddedSevHashTable;
86 
87 QEMU_BUILD_BUG_ON(sizeof(PaddedSevHashTable) % 16 != 0);
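
/*
 * For reference (sizes implied by the packed definitions above): each
 * SevHashTableEntry is 16 + 2 + 32 = 50 bytes, so SevHashTable is
 * 16 + 2 + 3 * 50 = 168 bytes and PaddedSevHashTable rounds that up to
 * 176 bytes, i.e. 8 bytes of padding.
 */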
88 
89 #define SEV_INFO_BLOCK_GUID     "00f771de-1a7e-4fcb-890e-68c77e2fb44e"
90 typedef struct __attribute__((__packed__)) SevInfoBlock {
91     /* SEV-ES Reset Vector Address */
92     uint32_t reset_addr;
93 } SevInfoBlock;
94 
95 #define SEV_HASH_TABLE_RV_GUID  "7255371f-3a3b-4b04-927b-1da6efa8d454"
96 typedef struct QEMU_PACKED SevHashTableDescriptor {
97     /* SEV hash table area guest address */
98     uint32_t base;
99     /* SEV hash table area size (in bytes) */
100     uint32_t size;
101 } SevHashTableDescriptor;
102 
103 typedef struct SevLaunchVmsa {
104     QTAILQ_ENTRY(SevLaunchVmsa) next;
105 
106     uint16_t cpu_index;
107     uint64_t gpa;
108     struct sev_es_save_area vmsa;
109 } SevLaunchVmsa;
110 
111 struct SevCommonState {
112     X86ConfidentialGuest parent_obj;
113 
114     int kvm_type;
115 
116     /* configuration parameters */
117     char *sev_device;
118     uint32_t cbitpos;
119     uint32_t reduced_phys_bits;
120     bool kernel_hashes;
121 
122     /* runtime state */
123     uint8_t api_major;
124     uint8_t api_minor;
125     uint8_t build_id;
126     int sev_fd;
127     SevState state;
128 
129     QTAILQ_HEAD(, SevLaunchVmsa) launch_vmsa;
130 };
131 
132 struct SevCommonStateClass {
133     X86ConfidentialGuestClass parent_class;
134 
135     /* public */
136     bool (*build_kernel_loader_hashes)(SevCommonState *sev_common,
137                                        SevHashTableDescriptor *area,
138                                        SevKernelLoaderContext *ctx,
139                                        Error **errp);
140     int (*launch_start)(SevCommonState *sev_common);
141     void (*launch_finish)(SevCommonState *sev_common);
142     int (*launch_update_data)(SevCommonState *sev_common, hwaddr gpa,
143                               uint8_t *ptr, size_t len, Error **errp);
144     int (*kvm_init)(ConfidentialGuestSupport *cgs, Error **errp);
145 };
146 
147 /**
148  * SevGuestState:
149  *
150  * The SevGuestState object is used for creating and managing a SEV
151  * guest.
152  *
153  * # $QEMU \
154  *         -object sev-guest,id=sev0 \
155  *         -machine ...,memory-encryption=sev0
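 *
 * A fuller invocation (illustrative values; cbitpos must match the host CPU,
 * and the properties are registered later in this file) might look like:
 *
 * # $QEMU \
 *         -object sev-guest,id=sev0,policy=0x1,cbitpos=47,reduced-phys-bits=1 \
 *         -machine ...,memory-encryption=sev0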
156  */
157 struct SevGuestState {
158     SevCommonState parent_obj;
159     gchar *measurement;
160 
161     /* configuration parameters */
162     uint32_t handle;
163     uint32_t policy;
164     char *dh_cert_file;
165     char *session_file;
166     OnOffAuto legacy_vm_type;
167 };
168 
169 struct SevSnpGuestState {
170     SevCommonState parent_obj;
171 
172     /* configuration parameters */
173     char *guest_visible_workarounds;
174     char *id_block_base64;
175     uint8_t *id_block;
176     char *id_auth_base64;
177     uint8_t *id_auth;
178     char *host_data;
179 
180     struct kvm_sev_snp_launch_start kvm_start_conf;
181     struct kvm_sev_snp_launch_finish kvm_finish_conf;
182 
183     uint32_t kernel_hashes_offset;
184     PaddedSevHashTable *kernel_hashes_data;
185 };
186 
187 #define DEFAULT_GUEST_POLICY    0x1 /* disable debug */
188 #define DEFAULT_SEV_DEVICE      "/dev/sev"
189 #define DEFAULT_SEV_SNP_POLICY  0x30000
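/*
 * 0x30000 sets the SEV-SNP policy bit the ABI reserves as "must be one"
 * (bit 17) together with SEV_SNP_POLICY_SMT (bit 16); see hmp_info_sev()
 * below for how the policy bits are reported.
 */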
190 
191 typedef struct SevLaunchUpdateData {
192     QTAILQ_ENTRY(SevLaunchUpdateData) next;
193     hwaddr gpa;
194     void *hva;
195     size_t len;
196     int type;
197 } SevLaunchUpdateData;
198 
199 static QTAILQ_HEAD(, SevLaunchUpdateData) launch_update;
200 
201 static Error *sev_mig_blocker;
202 
203 static const char *const sev_fw_errlist[] = {
204     [SEV_RET_SUCCESS]                = "",
205     [SEV_RET_INVALID_PLATFORM_STATE] = "Platform state is invalid",
206     [SEV_RET_INVALID_GUEST_STATE]    = "Guest state is invalid",
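    /* Note: the INAVLID spelling below matches the <linux/psp-sev.h> enum */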
207     [SEV_RET_INAVLID_CONFIG]         = "Platform configuration is invalid",
208     [SEV_RET_INVALID_LEN]            = "Buffer too small",
209     [SEV_RET_ALREADY_OWNED]          = "Platform is already owned",
210     [SEV_RET_INVALID_CERTIFICATE]    = "Certificate is invalid",
211     [SEV_RET_POLICY_FAILURE]         = "Policy is not allowed",
212     [SEV_RET_INACTIVE]               = "Guest is not active",
213     [SEV_RET_INVALID_ADDRESS]        = "Invalid address",
214     [SEV_RET_BAD_SIGNATURE]          = "Bad signature",
215     [SEV_RET_BAD_MEASUREMENT]        = "Bad measurement",
216     [SEV_RET_ASID_OWNED]             = "ASID is already owned",
217     [SEV_RET_INVALID_ASID]           = "Invalid ASID",
218     [SEV_RET_WBINVD_REQUIRED]        = "WBINVD is required",
219     [SEV_RET_DFFLUSH_REQUIRED]       = "DF_FLUSH is required",
220     [SEV_RET_INVALID_GUEST]          = "Guest handle is invalid",
221     [SEV_RET_INVALID_COMMAND]        = "Invalid command",
222     [SEV_RET_ACTIVE]                 = "Guest is active",
223     [SEV_RET_HWSEV_RET_PLATFORM]     = "Hardware error",
224     [SEV_RET_HWSEV_RET_UNSAFE]       = "Hardware unsafe",
225     [SEV_RET_UNSUPPORTED]            = "Feature not supported",
226     [SEV_RET_INVALID_PARAM]          = "Invalid parameter",
227     [SEV_RET_RESOURCE_LIMIT]         = "Required firmware resource depleted",
228     [SEV_RET_SECURE_DATA_INVALID]    = "Part-specific integrity check failure",
229 };
230 
231 #define SEV_FW_MAX_ERROR      ARRAY_SIZE(sev_fw_errlist)
232 
233 #define SNP_CPUID_FUNCTION_MAXCOUNT 64
234 #define SNP_CPUID_FUNCTION_UNKNOWN 0xFFFFFFFF
235 
236 typedef struct {
237     uint32_t eax_in;
238     uint32_t ecx_in;
239     uint64_t xcr0_in;
240     uint64_t xss_in;
241     uint32_t eax;
242     uint32_t ebx;
243     uint32_t ecx;
244     uint32_t edx;
245     uint64_t reserved;
246 } __attribute__((packed)) SnpCpuidFunc;
247 
248 typedef struct {
249     uint32_t count;
250     uint32_t reserved1;
251     uint64_t reserved2;
252     SnpCpuidFunc entries[SNP_CPUID_FUNCTION_MAXCOUNT];
253 } __attribute__((packed)) SnpCpuidInfo;
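
/*
 * Illustrative entry layout: sev_snp_cpuid_info_fill() below stores CPUID
 * leaf 0xD, sub-leaf 0 as eax_in=0xd, ecx_in=0, xcr0_in=1, xss_in=0, with
 * ebx forced to 0x240 (the XSAVE area size for the base XCR0=1 state).
 */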
254 
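/*
 * Issue a guest-scoped SEV command: these go through the VM's
 * KVM_MEMORY_ENCRYPT_OP ioctl rather than directly to /dev/sev.
 * Typical use (as in sev_launch_get_measure() below):
 *
 *     struct kvm_sev_launch_measure measurement = {};
 *     int fw_error;
 *
 *     sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_MEASURE,
 *               &measurement, &fw_error);
 */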
255 static int
256 sev_ioctl(int fd, int cmd, void *data, int *error)
257 {
258     int r;
259     struct kvm_sev_cmd input;
260 
261     memset(&input, 0x0, sizeof(input));
262 
263     input.id = cmd;
264     input.sev_fd = fd;
265     input.data = (uintptr_t)data;
266 
267     r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_OP, &input);
268 
269     if (error) {
270         *error = input.error;
271     }
272 
273     return r;
274 }
275 
276 static int
277 sev_platform_ioctl(int fd, int cmd, void *data, int *error)
278 {
279     int r;
280     struct sev_issue_cmd arg;
281 
282     arg.cmd = cmd;
283     arg.data = (unsigned long)data;
284     r = ioctl(fd, SEV_ISSUE_CMD, &arg);
285     if (error) {
286         *error = arg.error;
287     }
288 
289     return r;
290 }
291 
292 static const char *
293 fw_error_to_str(int code)
294 {
295     if (code < 0 || code >= SEV_FW_MAX_ERROR) {
296         return "unknown error";
297     }
298 
299     return sev_fw_errlist[code];
300 }
301 
302 static bool
303 sev_check_state(const SevCommonState *sev_common, SevState state)
304 {
305     assert(sev_common);
306     return sev_common->state == state ? true : false;
307 }
308 
309 static void
310 sev_set_guest_state(SevCommonState *sev_common, SevState new_state)
311 {
312     assert(new_state < SEV_STATE__MAX);
313     assert(sev_common);
314 
315     trace_kvm_sev_change_state(SevState_str(sev_common->state),
316                                SevState_str(new_state));
317     sev_common->state = new_state;
318 }
319 
320 static void
321 sev_ram_block_added(RAMBlockNotifier *n, void *host, size_t size,
322                     size_t max_size)
323 {
324     int r;
325     struct kvm_enc_region range;
326     ram_addr_t offset;
327     MemoryRegion *mr;
328 
329     /*
330      * The RAM device presents a memory region that should be treated
331      * as an I/O region and should not be pinned.
332      */
333     mr = memory_region_from_host(host, &offset);
334     if (mr && memory_region_is_ram_device(mr)) {
335         return;
336     }
337 
338     range.addr = (uintptr_t)host;
339     range.size = max_size;
340 
341     trace_kvm_memcrypt_register_region(host, max_size);
342     r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_REG_REGION, &range);
343     if (r) {
344         error_report("%s: failed to register region (%p+%#zx) error '%s'",
345                      __func__, host, max_size, strerror(errno));
346         exit(1);
347     }
348 }
349 
350 static void
351 sev_ram_block_removed(RAMBlockNotifier *n, void *host, size_t size,
352                       size_t max_size)
353 {
354     int r;
355     struct kvm_enc_region range;
356     ram_addr_t offset;
357     MemoryRegion *mr;
358 
359     /*
360      * The RAM device presents a memory region that should be treated
361      * as an I/O region and should not have been pinned.
362      */
363     mr = memory_region_from_host(host, &offset);
364     if (mr && memory_region_is_ram_device(mr)) {
365         return;
366     }
367 
368     range.addr = (uintptr_t)host;
369     range.size = max_size;
370 
371     trace_kvm_memcrypt_unregister_region(host, max_size);
372     r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_UNREG_REGION, &range);
373     if (r) {
374         error_report("%s: failed to unregister region (%p+%#zx)",
375                      __func__, host, max_size);
376     }
377 }
378 
379 static struct RAMBlockNotifier sev_ram_notifier = {
380     .ram_block_added = sev_ram_block_added,
381     .ram_block_removed = sev_ram_block_removed,
382 };
383 
384 static void sev_apply_cpu_context(CPUState *cpu)
385 {
386     SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
387     X86CPU *x86;
388     CPUX86State *env;
389     struct SevLaunchVmsa *launch_vmsa;
390 
391     /* See if an initial VMSA has been provided for this CPU */
392     QTAILQ_FOREACH(launch_vmsa, &sev_common->launch_vmsa, next)
393     {
394         if (cpu->cpu_index == launch_vmsa->cpu_index) {
395             x86 = X86_CPU(cpu);
396             env = &x86->env;
397 
398             /*
399              * Ideally we would provide the VMSA directly to KVM, so that the
400              * initial VMSA measurement calculated during
401              * KVM_SEV_LAUNCH_UPDATE_VMSA would be derived from exactly what
402              * we provide here. Currently this is not possible, so instead we
403              * copy the parts of the VMSA structure that we currently support
404              * into the CPU state.
405              */
406             cpu_load_efer(env, launch_vmsa->vmsa.efer);
407             cpu_x86_update_cr4(env, launch_vmsa->vmsa.cr4);
408             cpu_x86_update_cr0(env, launch_vmsa->vmsa.cr0);
409             cpu_x86_update_cr3(env, launch_vmsa->vmsa.cr3);
410             env->xcr0 = launch_vmsa->vmsa.xcr0;
411             env->pat = launch_vmsa->vmsa.g_pat;
412 
413             cpu_x86_load_seg_cache(
414                 env, R_CS, launch_vmsa->vmsa.cs.selector,
415                 launch_vmsa->vmsa.cs.base, launch_vmsa->vmsa.cs.limit,
416                 FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.cs.attrib));
417             cpu_x86_load_seg_cache(
418                 env, R_DS, launch_vmsa->vmsa.ds.selector,
419                 launch_vmsa->vmsa.ds.base, launch_vmsa->vmsa.ds.limit,
420                 FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.ds.attrib));
421             cpu_x86_load_seg_cache(
422                 env, R_ES, launch_vmsa->vmsa.es.selector,
423                 launch_vmsa->vmsa.es.base, launch_vmsa->vmsa.es.limit,
424                 FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.es.attrib));
425             cpu_x86_load_seg_cache(
426                 env, R_FS, launch_vmsa->vmsa.fs.selector,
427                 launch_vmsa->vmsa.fs.base, launch_vmsa->vmsa.fs.limit,
428                 FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.fs.attrib));
429             cpu_x86_load_seg_cache(
430                 env, R_GS, launch_vmsa->vmsa.gs.selector,
431                 launch_vmsa->vmsa.gs.base, launch_vmsa->vmsa.gs.limit,
432                 FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.gs.attrib));
433             cpu_x86_load_seg_cache(
434                 env, R_SS, launch_vmsa->vmsa.ss.selector,
435                 launch_vmsa->vmsa.ss.base, launch_vmsa->vmsa.ss.limit,
436                 FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.ss.attrib));
437 
438             env->gdt.base = launch_vmsa->vmsa.gdtr.base;
439             env->gdt.limit = launch_vmsa->vmsa.gdtr.limit;
440             env->gdt.flags =
441                 FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.gdtr.attrib);
442             env->idt.base = launch_vmsa->vmsa.idtr.base;
443             env->idt.limit = launch_vmsa->vmsa.idtr.limit;
444             env->idt.flags =
445                 FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.idtr.attrib);
446 
447             cpu_x86_load_seg_cache(
448                 env, R_LDTR, launch_vmsa->vmsa.ldtr.selector,
449                 launch_vmsa->vmsa.ldtr.base, launch_vmsa->vmsa.ldtr.limit,
450                 FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.ldtr.attrib));
451             cpu_x86_load_seg_cache(
452                 env, R_TR, launch_vmsa->vmsa.tr.selector,
453                 launch_vmsa->vmsa.tr.base, launch_vmsa->vmsa.tr.limit,
454                 FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.tr.attrib));
455 
456             env->dr[6] = launch_vmsa->vmsa.dr6;
457             env->dr[7] = launch_vmsa->vmsa.dr7;
458 
459             env->regs[R_EAX] = launch_vmsa->vmsa.rax;
460             env->regs[R_ECX] = launch_vmsa->vmsa.rcx;
461             env->regs[R_EDX] = launch_vmsa->vmsa.rdx;
462             env->regs[R_EBX] = launch_vmsa->vmsa.rbx;
463             env->regs[R_ESP] = launch_vmsa->vmsa.rsp;
464             env->regs[R_EBP] = launch_vmsa->vmsa.rbp;
465             env->regs[R_ESI] = launch_vmsa->vmsa.rsi;
466             env->regs[R_EDI] = launch_vmsa->vmsa.rdi;
467 #ifdef TARGET_X86_64
468             env->regs[R_R8] = launch_vmsa->vmsa.r8;
469             env->regs[R_R9] = launch_vmsa->vmsa.r9;
470             env->regs[R_R10] = launch_vmsa->vmsa.r10;
471             env->regs[R_R11] = launch_vmsa->vmsa.r11;
472             env->regs[R_R12] = launch_vmsa->vmsa.r12;
473             env->regs[R_R13] = launch_vmsa->vmsa.r13;
474             env->regs[R_R14] = launch_vmsa->vmsa.r14;
475             env->regs[R_R15] = launch_vmsa->vmsa.r15;
476 #endif
477             env->eip = launch_vmsa->vmsa.rip;
478             env->eflags = launch_vmsa->vmsa.rflags;
479 
480             cpu_set_fpuc(env, launch_vmsa->vmsa.x87_fcw);
481             env->mxcsr = launch_vmsa->vmsa.mxcsr;
482 
483             break;
484         }
485     }
486 }
487 
488 static int check_vmsa_supported(hwaddr gpa, const struct sev_es_save_area *vmsa,
489                                 Error **errp)
490 {
491     struct sev_es_save_area vmsa_check;
492 
493     /*
494      * KVM always populates the VMSA at a fixed GPA which cannot be modified
495      * from userspace. Specifying a different GPA will not prevent the guest
496      * from starting but will cause the launch measurement to be different
497      * from expected. Therefore check that the provided GPA matches the KVM
498      * hardcoded value.
499      */
500     if (gpa != KVM_VMSA_GPA) {
501         error_setg(errp,
502                 "%s: The VMSA GPA must be %lX but is specified as %lX",
503                 __func__, KVM_VMSA_GPA, gpa);
504         return -1;
505     }
506 
507     /*
508      * Clear all supported fields so we can then check the entire structure
509      * is zero.
510      */
511     memcpy(&vmsa_check, vmsa, sizeof(struct sev_es_save_area));
512     memset(&vmsa_check.es, 0, sizeof(vmsa_check.es));
513     memset(&vmsa_check.cs, 0, sizeof(vmsa_check.cs));
514     memset(&vmsa_check.ss, 0, sizeof(vmsa_check.ss));
515     memset(&vmsa_check.ds, 0, sizeof(vmsa_check.ds));
516     memset(&vmsa_check.fs, 0, sizeof(vmsa_check.fs));
517     memset(&vmsa_check.gs, 0, sizeof(vmsa_check.gs));
518     memset(&vmsa_check.gdtr, 0, sizeof(vmsa_check.gdtr));
519     memset(&vmsa_check.idtr, 0, sizeof(vmsa_check.idtr));
520     memset(&vmsa_check.ldtr, 0, sizeof(vmsa_check.ldtr));
521     memset(&vmsa_check.tr, 0, sizeof(vmsa_check.tr));
522     vmsa_check.efer = 0;
523     vmsa_check.cr0 = 0;
524     vmsa_check.cr3 = 0;
525     vmsa_check.cr4 = 0;
526     vmsa_check.xcr0 = 0;
527     vmsa_check.dr6 = 0;
528     vmsa_check.dr7 = 0;
529     vmsa_check.rax = 0;
530     vmsa_check.rcx = 0;
531     vmsa_check.rdx = 0;
532     vmsa_check.rbx = 0;
533     vmsa_check.rsp = 0;
534     vmsa_check.rbp = 0;
535     vmsa_check.rsi = 0;
536     vmsa_check.rdi = 0;
537     vmsa_check.r8 = 0;
538     vmsa_check.r9 = 0;
539     vmsa_check.r10 = 0;
540     vmsa_check.r11 = 0;
541     vmsa_check.r12 = 0;
542     vmsa_check.r13 = 0;
543     vmsa_check.r14 = 0;
544     vmsa_check.r15 = 0;
545     vmsa_check.rip = 0;
546     vmsa_check.rflags = 0;
547 
548     vmsa_check.g_pat = 0;
549     vmsa_check.xcr0 = 0;
550 
551     vmsa_check.x87_fcw = 0;
552     vmsa_check.mxcsr = 0;
553 
554     if (sev_snp_enabled()) {
555         if (vmsa_check.sev_features != SVM_SEV_FEAT_SNP_ACTIVE) {
556             error_setg(errp,
557                        "%s: sev_features in the VMSA contains an unsupported "
558                        "value. For SEV-SNP, sev_features must be set to %x.",
559                        __func__, SVM_SEV_FEAT_SNP_ACTIVE);
560             return -1;
561         }
562         vmsa_check.sev_features = 0;
563     } else {
564         if (vmsa_check.sev_features != 0) {
565             error_setg(errp,
566                        "%s: sev_features in the VMSA contains an unsupported "
567                        "value. For SEV-ES and SEV, sev_features must be "
568                        "set to 0.", __func__);
569             return -1;
570         }
571     }
572 
573     if (!buffer_is_zero(&vmsa_check, sizeof(vmsa_check))) {
574         error_setg(errp,
575                 "%s: The VMSA contains fields that are not "
576                 "synchronized with KVM. Continuing would result in "
577                 "either unpredictable guest behavior, or a "
578                 "mismatched launch measurement.",
579                 __func__);
580         return -1;
581     }
582     return 0;
583 }
584 
585 static int sev_set_cpu_context(uint16_t cpu_index, const void *ctx,
586                                uint32_t ctx_len, hwaddr gpa, Error **errp)
587 {
588     SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
589     SevLaunchVmsa *launch_vmsa;
590     CPUState *cpu;
591     bool exists = false;
592 
593     /*
594      * Setting the CPU context is only supported for SEV-ES and SEV-SNP. The
595      * context buffer will contain a sev_es_save_area from the Linux kernel
596      * which is defined by "Table B-4. VMSA Layout, State Save Area for SEV-ES"
597      * in the AMD64 APM, Volume 2.
598      */
599 
600     if (!sev_es_enabled()) {
601         error_setg(errp, "SEV: unable to set CPU context: Not supported");
602         return -1;
603     }
604 
605     if (ctx_len < sizeof(struct sev_es_save_area)) {
606         error_setg(errp, "SEV: unable to set CPU context: "
607                      "Invalid context provided");
608         return -1;
609     }
610 
611     cpu = qemu_get_cpu(cpu_index);
612     if (!cpu) {
613         error_setg(errp, "SEV: unable to set CPU context for out of bounds "
614                      "CPU index %d", cpu_index);
615         return -1;
616     }
617 
618     /*
619      * If the context of this VP has already been set then replace it with the
620      * new context.
621      */
622     QTAILQ_FOREACH(launch_vmsa, &sev_common->launch_vmsa, next)
623     {
624         if (cpu_index == launch_vmsa->cpu_index) {
625             launch_vmsa->gpa = gpa;
626             memcpy(&launch_vmsa->vmsa, ctx, sizeof(launch_vmsa->vmsa));
627             exists = true;
628             break;
629         }
630     }
631 
632     if (!exists) {
633         /* New VP context */
634         launch_vmsa = g_new0(SevLaunchVmsa, 1);
635         memcpy(&launch_vmsa->vmsa, ctx, sizeof(launch_vmsa->vmsa));
636         launch_vmsa->cpu_index = cpu_index;
637         launch_vmsa->gpa = gpa;
638         QTAILQ_INSERT_TAIL(&sev_common->launch_vmsa, launch_vmsa, next);
639     }
640 
641     /* Synchronise the VMSA with the current CPU state */
642     sev_apply_cpu_context(cpu);
643 
644     return 0;
645 }
646 
647 bool
648 sev_enabled(void)
649 {
650     ConfidentialGuestSupport *cgs = MACHINE(qdev_get_machine())->cgs;
651 
652     return !!object_dynamic_cast(OBJECT(cgs), TYPE_SEV_COMMON);
653 }
654 
655 bool
656 sev_snp_enabled(void)
657 {
658     ConfidentialGuestSupport *cgs = MACHINE(qdev_get_machine())->cgs;
659 
660     return !!object_dynamic_cast(OBJECT(cgs), TYPE_SEV_SNP_GUEST);
661 }
662 
663 bool
664 sev_es_enabled(void)
665 {
666     ConfidentialGuestSupport *cgs = MACHINE(qdev_get_machine())->cgs;
667 
668     return sev_snp_enabled() ||
669             (sev_enabled() && SEV_GUEST(cgs)->policy & SEV_POLICY_ES);
670 }
671 
672 uint32_t
673 sev_get_cbit_position(void)
674 {
675     SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
676 
677     return sev_common ? sev_common->cbitpos : 0;
678 }
679 
680 uint32_t
681 sev_get_reduced_phys_bits(void)
682 {
683     SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
684 
685     return sev_common ? sev_common->reduced_phys_bits : 0;
686 }
687 
688 static SevInfo *sev_get_info(void)
689 {
690     SevInfo *info;
691     SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
692 
693     info = g_new0(SevInfo, 1);
694     info->enabled = sev_enabled();
695 
696     if (info->enabled) {
697         info->api_major = sev_common->api_major;
698         info->api_minor = sev_common->api_minor;
699         info->build_id = sev_common->build_id;
700         info->state = sev_common->state;
701 
702         if (sev_snp_enabled()) {
703             info->sev_type = SEV_GUEST_TYPE_SEV_SNP;
704             info->u.sev_snp.snp_policy =
705                 object_property_get_uint(OBJECT(sev_common), "policy", NULL);
706         } else {
707             info->sev_type = SEV_GUEST_TYPE_SEV;
708             info->u.sev.handle = SEV_GUEST(sev_common)->handle;
709             info->u.sev.policy =
710                 (uint32_t)object_property_get_uint(OBJECT(sev_common),
711                                                    "policy", NULL);
712         }
713     }
714 
715     return info;
716 }
717 
718 SevInfo *qmp_query_sev(Error **errp)
719 {
720     SevInfo *info;
721 
722     info = sev_get_info();
723     if (!info) {
724         error_setg(errp, "SEV feature is not available");
725         return NULL;
726     }
727 
728     return info;
729 }
730 
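/*
 * HMP "info sev" handler.  Example output for a plain SEV guest
 * (illustrative values):
 *
 *   SEV type: sev
 *   state: running
 *   build: 4
 *   api version: 1.55
 *   handle: 1
 *   debug: off
 *   key-sharing: on
 */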
731 void hmp_info_sev(Monitor *mon, const QDict *qdict)
732 {
733     SevInfo *info = sev_get_info();
734 
735     if (!info || !info->enabled) {
736         monitor_printf(mon, "SEV is not enabled\n");
737         goto out;
738     }
739 
740     monitor_printf(mon, "SEV type: %s\n", SevGuestType_str(info->sev_type));
741     monitor_printf(mon, "state: %s\n", SevState_str(info->state));
742     monitor_printf(mon, "build: %d\n", info->build_id);
743     monitor_printf(mon, "api version: %d.%d\n", info->api_major,
744                    info->api_minor);
745 
746     if (sev_snp_enabled()) {
747         monitor_printf(mon, "debug: %s\n",
748                        info->u.sev_snp.snp_policy & SEV_SNP_POLICY_DBG ? "on"
749                                                                        : "off");
750         monitor_printf(mon, "SMT allowed: %s\n",
751                        info->u.sev_snp.snp_policy & SEV_SNP_POLICY_SMT ? "on"
752                                                                        : "off");
753     } else {
754         monitor_printf(mon, "handle: %d\n", info->u.sev.handle);
755         monitor_printf(mon, "debug: %s\n",
756                        info->u.sev.policy & SEV_POLICY_NODBG ? "off" : "on");
757         monitor_printf(mon, "key-sharing: %s\n",
758                        info->u.sev.policy & SEV_POLICY_NOKS ? "off" : "on");
759     }
760 
761 out:
762     qapi_free_SevInfo(info);
763 }
764 
765 static int
766 sev_get_pdh_info(int fd, guchar **pdh, size_t *pdh_len, guchar **cert_chain,
767                  size_t *cert_chain_len, Error **errp)
768 {
769     guchar *pdh_data = NULL;
770     guchar *cert_chain_data = NULL;
771     struct sev_user_data_pdh_cert_export export = {};
772     int err, r;
773 
774     /* query the certificate length */
775     r = sev_platform_ioctl(fd, SEV_PDH_CERT_EXPORT, &export, &err);
776     if (r < 0) {
777         if (err != SEV_RET_INVALID_LEN) {
778             error_setg(errp, "SEV: Failed to export PDH cert"
779                              " ret=%d fw_err=%d (%s)",
780                        r, err, fw_error_to_str(err));
781             return 1;
782         }
783     }
784 
785     pdh_data = g_new(guchar, export.pdh_cert_len);
786     cert_chain_data = g_new(guchar, export.cert_chain_len);
787     export.pdh_cert_address = (unsigned long)pdh_data;
788     export.cert_chain_address = (unsigned long)cert_chain_data;
789 
790     r = sev_platform_ioctl(fd, SEV_PDH_CERT_EXPORT, &export, &err);
791     if (r < 0) {
792         error_setg(errp, "SEV: Failed to export PDH cert ret=%d fw_err=%d (%s)",
793                    r, err, fw_error_to_str(err));
794         goto e_free;
795     }
796 
797     *pdh = pdh_data;
798     *pdh_len = export.pdh_cert_len;
799     *cert_chain = cert_chain_data;
800     *cert_chain_len = export.cert_chain_len;
801     return 0;
802 
803 e_free:
804     g_free(pdh_data);
805     g_free(cert_chain_data);
806     return 1;
807 }
808 
809 static int sev_get_cpu0_id(int fd, guchar **id, size_t *id_len, Error **errp)
810 {
811     guchar *id_data;
812     struct sev_user_data_get_id2 get_id2 = {};
813     int err, r;
814 
815     /* query the ID length */
816     r = sev_platform_ioctl(fd, SEV_GET_ID2, &get_id2, &err);
817     if (r < 0 && err != SEV_RET_INVALID_LEN) {
818         error_setg(errp, "SEV: Failed to get ID ret=%d fw_err=%d (%s)",
819                    r, err, fw_error_to_str(err));
820         return 1;
821     }
822 
823     id_data = g_new(guchar, get_id2.length);
824     get_id2.address = (unsigned long)id_data;
825 
826     r = sev_platform_ioctl(fd, SEV_GET_ID2, &get_id2, &err);
827     if (r < 0) {
828         error_setg(errp, "SEV: Failed to get ID ret=%d fw_err=%d (%s)",
829                    r, err, fw_error_to_str(err));
830         goto err;
831     }
832 
833     *id = id_data;
834     *id_len = get_id2.length;
835     return 0;
836 
837 err:
838     g_free(id_data);
839     return 1;
840 }
841 
842 static SevCapability *sev_get_capabilities(Error **errp)
843 {
844     SevCapability *cap = NULL;
845     guchar *pdh_data = NULL;
846     guchar *cert_chain_data = NULL;
847     guchar *cpu0_id_data = NULL;
848     size_t pdh_len = 0, cert_chain_len = 0, cpu0_id_len = 0;
849     uint32_t ebx;
850     int fd;
851     SevCommonState *sev_common;
852     char *sev_device;
853 
854     if (!kvm_enabled()) {
855         error_setg(errp, "KVM not enabled");
856         return NULL;
857     }
858     if (kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_OP, NULL) < 0) {
859         error_setg(errp, "SEV is not enabled in KVM");
860         return NULL;
861     }
862 
863     sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
864     if (sev_common) {
865         sev_device = object_property_get_str(OBJECT(sev_common), "sev-device",
866                                              &error_abort);
867     } else {
868         sev_device = g_strdup(DEFAULT_SEV_DEVICE);
869     }
870 
871     fd = open(sev_device, O_RDWR);
872     if (fd < 0) {
873         error_setg_errno(errp, errno, "SEV: Failed to open %s",
874                          sev_device);
875         g_free(sev_device);
876         return NULL;
877     }
878     g_free(sev_device);
879 
880     if (sev_get_pdh_info(fd, &pdh_data, &pdh_len,
881                          &cert_chain_data, &cert_chain_len, errp)) {
882         goto out;
883     }
884 
885     if (sev_get_cpu0_id(fd, &cpu0_id_data, &cpu0_id_len, errp)) {
886         goto out;
887     }
888 
889     cap = g_new0(SevCapability, 1);
890     cap->pdh = g_base64_encode(pdh_data, pdh_len);
891     cap->cert_chain = g_base64_encode(cert_chain_data, cert_chain_len);
892     cap->cpu0_id = g_base64_encode(cpu0_id_data, cpu0_id_len);
893 
894     host_cpuid(0x8000001F, 0, NULL, &ebx, NULL, NULL);
895     cap->cbitpos = ebx & 0x3f;
896 
897     /*
898      * When the SEV feature is enabled, we lose one bit of guest physical
899      * addressing.
900      */
901     cap->reduced_phys_bits = 1;
902 
903 out:
904     g_free(cpu0_id_data);
905     g_free(pdh_data);
906     g_free(cert_chain_data);
907     close(fd);
908     return cap;
909 }
910 
911 SevCapability *qmp_query_sev_capabilities(Error **errp)
912 {
913     return sev_get_capabilities(errp);
914 }
915 
916 static OvmfSevMetadata *ovmf_sev_metadata_table;
917 
918 #define OVMF_SEV_META_DATA_GUID "dc886566-984a-4798-A75e-5585a7bf67cc"
919 typedef struct __attribute__((__packed__)) OvmfSevMetadataOffset {
920     uint32_t offset;
921 } OvmfSevMetadataOffset;
922 
923 OvmfSevMetadata *pc_system_get_ovmf_sev_metadata_ptr(void)
924 {
925     return ovmf_sev_metadata_table;
926 }
927 
928 void pc_system_parse_sev_metadata(uint8_t *flash_ptr, size_t flash_size)
929 {
930     OvmfSevMetadata     *metadata;
931     OvmfSevMetadataOffset  *data;
932 
933     if (!pc_system_ovmf_table_find(OVMF_SEV_META_DATA_GUID, (uint8_t **)&data,
934                                    NULL)) {
935         return;
936     }
937 
938     metadata = (OvmfSevMetadata *)(flash_ptr + flash_size - data->offset);
939     if (memcmp(metadata->signature, "ASEV", 4) != 0 ||
940         metadata->len < sizeof(OvmfSevMetadata) ||
941         metadata->len > flash_size - data->offset) {
942         return;
943     }
944 
945     ovmf_sev_metadata_table = g_memdup2(metadata, metadata->len);
946 }
947 
948 static SevAttestationReport *sev_get_attestation_report(const char *mnonce,
949                                                         Error **errp)
950 {
951     struct kvm_sev_attestation_report input = {};
952     SevAttestationReport *report = NULL;
953     SevCommonState *sev_common;
954     g_autofree guchar *data = NULL;
955     g_autofree guchar *buf = NULL;
956     gsize len;
957     int err = 0, ret;
958 
959     if (!sev_enabled()) {
960         error_setg(errp, "SEV is not enabled");
961         return NULL;
962     }
963 
964     /* decode the base64-encoded mnonce string */
965     buf = g_base64_decode(mnonce, &len);
966     if (!buf) {
967         error_setg(errp, "SEV: failed to decode mnonce input");
968         return NULL;
969     }
970 
971     /* verify the input mnonce length */
972     if (len != sizeof(input.mnonce)) {
973         error_setg(errp, "SEV: mnonce must be %zu bytes (got %" G_GSIZE_FORMAT ")",
974                 sizeof(input.mnonce), len);
975         return NULL;
976     }
977 
978     sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
979 
980     /* Query the report length */
981     ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_GET_ATTESTATION_REPORT,
982             &input, &err);
983     if (ret < 0) {
984         if (err != SEV_RET_INVALID_LEN) {
985             error_setg(errp, "SEV: Failed to query the attestation report"
986                              " length ret=%d fw_err=%d (%s)",
987                        ret, err, fw_error_to_str(err));
988             return NULL;
989         }
990     }
991 
992     data = g_malloc(input.len);
993     input.uaddr = (unsigned long)data;
994     memcpy(input.mnonce, buf, sizeof(input.mnonce));
995 
996     /* Query the report */
997     ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_GET_ATTESTATION_REPORT,
998             &input, &err);
999     if (ret) {
1000         error_setg_errno(errp, errno, "SEV: Failed to get attestation report"
1001                 " ret=%d fw_err=%d (%s)", ret, err, fw_error_to_str(err));
1002         return NULL;
1003     }
1004 
1005     report = g_new0(SevAttestationReport, 1);
1006     report->data = g_base64_encode(data, input.len);
1007 
1008     trace_kvm_sev_attestation_report(mnonce, report->data);
1009 
1010     return report;
1011 }
1012 
1013 SevAttestationReport *qmp_query_sev_attestation_report(const char *mnonce,
1014                                                        Error **errp)
1015 {
1016     return sev_get_attestation_report(mnonce, errp);
1017 }
1018 
1019 static int
1020 sev_read_file_base64(const char *filename, guchar **data, gsize *len)
1021 {
1022     gsize sz;
1023     g_autofree gchar *base64 = NULL;
1024     GError *error = NULL;
1025 
1026     if (!g_file_get_contents(filename, &base64, &sz, &error)) {
1027         error_report("SEV: Failed to read '%s' (%s)", filename, error->message);
1028         g_error_free(error);
1029         return -1;
1030     }
1031 
1032     *data = g_base64_decode(base64, len);
1033     return 0;
1034 }
1035 
1036 static int
1037 sev_snp_launch_start(SevCommonState *sev_common)
1038 {
1039     int fw_error, rc;
1040     SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(sev_common);
1041     struct kvm_sev_snp_launch_start *start = &sev_snp_guest->kvm_start_conf;
1042 
1043     trace_kvm_sev_snp_launch_start(start->policy,
1044                                    sev_snp_guest->guest_visible_workarounds);
1045 
1046     if (!kvm_enable_hypercall(BIT_ULL(KVM_HC_MAP_GPA_RANGE))) {
1047         return 1;
1048     }
1049 
1050     rc = sev_ioctl(sev_common->sev_fd, KVM_SEV_SNP_LAUNCH_START,
1051                    start, &fw_error);
1052     if (rc < 0) {
1053         error_report("%s: SNP_LAUNCH_START ret=%d fw_error=%d '%s'",
1054                 __func__, rc, fw_error, fw_error_to_str(fw_error));
1055         return 1;
1056     }
1057 
1058     QTAILQ_INIT(&launch_update);
1059 
1060     sev_set_guest_state(sev_common, SEV_STATE_LAUNCH_UPDATE);
1061 
1062     return 0;
1063 }
1064 
1065 static int
1066 sev_launch_start(SevCommonState *sev_common)
1067 {
1068     gsize sz;
1069     int ret = 1;
1070     int fw_error, rc;
1071     SevGuestState *sev_guest = SEV_GUEST(sev_common);
1072     struct kvm_sev_launch_start start = {
1073         .handle = sev_guest->handle, .policy = sev_guest->policy
1074     };
1075     guchar *session = NULL, *dh_cert = NULL;
1076 
1077     if (sev_guest->session_file) {
1078         if (sev_read_file_base64(sev_guest->session_file, &session, &sz) < 0) {
1079             goto out;
1080         }
1081         start.session_uaddr = (unsigned long)session;
1082         start.session_len = sz;
1083     }
1084 
1085     if (sev_guest->dh_cert_file) {
1086         if (sev_read_file_base64(sev_guest->dh_cert_file, &dh_cert, &sz) < 0) {
1087             goto out;
1088         }
1089         start.dh_uaddr = (unsigned long)dh_cert;
1090         start.dh_len = sz;
1091     }
1092 
1093     trace_kvm_sev_launch_start(start.policy, session, dh_cert);
1094     rc = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_START, &start, &fw_error);
1095     if (rc < 0) {
1096         error_report("%s: LAUNCH_START ret=%d fw_error=%d '%s'",
1097                 __func__, rc, fw_error, fw_error_to_str(fw_error));
1098         goto out;
1099     }
1100 
1101     sev_set_guest_state(sev_common, SEV_STATE_LAUNCH_UPDATE);
1102     sev_guest->handle = start.handle;
1103     ret = 0;
1104 
1105 out:
1106     g_free(session);
1107     g_free(dh_cert);
1108     return ret;
1109 }
1110 
1111 static void
1112 sev_snp_cpuid_report_mismatches(SnpCpuidInfo *old,
1113                                 SnpCpuidInfo *new)
1114 {
1115     size_t i;
1116 
1117     if (old->count != new->count) {
1118         error_report("SEV-SNP: CPUID validation failed due to count mismatch, "
1119                      "provided: %d, expected: %d", old->count, new->count);
1120         return;
1121     }
1122 
1123     for (i = 0; i < old->count; i++) {
1124         SnpCpuidFunc *old_func, *new_func;
1125 
1126         old_func = &old->entries[i];
1127         new_func = &new->entries[i];
1128 
1129         if (memcmp(old_func, new_func, sizeof(SnpCpuidFunc))) {
1130             error_report("SEV-SNP: CPUID validation failed for function 0x%x, index: 0x%x, "
1131                          "provided: eax:0x%08x, ebx: 0x%08x, ecx: 0x%08x, edx: 0x%08x, "
1132                          "expected: eax:0x%08x, ebx: 0x%08x, ecx: 0x%08x, edx: 0x%08x",
1133                          old_func->eax_in, old_func->ecx_in,
1134                          old_func->eax, old_func->ebx, old_func->ecx, old_func->edx,
1135                          new_func->eax, new_func->ebx, new_func->ecx, new_func->edx);
1136         }
1137     }
1138 }
1139 
1140 static const char *
1141 snp_page_type_to_str(int type)
1142 {
1143     switch (type) {
1144     case KVM_SEV_SNP_PAGE_TYPE_NORMAL: return "Normal";
1145     case KVM_SEV_SNP_PAGE_TYPE_ZERO: return "Zero";
1146     case KVM_SEV_SNP_PAGE_TYPE_UNMEASURED: return "Unmeasured";
1147     case KVM_SEV_SNP_PAGE_TYPE_SECRETS: return "Secrets";
1148     case KVM_SEV_SNP_PAGE_TYPE_CPUID: return "Cpuid";
1149     default: return "unknown";
1150     }
1151 }
1152 
1153 static int
1154 sev_snp_launch_update(SevSnpGuestState *sev_snp_guest,
1155                       SevLaunchUpdateData *data)
1156 {
1157     int ret, fw_error;
1158     SnpCpuidInfo snp_cpuid_info;
1159     struct kvm_sev_snp_launch_update update = {0};
1160 
1161     if (!data->hva || !data->len) {
1162         error_report("SNP_LAUNCH_UPDATE called with invalid address"
1163                      "/ length: %p / %zx",
1164                      data->hva, data->len);
1165         return 1;
1166     }
1167 
1168     if (data->type == KVM_SEV_SNP_PAGE_TYPE_CPUID) {
1169         /* Save a copy for comparison in case the LAUNCH_UPDATE fails */
1170         memcpy(&snp_cpuid_info, data->hva, sizeof(snp_cpuid_info));
1171     }
1172 
1173     update.uaddr = (__u64)(unsigned long)data->hva;
1174     update.gfn_start = data->gpa >> TARGET_PAGE_BITS;
1175     update.len = data->len;
1176     update.type = data->type;
1177 
1178     /*
1179      * KVM_SEV_SNP_LAUNCH_UPDATE requires that GPA ranges have the private
1180      * memory attribute set in advance.
1181      */
1182     ret = kvm_set_memory_attributes_private(data->gpa, data->len);
1183     if (ret) {
1184         error_report("SEV-SNP: failed to configure initial"
1185                      "private guest memory");
1186         goto out;
1187     }
1188 
1189     while (update.len || ret == -EAGAIN) {
1190         trace_kvm_sev_snp_launch_update(update.uaddr, update.gfn_start <<
1191                                         TARGET_PAGE_BITS, update.len,
1192                                         snp_page_type_to_str(update.type));
1193 
1194         ret = sev_ioctl(SEV_COMMON(sev_snp_guest)->sev_fd,
1195                         KVM_SEV_SNP_LAUNCH_UPDATE,
1196                         &update, &fw_error);
1197         if (ret && ret != -EAGAIN) {
1198             error_report("SNP_LAUNCH_UPDATE ret=%d fw_error=%d '%s'",
1199                          ret, fw_error, fw_error_to_str(fw_error));
1200 
1201             if (data->type == KVM_SEV_SNP_PAGE_TYPE_CPUID) {
1202                 sev_snp_cpuid_report_mismatches(&snp_cpuid_info, data->hva);
1203                 error_report("SEV-SNP: failed update CPUID page");
1204             }
1205             break;
1206         }
1207     }
1208 
1209 out:
1210     if (!ret && update.gfn_start << TARGET_PAGE_BITS != data->gpa + data->len) {
1211         error_report("SEV-SNP: expected update of GPA range %"
1212                      HWADDR_PRIx "-%" HWADDR_PRIx ", "
1213                      "got GPA range %" HWADDR_PRIx "-%llx",
1214                      data->gpa, data->gpa + data->len, data->gpa,
1215                      update.gfn_start << TARGET_PAGE_BITS);
1216         ret = -EIO;
1217     }
1218 
1219     return ret;
1220 }
1221 
1222 static uint32_t
1223 sev_snp_adjust_cpuid_features(X86ConfidentialGuest *cg, uint32_t feature, uint32_t index,
1224                             int reg, uint32_t value)
1225 {
1226     switch (feature) {
1227     case 1:
1228         if (reg == R_ECX) {
1229             return value & ~CPUID_EXT_TSC_DEADLINE_TIMER;
1230         }
1231         break;
1232     case 7:
1233         if (index == 0 && reg == R_EBX) {
1234             return value & ~CPUID_7_0_EBX_TSC_ADJUST;
1235         }
1236         if (index == 0 && reg == R_EDX) {
1237             return value & ~(CPUID_7_0_EDX_SPEC_CTRL |
1238                              CPUID_7_0_EDX_STIBP |
1239                              CPUID_7_0_EDX_FLUSH_L1D |
1240                              CPUID_7_0_EDX_ARCH_CAPABILITIES |
1241                              CPUID_7_0_EDX_CORE_CAPABILITY |
1242                              CPUID_7_0_EDX_SPEC_CTRL_SSBD);
1243         }
1244         break;
1245     case 0x80000008:
1246         if (reg == R_EBX) {
1247             return value & ~CPUID_8000_0008_EBX_VIRT_SSBD;
1248         }
1249         break;
1250     }
1251     return value;
1252 }
1253 
1254 static int sev_launch_update_data(SevCommonState *sev_common, hwaddr gpa,
1255                                   uint8_t *addr, size_t len, Error **errp)
1256 {
1257     int ret, fw_error;
1258     struct kvm_sev_launch_update_data update;
1259 
1260     if (!addr || !len) {
1261         return 1;
1262     }
1263 
1264     update.uaddr = (uintptr_t)addr;
1265     update.len = len;
1266     trace_kvm_sev_launch_update_data(addr, len);
1267     ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_UPDATE_DATA,
1268                     &update, &fw_error);
1269     if (ret) {
1270         error_setg(errp, "%s: LAUNCH_UPDATE ret=%d fw_error=%d '%s'", __func__,
1271                    ret, fw_error, fw_error_to_str(fw_error));
1272     }
1273 
1274     return ret;
1275 }
1276 
1277 static int
1278 sev_launch_update_vmsa(SevGuestState *sev_guest)
1279 {
1280     int ret, fw_error;
1281     CPUState *cpu;
1282 
1283     /*
1284      * The initial CPU state is measured as part of KVM_SEV_LAUNCH_UPDATE_VMSA.
1285      * Synchronise the CPU state to any provided launch VMSA structures.
1286      */
1287     CPU_FOREACH(cpu) {
1288         sev_apply_cpu_context(cpu);
1289     }
1290 
1291 
1292     ret = sev_ioctl(SEV_COMMON(sev_guest)->sev_fd, KVM_SEV_LAUNCH_UPDATE_VMSA,
1293                     NULL, &fw_error);
1294     if (ret) {
1295         error_report("%s: LAUNCH_UPDATE_VMSA ret=%d fw_error=%d '%s'",
1296                 __func__, ret, fw_error, fw_error_to_str(fw_error));
1297     }
1298 
1299     return ret;
1300 }
1301 
1302 static void
1303 sev_launch_get_measure(Notifier *notifier, void *unused)
1304 {
1305     SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
1306     SevGuestState *sev_guest = SEV_GUEST(sev_common);
1307     int ret, error;
1308     g_autofree guchar *data = NULL;
1309     struct kvm_sev_launch_measure measurement = {};
1310 
1311     if (!sev_check_state(sev_common, SEV_STATE_LAUNCH_UPDATE)) {
1312         return;
1313     }
1314 
1315     if (sev_es_enabled()) {
1316         /* measure all the VM save areas before getting launch_measure */
1317         ret = sev_launch_update_vmsa(sev_guest);
1318         if (ret) {
1319             exit(1);
1320         }
1321         kvm_mark_guest_state_protected();
1322     }
1323 
1324     /* query the measurement blob length */
1325     ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_MEASURE,
1326                     &measurement, &error);
1327     if (!measurement.len) {
1328         error_report("%s: LAUNCH_MEASURE ret=%d fw_error=%d '%s'",
1329                      __func__, ret, error, fw_error_to_str(error));
1330         return;
1331     }
1332 
1333     data = g_new0(guchar, measurement.len);
1334     measurement.uaddr = (unsigned long)data;
1335 
1336     /* get the measurement blob */
1337     ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_MEASURE,
1338                     &measurement, &error);
1339     if (ret) {
1340         error_report("%s: LAUNCH_MEASURE ret=%d fw_error=%d '%s'",
1341                      __func__, ret, error, fw_error_to_str(error));
1342         return;
1343     }
1344 
1345     sev_set_guest_state(sev_common, SEV_STATE_LAUNCH_SECRET);
1346 
1347     /* encode the measurement value and emit the event */
1348     sev_guest->measurement = g_base64_encode(data, measurement.len);
1349     trace_kvm_sev_launch_measurement(sev_guest->measurement);
1350 }
1351 
1352 static char *sev_get_launch_measurement(void)
1353 {
1354     ConfidentialGuestSupport *cgs = MACHINE(qdev_get_machine())->cgs;
1355     SevGuestState *sev_guest =
1356         (SevGuestState *)object_dynamic_cast(OBJECT(cgs), TYPE_SEV_GUEST);
1357 
1358     if (sev_guest &&
1359         SEV_COMMON(sev_guest)->state >= SEV_STATE_LAUNCH_SECRET) {
1360         return g_strdup(sev_guest->measurement);
1361     }
1362 
1363     return NULL;
1364 }
1365 
1366 SevLaunchMeasureInfo *qmp_query_sev_launch_measure(Error **errp)
1367 {
1368     char *data;
1369     SevLaunchMeasureInfo *info;
1370 
1371     data = sev_get_launch_measurement();
1372     if (!data) {
1373         error_setg(errp, "SEV launch measurement is not available");
1374         return NULL;
1375     }
1376 
1377     info = g_malloc0(sizeof(*info));
1378     info->data = data;
1379 
1380     return info;
1381 }
1382 
1383 static Notifier sev_machine_done_notify = {
1384     .notify = sev_launch_get_measure,
1385 };
1386 
1387 static void
1388 sev_launch_finish(SevCommonState *sev_common)
1389 {
1390     int ret, error;
1391 
1392     trace_kvm_sev_launch_finish();
1393     ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_FINISH, NULL,
1394                     &error);
1395     if (ret) {
1396         error_report("%s: LAUNCH_FINISH ret=%d fw_error=%d '%s'",
1397                      __func__, ret, error, fw_error_to_str(error));
1398         exit(1);
1399     }
1400 
1401     sev_set_guest_state(sev_common, SEV_STATE_RUNNING);
1402 
1403     /* add migration blocker */
1404     error_setg(&sev_mig_blocker,
1405                "SEV: Migration is not implemented");
1406     migrate_add_blocker(&sev_mig_blocker, &error_fatal);
1407 }
1408 
1409 static int snp_launch_update_data(uint64_t gpa, void *hva, size_t len,
1410                                   int type, Error **errp)
1411 {
1412     SevLaunchUpdateData *data;
1413 
1414     data = g_new0(SevLaunchUpdateData, 1);
1415     data->gpa = gpa;
1416     data->hva = hva;
1417     data->len = len;
1418     data->type = type;
1419 
1420     QTAILQ_INSERT_TAIL(&launch_update, data, next);
1421 
1422     return 0;
1423 }
1424 
1425 static int sev_snp_launch_update_data(SevCommonState *sev_common, hwaddr gpa,
1426                                       uint8_t *ptr, size_t len, Error **errp)
1427 {
1428     return snp_launch_update_data(gpa, ptr, len,
1429                                      KVM_SEV_SNP_PAGE_TYPE_NORMAL, errp);
1430 }
1431 
1432 static int
1433 sev_snp_cpuid_info_fill(SnpCpuidInfo *snp_cpuid_info,
1434                         const KvmCpuidInfo *kvm_cpuid_info, Error **errp)
1435 {
1436     size_t i;
1437 
1438     if (kvm_cpuid_info->cpuid.nent > SNP_CPUID_FUNCTION_MAXCOUNT) {
1439         error_setg(errp, "SEV-SNP: CPUID entry count (%d) exceeds max (%d)",
1440                      kvm_cpuid_info->cpuid.nent, SNP_CPUID_FUNCTION_MAXCOUNT);
1441         return -1;
1442     }
1443 
1444     memset(snp_cpuid_info, 0, sizeof(*snp_cpuid_info));
1445 
1446     for (i = 0; i < kvm_cpuid_info->cpuid.nent; i++) {
1447         const struct kvm_cpuid_entry2 *kvm_cpuid_entry;
1448         SnpCpuidFunc *snp_cpuid_entry;
1449 
1450         kvm_cpuid_entry = &kvm_cpuid_info->entries[i];
1451         snp_cpuid_entry = &snp_cpuid_info->entries[i];
1452 
1453         snp_cpuid_entry->eax_in = kvm_cpuid_entry->function;
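        /* KVM_CPUID_FLAG_SIGNIFCANT_INDEX: spelling as defined in <linux/kvm.h> */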
1454         if (kvm_cpuid_entry->flags == KVM_CPUID_FLAG_SIGNIFCANT_INDEX) {
1455             snp_cpuid_entry->ecx_in = kvm_cpuid_entry->index;
1456         }
1457         snp_cpuid_entry->eax = kvm_cpuid_entry->eax;
1458         snp_cpuid_entry->ebx = kvm_cpuid_entry->ebx;
1459         snp_cpuid_entry->ecx = kvm_cpuid_entry->ecx;
1460         snp_cpuid_entry->edx = kvm_cpuid_entry->edx;
1461 
1462         /*
1463          * Guest kernels will calculate EBX themselves using the 0xD
1464          * subfunctions corresponding to the individual XSAVE areas, so only
1465          * encode the base XSAVE size in the initial leaves, corresponding
1466          * to the initial XCR0=1 state.
1467          */
1468         if (snp_cpuid_entry->eax_in == 0xD &&
1469             (snp_cpuid_entry->ecx_in == 0x0 || snp_cpuid_entry->ecx_in == 0x1)) {
1470             snp_cpuid_entry->ebx = 0x240;
1471             snp_cpuid_entry->xcr0_in = 1;
1472             snp_cpuid_entry->xss_in = 0;
1473         }
1474     }
1475 
1476     snp_cpuid_info->count = i;
1477 
1478     return 0;
1479 }
1480 
1481 static int snp_launch_update_cpuid(uint32_t cpuid_addr, void *hva,
1482                                    size_t cpuid_len, Error **errp)
1483 {
1484     KvmCpuidInfo kvm_cpuid_info = {0};
1485     SnpCpuidInfo snp_cpuid_info;
1486     CPUState *cs = first_cpu;
1487     int ret;
1488     uint32_t i = 0;
1489 
1490     assert(sizeof(snp_cpuid_info) <= cpuid_len);
1491 
1492     /* get the cpuid list from KVM */
1493     do {
1494         kvm_cpuid_info.cpuid.nent = ++i;
1495         ret = kvm_vcpu_ioctl(cs, KVM_GET_CPUID2, &kvm_cpuid_info);
1496     } while (ret == -E2BIG);
1497 
1498     if (ret) {
1499         error_setg(errp, "SEV-SNP: unable to query CPUID values for CPU: '%s'",
1500                    strerror(-ret));
1501         return -1;
1502     }
1503 
1504     ret = sev_snp_cpuid_info_fill(&snp_cpuid_info, &kvm_cpuid_info, errp);
1505     if (ret < 0) {
1506         return -1;
1507     }
1508 
1509     memcpy(hva, &snp_cpuid_info, sizeof(snp_cpuid_info));
1510 
1511     return snp_launch_update_data(cpuid_addr, hva, cpuid_len,
1512                                   KVM_SEV_SNP_PAGE_TYPE_CPUID, errp);
1513 }
1514 
1515 static int snp_launch_update_kernel_hashes(SevSnpGuestState *sev_snp,
1516                                            uint32_t addr, void *hva,
1517                                            uint32_t len, Error **errp)
1518 {
1519     int type = KVM_SEV_SNP_PAGE_TYPE_ZERO;
1520     if (sev_snp->parent_obj.kernel_hashes) {
1521         assert(sev_snp->kernel_hashes_data);
1522         assert((sev_snp->kernel_hashes_offset +
1523                 sizeof(*sev_snp->kernel_hashes_data)) <= len);
1524         memset(hva, 0, len);
1525         memcpy(hva + sev_snp->kernel_hashes_offset, sev_snp->kernel_hashes_data,
1526                sizeof(*sev_snp->kernel_hashes_data));
1527         type = KVM_SEV_SNP_PAGE_TYPE_NORMAL;
1528     }
1529     return snp_launch_update_data(addr, hva, len, type, errp);
1530 }
1531 
1532 static int
1533 snp_metadata_desc_to_page_type(int desc_type)
1534 {
1535     switch (desc_type) {
1536     /* Add the unmeasured pre-validated pages as zero pages */
1537     case SEV_DESC_TYPE_SNP_SEC_MEM: return KVM_SEV_SNP_PAGE_TYPE_ZERO;
1538     case SEV_DESC_TYPE_SNP_SECRETS: return KVM_SEV_SNP_PAGE_TYPE_SECRETS;
1539     case SEV_DESC_TYPE_CPUID: return KVM_SEV_SNP_PAGE_TYPE_CPUID;
1540     default:
1541          return KVM_SEV_SNP_PAGE_TYPE_ZERO;
1542     }
1543 }
1544 
1545 static void
1546 snp_populate_metadata_pages(SevSnpGuestState *sev_snp,
1547                             OvmfSevMetadata *metadata)
1548 {
1549     OvmfSevMetadataDesc *desc;
1550     int type, ret, i;
1551     void *hva;
1552     MemoryRegion *mr = NULL;
1553 
1554     for (i = 0; i < metadata->num_desc; i++) {
1555         desc = &metadata->descs[i];
1556 
1557         type = snp_metadata_desc_to_page_type(desc->type);
1558 
1559         hva = gpa2hva(&mr, desc->base, desc->len, NULL);
1560         if (!hva) {
1561             error_report("%s: Failed to get HVA for GPA 0x%x sz 0x%x",
1562                          __func__, desc->base, desc->len);
1563             exit(1);
1564         }
1565 
1566         if (type == KVM_SEV_SNP_PAGE_TYPE_CPUID) {
1567             ret = snp_launch_update_cpuid(desc->base, hva, desc->len,
1568                                           &error_fatal);
1569         } else if (desc->type == SEV_DESC_TYPE_SNP_KERNEL_HASHES) {
1570             ret = snp_launch_update_kernel_hashes(sev_snp, desc->base, hva,
1571                                                   desc->len, &error_fatal);
1572         } else {
1573             ret = snp_launch_update_data(desc->base, hva, desc->len, type,
1574                                          &error_fatal);
1575         }
1576 
1577         if (ret) {
1578             error_report("%s: Failed to add metadata page gpa 0x%x+%x type %d",
1579                          __func__, desc->base, desc->len, desc->type);
1580             exit(1);
1581         }
1582     }
1583 }
1584 
1585 static void
1586 sev_snp_launch_finish(SevCommonState *sev_common)
1587 {
1588     int ret, error;
1589     Error *local_err = NULL;
1590     OvmfSevMetadata *metadata;
1591     SevLaunchUpdateData *data;
1592     SevSnpGuestState *sev_snp = SEV_SNP_GUEST(sev_common);
1593     struct kvm_sev_snp_launch_finish *finish = &sev_snp->kvm_finish_conf;
1594 
1595     /*
1596      * Populate all the metadata pages if not using an IGVM file. In the case
1597      * where an IGVM file is provided it will be used to configure the metadata
1598      * pages directly.
1599      */
1600     if (!X86_MACHINE(qdev_get_machine())->igvm) {
1601         /*
1602          * To boot the SNP guest, the hypervisor is required to populate the
1603          * CPUID and Secrets page before finalizing the launch flow. The
1604          * location of the secrets and CPUID page is available through the
1605          * OVMF metadata GUID.
1606          */
1607         metadata = pc_system_get_ovmf_sev_metadata_ptr();
1608         if (metadata == NULL) {
1609             error_report("%s: Failed to locate SEV metadata header", __func__);
1610             exit(1);
1611         }
1612 
1613         /* Populate all the metadata pages */
1614         snp_populate_metadata_pages(sev_snp, metadata);
1615     }
1616 
1617     QTAILQ_FOREACH(data, &launch_update, next) {
1618         ret = sev_snp_launch_update(sev_snp, data);
1619         if (ret) {
1620             exit(1);
1621         }
1622     }
1623 
1624     trace_kvm_sev_snp_launch_finish(sev_snp->id_block_base64, sev_snp->id_auth_base64,
1625                                     sev_snp->host_data);
1626     ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_SNP_LAUNCH_FINISH,
1627                     finish, &error);
1628     if (ret) {
1629         error_report("SNP_LAUNCH_FINISH ret=%d fw_error=%d '%s'",
1630                      ret, error, fw_error_to_str(error));
1631         exit(1);
1632     }
1633 
1634     kvm_mark_guest_state_protected();
1635     sev_set_guest_state(sev_common, SEV_STATE_RUNNING);
1636 
1637     /* add migration blocker */
1638     error_setg(&sev_mig_blocker,
1639                "SEV-SNP: Migration is not implemented");
1640     ret = migrate_add_blocker(&sev_mig_blocker, &local_err);
1641     if (local_err) {
1642         error_report_err(local_err);
1643         error_free(sev_mig_blocker);
1644         exit(1);
1645     }
1646 }
1647 
1648 
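/*
 * VM state change handler registered from sev_common_kvm_init(): the first
 * time the guest transitions to the running state, invoke the per-flavor
 * launch_finish hook so the launch flow is finalized only after all initial
 * guest content has been registered.
 */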
1649 static void
1650 sev_vm_state_change(void *opaque, bool running, RunState state)
1651 {
1652     SevCommonState *sev_common = opaque;
1653     SevCommonStateClass *klass = SEV_COMMON_GET_CLASS(opaque);
1654 
1655     if (running) {
1656         if (!sev_check_state(sev_common, SEV_STATE_RUNNING)) {
1657             klass->launch_finish(sev_common);
1658         }
1659     }
1660 }
1661 
1662 /*
1663  * This helper examines sev-guest properties to determine whether any options
1664  * have been set that rely on the newer KVM_SEV_INIT2 interface and associated
1665  * KVM VM types.
1666  */
1667 static bool sev_init2_required(SevGuestState *sev_guest)
1668 {
1669     /* Currently no KVM_SEV_INIT2-specific options are exposed via QEMU */
1670     return false;
1671 }
1672 
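/*
 * Pick the KVM VM type for plain SEV/SEV-ES guests: the legacy
 * KVM_X86_DEFAULT_VM when permitted by the legacy-vm-type property, otherwise
 * KVM_X86_SEV_VM or KVM_X86_SEV_ES_VM depending on the guest policy. The
 * result is cached in sev_common->kvm_type.
 */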
1673 static int sev_kvm_type(X86ConfidentialGuest *cg)
1674 {
1675     SevCommonState *sev_common = SEV_COMMON(cg);
1676     SevGuestState *sev_guest = SEV_GUEST(sev_common);
1677     int kvm_type;
1678 
1679     if (sev_common->kvm_type != -1) {
1680         goto out;
1681     }
1682 
1683     /* These are the only cases where legacy VM types can be used. */
1684     if (sev_guest->legacy_vm_type == ON_OFF_AUTO_ON ||
1685         (sev_guest->legacy_vm_type == ON_OFF_AUTO_AUTO &&
1686          !sev_init2_required(sev_guest))) {
1687         sev_common->kvm_type = KVM_X86_DEFAULT_VM;
1688         goto out;
1689     }
1690 
1691     /*
1692      * Newer VM types are required, either explicitly via legacy-vm-type=off, or
1693      * implicitly via legacy-vm-type=auto along with additional sev-guest
1694      * properties that require the newer VM types.
1695      */
1696     kvm_type = (sev_guest->policy & SEV_POLICY_ES) ?
1697                 KVM_X86_SEV_ES_VM : KVM_X86_SEV_VM;
1698     if (!kvm_is_vm_type_supported(kvm_type)) {
1699         if (sev_guest->legacy_vm_type == ON_OFF_AUTO_AUTO) {
1700             error_report("SEV: host kernel does not support requested %s VM type, which is required "
1701                          "for the set of options specified. To allow use of the legacy "
1702                          "KVM_X86_DEFAULT_VM VM type, please disable any options that are not "
1703                          "compatible with the legacy VM type, or upgrade your kernel.",
1704                          kvm_type == KVM_X86_SEV_VM ? "KVM_X86_SEV_VM" : "KVM_X86_SEV_ES_VM");
1705         } else {
1706             error_report("SEV: host kernel does not support requested %s VM type. To allow use of "
1707                          "the legacy KVM_X86_DEFAULT_VM VM type, the 'legacy-vm-type' argument "
1708                          "must be set to 'on' or 'auto' for the sev-guest object.",
1709                          kvm_type == KVM_X86_SEV_VM ? "KVM_X86_SEV_VM" : "KVM_X86_SEV_ES_VM");
1710         }
1711 
1712         return -1;
1713     }
1714 
1715     sev_common->kvm_type = kvm_type;
1716 out:
1717     return sev_common->kvm_type;
1718 }
1719 
1720 static int sev_snp_kvm_type(X86ConfidentialGuest *cg)
1721 {
1722     return KVM_X86_SNP_VM;
1723 }
1724 
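/*
 * Common initialization for all SEV flavors: validate cbitpos and
 * reduced-phys-bits against host CPUID 0x8000001F, open the SEV device,
 * query the platform status, issue KVM_SEV_INIT/KVM_SEV_ES_INIT or
 * KVM_SEV_INIT2 depending on the selected VM type, and then start the launch
 * flow via the per-flavor launch_start hook.
 */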
1725 static int sev_common_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
1726 {
1727     char *devname;
1728     int ret, fw_error, cmd;
1729     uint32_t ebx;
1730     uint32_t host_cbitpos;
1731     struct sev_user_data_status status = {};
1732     SevCommonState *sev_common = SEV_COMMON(cgs);
1733     SevCommonStateClass *klass = SEV_COMMON_GET_CLASS(cgs);
1734     X86ConfidentialGuestClass *x86_klass =
1735                                X86_CONFIDENTIAL_GUEST_GET_CLASS(cgs);
1736 
1737     sev_common->state = SEV_STATE_UNINIT;
1738 
1739     host_cpuid(0x8000001F, 0, NULL, &ebx, NULL, NULL);
1740     host_cbitpos = ebx & 0x3f;
1741 
1742     /*
1743      * The cbitpos value will be placed in bit positions 5:0 of the EBX
1744      * register of CPUID 0x8000001F. No need to verify the range as the
1745      * comparison against the host value accomplishes that.
1746      */
1747     if (host_cbitpos != sev_common->cbitpos) {
1748         error_setg(errp, "%s: cbitpos check failed, host '%d' requested '%d'",
1749                    __func__, host_cbitpos, sev_common->cbitpos);
1750         return -1;
1751     }
1752 
1753     /*
1754      * The reduced-phys-bits value will be placed in bit positions 11:6 of
1755      * the EBX register of CPUID 0x8000001F, so verify the supplied value
1756      * is in the range of 1 to 63.
1757      */
1758     if (sev_common->reduced_phys_bits < 1 ||
1759         sev_common->reduced_phys_bits > 63) {
1760         error_setg(errp, "%s: reduced_phys_bits check failed,"
1761                    " it should be in the range of 1 to 63, requested '%d'",
1762                    __func__, sev_common->reduced_phys_bits);
1763         return -1;
1764     }
1765 
1766     devname = object_property_get_str(OBJECT(sev_common), "sev-device", NULL);
1767     sev_common->sev_fd = open(devname, O_RDWR);
1768     if (sev_common->sev_fd < 0) {
1769         error_setg(errp, "%s: Failed to open %s '%s'", __func__,
1770                    devname, strerror(errno));
1771         g_free(devname);
1772         return -1;
1773     }
1774     g_free(devname);
1775 
1776     ret = sev_platform_ioctl(sev_common->sev_fd, SEV_PLATFORM_STATUS, &status,
1777                              &fw_error);
1778     if (ret) {
1779         error_setg(errp, "%s: failed to get platform status ret=%d "
1780                    "fw_error='%d: %s'", __func__, ret, fw_error,
1781                    fw_error_to_str(fw_error));
1782         return -1;
1783     }
1784     sev_common->build_id = status.build;
1785     sev_common->api_major = status.api_major;
1786     sev_common->api_minor = status.api_minor;
1787 
1788     if (sev_es_enabled()) {
1789         if (!kvm_kernel_irqchip_allowed()) {
1790             error_setg(errp, "%s: SEV-ES guests require in-kernel irqchip "
1791                        "support", __func__);
1792             return -1;
1793         }
1794     }
1795 
1796     if (sev_es_enabled() && !sev_snp_enabled()) {
1797         if (!(status.flags & SEV_STATUS_FLAGS_CONFIG_ES)) {
1798             error_setg(errp, "%s: guest policy requires SEV-ES, but "
1799                          "host SEV-ES support unavailable",
1800                          __func__);
1801             return -1;
1802         }
1803     }
1804 
1805     trace_kvm_sev_init();
1806     switch (x86_klass->kvm_type(X86_CONFIDENTIAL_GUEST(sev_common))) {
1807     case KVM_X86_DEFAULT_VM:
1808         cmd = sev_es_enabled() ? KVM_SEV_ES_INIT : KVM_SEV_INIT;
1809 
1810         ret = sev_ioctl(sev_common->sev_fd, cmd, NULL, &fw_error);
1811         break;
1812     case KVM_X86_SEV_VM:
1813     case KVM_X86_SEV_ES_VM:
1814     case KVM_X86_SNP_VM: {
1815         struct kvm_sev_init args = { 0 };
1816 
1817         ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_INIT2, &args, &fw_error);
1818         break;
1819     }
1820     default:
1821         error_setg(errp, "%s: host kernel does not support the requested SEV configuration.",
1822                    __func__);
1823         return -1;
1824     }
1825 
1826     if (ret) {
1827         error_setg(errp, "%s: failed to initialize ret=%d fw_error=%d '%s'",
1828                    __func__, ret, fw_error, fw_error_to_str(fw_error));
1829         return -1;
1830     }
1831 
1832     ret = klass->launch_start(sev_common);
1833 
1834     if (ret) {
1835         error_setg(errp, "%s: failed to create encryption context", __func__);
1836         return -1;
1837     }
1838 
1839     if (klass->kvm_init && klass->kvm_init(cgs, errp)) {
1840         return -1;
1841     }
1842 
1843     qemu_add_vm_change_state_handler(sev_vm_state_change, sev_common);
1844 
1845     cgs->ready = true;
1846 
1847     return 0;
1848 }
1849 
1850 static int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
1851 {
1852     int ret;
1853 
1854     /*
1855      * SEV/SEV-ES rely on pinned memory to back guest RAM so discarding
1856      * isn't actually possible. With SNP, only guest_memfd pages are used
1857      * for private guest memory, so discarding of shared memory is still
1858      * possible.
1859      */
1860     ret = ram_block_discard_disable(true);
1861     if (ret) {
1862         error_setg(errp, "%s: cannot disable RAM discard", __func__);
1863         return -1;
1864     }
1865 
1866     /*
1867      * SEV uses these notifiers to register/pin pages prior to guest use,
1868      * but SNP relies on guest_memfd for private pages, which has its
1869      * own internal mechanisms for registering/pinning private memory.
1870      */
1871     ram_block_notifier_add(&sev_ram_notifier);
1872 
1873     /*
1874      * The machine done notify event is used for SEV guests to get the
1875      * measurement of the encrypted images. When SEV-SNP is enabled, the
1876      * measurement is part of the guest attestation process where it can
1877      * be collected without any reliance on the VMM. So skip registering
1878      * the notifier for SNP in favor of using guest attestation instead.
1879      */
1880     qemu_add_machine_init_done_notifier(&sev_machine_done_notify);
1881 
1882     return 0;
1883 }
1884 
1885 static int sev_snp_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
1886 {
1887     MachineState *ms = MACHINE(qdev_get_machine());
1888     X86MachineState *x86ms = X86_MACHINE(ms);
1889 
1890     if (x86ms->smm == ON_OFF_AUTO_AUTO) {
1891         x86ms->smm = ON_OFF_AUTO_OFF;
1892     } else if (x86ms->smm == ON_OFF_AUTO_ON) {
1893         error_setg(errp, "SEV-SNP does not support SMM.");
1894         return -1;
1895     }
1896 
1897     return 0;
1898 }
1899 
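/*
 * Encrypt (and measure) a firmware flash range in place via the per-flavor
 * launch_update_data hook. Only meaningful while the guest is still in the
 * LAUNCH_UPDATE state; otherwise this is a no-op.
 */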
1900 int
1901 sev_encrypt_flash(hwaddr gpa, uint8_t *ptr, uint64_t len, Error **errp)
1902 {
1903     SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
1904     SevCommonStateClass *klass;
1905 
1906     if (!sev_common) {
1907         return 0;
1908     }
1909     klass = SEV_COMMON_GET_CLASS(sev_common);
1910 
1911     /* if SEV is in update state then encrypt the data else do nothing */
1912     if (sev_check_state(sev_common, SEV_STATE_LAUNCH_UPDATE)) {
1913         int ret;
1914 
1915         ret = klass->launch_update_data(sev_common, gpa, ptr, len, errp);
1916         if (ret < 0) {
1917             return ret;
1918         }
1919     }
1920 
1921     return 0;
1922 }
1923 
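/*
 * Inject a guest-owner-provided secret: decode the base64 packet header and
 * secret, translate the target GPA to a host address and issue
 * KVM_SEV_LAUNCH_SECRET. Only valid while the guest is in the LAUNCH_SECRET
 * state, i.e. after the launch measurement has been retrieved.
 */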
1924 int sev_inject_launch_secret(const char *packet_hdr, const char *secret,
1925                              uint64_t gpa, Error **errp)
1926 {
1927     ERRP_GUARD();
1928     struct kvm_sev_launch_secret input;
1929     g_autofree guchar *data = NULL, *hdr = NULL;
1930     int error, ret = 1;
1931     void *hva;
1932     gsize hdr_sz = 0, data_sz = 0;
1933     MemoryRegion *mr = NULL;
1934     SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
1935 
1936     if (!sev_common) {
1937         error_setg(errp, "SEV not enabled for guest");
1938         return 1;
1939     }
1940 
1941     /* secret can be injected only in this state */
1942     if (!sev_check_state(sev_common, SEV_STATE_LAUNCH_SECRET)) {
1943         error_setg(errp, "SEV: Not in correct state. (LSECRET) %x",
1944                    sev_common->state);
1945         return 1;
1946     }
1947 
1948     hdr = g_base64_decode(packet_hdr, &hdr_sz);
1949     if (!hdr || !hdr_sz) {
1950         error_setg(errp, "SEV: Failed to decode sequence header");
1951         return 1;
1952     }
1953 
1954     data = g_base64_decode(secret, &data_sz);
1955     if (!data || !data_sz) {
1956         error_setg(errp, "SEV: Failed to decode data");
1957         return 1;
1958     }
1959 
1960     hva = gpa2hva(&mr, gpa, data_sz, errp);
1961     if (!hva) {
1962         error_prepend(errp, "SEV: Failed to calculate guest address: ");
1963         return 1;
1964     }
1965 
1966     input.hdr_uaddr = (uint64_t)(unsigned long)hdr;
1967     input.hdr_len = hdr_sz;
1968 
1969     input.trans_uaddr = (uint64_t)(unsigned long)data;
1970     input.trans_len = data_sz;
1971 
1972     input.guest_uaddr = (uint64_t)(unsigned long)hva;
1973     input.guest_len = data_sz;
1974 
1975     trace_kvm_sev_launch_secret(gpa, input.guest_uaddr,
1976                                 input.trans_uaddr, input.trans_len);
1977 
1978     ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_SECRET,
1979                     &input, &error);
1980     if (ret) {
1981         error_setg(errp, "SEV: failed to inject secret ret=%d fw_error=%d '%s'",
1982                      ret, error, fw_error_to_str(error));
1983         return ret;
1984     }
1985 
1986     return 0;
1987 }
1988 
1989 #define SEV_SECRET_GUID "4c2eb361-7d9b-4cc3-8081-127c90d3d294"
1990 struct sev_secret_area {
1991     uint32_t base;
1992     uint32_t size;
1993 };
1994 
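/*
 * QMP wrapper for secret injection. If the caller does not supply a GPA, the
 * target address is taken from the SEV secret area GUID that OVMF publishes
 * in its GUIDed table.
 */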
1995 void qmp_sev_inject_launch_secret(const char *packet_hdr,
1996                                   const char *secret,
1997                                   bool has_gpa, uint64_t gpa,
1998                                   Error **errp)
1999 {
2000     if (!sev_enabled()) {
2001         error_setg(errp, "SEV not enabled for guest");
2002         return;
2003     }
2004     if (!has_gpa) {
2005         uint8_t *data;
2006         struct sev_secret_area *area;
2007 
2008         if (!pc_system_ovmf_table_find(SEV_SECRET_GUID, &data, NULL)) {
2009             error_setg(errp, "SEV: no secret area found in OVMF,"
2010                        " gpa must be specified.");
2011             return;
2012         }
2013         area = (struct sev_secret_area *)data;
2014         gpa = area->base;
2015     }
2016 
2017     sev_inject_launch_secret(packet_hdr, secret, gpa, errp);
2018 }
2019 
2020 static int
2021 sev_es_parse_reset_block(SevInfoBlock *info, uint32_t *addr)
2022 {
2023     if (!info->reset_addr) {
2024         error_report("SEV-ES reset address is zero");
2025         return 1;
2026     }
2027 
2028     *addr = info->reset_addr;
2029 
2030     return 0;
2031 }
2032 
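/*
 * Original (pre-GUID-table) flash footer layout, as parsed below; the block
 * ends 32 bytes before the end of the flash image:
 *
 *   [ SevInfoBlock | 16-bit block length | SEV info block GUID ]
 *                                                              ^
 *                                     flash_ptr + flash_size - 0x20
 */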
2033 static int
2034 sev_es_find_reset_vector(void *flash_ptr, uint64_t flash_size,
2035                          uint32_t *addr)
2036 {
2037     QemuUUID info_guid, *guid;
2038     SevInfoBlock *info;
2039     uint8_t *data;
2040     uint16_t *len;
2041 
2042     /*
2043      * Initialize the address to zero. An address of zero with a successful
2044      * return code indicates that SEV-ES is not active.
2045      */
2046     *addr = 0;
2047 
2048     /*
2049      * Extract the AP reset vector for SEV-ES guests by locating the SEV GUID.
2050      * The SEV GUID is located on its own (original implementation) or within
2051  * the Firmware GUID Table (new implementation), either of which is
2052      * located 32 bytes from the end of the flash.
2053      *
2054      * Check the Firmware GUID Table first.
2055      */
2056     if (pc_system_ovmf_table_find(SEV_INFO_BLOCK_GUID, &data, NULL)) {
2057         return sev_es_parse_reset_block((SevInfoBlock *)data, addr);
2058     }
2059 
2060     /*
2061      * SEV info block not found in the Firmware GUID Table (or there isn't
2062      * a Firmware GUID Table), fall back to the original implementation.
2063      */
2064     data = flash_ptr + flash_size - 0x20;
2065 
2066     qemu_uuid_parse(SEV_INFO_BLOCK_GUID, &info_guid);
2067     info_guid = qemu_uuid_bswap(info_guid); /* GUIDs are LE */
2068 
2069     guid = (QemuUUID *)(data - sizeof(info_guid));
2070     if (!qemu_uuid_is_equal(guid, &info_guid)) {
2071         error_report("SEV information block/Firmware GUID Table block not found in pflash rom");
2072         return 1;
2073     }
2074 
2075     len = (uint16_t *)((uint8_t *)guid - sizeof(*len));
2076     info = (SevInfoBlock *)(data - le16_to_cpu(*len));
2077 
2078     return sev_es_parse_reset_block(info, addr);
2079 }
2080 
2081 
2082 static void seg_to_vmsa(const SegmentCache *cpu_seg, struct vmcb_seg *vmsa_seg)
2083 {
2084     vmsa_seg->selector = cpu_seg->selector;
2085     vmsa_seg->base = cpu_seg->base;
2086     vmsa_seg->limit = cpu_seg->limit;
2087     vmsa_seg->attrib = FLAGS_SEGCACHE_TO_VMSA(cpu_seg->flags);
2088 }
2089 
2090 static void initialize_vmsa(const CPUState *cpu, struct sev_es_save_area *vmsa)
2091 {
2092     const X86CPU *x86 = X86_CPU(cpu);
2093     const CPUX86State *env = &x86->env;
2094 
2095     /*
2096      * Initialize the SEV-ES save area from the current state of
2097      * the CPU. The entire state does not need to be copied, only the state
2098      * that is copied back to the CPUState in sev_apply_cpu_context.
2099      */
2100     memset(vmsa, 0, sizeof(struct sev_es_save_area));
2101     vmsa->efer = env->efer;
2102     vmsa->cr0 = env->cr[0];
2103     vmsa->cr3 = env->cr[3];
2104     vmsa->cr4 = env->cr[4];
2105     vmsa->xcr0 = env->xcr0;
2106     vmsa->g_pat = env->pat;
2107 
2108     seg_to_vmsa(&env->segs[R_CS], &vmsa->cs);
2109     seg_to_vmsa(&env->segs[R_DS], &vmsa->ds);
2110     seg_to_vmsa(&env->segs[R_ES], &vmsa->es);
2111     seg_to_vmsa(&env->segs[R_FS], &vmsa->fs);
2112     seg_to_vmsa(&env->segs[R_GS], &vmsa->gs);
2113     seg_to_vmsa(&env->segs[R_SS], &vmsa->ss);
2114 
2115     seg_to_vmsa(&env->gdt, &vmsa->gdtr);
2116     seg_to_vmsa(&env->idt, &vmsa->idtr);
2117     seg_to_vmsa(&env->ldt, &vmsa->ldtr);
2118     seg_to_vmsa(&env->tr, &vmsa->tr);
2119 
2120     vmsa->dr6 = env->dr[6];
2121     vmsa->dr7 = env->dr[7];
2122 
2123     vmsa->rax = env->regs[R_EAX];
2124     vmsa->rcx = env->regs[R_ECX];
2125     vmsa->rdx = env->regs[R_EDX];
2126     vmsa->rbx = env->regs[R_EBX];
2127     vmsa->rsp = env->regs[R_ESP];
2128     vmsa->rbp = env->regs[R_EBP];
2129     vmsa->rsi = env->regs[R_ESI];
2130     vmsa->rdi = env->regs[R_EDI];
2131 
2132 #ifdef TARGET_X86_64
2133     vmsa->r8 = env->regs[R_R8];
2134     vmsa->r9 = env->regs[R_R9];
2135     vmsa->r10 = env->regs[R_R10];
2136     vmsa->r11 = env->regs[R_R11];
2137     vmsa->r12 = env->regs[R_R12];
2138     vmsa->r13 = env->regs[R_R13];
2139     vmsa->r14 = env->regs[R_R14];
2140     vmsa->r15 = env->regs[R_R15];
2141 #endif
2142 
2143     vmsa->rip = env->eip;
2144     vmsa->rflags = env->eflags;
2145 }
2146 
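/*
 * Build a reset context for every AP (the BSP keeps its normal reset state):
 * CS is pointed at the firmware reset vector with the usual real-mode
 * attributes, and RIP is set to the low 16 bits of the SEV-ES reset address.
 */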
2147 static void sev_es_set_ap_context(uint32_t reset_addr)
2148 {
2149     CPUState *cpu;
2150     struct sev_es_save_area vmsa;
2151     SegmentCache cs;
2152 
2153     cs.selector = 0xf000;
2154     cs.base = reset_addr & 0xffff0000;
2155     cs.limit = 0xffff;
2156     cs.flags = DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK |
2157                DESC_A_MASK;
2158 
2159     CPU_FOREACH(cpu) {
2160         if (cpu->cpu_index == 0) {
2161             /* Do not update the BSP reset state */
2162             continue;
2163         }
2164         initialize_vmsa(cpu, &vmsa);
2165         seg_to_vmsa(&cs, &vmsa.cs);
2166         vmsa.rip = reset_addr & 0x0000ffff;
2167         sev_set_cpu_context(cpu->cpu_index, &vmsa,
2168                             sizeof(struct sev_es_save_area),
2169                             0, &error_fatal);
2170     }
2171 }
2172 
2173 void sev_es_set_reset_vector(CPUState *cpu)
2174 {
2175     if (sev_enabled()) {
2176         sev_apply_cpu_context(cpu);
2177     }
2178 }
2179 
2180 int sev_es_save_reset_vector(void *flash_ptr, uint64_t flash_size)
2181 {
2182     uint32_t addr;
2183     int ret;
2184 
2185     if (!sev_es_enabled()) {
2186         return 0;
2187     }
2188 
2189     addr = 0;
2190     ret = sev_es_find_reset_vector(flash_ptr, flash_size,
2191                                    &addr);
2192     if (ret) {
2193         return ret;
2194     }
2195 
2196     /*
2197      * The reset vector is saved into a CPU context for each AP but not for
2198      * the BSP. This is applied during guest startup or when the CPU is reset.
2199      */
2200     if (addr) {
2201         sev_es_set_ap_context(addr);
2202     }
2203 
2204     return 0;
2205 }
2206 
2207 static const QemuUUID sev_hash_table_header_guid = {
2208     .data = UUID_LE(0x9438d606, 0x4f22, 0x4cc9, 0xb4, 0x79, 0xa7, 0x93,
2209                     0xd4, 0x11, 0xfd, 0x21)
2210 };
2211 
2212 static const QemuUUID sev_kernel_entry_guid = {
2213     .data = UUID_LE(0x4de79437, 0xabd2, 0x427f, 0xb8, 0x35, 0xd5, 0xb1,
2214                     0x72, 0xd2, 0x04, 0x5b)
2215 };
2216 static const QemuUUID sev_initrd_entry_guid = {
2217     .data = UUID_LE(0x44baf731, 0x3a2f, 0x4bd7, 0x9a, 0xf1, 0x41, 0xe2,
2218                     0x91, 0x69, 0x78, 0x1d)
2219 };
2220 static const QemuUUID sev_cmdline_entry_guid = {
2221     .data = UUID_LE(0x97d02dd8, 0xbd20, 0x4c94, 0xaa, 0x78, 0xe7, 0x71,
2222                     0x4d, 0x36, 0xab, 0x2a)
2223 };
2224 
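/*
 * Fill a PaddedSevHashTable with SHA-256 hashes of the kernel, initrd and
 * command line so that firmware supporting measured direct boot (e.g. the
 * AmdSev OVMF build) can verify the content it loads via fw_cfg; the table
 * itself is covered by the launch measurement.
 */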
2225 static bool build_kernel_loader_hashes(PaddedSevHashTable *padded_ht,
2226                                        SevKernelLoaderContext *ctx,
2227                                        Error **errp)
2228 {
2229     SevHashTable *ht;
2230     uint8_t cmdline_hash[HASH_SIZE];
2231     uint8_t initrd_hash[HASH_SIZE];
2232     uint8_t kernel_hash[HASH_SIZE];
2233     uint8_t *hashp;
2234     size_t hash_len = HASH_SIZE;
2235 
2236     /*
2237      * Calculate hash of kernel command-line with the terminating null byte. If
2238      * the user doesn't supply a command-line via -append, the 1-byte "\0" will
2239      * be used.
2240      */
2241     hashp = cmdline_hash;
2242     if (qcrypto_hash_bytes(QCRYPTO_HASH_ALGO_SHA256, ctx->cmdline_data,
2243                            ctx->cmdline_size, &hashp, &hash_len, errp) < 0) {
2244         return false;
2245     }
2246     assert(hash_len == HASH_SIZE);
2247 
2248     /*
2249      * Calculate hash of initrd. If the user doesn't supply an initrd via
2250      * -initrd, an empty buffer will be used (ctx->initrd_size == 0).
2251      */
2252     hashp = initrd_hash;
2253     if (qcrypto_hash_bytes(QCRYPTO_HASH_ALGO_SHA256, ctx->initrd_data,
2254                            ctx->initrd_size, &hashp, &hash_len, errp) < 0) {
2255         return false;
2256     }
2257     assert(hash_len == HASH_SIZE);
2258 
2259     /* Calculate hash of the kernel */
2260     hashp = kernel_hash;
2261     struct iovec iov[2] = {
2262         { .iov_base = ctx->setup_data, .iov_len = ctx->setup_size },
2263         { .iov_base = ctx->kernel_data, .iov_len = ctx->kernel_size }
2264     };
2265     if (qcrypto_hash_bytesv(QCRYPTO_HASH_ALGO_SHA256, iov, ARRAY_SIZE(iov),
2266                             &hashp, &hash_len, errp) < 0) {
2267         return false;
2268     }
2269     assert(hash_len == HASH_SIZE);
2270 
2271     ht = &padded_ht->ht;
2272 
2273     ht->guid = sev_hash_table_header_guid;
2274     ht->len = sizeof(*ht);
2275 
2276     ht->cmdline.guid = sev_cmdline_entry_guid;
2277     ht->cmdline.len = sizeof(ht->cmdline);
2278     memcpy(ht->cmdline.hash, cmdline_hash, sizeof(ht->cmdline.hash));
2279 
2280     ht->initrd.guid = sev_initrd_entry_guid;
2281     ht->initrd.len = sizeof(ht->initrd);
2282     memcpy(ht->initrd.hash, initrd_hash, sizeof(ht->initrd.hash));
2283 
2284     ht->kernel.guid = sev_kernel_entry_guid;
2285     ht->kernel.len = sizeof(ht->kernel);
2286     memcpy(ht->kernel.hash, kernel_hash, sizeof(ht->kernel.hash));
2287 
2288     /* zero the excess data so the measurement can be reliably calculated */
2289     memset(padded_ht->padding, 0, sizeof(padded_ht->padding));
2290 
2291     return true;
2292 }
2293 
2294 static bool sev_snp_build_kernel_loader_hashes(SevCommonState *sev_common,
2295                                                SevHashTableDescriptor *area,
2296                                                SevKernelLoaderContext *ctx,
2297                                                Error **errp)
2298 {
2299     /*
2300      * SNP: Populate the hashes table in a host buffer that
2301      * snp_launch_update_kernel_hashes() will later copy into guest memory
2302      * and encrypt.
2303      */
2304     SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(sev_common);
2305     sev_snp_guest->kernel_hashes_offset = area->base & ~TARGET_PAGE_MASK;
2306     sev_snp_guest->kernel_hashes_data = g_new0(PaddedSevHashTable, 1);
2307     return build_kernel_loader_hashes(sev_snp_guest->kernel_hashes_data, ctx, errp);
2308 }
2309 
2310 static bool sev_build_kernel_loader_hashes(SevCommonState *sev_common,
2311                                            SevHashTableDescriptor *area,
2312                                            SevKernelLoaderContext *ctx,
2313                                            Error **errp)
2314 {
2315     PaddedSevHashTable *padded_ht;
2316     hwaddr mapped_len = sizeof(*padded_ht);
2317     MemTxAttrs attrs = { 0 };
2318     bool ret = true;
2319 
2320     /*
2321      * Populate the hashes table in the guest's memory at the OVMF-designated
2322      * area for the SEV hashes table
2323      */
2324     padded_ht = address_space_map(&address_space_memory, area->base,
2325                                   &mapped_len, true, attrs);
2326     if (!padded_ht || mapped_len != sizeof(*padded_ht)) {
2327         error_setg(errp, "SEV: cannot map hashes table guest memory area");
2328         return false;
2329     }
2330 
2331     if (build_kernel_loader_hashes(padded_ht, ctx, errp)) {
2332         if (sev_encrypt_flash(area->base, (uint8_t *)padded_ht,
2333                               sizeof(*padded_ht), errp) < 0) {
2334             ret = false;
2335         }
2336     } else {
2337         ret = false;
2338     }
2339 
2340     address_space_unmap(&address_space_memory, padded_ht,
2341                         mapped_len, true, mapped_len);
2342 
2343     return ret;
2344 }
2345 
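/*
 * Illustrative command line for measured direct boot (cbitpos and the
 * firmware/kernel paths are host-specific):
 *
 *   qemu-system-x86_64 -machine q35,confidential-guest-support=sev0 \
 *     -object sev-guest,id=sev0,cbitpos=51,reduced-phys-bits=1,kernel-hashes=on \
 *     -bios OVMF.fd -kernel vmlinuz -initrd initrd.img -append "console=ttyS0"
 */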
2346 /*
2347  * Add the hashes of the linux kernel/initrd/cmdline to an encrypted guest page
2348  * which is included in SEV's initial memory measurement.
2349  */
2350 bool sev_add_kernel_loader_hashes(SevKernelLoaderContext *ctx, Error **errp)
2351 {
2352     uint8_t *data;
2353     SevHashTableDescriptor *area;
2354     SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
2355     SevCommonStateClass *klass = SEV_COMMON_GET_CLASS(sev_common);
2356 
2357     /*
2358      * Only add the kernel hashes if the sev-guest configuration explicitly
2359      * stated kernel-hashes=on.
2360      */
2361     if (!sev_common->kernel_hashes) {
2362         return false;
2363     }
2364 
2365     if (!pc_system_ovmf_table_find(SEV_HASH_TABLE_RV_GUID, &data, NULL)) {
2366         error_setg(errp, "SEV: kernel specified but guest firmware "
2367                          "has no hashes table GUID");
2368         return false;
2369     }
2370 
2371     area = (SevHashTableDescriptor *)data;
2372     if (!area->base || area->size < sizeof(PaddedSevHashTable)) {
2373         error_setg(errp, "SEV: guest firmware hashes table area is invalid "
2374                          "(base=0x%x size=0x%x)", area->base, area->size);
2375         return false;
2376     }
2377 
2378     return klass->build_kernel_loader_hashes(sev_common, area, ctx, errp);
2379 }
2380 
2381 static char *
2382 sev_common_get_sev_device(Object *obj, Error **errp)
2383 {
2384     return g_strdup(SEV_COMMON(obj)->sev_device);
2385 }
2386 
2387 static void
2388 sev_common_set_sev_device(Object *obj, const char *value, Error **errp)
2389 {
2390     SEV_COMMON(obj)->sev_device = g_strdup(value);
2391 }
2392 
2393 static bool sev_common_get_kernel_hashes(Object *obj, Error **errp)
2394 {
2395     return SEV_COMMON(obj)->kernel_hashes;
2396 }
2397 
2398 static void sev_common_set_kernel_hashes(Object *obj, bool value, Error **errp)
2399 {
2400     SEV_COMMON(obj)->kernel_hashes = value;
2401 }
2402 
2403 static bool cgs_check_support(ConfidentialGuestPlatformType platform,
2404                              uint16_t platform_version, uint8_t highest_vtl,
2405                              uint64_t shared_gpa_boundary)
2406 {
2407     return (((platform == CGS_PLATFORM_SEV_SNP) && sev_snp_enabled()) ||
2408             ((platform == CGS_PLATFORM_SEV_ES) && sev_es_enabled()) ||
2409             ((platform == CGS_PLATFORM_SEV) && sev_enabled()));
2410 }
2411 
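/*
 * ConfidentialGuestSupport hook used when initial guest state is supplied
 * externally (e.g. from an IGVM file): route each page to the appropriate
 * SEV, SEV-ES or SEV-SNP launch-update mechanism based on its page type.
 */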
2412 static int cgs_set_guest_state(hwaddr gpa, uint8_t *ptr, uint64_t len,
2413                                ConfidentialGuestPageType memory_type,
2414                                uint16_t cpu_index, Error **errp)
2415 {
2416     SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
2417     SevCommonStateClass *klass = SEV_COMMON_GET_CLASS(sev_common);
2418 
2419     if (!sev_enabled()) {
2420         error_setg(errp, "%s: attempt to configure guest memory, but SEV "
2421                      "is not enabled", __func__);
2422         return -1;
2423     }
2424 
2425     switch (memory_type) {
2426     case CGS_PAGE_TYPE_NORMAL:
2427     case CGS_PAGE_TYPE_ZERO:
2428         return klass->launch_update_data(sev_common, gpa, ptr, len, errp);
2429 
2430     case CGS_PAGE_TYPE_VMSA:
2431         if (!sev_es_enabled()) {
2432             error_setg(errp,
2433                        "%s: attempt to configure initial VMSA, but SEV-ES "
2434                        "is not supported",
2435                        __func__);
2436             return -1;
2437         }
2438         if (check_vmsa_supported(gpa, (const struct sev_es_save_area *)ptr,
2439                                  errp) < 0) {
2440             return -1;
2441         }
2442         return sev_set_cpu_context(cpu_index, ptr, len, gpa, errp);
2443 
2444     case CGS_PAGE_TYPE_UNMEASURED:
2445         if (sev_snp_enabled()) {
2446             return snp_launch_update_data(
2447                 gpa, ptr, len, KVM_SEV_SNP_PAGE_TYPE_UNMEASURED, errp);
2448         }
2449         /* No action required if not SEV-SNP */
2450         return 0;
2451 
2452     case CGS_PAGE_TYPE_SECRETS:
2453         if (!sev_snp_enabled()) {
2454             error_setg(errp,
2455                        "%s: attempt to configure secrets page, but SEV-SNP "
2456                        "is not supported",
2457                        __func__);
2458             return -1;
2459         }
2460         return snp_launch_update_data(gpa, ptr, len,
2461                                       KVM_SEV_SNP_PAGE_TYPE_SECRETS, errp);
2462 
2463     case CGS_PAGE_TYPE_REQUIRED_MEMORY:
2464         if (kvm_convert_memory(gpa, len, true) < 0) {
2465             error_setg(
2466                 errp,
2467                 "%s: failed to configure required memory. gpa: %lX, type: %d",
2468                 __func__, gpa, memory_type);
2469             return -1;
2470         }
2471         return 0;
2472 
2473     case CGS_PAGE_TYPE_CPUID:
2474         if (!sev_snp_enabled()) {
2475             error_setg(errp,
2476                        "%s: attempt to configure CPUID page, but SEV-SNP "
2477                        "is not supported",
2478                        __func__);
2479             return -1;
2480         }
2481         return snp_launch_update_cpuid(gpa, ptr, len, errp);
2482     }
2483     error_setg(errp, "%s: failed to update guest. gpa: %lX, type: %d", __func__,
2484                gpa, memory_type);
2485     return -1;
2486 }
2487 
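/*
 * ConfidentialGuestSupport hook exposing the e820 map one entry at a time;
 * returns non-zero once the index runs past the end of the table.
 */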
2488 static int cgs_get_mem_map_entry(int index,
2489                                  ConfidentialGuestMemoryMapEntry *entry,
2490                                  Error **errp)
2491 {
2492     struct e820_entry *table;
2493     int num_entries;
2494 
2495     num_entries = e820_get_table(&table);
2496     if ((index < 0) || (index >= num_entries)) {
2497         return 1;
2498     }
2499     entry->gpa = table[index].address;
2500     entry->size = table[index].length;
2501     switch (table[index].type) {
2502     case E820_RAM:
2503         entry->type = CGS_MEM_RAM;
2504         break;
2505     case E820_RESERVED:
2506         entry->type = CGS_MEM_RESERVED;
2507         break;
2508     case E820_ACPI:
2509         entry->type = CGS_MEM_ACPI;
2510         break;
2511     case E820_NVS:
2512         entry->type = CGS_MEM_NVS;
2513         break;
2514     case E820_UNUSABLE:
2515         entry->type = CGS_MEM_UNUSABLE;
2516         break;
2517     }
2518     return 0;
2519 }
2520 
2521 static void
2522 sev_common_class_init(ObjectClass *oc, const void *data)
2523 {
2524     ConfidentialGuestSupportClass *klass = CONFIDENTIAL_GUEST_SUPPORT_CLASS(oc);
2525 
2526     klass->kvm_init = sev_common_kvm_init;
2527 
2528     object_class_property_add_str(oc, "sev-device",
2529                                   sev_common_get_sev_device,
2530                                   sev_common_set_sev_device);
2531     object_class_property_set_description(oc, "sev-device",
2532             "SEV device to use");
2533     object_class_property_add_bool(oc, "kernel-hashes",
2534                                    sev_common_get_kernel_hashes,
2535                                    sev_common_set_kernel_hashes);
2536     object_class_property_set_description(oc, "kernel-hashes",
2537             "add kernel hashes to guest firmware for measured Linux boot");
2538 }
2539 
2540 static void
2541 sev_common_instance_init(Object *obj)
2542 {
2543     SevCommonState *sev_common = SEV_COMMON(obj);
2544     ConfidentialGuestSupportClass *cgs =
2545         CONFIDENTIAL_GUEST_SUPPORT_GET_CLASS(obj);
2546 
2547     sev_common->kvm_type = -1;
2548 
2549     sev_common->sev_device = g_strdup(DEFAULT_SEV_DEVICE);
2550 
2551     object_property_add_uint32_ptr(obj, "cbitpos", &sev_common->cbitpos,
2552                                    OBJ_PROP_FLAG_READWRITE);
2553     object_property_add_uint32_ptr(obj, "reduced-phys-bits",
2554                                    &sev_common->reduced_phys_bits,
2555                                    OBJ_PROP_FLAG_READWRITE);
2556     cgs->check_support = cgs_check_support;
2557     cgs->set_guest_state = cgs_set_guest_state;
2558     cgs->get_mem_map_entry = cgs_get_mem_map_entry;
2559 
2560     QTAILQ_INIT(&sev_common->launch_vmsa);
2561 }
2562 
2563 /* sev guest info common to sev/sev-es/sev-snp */
2564 static const TypeInfo sev_common_info = {
2565     .parent = TYPE_X86_CONFIDENTIAL_GUEST,
2566     .name = TYPE_SEV_COMMON,
2567     .instance_size = sizeof(SevCommonState),
2568     .instance_init = sev_common_instance_init,
2569     .class_size = sizeof(SevCommonStateClass),
2570     .class_init = sev_common_class_init,
2571     .abstract = true,
2572     .interfaces = (const InterfaceInfo[]) {
2573         { TYPE_USER_CREATABLE },
2574         { }
2575     }
2576 };
2577 
2578 static char *
2579 sev_guest_get_dh_cert_file(Object *obj, Error **errp)
2580 {
2581     return g_strdup(SEV_GUEST(obj)->dh_cert_file);
2582 }
2583 
2584 static void
2585 sev_guest_set_dh_cert_file(Object *obj, const char *value, Error **errp)
2586 {
2587     SEV_GUEST(obj)->dh_cert_file = g_strdup(value);
2588 }
2589 
2590 static char *
2591 sev_guest_get_session_file(Object *obj, Error **errp)
2592 {
2593     SevGuestState *sev_guest = SEV_GUEST(obj);
2594 
2595     return sev_guest->session_file ? g_strdup(sev_guest->session_file) : NULL;
2596 }
2597 
2598 static void
2599 sev_guest_set_session_file(Object *obj, const char *value, Error **errp)
2600 {
2601     SEV_GUEST(obj)->session_file = g_strdup(value);
2602 }
2603 
2604 static void sev_guest_get_legacy_vm_type(Object *obj, Visitor *v,
2605                                          const char *name, void *opaque,
2606                                          Error **errp)
2607 {
2608     SevGuestState *sev_guest = SEV_GUEST(obj);
2609     OnOffAuto legacy_vm_type = sev_guest->legacy_vm_type;
2610 
2611     visit_type_OnOffAuto(v, name, &legacy_vm_type, errp);
2612 }
2613 
2614 static void sev_guest_set_legacy_vm_type(Object *obj, Visitor *v,
2615                                          const char *name, void *opaque,
2616                                          Error **errp)
2617 {
2618     SevGuestState *sev_guest = SEV_GUEST(obj);
2619 
2620     visit_type_OnOffAuto(v, name, &sev_guest->legacy_vm_type, errp);
2621 }
2622 
2623 static void
2624 sev_guest_class_init(ObjectClass *oc, const void *data)
2625 {
2626     SevCommonStateClass *klass = SEV_COMMON_CLASS(oc);
2627     X86ConfidentialGuestClass *x86_klass = X86_CONFIDENTIAL_GUEST_CLASS(oc);
2628 
2629     klass->build_kernel_loader_hashes = sev_build_kernel_loader_hashes;
2630     klass->launch_start = sev_launch_start;
2631     klass->launch_finish = sev_launch_finish;
2632     klass->launch_update_data = sev_launch_update_data;
2633     klass->kvm_init = sev_kvm_init;
2634     x86_klass->kvm_type = sev_kvm_type;
2635 
2636     object_class_property_add_str(oc, "dh-cert-file",
2637                                   sev_guest_get_dh_cert_file,
2638                                   sev_guest_set_dh_cert_file);
2639     object_class_property_set_description(oc, "dh-cert-file",
2640             "guest owner's DH certificate (encoded with base64)");
2641     object_class_property_add_str(oc, "session-file",
2642                                   sev_guest_get_session_file,
2643                                   sev_guest_set_session_file);
2644     object_class_property_set_description(oc, "session-file",
2645             "guest owner's session parameters (encoded with base64)");
2646     object_class_property_add(oc, "legacy-vm-type", "OnOffAuto",
2647                               sev_guest_get_legacy_vm_type,
2648                               sev_guest_set_legacy_vm_type, NULL, NULL);
2649     object_class_property_set_description(oc, "legacy-vm-type",
2650             "use legacy VM type to maintain measurement compatibility with older QEMU or kernel versions.");
2651 }
2652 
2653 static void
2654 sev_guest_instance_init(Object *obj)
2655 {
2656     SevGuestState *sev_guest = SEV_GUEST(obj);
2657 
2658     sev_guest->policy = DEFAULT_GUEST_POLICY;
2659     object_property_add_uint32_ptr(obj, "handle", &sev_guest->handle,
2660                                    OBJ_PROP_FLAG_READWRITE);
2661     object_property_add_uint32_ptr(obj, "policy", &sev_guest->policy,
2662                                    OBJ_PROP_FLAG_READWRITE);
2663     object_apply_compat_props(obj);
2664 
2665     sev_guest->legacy_vm_type = ON_OFF_AUTO_AUTO;
2666 }
2667 
2668 /* guest info specific sev/sev-es */
2669 static const TypeInfo sev_guest_info = {
2670     .parent = TYPE_SEV_COMMON,
2671     .name = TYPE_SEV_GUEST,
2672     .instance_size = sizeof(SevGuestState),
2673     .instance_init = sev_guest_instance_init,
2674     .class_init = sev_guest_class_init,
2675 };
2676 
2677 static void
2678 sev_snp_guest_get_policy(Object *obj, Visitor *v, const char *name,
2679                          void *opaque, Error **errp)
2680 {
2681     visit_type_uint64(v, name,
2682                       (uint64_t *)&SEV_SNP_GUEST(obj)->kvm_start_conf.policy,
2683                       errp);
2684 }
2685 
2686 static void
2687 sev_snp_guest_set_policy(Object *obj, Visitor *v, const char *name,
2688                          void *opaque, Error **errp)
2689 {
2690     visit_type_uint64(v, name,
2691                       (uint64_t *)&SEV_SNP_GUEST(obj)->kvm_start_conf.policy,
2692                       errp);
2693 }
2694 
2695 static char *
2696 sev_snp_guest_get_guest_visible_workarounds(Object *obj, Error **errp)
2697 {
2698     return g_strdup(SEV_SNP_GUEST(obj)->guest_visible_workarounds);
2699 }
2700 
2701 static void
2702 sev_snp_guest_set_guest_visible_workarounds(Object *obj, const char *value,
2703                                             Error **errp)
2704 {
2705     SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
2706     struct kvm_sev_snp_launch_start *start = &sev_snp_guest->kvm_start_conf;
2707     g_autofree guchar *blob;
2708     gsize len;
2709 
2710     g_free(sev_snp_guest->guest_visible_workarounds);
2711 
2712     /* store the base64 str so we don't need to re-encode in getter */
2713     sev_snp_guest->guest_visible_workarounds = g_strdup(value);
2714 
2715     blob = qbase64_decode(sev_snp_guest->guest_visible_workarounds,
2716                           -1, &len, errp);
2717     if (!blob) {
2718         return;
2719     }
2720 
2721     if (len != sizeof(start->gosvw)) {
2722         error_setg(errp, "parameter length of %" G_GSIZE_FORMAT
2723                    " not equal to %zu",
2724                    len, sizeof(start->gosvw));
2725         return;
2726     }
2727 
2728     memcpy(start->gosvw, blob, len);
2729 }
2730 
2731 static char *
2732 sev_snp_guest_get_id_block(Object *obj, Error **errp)
2733 {
2734     SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
2735 
2736     return g_strdup(sev_snp_guest->id_block_base64);
2737 }
2738 
2739 static void
2740 sev_snp_guest_set_id_block(Object *obj, const char *value, Error **errp)
2741 {
2742     SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
2743     struct kvm_sev_snp_launch_finish *finish = &sev_snp_guest->kvm_finish_conf;
2744     gsize len;
2745 
2746     finish->id_block_en = 0;
2747     g_free(sev_snp_guest->id_block);
2748     g_free(sev_snp_guest->id_block_base64);
2749 
2750     /* store the base64 str so we don't need to re-encode in getter */
2751     sev_snp_guest->id_block_base64 = g_strdup(value);
2752     sev_snp_guest->id_block =
2753         qbase64_decode(sev_snp_guest->id_block_base64, -1, &len, errp);
2754 
2755     if (!sev_snp_guest->id_block) {
2756         return;
2757     }
2758 
2759     if (len != KVM_SEV_SNP_ID_BLOCK_SIZE) {
2760         error_setg(errp, "parameter length of %" G_GSIZE_FORMAT
2761                    " not equal to %u",
2762                    len, KVM_SEV_SNP_ID_BLOCK_SIZE);
2763         return;
2764     }
2765 
2766     finish->id_block_en = 1;
2767     finish->id_block_uaddr = (uintptr_t)sev_snp_guest->id_block;
2768 }
2769 
2770 static char *
2771 sev_snp_guest_get_id_auth(Object *obj, Error **errp)
2772 {
2773     SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
2774 
2775     return g_strdup(sev_snp_guest->id_auth_base64);
2776 }
2777 
2778 static void
2779 sev_snp_guest_set_id_auth(Object *obj, const char *value, Error **errp)
2780 {
2781     SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
2782     struct kvm_sev_snp_launch_finish *finish = &sev_snp_guest->kvm_finish_conf;
2783     gsize len;
2784 
2785     finish->id_auth_uaddr = 0;
2786     g_free(sev_snp_guest->id_auth);
2787     g_free(sev_snp_guest->id_auth_base64);
2788 
2789     /* store the base64 str so we don't need to re-encode in getter */
2790     sev_snp_guest->id_auth_base64 = g_strdup(value);
2791     sev_snp_guest->id_auth =
2792         qbase64_decode(sev_snp_guest->id_auth_base64, -1, &len, errp);
2793 
2794     if (!sev_snp_guest->id_auth) {
2795         return;
2796     }
2797 
2798     if (len > KVM_SEV_SNP_ID_AUTH_SIZE) {
2799         error_setg(errp, "parameter length of ID_AUTH %" G_GSIZE_FORMAT
2800                    " exceeds max of %u",
2801                    len, KVM_SEV_SNP_ID_AUTH_SIZE);
2802         return;
2803     }
2804 
2805     finish->id_auth_uaddr = (uintptr_t)sev_snp_guest->id_auth;
2806 }
2807 
2808 static bool
2809 sev_snp_guest_get_author_key_enabled(Object *obj, Error **errp)
2810 {
2811     SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
2812 
2813     return !!sev_snp_guest->kvm_finish_conf.auth_key_en;
2814 }
2815 
2816 static void
2817 sev_snp_guest_set_author_key_enabled(Object *obj, bool value, Error **errp)
2818 {
2819     SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
2820 
2821     sev_snp_guest->kvm_finish_conf.auth_key_en = value;
2822 }
2823 
2824 static bool
2825 sev_snp_guest_get_vcek_disabled(Object *obj, Error **errp)
2826 {
2827     SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
2828 
2829     return !!sev_snp_guest->kvm_finish_conf.vcek_disabled;
2830 }
2831 
2832 static void
2833 sev_snp_guest_set_vcek_disabled(Object *obj, bool value, Error **errp)
2834 {
2835     SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
2836 
2837     sev_snp_guest->kvm_finish_conf.vcek_disabled = value;
2838 }
2839 
2840 static char *
2841 sev_snp_guest_get_host_data(Object *obj, Error **errp)
2842 {
2843     SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
2844 
2845     return g_strdup(sev_snp_guest->host_data);
2846 }
2847 
2848 static void
2849 sev_snp_guest_set_host_data(Object *obj, const char *value, Error **errp)
2850 {
2851     SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
2852     struct kvm_sev_snp_launch_finish *finish = &sev_snp_guest->kvm_finish_conf;
2853     g_autofree guchar *blob;
2854     gsize len;
2855 
2856     g_free(sev_snp_guest->host_data);
2857 
2858     /* store the base64 str so we don't need to re-encode in getter */
2859     sev_snp_guest->host_data = g_strdup(value);
2860 
2861     blob = qbase64_decode(sev_snp_guest->host_data, -1, &len, errp);
2862 
2863     if (!blob) {
2864         return;
2865     }
2866 
2867     if (len != sizeof(finish->host_data)) {
2868         error_setg(errp, "parameter length of %" G_GSIZE_FORMAT
2869                    " not equal to %zu",
2870                    len, sizeof(finish->host_data));
2871         return;
2872     }
2873 
2874     memcpy(finish->host_data, blob, len);
2875 }
2876 
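/*
 * Illustrative command line for an SEV-SNP guest (cbitpos is host-specific):
 *
 *   qemu-system-x86_64 -machine q35,confidential-guest-support=snp0 \
 *     -object sev-snp-guest,id=snp0,cbitpos=51,reduced-phys-bits=1 \
 *     -bios OVMF.fd ...
 */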
2877 static void
2878 sev_snp_guest_class_init(ObjectClass *oc, const void *data)
2879 {
2880     SevCommonStateClass *klass = SEV_COMMON_CLASS(oc);
2881     X86ConfidentialGuestClass *x86_klass = X86_CONFIDENTIAL_GUEST_CLASS(oc);
2882 
2883     klass->build_kernel_loader_hashes = sev_snp_build_kernel_loader_hashes;
2884     klass->launch_start = sev_snp_launch_start;
2885     klass->launch_finish = sev_snp_launch_finish;
2886     klass->launch_update_data = sev_snp_launch_update_data;
2887     klass->kvm_init = sev_snp_kvm_init;
2888     x86_klass->adjust_cpuid_features = sev_snp_adjust_cpuid_features;
2889     x86_klass->kvm_type = sev_snp_kvm_type;
2890 
2891     object_class_property_add(oc, "policy", "uint64",
2892                               sev_snp_guest_get_policy,
2893                               sev_snp_guest_set_policy, NULL, NULL);
2894     object_class_property_add_str(oc, "guest-visible-workarounds",
2895                                   sev_snp_guest_get_guest_visible_workarounds,
2896                                   sev_snp_guest_set_guest_visible_workarounds);
2897     object_class_property_add_str(oc, "id-block",
2898                                   sev_snp_guest_get_id_block,
2899                                   sev_snp_guest_set_id_block);
2900     object_class_property_add_str(oc, "id-auth",
2901                                   sev_snp_guest_get_id_auth,
2902                                   sev_snp_guest_set_id_auth);
2903     object_class_property_add_bool(oc, "author-key-enabled",
2904                                    sev_snp_guest_get_author_key_enabled,
2905                                    sev_snp_guest_set_author_key_enabled);
2906     object_class_property_add_bool(oc, "vcek-disabled",
2907                                    sev_snp_guest_get_vcek_disabled,
2908                                    sev_snp_guest_set_vcek_disabled);
2909     object_class_property_add_str(oc, "host-data",
2910                                   sev_snp_guest_get_host_data,
2911                                   sev_snp_guest_set_host_data);
2912 }
2913 
2914 static void
2915 sev_snp_guest_instance_init(Object *obj)
2916 {
2917     ConfidentialGuestSupport *cgs = CONFIDENTIAL_GUEST_SUPPORT(obj);
2918     SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
2919 
2920     cgs->require_guest_memfd = true;
2921 
2922     /* default init/start/finish params for kvm */
2923     sev_snp_guest->kvm_start_conf.policy = DEFAULT_SEV_SNP_POLICY;
2924 }
2925 
2926 /* guest info specific to sev-snp */
2927 static const TypeInfo sev_snp_guest_info = {
2928     .parent = TYPE_SEV_COMMON,
2929     .name = TYPE_SEV_SNP_GUEST,
2930     .instance_size = sizeof(SevSnpGuestState),
2931     .class_init = sev_snp_guest_class_init,
2932     .instance_init = sev_snp_guest_instance_init,
2933 };
2934 
2935 static void
2936 sev_register_types(void)
2937 {
2938     type_register_static(&sev_common_info);
2939     type_register_static(&sev_guest_info);
2940     type_register_static(&sev_snp_guest_info);
2941 }
2942 
2943 type_init(sev_register_types);
2944