/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>

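/*
 * Physical address of a struct page with the SME/SEV encryption bit
 * (C-bit) set, for pages that hardware must treat as encrypted.
 */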
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

static const struct svm_host_save_msrs {
	u32 index;		/* Index of the MSR */
	bool sev_es_restored;	/* True if MSR is restored on SEV-ES VMEXIT */
} host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
	{ .index = MSR_STAR,			.sev_es_restored = true },
	{ .index = MSR_LSTAR,			.sev_es_restored = true },
	{ .index = MSR_CSTAR,			.sev_es_restored = true },
	{ .index = MSR_SYSCALL_MASK,		.sev_es_restored = true },
	{ .index = MSR_KERNEL_GS_BASE,		.sev_es_restored = true },
	{ .index = MSR_FS_BASE,			.sev_es_restored = true },
#endif
	{ .index = MSR_IA32_SYSENTER_CS,	.sev_es_restored = true },
	{ .index = MSR_IA32_SYSENTER_ESP,	.sev_es_restored = true },
	{ .index = MSR_IA32_SYSENTER_EIP,	.sev_es_restored = true },
	{ .index = MSR_TSC_AUX,			.sev_es_restored = false },
};
#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
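
/*
 * Illustrative sketch (not the exact in-tree code): svm.c saves these MSRs
 * around VMRUN roughly as
 *
 *	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
 *		rdmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]);
 *
 * and restores them with wrmsrl() afterwards. For SEV-ES guests, entries
 * with .sev_es_restored == true are reloaded by hardware from the host
 * save area on VMEXIT, so only the remaining ones (e.g. MSR_TSC_AUX) need
 * software handling.
 */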

#define MAX_DIRECT_ACCESS_MSRS	18
#define MSRPM_OFFSETS	16
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;

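/*
 * VMCB clean bits: each enum value below names a group of VMCB fields that
 * the CPU may cache across VMRUNs. Whenever KVM modifies one of those
 * fields it must clear the corresponding bit in vmcb->control.clean (see
 * vmcb_mark_dirty() below) so hardware reloads the field on the next VMRUN.
 */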
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,        /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,         /* DR6, DR7 */
	VMCB_DT,         /* GDT, IDT */
	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
	VMCB_CR2,        /* CR2 only */
	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_DIRTY_MAX,
};

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct svm_nested_state {
	struct vmcb *hsave;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;

	/* These are the merged vectors */
	u32 *msrpm;

	/* A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet.  */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_control_area ctl;

	bool initialized;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	struct vmcb *vmcb;
	unsigned long vmcb_pa;
	struct svm_cpu_data *svm_data;
	u32 asid;
	uint64_t asid_generation;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
	struct {
		u16 fs;
		u16 gs;
		u16 ldt;
		u64 gs_base;
	} host;

	u64 spec_ctrl;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	unsigned int3_injected;
	unsigned long int3_rip;

	/* cached guest cpuid flags for faster access */
	bool nrips_enabled	: 1;

	u32 ldr_reg;
	u32 dfr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;
	bool avic_is_running;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vcpu affinity. This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;

	/* SEV-ES support */
	struct vmcb_save_area *vmsa;
	struct ghcb *ghcb;
	struct kvm_host_map ghcb_map;

	/* SEV-ES scratch area support */
	void *ghcb_sa;
	u64 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;
};

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

static inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}

static inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev_guest(kvm) && sev->es_active;
#else
	return false;
#endif
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}
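
/*
 * Illustrative use of the helpers above (hypothetical caller, mirroring
 * how svm.c uses them): after writing a cached VMCB field, clear its clean
 * bit so the CPU reloads it on the next VMRUN:
 *
 *	svm->vmcb->save.cr0 = cr0;
 *	vmcb_mark_dirty(svm->vmcb, VMCB_CR);
 */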

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

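/*
 * While L2 is active, svm->vmcb holds the nested guest's state and
 * nested.hsave holds the saved L1 state; KVM's own intercept updates are
 * therefore applied to hsave in that case, and recalc_intercepts() merges
 * them with the L1-requested intercepts (nested.ctl) into the running VMCB.
 */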
static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu))
		return svm->nested.hsave;
	else
		return svm->vmcb;
}

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	if (!sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
	}

	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercepts[INTERCEPT_DR] = 0;

	/* DR7 access must remain intercepted for an SEV-ES guest */
	if (sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
	}

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}

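/*
 * GIF (global interrupt flag) tracking: with hardware vGIF the flag lives
 * in the VMCB's int_ctl field (V_GIF_MASK); without it, KVM emulates GIF
 * via HF_GIF_MASK in vcpu->arch.hflags. The helpers below hide that
 * difference.
 */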
static inline bool vgif_enabled(struct vcpu_svm *svm)
{
	return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

/* svm.c */
#define MSR_CR3_LEGACY_RESERVED_MASK		0xfe7U
#define MSR_CR3_LEGACY_PAE_RESERVED_MASK	0x7U
#define MSR_CR3_LONG_MBZ_MASK			0xfff0000000000000U
#define MSR_INVALID				0xffffffffU

extern int sev;
extern int sev_es;
extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void svm_flush_tlb(struct kvm_vcpu *vcpu);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct vcpu_svm *svm, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);
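
/*
 * Illustrative use (sketch; argument meaning per svm.c): read/write of 1
 * means "do not intercept", i.e. let the guest access the MSR directly:
 *
 *	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
 */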

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */
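/*
 * Return values of nested_svm_exit_handled()/nested_svm_exit_special():
 * NESTED_EXIT_DONE means the exit was reflected to L1 as a nested #VMEXIT,
 * NESTED_EXIT_HOST means L0 handles the exit itself.
 */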

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
			 struct vmcb *nested_vmcb);
void svm_leave_nested(struct vcpu_svm *svm);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct vcpu_svm *svm);
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);
int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct vcpu_svm *svm);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void sync_nested_vmcb_control(struct vcpu_svm *svm);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT			31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK		(1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	(0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK		(1ULL << 63)

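/*
 * Illustrative sketch (not verbatim from avic.c): a physical APIC ID table
 * entry points at the vCPU's backing page and is marked valid roughly as
 *
 *	entry = __sme_set(page_to_phys(svm->avic_backing_page)) &
 *		AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK;
 *	entry |= AVIC_PHYSICAL_ID_ENTRY_VALID_MASK;
 *
 * The IS_RUNNING bit is toggled in avic_vcpu_load()/avic_vcpu_put() so the
 * IOMMU knows whether to signal the doorbell or log a GA event.
 */
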
489 
490 extern int avic;
491 
492 static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
493 {
494 	svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
495 	vmcb_mark_dirty(svm->vmcb, VMCB_AVIC);
496 }
497 
498 static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
499 {
500 	struct vcpu_svm *svm = to_svm(vcpu);
501 	u64 *entry = svm->avic_physical_id_cache;
502 
503 	if (!entry)
504 		return false;
505 
506 	return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
507 }
508 
509 int avic_ga_log_notifier(u32 ga_tag);
510 void avic_vm_destroy(struct kvm *kvm);
511 int avic_vm_init(struct kvm *kvm);
512 void avic_init_vmcb(struct vcpu_svm *svm);
513 void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate);
514 int avic_incomplete_ipi_interception(struct vcpu_svm *svm);
515 int avic_unaccelerated_access_interception(struct vcpu_svm *svm);
516 int avic_init_vcpu(struct vcpu_svm *svm);
517 void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
518 void avic_vcpu_put(struct kvm_vcpu *vcpu);
519 void avic_post_state_restore(struct kvm_vcpu *vcpu);
520 void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
521 void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
522 bool svm_check_apicv_inhibit_reasons(ulong bit);
523 void svm_pre_update_apicv_exec_ctrl(struct kvm *kvm, bool activate);
524 void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
525 void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
526 void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
527 int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
528 bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
529 int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
530 		       uint32_t guest_irq, bool set);
531 void svm_vcpu_blocking(struct kvm_vcpu *vcpu);
532 void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);
533 
534 /* sev.c */
535 
536 #define GHCB_VERSION_MAX		1ULL
537 #define GHCB_VERSION_MIN		1ULL
538 
539 #define GHCB_MSR_INFO_POS		0
540 #define GHCB_MSR_INFO_MASK		(BIT_ULL(12) - 1)
541 
542 #define GHCB_MSR_SEV_INFO_RESP		0x001
543 #define GHCB_MSR_SEV_INFO_REQ		0x002
544 #define GHCB_MSR_VER_MAX_POS		48
545 #define GHCB_MSR_VER_MAX_MASK		0xffff
546 #define GHCB_MSR_VER_MIN_POS		32
547 #define GHCB_MSR_VER_MIN_MASK		0xffff
548 #define GHCB_MSR_CBIT_POS		24
549 #define GHCB_MSR_CBIT_MASK		0xff
550 #define GHCB_MSR_SEV_INFO(_max, _min, _cbit)				\
551 	((((_max) & GHCB_MSR_VER_MAX_MASK) << GHCB_MSR_VER_MAX_POS) |	\
552 	 (((_min) & GHCB_MSR_VER_MIN_MASK) << GHCB_MSR_VER_MIN_POS) |	\
553 	 (((_cbit) & GHCB_MSR_CBIT_MASK) << GHCB_MSR_CBIT_POS) |	\
554 	 GHCB_MSR_SEV_INFO_RESP)
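
/*
 * Illustrative use (sketch): the SEV info response of the GHCB MSR
 * protocol advertises the supported GHCB version range and the guest's
 * C-bit position, e.g.
 *
 *	GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX, GHCB_VERSION_MIN, cbit_position)
 *
 * where cbit_position (name used here for illustration only) is the
 * encryption bit position reported by CPUID 0x8000001F.
 */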

#define GHCB_MSR_CPUID_REQ		0x004
#define GHCB_MSR_CPUID_RESP		0x005
#define GHCB_MSR_CPUID_FUNC_POS		32
#define GHCB_MSR_CPUID_FUNC_MASK	0xffffffff
#define GHCB_MSR_CPUID_VALUE_POS	32
#define GHCB_MSR_CPUID_VALUE_MASK	0xffffffff
#define GHCB_MSR_CPUID_REG_POS		30
#define GHCB_MSR_CPUID_REG_MASK		0x3

#define GHCB_MSR_TERM_REQ		0x100
#define GHCB_MSR_TERM_REASON_SET_POS	12
#define GHCB_MSR_TERM_REASON_SET_MASK	0xf
#define GHCB_MSR_TERM_REASON_POS	16
#define GHCB_MSR_TERM_REASON_MASK	0xff

extern unsigned int max_sev_asid;

static inline bool svm_sev_enabled(void)
{
	return IS_ENABLED(CONFIG_KVM_AMD_SEV) ? max_sev_asid : 0;
}

void sev_vm_destroy(struct kvm *kvm);
int svm_mem_enc_op(struct kvm *kvm, void __user *argp);
int svm_register_enc_region(struct kvm *kvm,
			    struct kvm_enc_region *range);
int svm_unregister_enc_region(struct kvm *kvm,
			      struct kvm_enc_region *range);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_hardware_setup(void);
void sev_hardware_teardown(void);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct vcpu_svm *svm);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_init_vmcb(struct vcpu_svm *svm);
void sev_es_create_vcpu(struct vcpu_svm *svm);
void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu);
void sev_es_vcpu_put(struct vcpu_svm *svm);

/* vmenter.S */

void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);

#endif