// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>

#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
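
/*
 * Usage sketch (editor's addition; the caller and page name are
 * hypothetical): __sme_page_pa() converts a struct page to its physical
 * address and, when SME is active, sets the C-bit so the address refers
 * to encrypted memory, e.g. when installing a VMCB:
 *
 *	svm->vmcb01.pa = __sme_page_pa(vmcb01_page);
 */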

#define	IOPM_SIZE (PAGE_SIZE * 3)
#define	MSRPM_SIZE (PAGE_SIZE * 2)

#define MAX_DIRECT_ACCESS_MSRS	20
#define MSRPM_OFFSETS	16
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
extern bool intercept_smi;

/*
 * Clean bits in VMCB.
 * VMCB_ALL_CLEAN_MASK might also need to
 * be updated if this enum is modified.
 */
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,        /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,         /* DR6, DR7 */
	VMCB_DT,         /* GDT, IDT */
	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
	VMCB_CR2,        /* CR2 only */
	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_SW = 31,    /* Reserved for hypervisor/software use */
};

#define VMCB_ALL_CLEAN_MASK (					\
	(1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |	\
	(1U << VMCB_ASID) | (1U << VMCB_INTR) |			\
	(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |	\
	(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) |	\
	(1U << VMCB_LBR) | (1U << VMCB_AVIC) |			\
	(1U << VMCB_SW))

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))
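
/*
 * Usage sketch (editor's addition): a set clean bit tells hardware the
 * corresponding VMCB area is unchanged and may be served from its cache;
 * clearing it forces a reload on the next VMRUN.  Every field update must
 * therefore be paired with marking its area dirty, e.g. for the TSC
 * offset (part of the VMCB_INTERCEPTS area per the enum comment above):
 *
 *	vmcb->control.tsc_offset = offset;
 *	vmcb_mark_dirty(vmcb, VMCB_INTERCEPTS);
 */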

struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
	struct kvm *enc_context_owner; /* Owner of copied encryption context */
	unsigned long num_mirrored_vms; /* Number of VMs sharing this ASID */
	struct misc_cg *misc_cg; /* For misc cgroup accounting */
	atomic_t migration_in_progress;
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct kvm_vmcb_info {
	struct vmcb *ptr;
	unsigned long pa;
	int cpu;
	uint64_t asid_generation;
};

struct svm_nested_state {
	struct kvm_vmcb_info vmcb02;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;
	u64 last_vmcb12_gpa;

	/* These are the merged vectors */
	u32 *msrpm;

	/*
	 * A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet.
	 */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_control_area ctl;

	bool initialized;
};

struct vcpu_sev_es_state {
	/* SEV-ES support */
	struct vmcb_save_area *vmsa;
	struct ghcb *ghcb;
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;

	/* SEV-ES scratch area support */
	void *ghcb_sa;
	u32 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	/* vmcb always points at current_vmcb->ptr; it's purely a shorthand. */
	struct vmcb *vmcb;
	struct kvm_vmcb_info vmcb01;
	struct kvm_vmcb_info *current_vmcb;
	struct svm_cpu_data *svm_data;
	u32 asid;
	u32 sysenter_esp_hi;
	u32 sysenter_eip_hi;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 spec_ctrl;

	u64 tsc_ratio_msr;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate LS_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	unsigned int int3_injected;
	unsigned long int3_rip;

	/* cached guest cpuid flags for faster access */
	bool nrips_enabled                : 1;
	bool tsc_scaling_enabled          : 1;

	u32 ldr_reg;
	u32 dfr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;
	bool avic_is_running;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vCPU affinity.  This avoids the need to scan for
	 * IRTEs and try to match the ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;

	struct vcpu_sev_es_state sev_es;

	bool guest_state_loaded;
};

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static __always_inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

static __always_inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}

static __always_inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->es_active && !WARN_ON_ONCE(!sev->active);
#else
	return false;
#endif
}
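
/*
 * Note (editor's addition): sev_es_guest() implies sev_guest(); the
 * WARN_ON_ONCE() above catches the inconsistent state.  Callers such as
 * set_dr_intercepts() below use it to decide whether debug-register
 * accesses may be left unintercepted.
 */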

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = VMCB_ALL_CLEAN_MASK
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline bool vmcb_is_clean(struct vmcb *vmcb, int bit)
{
	return (vmcb->control.clean & (1 << bit));
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}
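
/*
 * Usage sketch (editor's addition, call site hypothetical): a VMCB that is
 * run on a different physical CPU, or after another VMCB has run on the
 * same CPU, cannot trust the hardware's cached state and must be marked
 * fully dirty:
 *
 *	if (unlikely(svm->current_vmcb->cpu != vcpu->cpu))
 *		vmcb_mark_all_dirty(svm->vmcb);
 *
 * After a successful VMRUN, vmcb_mark_all_clean() re-arms the caching,
 * leaving the VMCB_ALWAYS_DIRTY_MASK areas (int_ctl, CR2) always reloaded.
 */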

static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}
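
/*
 * Layout note (editor's addition): the intercepts[] words form one flat
 * bitmap, so an intercept number encodes both the word and the bit within
 * it.  Exception intercepts, for example, live at
 * INTERCEPT_EXCEPTION_OFFSET + vector:
 *
 *	vmcb_set_intercept(&vmcb->control,
 *			   INTERCEPT_EXCEPTION_OFFSET + DB_VECTOR);
 */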

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	if (!sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
	}

	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb->control.intercepts[INTERCEPT_DR] = 0;

	/* DR7 access must remain intercepted for an SEV-ES guest */
	if (sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
	}

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}

static inline bool vgif_enabled(struct vcpu_svm *svm)
{
	return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}
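
/*
 * Semantics note (editor's addition): with hardware vGIF, the guest's
 * global interrupt flag is tracked by the CPU in int_ctl (V_GIF_MASK) and
 * STGI/CLGI toggle it without exiting; otherwise KVM emulates GIF in
 * hflags.  Callers query it through gif_set() and never pick the backing
 * store themselves:
 *
 *	if (gif_set(svm))
 *		... interrupts, NMIs and SMIs may be taken ...
 */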

/* svm.c */
#define MSR_INVALID				0xffffffffU

extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void svm_flush_tlb(struct kvm_vcpu *vcpu);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */
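
/*
 * Dispatch sketch (editor's addition, handler bodies elided): the nested
 * exit-check helpers return one of the values above so callers can
 * distinguish the three outcomes:
 *
 *	switch (nested_svm_exit_special(svm)) {
 *	case NESTED_EXIT_HOST:		// L0 handles the exit itself
 *	case NESTED_EXIT_DONE:		// exit was reflected into L1
 *	case NESTED_EXIT_CONTINUE:	// more checks are needed
 *	}
 */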

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
			 u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct vcpu_svm *svm);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
	svm->vmcb->control.exit_code   = exit_code;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;
	return nested_svm_vmexit(svm);
}
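
/*
 * Example (editor's addition): synthesizing an exit that carries no exit
 * qualification, e.g. reflecting an intercepted SMI to L1:
 *
 *	if (nested_exit_on_smi(svm))
 *		return nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
 */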

int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier);
void nested_load_control_from_vmcb12(struct vcpu_svm *svm,
				     struct vmcb_control_area *control);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT			31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK		(1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	(0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK		(1ULL << 63)

#define VMCB_AVIC_APIC_BAR_MASK		0xFFFFFFFFFF000ULL

static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 *entry = svm->avic_physical_id_cache;

	if (!entry)
		return false;

	return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}
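
/*
 * Note (editor's addition): the IS_RUNNING bit is what the IOMMU consults
 * when deciding whether a posted interrupt can be delivered to the vCPU
 * directly; READ_ONCE() is used above because avic_vcpu_load() and
 * avic_vcpu_put() update the entry concurrently.
 */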

int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_post_state_restore(struct kvm_vcpu *vcpu);
void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool svm_check_apicv_inhibit_reasons(ulong bit);
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
		       uint32_t guest_irq, bool set);
void svm_vcpu_blocking(struct kvm_vcpu *vcpu);
void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);

/* sev.c */

#define GHCB_VERSION_MAX	1ULL
#define GHCB_VERSION_MIN	1ULL

extern unsigned int max_sev_asid;

void sev_vm_destroy(struct kvm *kvm);
int svm_mem_enc_op(struct kvm *kvm, void __user *argp);
int svm_register_enc_region(struct kvm *kvm,
			    struct kvm_enc_region *range);
int svm_unregister_enc_region(struct kvm *kvm,
			      struct kvm_enc_region *range);
int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd);
int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_teardown(void);
int sev_cpu_init(struct svm_cpu_data *sd);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_init_vmcb(struct vcpu_svm *svm);
void sev_es_vcpu_reset(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);

/* vmenter.S */

void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);

#endif