// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>

#include "kvm_cache_regs.h"

#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
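
/*
 * Illustrative sketch only: __sme_page_pa() is typically used to record the
 * physical address of a hypervisor-owned page with the SME encryption bit
 * applied, e.g. ("vmcb_page" below is a hypothetical struct page *):
 *
 *	svm->vmcb01.pa = __sme_page_pa(vmcb_page);
 */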

#define	IOPM_SIZE (PAGE_SIZE * 3)
#define	MSRPM_SIZE (PAGE_SIZE * 2)
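
/*
 * Sizing sketch (per the APM): the IOPM covers 64K I/O ports at one bit per
 * port (8 KiB) plus a few extra bits so that multi-byte accesses at the top
 * of the port range can be checked, which spills into a third page.  The
 * MSRPM is four 2 KiB vectors, each covering 8K MSRs at 2 bits (read + write
 * intercept) per MSR, i.e. exactly 2 pages.
 */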

#define MAX_DIRECT_ACCESS_MSRS	46
#define MSRPM_OFFSETS	32
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
extern int vgif;
extern bool intercept_smi;

enum avic_modes {
	AVIC_MODE_NONE = 0,
	AVIC_MODE_X1,
	AVIC_MODE_X2,
};

extern enum avic_modes avic_mode;

/*
 * Clean bits in VMCB.
 * VMCB_ALL_CLEAN_MASK might also need to be updated if this enum is modified.
 */
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,        /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,         /* DR6, DR7 */
	VMCB_DT,         /* GDT, IDT */
	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
	VMCB_CR2,        /* CR2 only */
	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_SW = 31,    /* Reserved for hypervisor/software use */
};

#define VMCB_ALL_CLEAN_MASK (					\
	(1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |	\
	(1U << VMCB_ASID) | (1U << VMCB_INTR) |			\
	(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |	\
	(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) |	\
	(1U << VMCB_LBR) | (1U << VMCB_AVIC) |			\
	(1U << VMCB_SW))

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
	struct kvm *enc_context_owner; /* Owner of copied encryption context */
	struct list_head mirror_vms; /* List of VMs mirroring */
	struct list_head mirror_entry; /* Use as a list entry of mirrors */
	struct misc_cg *misc_cg; /* For misc cgroup accounting */
	atomic_t migration_in_progress;
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct kvm_vmcb_info {
	struct vmcb *ptr;
	unsigned long pa;
	int cpu;
	uint64_t asid_generation;
};

struct vmcb_save_area_cached {
	u64 efer;
	u64 cr4;
	u64 cr3;
	u64 cr0;
	u64 dr7;
	u64 dr6;
};

struct vmcb_ctrl_area_cached {
	u32 intercepts[MAX_INTERCEPT];
	u16 pause_filter_thresh;
	u16 pause_filter_count;
	u64 iopm_base_pa;
	u64 msrpm_base_pa;
	u64 tsc_offset;
	u32 asid;
	u8 tlb_ctl;
	u32 int_ctl;
	u32 int_vector;
	u32 int_state;
	u32 exit_code;
	u32 exit_code_hi;
	u64 exit_info_1;
	u64 exit_info_2;
	u32 exit_int_info;
	u32 exit_int_info_err;
	u64 nested_ctl;
	u32 event_inj;
	u32 event_inj_err;
	u64 next_rip;
	u64 nested_cr3;
	u64 virt_ext;
	u32 clean;
	u8 reserved_sw[32];
};

struct svm_nested_state {
	struct kvm_vmcb_info vmcb02;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;
	u64 last_vmcb12_gpa;

	/* These are the merged vectors */
	u32 *msrpm;

	/*
	 * A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet.
	 */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_ctrl_area_cached ctl;

	/*
	 * Note: this struct is not kept up-to-date while L2 runs; it is only
	 * valid within nested_svm_vmrun.
	 */
	struct vmcb_save_area_cached save;

	bool initialized;

	/*
	 * Indicates whether MSR bitmap for L2 needs to be rebuilt due to
	 * changes in MSR bitmap for L1 or switching to a different L2. Note,
	 * this flag can only be used reliably in conjunction with a paravirt L1
	 * which informs L0 whether any changes to MSR bitmap for L2 were done
	 * on its side.
	 */
	bool force_msr_bitmap_recalc;
};

struct vcpu_sev_es_state {
	/* SEV-ES support */
	struct sev_es_save_area *vmsa;
	struct ghcb *ghcb;
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;

	/* SEV-ES scratch area support */
	void *ghcb_sa;
	u32 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	/* vmcb always points at current_vmcb->ptr; it's purely shorthand. */
	struct vmcb *vmcb;
	struct kvm_vmcb_info vmcb01;
	struct kvm_vmcb_info *current_vmcb;
	u32 asid;
	u32 sysenter_esp_hi;
	u32 sysenter_eip_hi;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 spec_ctrl;

	u64 tsc_ratio_msr;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate LS_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;
	bool nmi_l1_to_l2;

	unsigned long soft_int_csbase;
	unsigned long soft_int_old_rip;
	unsigned long soft_int_next_rip;
	bool soft_int_injected;

	/* optional nested SVM features that are enabled for this guest */
	bool nrips_enabled                : 1;
	bool tsc_scaling_enabled          : 1;
	bool v_vmload_vmsave_enabled      : 1;
	bool lbrv_enabled                 : 1;
	bool pause_filter_enabled         : 1;
	bool pause_threshold_enabled      : 1;
	bool vgif_enabled                 : 1;

	u32 ldr_reg;
	u32 dfr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vcpu affinity.  This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;

	struct vcpu_sev_es_state sev_es;

	bool guest_state_loaded;

	bool x2avic_msrs_intercepted;
};

struct svm_cpu_data {
	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
	unsigned long save_area_pa;

	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data, svm_data);
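
/*
 * Illustrative sketch only: per-CPU SVM state is reached via the standard
 * per-CPU accessors, e.g. for a given CPU number "cpu":
 *
 *	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
 */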

void recalc_intercepts(struct vcpu_svm *svm);

static __always_inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

static __always_inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}

static __always_inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->es_active && !WARN_ON_ONCE(!sev->active);
#else
	return false;
#endif
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = VMCB_ALL_CLEAN_MASK
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}
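
/*
 * Illustrative usage sketch ("new_asid" is hypothetical): mark the matching
 * clean bit dirty whenever the corresponding VMCB area is modified, and mark
 * everything clean again once hardware has consumed the VMCB on VMRUN:
 *
 *	vmcb->control.asid = new_asid;
 *	vmcb_mark_dirty(vmcb, VMCB_ASID);
 *	...
 *	vmcb_mark_all_clean(vmcb);	(after a successful VMRUN)
 */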

static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

/*
 * Only the PDPTRs are loaded on demand into the shadow MMU.  All other
 * fields are synchronized on VM-Exit, because accessing the VMCB is cheap.
 *
 * CR3 might be out of date in the VMCB but it is not marked dirty; instead,
 * KVM_REQ_LOAD_MMU_PGD is always requested when the cached vcpu->arch.cr3
 * is changed.  svm_load_mmu_pgd() then syncs the new CR3 value into the VMCB.
 */
#define SVM_REGS_LAZY_LOAD_SET	(1 << VCPU_EXREG_PDPTR)

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	if (!sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
	}

	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb->control.intercepts[INTERCEPT_DR] = 0;

	/* DR7 access must remain intercepted for an SEV-ES guest */
	if (sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
	}

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}
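
/*
 * Illustrative sketch only: the svm_{set,clr}_intercept() wrappers operate on
 * vmcb01 and then re-merge with the nested (vmcb12) intercepts via
 * recalc_intercepts(), e.g.:
 *
 *	svm_set_intercept(svm, INTERCEPT_HLT);
 */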

static inline bool nested_vgif_enabled(struct vcpu_svm *svm)
{
	return svm->vgif_enabled && (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK);
}

static inline struct vmcb *get_vgif_vmcb(struct vcpu_svm *svm)
{
	if (!vgif)
		return NULL;

	if (is_guest_mode(&svm->vcpu) && !nested_vgif_enabled(svm))
		return svm->nested.vmcb02.ptr;
	else
		return svm->vmcb01.ptr;
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		return !!(vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

static inline bool is_x2apic_msrpm_offset(u32 offset)
{
	/* 4 msrs per u8, and 4 u8 in u32 */
	u32 msr = offset * 16;

	return (msr >= APIC_BASE_MSR) &&
	       (msr < (APIC_BASE_MSR + 0x100));
}
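
/*
 * Worked example for the check above: MSRPM offsets are in u32 units and each
 * u32 covers 16 MSRs (2 bits per MSR), so for the low MSR range that starts
 * at 0 the first MSR covered by an offset is simply offset * 16.  The x2APIC
 * MSRs occupy 0x800 - 0x8ff, so offsets 0x80 through 0x8f are the ones that
 * satisfy is_x2apic_msrpm_offset() (0x80 * 16 == 0x800 == APIC_BASE_MSR).
 */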

/* svm.c */
#define MSR_INVALID				0xffffffffU

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
void svm_update_lbrv(struct kvm_vcpu *vcpu);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);
void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool disable);
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
				     int trig_mode, int vec);

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
			 u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct kvm_vcpu *vcpu);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
	svm->vmcb->control.exit_code   = exit_code;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;
	return nested_svm_vmexit(svm);
}
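
/*
 * Illustrative sketch only: a "simple" nested #VMEXIT carries no exit_info,
 * e.g. reflecting a physical interrupt to L1 while L2 runs could look like:
 *
 *	if (nested_exit_on_intr(svm))
 *		nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
 */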

int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
void __svm_write_tsc_multiplier(u64 multiplier);
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
				       struct vmcb_control_area *control);
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
				    struct vmcb_save_area *save);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */

bool avic_hardware_setup(struct kvm_x86_ops *ops);
int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool avic_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason);
int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
			uint32_t guest_irq, bool set);
void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
void avic_ring_doorbell(struct kvm_vcpu *vcpu);
unsigned long avic_vcpu_get_apicv_inhibit_reasons(struct kvm_vcpu *vcpu);
void avic_set_virtual_apic_mode(struct kvm_vcpu *vcpu);


/* sev.c */

#define GHCB_VERSION_MAX	1ULL
#define GHCB_VERSION_MIN	1ULL


extern unsigned int max_sev_asid;

void sev_vm_destroy(struct kvm *kvm);
int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp);
int sev_mem_enc_register_region(struct kvm *kvm,
				struct kvm_enc_region *range);
int sev_mem_enc_unregister_region(struct kvm *kvm,
				  struct kvm_enc_region *range);
int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd);
int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
void sev_guest_memory_reclaimed(struct kvm *kvm);

void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_unsetup(void);
int sev_cpu_init(struct svm_cpu_data *sd);
void sev_init_vmcb(struct vcpu_svm *svm);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_vcpu_reset(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);

/* vmenter.S */

void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);

#endif