// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>

#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

#define IOPM_SIZE PAGE_SIZE * 3
#define MSRPM_SIZE PAGE_SIZE * 2

#define MAX_DIRECT_ACCESS_MSRS 20
#define MSRPM_OFFSETS 16
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
extern bool intercept_smi;
extern bool pmu;

/*
 * Clean bits in VMCB.
 * VMCB_ALL_CLEAN_MASK might also need to
 * be updated if this enum is modified.
 */
enum {
        VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
                            pause filter count */
        VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
        VMCB_ASID,       /* ASID */
        VMCB_INTR,       /* int_ctl, int_vector */
        VMCB_NPT,        /* npt_en, nCR3, gPAT */
        VMCB_CR,         /* CR0, CR3, CR4, EFER */
        VMCB_DR,         /* DR6, DR7 */
        VMCB_DT,         /* GDT, IDT */
        VMCB_SEG,        /* CS, DS, SS, ES, CPL */
        VMCB_CR2,        /* CR2 only */
        VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
        VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
                          * AVIC PHYSICAL_TABLE pointer,
                          * AVIC LOGICAL_TABLE pointer
                          */
        VMCB_SW = 31,    /* Reserved for hypervisor/software use */
};

#define VMCB_ALL_CLEAN_MASK (                                   \
        (1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |       \
        (1U << VMCB_ASID) | (1U << VMCB_INTR) |                 \
        (1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |  \
        (1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) | \
        (1U << VMCB_LBR) | (1U << VMCB_AVIC) |                  \
        (1U << VMCB_SW))

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK ((1U << VMCB_INTR) | (1U << VMCB_CR2))

struct kvm_sev_info {
        bool active;            /* SEV enabled guest */
        bool es_active;         /* SEV-ES enabled guest */
        unsigned int asid;      /* ASID used for this guest */
        unsigned int handle;    /* SEV firmware handle */
        int fd;                 /* SEV device fd */
        unsigned long pages_locked;     /* Number of pages locked */
        struct list_head regions_list;  /* List of registered regions */
        u64 ap_jump_table;      /* SEV-ES AP Jump Table address */
        struct kvm *enc_context_owner;  /* Owner of copied encryption context */
        unsigned long num_mirrored_vms; /* Number of VMs sharing this ASID */
        struct misc_cg *misc_cg;        /* For misc cgroup accounting */
        atomic_t migration_in_progress;
};

struct kvm_svm {
        struct kvm kvm;

        /* Struct members for AVIC */
        u32 avic_vm_id;
        struct page *avic_logical_id_table_page;
        struct page *avic_physical_id_table_page;
        struct hlist_node hnode;

        struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct kvm_vmcb_info {
        struct vmcb *ptr;
        unsigned long pa;
        int cpu;
        uint64_t asid_generation;
};

struct vmcb_save_area_cached {
        u64 efer;
        u64 cr4;
        u64 cr3;
        u64 cr0;
        u64 dr7;
        u64 dr6;
};

struct vmcb_ctrl_area_cached {
        u32 intercepts[MAX_INTERCEPT];
        u16 pause_filter_thresh;
        u16 pause_filter_count;
        u64 iopm_base_pa;
        u64 msrpm_base_pa;
        u64 tsc_offset;
        u32 asid;
        u8 tlb_ctl;
        u32 int_ctl;
        u32 int_vector;
        u32 int_state;
        u32 exit_code;
        u32 exit_code_hi;
        u64 exit_info_1;
        u64 exit_info_2;
        u32 exit_int_info;
        u32 exit_int_info_err;
        u64 nested_ctl;
        u32 event_inj;
        u32 event_inj_err;
        u64 nested_cr3;
        u64 virt_ext;
};

struct svm_nested_state {
        struct kvm_vmcb_info vmcb02;
        u64 hsave_msr;
        u64 vm_cr_msr;
        u64 vmcb12_gpa;
        u64 last_vmcb12_gpa;

        /* These are the merged vectors */
        u32 *msrpm;

        /*
         * A VMRUN has started but has not yet been performed, so
         * we cannot inject a nested vmexit yet.
         */
        bool nested_run_pending;

        /* cache for control fields of the guest */
        struct vmcb_ctrl_area_cached ctl;

        /*
         * Note: this struct is not kept up-to-date while L2 runs; it is only
         * valid within nested_svm_vmrun.
         */
        struct vmcb_save_area_cached save;

        bool initialized;
};

struct vcpu_sev_es_state {
        /* SEV-ES support */
        struct vmcb_save_area *vmsa;
        struct ghcb *ghcb;
        struct kvm_host_map ghcb_map;
        bool received_first_sipi;

        /* SEV-ES scratch area support */
        void *ghcb_sa;
        u32 ghcb_sa_len;
        bool ghcb_sa_sync;
        bool ghcb_sa_free;
};

struct vcpu_svm {
        struct kvm_vcpu vcpu;
        /* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
        struct vmcb *vmcb;
        struct kvm_vmcb_info vmcb01;
        struct kvm_vmcb_info *current_vmcb;
        struct svm_cpu_data *svm_data;
        u32 asid;
        u32 sysenter_esp_hi;
        u32 sysenter_eip_hi;
        uint64_t tsc_aux;

        u64 msr_decfg;

        u64 next_rip;

        u64 spec_ctrl;

        u64 tsc_ratio_msr;
        /*
         * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
         * translated into the appropriate L2_CFG bits on the host to
         * perform speculative control.
         */
        u64 virt_spec_ctrl;

        u32 *msrpm;

        ulong nmi_iret_rip;

        struct svm_nested_state nested;

        bool nmi_singlestep;
        u64 nmi_singlestep_guest_rflags;

        unsigned int3_injected;
        unsigned long int3_rip;

        /* cached guest cpuid flags for faster access */
        bool nrips_enabled       : 1;
        bool tsc_scaling_enabled : 1;

        u32 ldr_reg;
        u32 dfr_reg;
        struct page *avic_backing_page;
        u64 *avic_physical_id_cache;
        bool avic_is_running;

        /*
         * Per-vcpu list of struct amd_svm_iommu_ir:
         * This is used mainly to store interrupt remapping information used
         * when updating the vcpu affinity. This avoids the need to scan for
         * IRTE and try to match ga_tag in the IOMMU driver.
         */
        struct list_head ir_list;
        spinlock_t ir_list_lock;

        /* Save desired MSR intercept (read: pass-through) state */
        struct {
                DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
                DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
        } shadow_msr_intercept;

        struct vcpu_sev_es_state sev_es;

        bool guest_state_loaded;
};

struct svm_cpu_data {
        int cpu;

        u64 asid_generation;
        u32 max_asid;
        u32 next_asid;
        u32 min_asid;
        struct kvm_ldttss_desc *tss_desc;

        struct page *save_area;
        struct vmcb *current_vmcb;

        /* index = sev_asid, value = vmcb pointer */
        struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static __always_inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
        return container_of(kvm, struct kvm_svm, kvm);
}

static __always_inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        return sev->active;
#else
        return false;
#endif
}

static __always_inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        return sev->es_active && !WARN_ON_ONCE(!sev->active);
#else
        return false;
#endif
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
        vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
        vmcb->control.clean = VMCB_ALL_CLEAN_MASK
                               & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline bool vmcb_is_clean(struct vmcb *vmcb, int bit)
{
        return (vmcb->control.clean & (1 << bit));
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
        vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
        return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}

static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
        return container_of(vcpu, struct vcpu_svm, vcpu);
}

/*
 * Only the PDPTRs are loaded on demand into the shadow MMU.  All other
 * fields are synchronized in handle_exit, because accessing the VMCB is cheap.
 *
 * CR3 might be out of date in the VMCB but it is not marked dirty; instead,
 * KVM_REQ_LOAD_MMU_PGD is always requested when the cached vcpu->arch.cr3
 * is changed.  svm_load_mmu_pgd() then syncs the new CR3 value into the VMCB.
 */
#define SVM_REGS_LAZY_LOAD_SET (1 << VCPU_EXREG_PDPTR)

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
        WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
        __set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
        WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
        __clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
        WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
        return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
{
        WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
        return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        if (!sev_es_guest(svm->vcpu.kvm)) {
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
        }

        vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
        vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

        recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        vmcb->control.intercepts[INTERCEPT_DR] = 0;

        /* DR7 access must remain intercepted for an SEV-ES guest */
        if (sev_es_guest(svm->vcpu.kvm)) {
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
        }

        recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        WARN_ON_ONCE(bit >= 32);
        vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

        recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        WARN_ON_ONCE(bit >= 32);
        vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

        recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        vmcb_set_intercept(&vmcb->control, bit);

        recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        vmcb_clr_intercept(&vmcb->control, bit);

        recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
        return vmcb_is_intercept(&svm->vmcb->control, bit);
}

static inline bool vgif_enabled(struct vcpu_svm *svm)
{
        return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
        if (vgif_enabled(svm))
                svm->vmcb->control.int_ctl |= V_GIF_MASK;
        else
                svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
        if (vgif_enabled(svm))
                svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
        else
                svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
        if (vgif_enabled(svm))
                return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
        else
                return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

/* svm.c */
#define MSR_INVALID 0xffffffffU

extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void svm_flush_tlb(struct kvm_vcpu *vcpu);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
                          int read, int write);

/* nested.c */

#define NESTED_EXIT_HOST     0  /* Exit handled on host level */
#define NESTED_EXIT_DONE     1  /* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE 2  /* Further checks needed      */

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
        return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
        return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
        return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
                         u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct vcpu_svm *svm);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
                          struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
        svm->vmcb->control.exit_code   = exit_code;
        svm->vmcb->control.exit_info_1 = 0;
        svm->vmcb->control.exit_info_2 = 0;
        return nested_svm_vmexit(svm);
}

int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                               bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier);
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
                                       struct vmcb_control_area *control);
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
                                    struct vmcb_save_area *save);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT              31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK             (1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK (0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK     (0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK       (1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK            (1ULL << 63)

#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL

static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 *entry = svm->avic_physical_id_cache;

        if (!entry)
                return false;

        return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}

int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_post_state_restore(struct kvm_vcpu *vcpu);
void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool svm_check_apicv_inhibit_reasons(ulong bit);
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
                       uint32_t guest_irq, bool set);
void svm_vcpu_blocking(struct kvm_vcpu *vcpu);
void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);

/* sev.c */

#define GHCB_VERSION_MAX 1ULL
#define GHCB_VERSION_MIN 1ULL

extern unsigned int max_sev_asid;

void sev_vm_destroy(struct kvm *kvm);
int svm_mem_enc_op(struct kvm *kvm, void __user *argp);
int svm_register_enc_region(struct kvm *kvm,
                            struct kvm_enc_region *range);
int svm_unregister_enc_region(struct kvm *kvm,
                              struct kvm_enc_region *range);
int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd);
int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_teardown(void);
int sev_cpu_init(struct svm_cpu_data *sd);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_init_vmcb(struct vcpu_svm *svm);
void sev_es_vcpu_reset(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);

/* vmenter.S */

void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);

#endif
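
/*
 * Usage sketch for the VMCB clean-bit helpers declared earlier in this header.
 * This is a simplified illustration of the pattern followed by svm.c, not a
 * verbatim excerpt; only the helper and enum names (vmcb_mark_dirty(),
 * vmcb_mark_all_clean(), VMCB_CR, VMCB_ALWAYS_DIRTY_MASK) come from this file,
 * and the surrounding statements are a sketch.
 *
 *	// After modifying a VMCB field, clear the corresponding clean bit so
 *	// that hardware reloads that VMCB area on the next VMRUN:
 *	svm->vmcb->save.cr4 = cr4;
 *	vmcb_mark_dirty(svm->vmcb, VMCB_CR);
 *
 *	// After a successful VMRUN, the state just consumed by the CPU can be
 *	// marked clean again; VMCB_ALWAYS_DIRTY_MASK (int_ctl and CR2) is left
 *	// dirty because those fields are rewritten before every VMRUN:
 *	vmcb_mark_all_clean(svm->vmcb);
 */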