// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>

#include "cpuid.h"
#include "kvm_cache_regs.h"

#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

/* Parenthesized so the macros expand safely in any expression context. */
#define IOPM_SIZE	(PAGE_SIZE * 3)
#define MSRPM_SIZE	(PAGE_SIZE * 2)

#define MAX_DIRECT_ACCESS_MSRS	48
#define MSRPM_OFFSETS		32
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
extern int nrips;
extern int vgif;
extern bool intercept_smi;
extern bool x2avic_enabled;
extern bool vnmi;
extern int lbrv;

/*
 * Clean bits in VMCB.
 * VMCB_ALL_CLEAN_MASK might also need to
 * be updated if this enum is modified.
 */
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,	 /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,	 /* DR6, DR7 */
	VMCB_DT,	 /* GDT, IDT */
	VMCB_SEG,	 /* CS, DS, SS, ES, CPL */
	VMCB_CR2,	 /* CR2 only */
	VMCB_LBR,	 /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,	 /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_SW = 31,	 /* Reserved for hypervisor/software use */
};

#define VMCB_ALL_CLEAN_MASK (					\
	(1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |	\
	(1U << VMCB_ASID) | (1U << VMCB_INTR) |			\
	(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |	\
	(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) |	\
	(1U << VMCB_LBR) | (1U << VMCB_AVIC) |			\
	(1U << VMCB_SW))

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

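/*
 * Typical usage of the clean bits (a sketch; the helpers referenced here
 * are defined further down in this header): after software touches a
 * VMCB field it must clear the matching clean bit so that hardware
 * reloads that state on the next VMRUN, e.g.:
 *
 *	svm->vmcb->save.cr4 = cr4;
 *	vmcb_mark_dirty(svm->vmcb, VMCB_CR);
 *
 * Bits in VMCB_ALWAYS_DIRTY_MASK are never marked clean, because TPR
 * (in int_ctl) and CR2 are rewritten before every VMRUN.
 */
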
struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
	struct kvm *enc_context_owner; /* Owner of copied encryption context */
	struct list_head mirror_vms; /* List of VMs mirroring this VM */
	struct list_head mirror_entry; /* Use as a list entry of mirrors */
	struct misc_cg *misc_cg; /* For misc cgroup accounting */
	atomic_t migration_in_progress;
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct kvm_vmcb_info {
	struct vmcb *ptr;
	unsigned long pa;
	int cpu;
	uint64_t asid_generation;
};

struct vmcb_save_area_cached {
	u64 efer;
	u64 cr4;
	u64 cr3;
	u64 cr0;
	u64 dr7;
	u64 dr6;
};

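/*
 * Cached copy of the vmcb12 control area, filled by
 * nested_copy_vmcb_control_to_cache() on nested VMRUN (a descriptive
 * summary; the fields mirror struct vmcb_control_area).  Working on a
 * snapshot rather than rereading guest-writable VMCB memory keeps the
 * consistency checks and the construction of vmcb02 stable even if L1
 * rewrites vmcb12 concurrently.
 */
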
struct vmcb_ctrl_area_cached {
	u32 intercepts[MAX_INTERCEPT];
	u16 pause_filter_thresh;
	u16 pause_filter_count;
	u64 iopm_base_pa;
	u64 msrpm_base_pa;
	u64 tsc_offset;
	u32 asid;
	u8 tlb_ctl;
	u32 int_ctl;
	u32 int_vector;
	u32 int_state;
	u32 exit_code;
	u32 exit_code_hi;
	u64 exit_info_1;
	u64 exit_info_2;
	u32 exit_int_info;
	u32 exit_int_info_err;
	u64 nested_ctl;
	u32 event_inj;
	u32 event_inj_err;
	u64 next_rip;
	u64 nested_cr3;
	u64 virt_ext;
	u32 clean;
	union {
		struct hv_vmcb_enlightenments hv_enlightenments;
		u8 reserved_sw[32];
	};
};

struct svm_nested_state {
	struct kvm_vmcb_info vmcb02;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;
	u64 last_vmcb12_gpa;

	/* These are the merged vectors */
	u32 *msrpm;

	/* A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet. */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_ctrl_area_cached ctl;

	/*
	 * Note: this struct is not kept up-to-date while L2 runs; it is only
	 * valid within nested_svm_vmrun.
	 */
	struct vmcb_save_area_cached save;

	bool initialized;

	/*
	 * Indicates whether the MSR bitmap for L2 needs to be rebuilt due to
	 * changes in the MSR bitmap for L1 or switching to a different L2.
	 * Note, this flag can only be used reliably in conjunction with a
	 * paravirt L1 which informs L0 whether any changes to the MSR bitmap
	 * for L2 were done on its side.
	 */
	bool force_msr_bitmap_recalc;
};

struct vcpu_sev_es_state {
	/* SEV-ES support */
	struct sev_es_save_area *vmsa;
	struct ghcb *ghcb;
	u8 valid_bitmap[16];
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;

	/* SEV-ES scratch area support */
	u64 sw_scratch;
	void *ghcb_sa;
	u32 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	/* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
	struct vmcb *vmcb;
	struct kvm_vmcb_info vmcb01;
	struct kvm_vmcb_info *current_vmcb;
	u32 asid;
	u32 sysenter_esp_hi;
	u32 sysenter_eip_hi;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 spec_ctrl;

	u64 tsc_ratio_msr;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	/* NMI mask value, used when vNMI is not enabled */
	bool nmi_masked;

	/*
	 * True when NMIs are still masked but guest IRET was just intercepted
	 * and KVM is waiting for RIP to change, which will signal that the
	 * intercepted IRET was retired and thus NMI can be unmasked.
	 */
	bool awaiting_iret_completion;

	/*
	 * Set when KVM is awaiting IRET completion and needs to inject NMIs as
	 * soon as the IRET completes (e.g. NMI is pending injection).  KVM
	 * temporarily steals RFLAGS.TF to single-step the guest in this case
	 * in order to regain control as soon as the NMI-blocking condition
	 * goes away.
	 */
	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	bool nmi_l1_to_l2;

	unsigned long soft_int_csbase;
	unsigned long soft_int_old_rip;
	unsigned long soft_int_next_rip;
	bool soft_int_injected;

	u32 ldr_reg;
	u32 dfr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;

	/*
	 * Per-vCPU list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vCPU affinity.  This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;

	struct vcpu_sev_es_state sev_es;

	bool guest_state_loaded;

	bool x2avic_msrs_intercepted;

	/* Guest GIF value, used when vGIF is not enabled */
	bool guest_gif;
};

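/*
 * Per-CPU state, one instance per physical CPU.  Informal summary of
 * the ASID scheme as implemented in svm.c: when a CPU exhausts its ASID
 * space it bumps asid_generation and flushes the TLB; before VMRUN, a
 * vCPU whose cached kvm_vmcb_info.asid_generation no longer matches the
 * CPU's counter is assigned a fresh ASID.
 */
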
struct svm_cpu_data {
	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;

	struct page *save_area;
	unsigned long save_area_pa;

	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static __always_inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

static __always_inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}

static __always_inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->es_active && !WARN_ON_ONCE(!sev->active);
#else
	return false;
#endif
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = VMCB_ALL_CLEAN_MASK
			      & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}

static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

/*
 * Only the PDPTRs are loaded on demand into the shadow MMU.  All other
 * fields are synchronized on VM-Exit, because accessing the VMCB is cheap.
 *
 * CR3 might be out of date in the VMCB but it is not marked dirty; instead,
 * KVM_REQ_LOAD_MMU_PGD is always requested when the cached vcpu->arch.cr3
 * is changed.  svm_load_mmu_pgd() then syncs the new CR3 value into the VMCB.
 */
#define SVM_REGS_LAZY_LOAD_SET	(1 << VCPU_EXREG_PDPTR)

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}

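/*
 * vGIF helpers.  Informal summary of the code below: when hardware vGIF
 * is in use, the virtual GIF lives in the V_GIF bit of a VMCB's
 * int_ctl, and get_vgif_vmcb() picks which VMCB that is.  While L2 runs
 * and L1 does not itself use nested vGIF, vmcb02's V_GIF tracks L1's
 * GIF; otherwise vmcb01 is used.  Without vGIF, KVM falls back to the
 * software flag svm->guest_gif.
 */
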
static inline bool nested_vgif_enabled(struct vcpu_svm *svm)
{
	return guest_can_use(&svm->vcpu, X86_FEATURE_VGIF) &&
	       (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK);
}

static inline struct vmcb *get_vgif_vmcb(struct vcpu_svm *svm)
{
	if (!vgif)
		return NULL;

	if (is_guest_mode(&svm->vcpu) && !nested_vgif_enabled(svm))
		return svm->nested.vmcb02.ptr;
	else
		return svm->vmcb01.ptr;
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->guest_gif = true;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->guest_gif = false;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		return !!(vmcb->control.int_ctl & V_GIF_MASK);
	else
		return svm->guest_gif;
}

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

static inline bool nested_vnmi_enabled(struct vcpu_svm *svm)
{
	return guest_can_use(&svm->vcpu, X86_FEATURE_VNMI) &&
	       (svm->nested.ctl.int_ctl & V_NMI_ENABLE_MASK);
}

static inline bool is_x2apic_msrpm_offset(u32 offset)
{
	/* 4 msrs per u8, and 4 u8 in u32 */
	u32 msr = offset * 16;

	return (msr >= APIC_BASE_MSR) &&
	       (msr < (APIC_BASE_MSR + 0x100));
}

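/*
 * Worked example for the arithmetic above: each MSR consumes two bits
 * of the MSR permission bitmap (one read bit, one write bit), so one
 * u32 covers 16 MSRs and "offset * 16" recovers the first MSR of a
 * given u32.  With APIC_BASE_MSR == 0x800, offsets 0x80..0x8f map to
 * the x2APIC MSR range 0x800..0x8ff.
 */
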
static inline struct vmcb *get_vnmi_vmcb_l1(struct vcpu_svm *svm)
{
	if (!vnmi)
		return NULL;

	if (is_guest_mode(&svm->vcpu))
		return NULL;
	else
		return svm->vmcb01.ptr;
}

static inline bool is_vnmi_enabled(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vnmi_vmcb_l1(svm);

	if (vmcb)
		return !!(vmcb->control.int_ctl & V_NMI_ENABLE_MASK);
	else
		return false;
}

/* svm.c */
#define MSR_INVALID			0xffffffffU

#define DEBUGCTL_RESERVED_BITS		(~(0x3fULL))

extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
void svm_enable_lbrv(struct kvm_vcpu *vcpu);
void svm_update_lbrv(struct kvm_vcpu *vcpu);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);
void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool disable);
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
				     int trig_mode, int vec);

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
			 u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct kvm_vcpu *vcpu);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
	svm->vmcb->control.exit_code = exit_code;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;
	return nested_svm_vmexit(svm);
}

int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu);
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
				       struct vmcb_control_area *control);
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
				    struct vmcb_save_area *save);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */
#define AVIC_REQUIRED_APICV_INHIBITS			\
(							\
	BIT(APICV_INHIBIT_REASON_DISABLE) |		\
	BIT(APICV_INHIBIT_REASON_ABSENT) |		\
	BIT(APICV_INHIBIT_REASON_HYPERV) |		\
	BIT(APICV_INHIBIT_REASON_NESTED) |		\
	BIT(APICV_INHIBIT_REASON_IRQWIN) |		\
	BIT(APICV_INHIBIT_REASON_PIT_REINJ) |		\
	BIT(APICV_INHIBIT_REASON_BLOCKIRQ) |		\
	BIT(APICV_INHIBIT_REASON_SEV) |			\
	BIT(APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED) |	\
	BIT(APICV_INHIBIT_REASON_APIC_ID_MODIFIED) |	\
	BIT(APICV_INHIBIT_REASON_APIC_BASE_MODIFIED) |	\
	BIT(APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED)	\
)

bool avic_hardware_setup(void);
int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
			uint32_t guest_irq, bool set);
void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
void avic_ring_doorbell(struct kvm_vcpu *vcpu);
unsigned long avic_vcpu_get_apicv_inhibit_reasons(struct kvm_vcpu *vcpu);
void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu);

/* sev.c */

#define GHCB_VERSION_MAX	1ULL
#define GHCB_VERSION_MIN	1ULL

extern unsigned int max_sev_asid;

void sev_vm_destroy(struct kvm *kvm);
int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp);
int sev_mem_enc_register_region(struct kvm *kvm,
				struct kvm_enc_region *range);
int sev_mem_enc_unregister_region(struct kvm *kvm,
				  struct kvm_enc_region *range);
int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd);
int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
void sev_guest_memory_reclaimed(struct kvm *kvm);

void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_unsetup(void);
int sev_cpu_init(struct svm_cpu_data *sd);
void sev_init_vmcb(struct vcpu_svm *svm);
void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_vcpu_reset(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);

/* vmenter.S */

void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);

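/*
 * Generate per-field GHCB accessors.  Each expansion defines two
 * helpers; e.g. DEFINE_KVM_GHCB_ACCESSORS(rax) yields:
 *
 *	kvm_ghcb_rax_is_valid(svm)           - is the field's bit set in
 *	                                       the guest-supplied
 *	                                       valid_bitmap?
 *	kvm_ghcb_get_rax_if_valid(svm, ghcb) - ghcb->save.rax if the
 *	                                       guest marked it valid,
 *	                                       0 otherwise
 */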
#define DEFINE_KVM_GHCB_ACCESSORS(field)					\
	static __always_inline bool kvm_ghcb_##field##_is_valid(const struct vcpu_svm *svm) \
	{									\
		return test_bit(GHCB_BITMAP_IDX(field),				\
				(unsigned long *)&svm->sev_es.valid_bitmap);	\
	}									\
										\
	static __always_inline u64 kvm_ghcb_get_##field##_if_valid(struct vcpu_svm *svm, struct ghcb *ghcb) \
	{									\
		return kvm_ghcb_##field##_is_valid(svm) ? ghcb->save.field : 0;	\
	}

DEFINE_KVM_GHCB_ACCESSORS(cpl)
DEFINE_KVM_GHCB_ACCESSORS(rax)
DEFINE_KVM_GHCB_ACCESSORS(rcx)
DEFINE_KVM_GHCB_ACCESSORS(rdx)
DEFINE_KVM_GHCB_ACCESSORS(rbx)
DEFINE_KVM_GHCB_ACCESSORS(rsi)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_code)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_1)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_2)
DEFINE_KVM_GHCB_ACCESSORS(sw_scratch)
DEFINE_KVM_GHCB_ACCESSORS(xcr0)

#endif