/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>

#include "capabilities.h"
#include "kvm_cache_regs.h"
#include "posted_intr.h"
#include "vmcs.h"
#include "vmx_ops.h"
#include "cpuid.h"

#define MSR_TYPE_R	1
#define MSR_TYPE_W	2
#define MSR_TYPE_RW	3

#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))

#ifdef CONFIG_X86_64
#define MAX_NR_USER_RETURN_MSRS	7
#else
#define MAX_NR_USER_RETURN_MSRS	4
#endif

#define MAX_NR_LOADSTORE_MSRS	8

struct vmx_msrs {
	unsigned int nr;
	struct vmx_msr_entry val[MAX_NR_LOADSTORE_MSRS];
};

struct vmx_uret_msr {
	bool load_into_hardware;
	u64 data;
	u64 mask;
};

enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};

#define RTIT_ADDR_RANGE	4

struct pt_ctx {
	u64 ctl;
	u64 status;
	u64 output_base;
	u64 output_mask;
	u64 cr3_match;
	u64 addr_a[RTIT_ADDR_RANGE];
	u64 addr_b[RTIT_ADDR_RANGE];
};

struct pt_desc {
	u64 ctl_bitmask;
	u32 num_address_ranges;
	u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
	struct pt_ctx host;
	struct pt_ctx guest;
};

union vmx_exit_reason {
	struct {
		u32 basic		: 16;
		u32 reserved16		: 1;
		u32 reserved17		: 1;
		u32 reserved18		: 1;
		u32 reserved19		: 1;
		u32 reserved20		: 1;
		u32 reserved21		: 1;
		u32 reserved22		: 1;
		u32 reserved23		: 1;
		u32 reserved24		: 1;
		u32 reserved25		: 1;
		u32 bus_lock_detected	: 1;
		u32 enclave_mode	: 1;
		u32 smi_pending_mtf	: 1;
		u32 smi_from_vmx_root	: 1;
		u32 reserved30		: 1;
		u32 failed_vmentry	: 1;
	};
	u32 full;
};

#define vcpu_to_lbr_desc(vcpu) (&to_vmx(vcpu)->lbr_desc)
#define vcpu_to_lbr_records(vcpu) (&to_vmx(vcpu)->lbr_desc.records)

bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu);
bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu);

int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);

struct lbr_desc {
	/* Basic info about guest LBR records. */
	struct x86_pmu_lbr records;

	/*
	 * Emulate LBR feature via passthrough LBR registers when the
	 * per-vcpu guest LBR event is scheduled on the current pcpu.
	 *
	 * The records may be inaccurate if the host reclaims the LBR.
	 */
	struct perf_event *event;

	/* True if LBRs are marked as not intercepted in the MSR bitmap */
	bool msr_passthrough;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level1 guest done vmxon? */
	bool vmxon;
	gpa_t vmxon_ptr;
	bool pml_full;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/*
	 * Cache of the guest's VMCS, existing outside of guest memory.
	 * Loaded from guest memory during VMPTRLD. Flushed to guest
	 * memory during VMCLEAR and VMPTRLD.
	 */
	struct vmcs12 *cached_vmcs12;
	/*
	 * Cache of the guest's shadow VMCS, existing outside of guest
	 * memory. Loaded from guest memory during VM entry. Flushed
	 * to guest memory during VM exit.
	 */
	struct vmcs12 *cached_shadow_vmcs12;

	/*
	 * GPA to HVA cache for accessing vmcs12->vmcs_link_pointer
	 */
	struct gfn_to_hva_cache shadow_vmcs12_cache;

	/*
	 * GPA to HVA cache for VMCS12
	 */
	struct gfn_to_hva_cache vmcs12_cache;

	/*
	 * Indicates if the shadow vmcs or enlightened vmcs must be updated
	 * with the data held by struct vmcs12.
	 */
	bool need_vmcs12_to_shadow_sync;
	bool dirty_vmcs12;

	/*
	 * Indicates lazily loaded guest state has not yet been decached from
	 * vmcs02.
	 */
	bool need_sync_vmcs02_to_vmcs12_rare;

	/*
	 * vmcs02 has been initialized, i.e. state that is constant for
	 * vmcs02 has been written to the backing VMCS. Initialization
	 * is delayed until L1 actually attempts to run a nested VM.
	 */
	bool vmcs02_initialized;

	bool change_vmcs01_virtual_apic_mode;
	bool reload_vmcs01_apic_access_page;
	bool update_vmcs01_cpu_dirty_logging;

	/*
	 * Enlightened VMCS has been enabled. It does not mean that L1 has to
	 * use it. However, VMX features available to L1 will be limited based
	 * on what the enlightened VMCS supports.
	 */
	bool enlightened_vmcs_enabled;

	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;

	/* Pending MTF VM-exit into L1. */
	bool mtf_pending;

	struct loaded_vmcs vmcs02;

	/*
	 * Guest pages referred to in the vmcs02 with host-physical
	 * pointers, so we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
	struct kvm_host_map virtual_apic_map;
	struct kvm_host_map pi_desc_map;

	struct kvm_host_map msr_bitmap_map;

	struct pi_desc *pi_desc;
	bool pi_pending;
	u16 posted_intr_nv;

	struct hrtimer preemption_timer;
	u64 preemption_timer_deadline;
	bool has_preemption_timer_deadline;
	bool preemption_timer_expired;

	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
	u64 vmcs01_debugctl;
	u64 vmcs01_guest_bndcfgs;

	/* to migrate it to L1 if L2 writes to L1's CR8 directly */
	int l1_tpr_threshold;

	u16 vpid02;
	u16 last_vpid;

	struct nested_vmx_msrs msrs;

	/* SMM related state */
	struct {
		/* in VMX operation on SMM entry? */
		bool vmxon;
		/* in guest mode on SMM entry? */
		bool guest_mode;
	} smm;

	gpa_t hv_evmcs_vmptr;
	struct kvm_host_map hv_evmcs_map;
	struct hv_enlightened_vmcs *hv_evmcs;
};

struct vcpu_vmx {
	struct kvm_vcpu vcpu;
	u8 fail;
	u8 x2apic_msr_bitmap_mode;

	/*
	 * If true, host state has been stored in vmx->loaded_vmcs for
	 * the CPU registers that only need to be switched when transitioning
	 * to/from the kernel, and the registers have been loaded with guest
	 * values. If false, host state is loaded in the CPU registers
	 * and vmx->loaded_vmcs->host_state is invalid.
	 */
	bool guest_state_loaded;

	unsigned long exit_qualification;
	u32 exit_intr_info;
	u32 idt_vectoring_info;
	ulong rflags;

	/*
	 * User return MSRs are always emulated when enabled in the guest, but
	 * only loaded into hardware when necessary, e.g. SYSCALL #UDs outside
	 * of 64-bit mode or if EFER.SCE=0, so the SYSCALL MSRs don't need to
	 * be loaded into hardware unless the guest is in 64-bit mode with
	 * EFER.SCE=1.
	 */
	struct vmx_uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
	bool guest_uret_msrs_loaded;
#ifdef CONFIG_X86_64
	u64 msr_host_kernel_gs_base;
	u64 msr_guest_kernel_gs_base;
#endif

	u64 spec_ctrl;
	u32 msr_ia32_umwait_control;

	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs vmcs01;
	struct loaded_vmcs *loaded_vmcs;

	struct msr_autoload {
		struct vmx_msrs guest;
		struct vmx_msrs host;
	} msr_autoload;

	struct msr_autostore {
		struct vmx_msrs guest;
	} msr_autostore;

	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;

	union vmx_exit_reason exit_reason;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;

	/* Dynamic PLE window. */
	unsigned int ple_window;
	bool ple_window_dirty;

	bool req_immediate_exit;

	/* Support for PML */
#define PML_ENTITY_NUM	512
	struct page *pml_pg;

	/* apic deadline value in host tsc */
	u64 hv_deadline_tsc;

	unsigned long host_debugctlmsr;

	/*
	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
	 * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
	 * in msr_ia32_feature_control_valid_bits.
	 */
	u64 msr_ia32_feature_control;
	u64 msr_ia32_feature_control_valid_bits;
	/* SGX Launch Control public key hash */
	u64 msr_ia32_sgxlepubkeyhash[4];

	struct pt_desc pt_desc;
	struct lbr_desc lbr_desc;

	/* Save desired MSR intercept (read: pass-through) state */
#define MAX_POSSIBLE_PASSTHROUGH_MSRS	13
	struct {
		DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
		DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
	} shadow_msr_intercept;
};

struct kvm_vmx {
	struct kvm kvm;

	unsigned int tss_addr;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;
};

bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
			struct loaded_vmcs *buddy);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
			unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
bool vmx_emulation_required(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);

bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);

void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);

u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);

static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
					     int type, bool value)
{
	if (value)
		vmx_enable_intercept_for_msr(vcpu, msr, type);
	else
		vmx_disable_intercept_for_msr(vcpu, msr, type);
}

void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);

/*
 * Note, early Intel manuals have the write-low and read-high bitmap offsets
 * the wrong way round. The bitmaps control MSRs 0x00000000-0x00001fff and
 * 0xc0000000-0xc0001fff. The former (low) uses bytes 0-0x3ff for reads and
 * 0x800-0xbff for writes. The latter (high) uses 0x400-0x7ff for reads and
 * 0xc00-0xfff for writes. MSRs not covered by either of the ranges always
 * VM-Exit.
 */
#define __BUILD_VMX_MSR_BITMAP_HELPER(rtype, action, bitop, access, base) \
static inline rtype vmx_##action##_msr_bitmap_##access(unsigned long *bitmap, \
						       u32 msr) \
{ \
	int f = sizeof(unsigned long); \
 \
	if (msr <= 0x1fff) \
		return bitop##_bit(msr, bitmap + base / f); \
	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) \
		return bitop##_bit(msr & 0x1fff, bitmap + (base + 0x400) / f); \
	return (rtype)true; \
}
#define BUILD_VMX_MSR_BITMAP_HELPERS(ret_type, action, bitop) \
	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, read, 0x0) \
	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 0x800)

BUILD_VMX_MSR_BITMAP_HELPERS(bool, test, test)
BUILD_VMX_MSR_BITMAP_HELPERS(void, clear, __clear)
BUILD_VMX_MSR_BITMAP_HELPERS(void, set, __set)
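
/*
 * For example, a read-intercept check for IA32_EFER (MSR 0xc0000080) falls in
 * the "high" range above: vmx_test_msr_bitmap_read() tests bit
 * (0xc0000080 & 0x1fff) = 0x80 starting at byte offset 0x400, i.e. byte
 * 0x410, bit 0, while the write helpers use the same bit at byte offset
 * 0xc00 + 0x10 = 0xc10.
 */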

static inline u8 vmx_get_rvi(void)
{
	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}

#define BUILD_CONTROLS_SHADOW(lname, uname) \
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val) \
{ \
	if (vmx->loaded_vmcs->controls_shadow.lname != val) { \
		vmcs_write32(uname, val); \
		vmx->loaded_vmcs->controls_shadow.lname = val; \
	} \
} \
static inline u32 __##lname##_controls_get(struct loaded_vmcs *vmcs) \
{ \
	return vmcs->controls_shadow.lname; \
} \
static inline u32 lname##_controls_get(struct vcpu_vmx *vmx) \
{ \
	return __##lname##_controls_get(vmx->loaded_vmcs); \
} \
static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val) \
{ \
	lname##_controls_set(vmx, lname##_controls_get(vmx) | val); \
} \
static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u32 val) \
{ \
	lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val); \
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL)
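
/*
 * For instance, BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL) above
 * generates pin_controls_set(), __pin_controls_get(), pin_controls_get(),
 * pin_controls_setbit() and pin_controls_clearbit(), all of which go through
 * the cached value in loaded_vmcs->controls_shadow.pin so that writing an
 * unchanged value doesn't trigger a redundant VMWRITE.
 */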

static inline void vmx_register_cache_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
				  | (1 << VCPU_EXREG_RFLAGS)
				  | (1 << VCPU_EXREG_PDPTR)
				  | (1 << VCPU_EXREG_SEGMENTS)
				  | (1 << VCPU_EXREG_CR0)
				  | (1 << VCPU_EXREG_CR3)
				  | (1 << VCPU_EXREG_CR4)
				  | (1 << VCPU_EXREG_EXIT_INFO_1)
				  | (1 << VCPU_EXREG_EXIT_INFO_2));
	vcpu->arch.regs_dirty = 0;
}

static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_vmx, kvm);
}

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

static inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_1)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
		vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	}
	return vmx->exit_qualification;
}

static inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_2)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
		vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
	}
	return vmx->exit_intr_info;
}

struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);

static inline struct vmcs *alloc_vmcs(bool shadow)
{
	return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
			      GFP_KERNEL_ACCOUNT);
}

static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
	return secondary_exec_controls_get(vmx) &
		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
}

static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
{
	if (!enable_ept)
		return true;

	return allow_smaller_maxphyaddr && cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
}

static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
{
	return enable_unrestricted_guest && (!is_guest_mode(vcpu) ||
	    (secondary_exec_controls_get(to_vmx(vcpu)) &
	     SECONDARY_EXEC_UNRESTRICTED_GUEST));
}

bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu);
static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
{
	return is_unrestricted_guest(vcpu) || __vmx_guest_state_valid(vcpu);
}

void dump_vmcs(struct kvm_vcpu *vcpu);

static inline int vmx_get_instr_info_reg2(u32 vmx_instr_info)
{
	return (vmx_instr_info >> 28) & 0xf;
}

#endif /* __KVM_X86_VMX_H */