/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>

#include "capabilities.h"
#include "ops.h"
#include "vmcs.h"

extern const u32 vmx_msr_index[];
extern u64 host_efer;

extern u32 get_umwait_control_msr(void);

#define MSR_TYPE_R	1
#define MSR_TYPE_W	2
#define MSR_TYPE_RW	3

#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
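
/*
 * Illustrative note (not part of the original header): the x2APIC MSR range
 * starts at APIC_BASE_MSR (0x800) and each register's xAPIC MMIO offset is
 * divided by 16, so e.g. X2APIC_MSR(APIC_TASKPRI) evaluates to 0x808, the
 * x2APIC TPR MSR.
 */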

#define NR_AUTOLOAD_MSRS 8

struct vmx_msrs {
	unsigned int nr;
	struct vmx_msr_entry val[NR_AUTOLOAD_MSRS];
};

struct shared_msr_entry {
	unsigned index;
	u64 data;
	u64 mask;
};

enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};

/* Posted-Interrupt Descriptor */
struct pi_desc {
	u32 pir[8];	/* Posted interrupt requested */
	union {
		struct {
				/* bit 256 - Outstanding Notification */
			u16	on	: 1,
				/* bit 257 - Suppress Notification */
				sn	: 1,
				/* bit 271:258 - Reserved */
				rsvd_1	: 14;
				/* bit 279:272 - Notification Vector */
			u8	nv;
				/* bit 287:280 - Reserved */
			u8	rsvd_2;
				/* bit 319:288 - Notification Destination */
			u32	ndst;
		};
		u64 control;
	};
	u32 rsvd[6];
} __aligned(64);

#define RTIT_ADDR_RANGE	4

struct pt_ctx {
	u64 ctl;
	u64 status;
	u64 output_base;
	u64 output_mask;
	u64 cr3_match;
	u64 addr_a[RTIT_ADDR_RANGE];
	u64 addr_b[RTIT_ADDR_RANGE];
};

struct pt_desc {
	u64 ctl_bitmask;
	u32 addr_range;
	u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
	struct pt_ctx host;
	struct pt_ctx guest;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level1 guest done vmxon? */
	bool vmxon;
	gpa_t vmxon_ptr;
	bool pml_full;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/*
	 * Cache of the guest's VMCS, existing outside of guest memory.
	 * Loaded from guest memory during VMPTRLD. Flushed to guest
	 * memory during VMCLEAR and VMPTRLD.
	 */
	struct vmcs12 *cached_vmcs12;
	/*
	 * Cache of the guest's shadow VMCS, existing outside of guest
	 * memory. Loaded from guest memory during VM entry. Flushed
	 * to guest memory during VM exit.
	 */
	struct vmcs12 *cached_shadow_vmcs12;

	/*
	 * Indicates if the shadow vmcs or enlightened vmcs must be updated
	 * with the data held by struct vmcs12.
	 */
	bool need_vmcs12_to_shadow_sync;
	bool dirty_vmcs12;

	/*
	 * Indicates lazily loaded guest state has not yet been decached from
	 * vmcs02.
	 */
	bool need_sync_vmcs02_to_vmcs12_rare;

	/*
	 * vmcs02 has been initialized, i.e. state that is constant for
	 * vmcs02 has been written to the backing VMCS.  Initialization
	 * is delayed until L1 actually attempts to run a nested VM.
	 */
	bool vmcs02_initialized;

	bool change_vmcs01_virtual_apic_mode;

	/*
	 * Enlightened VMCS has been enabled. It does not mean that L1 has to
	 * use it. However, VMX features available to L1 will be limited based
	 * on what the enlightened VMCS supports.
	 */
	bool enlightened_vmcs_enabled;

	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;

	struct loaded_vmcs vmcs02;

	/*
	 * Guest pages referred to in the vmcs02 with host-physical
	 * pointers, so we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
	struct kvm_host_map virtual_apic_map;
	struct kvm_host_map pi_desc_map;

	struct kvm_host_map msr_bitmap_map;

	struct pi_desc *pi_desc;
	bool pi_pending;
	u16 posted_intr_nv;

	struct hrtimer preemption_timer;
	bool preemption_timer_expired;

	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
	u64 vmcs01_debugctl;
	u64 vmcs01_guest_bndcfgs;

	u16 vpid02;
	u16 last_vpid;

	struct nested_vmx_msrs msrs;

	/* SMM related state */
	struct {
		/* in VMX operation on SMM entry? */
		bool vmxon;
		/* in guest mode on SMM entry? */
		bool guest_mode;
	} smm;

	gpa_t hv_evmcs_vmptr;
	struct kvm_host_map hv_evmcs_map;
	struct hv_enlightened_vmcs *hv_evmcs;
};

struct vcpu_vmx {
	struct kvm_vcpu vcpu;
	u8 fail;
	u8 msr_bitmap_mode;

	/*
	 * If true, host state has been stored in vmx->loaded_vmcs for
	 * the CPU registers that only need to be switched when transitioning
	 * to/from the kernel, and the registers have been loaded with guest
	 * values.  If false, host state is loaded in the CPU registers
	 * and vmx->loaded_vmcs->host_state is invalid.
	 */
	bool guest_state_loaded;

	u32 exit_intr_info;
	u32 idt_vectoring_info;
	ulong rflags;

	struct shared_msr_entry *guest_msrs;
	int nmsrs;
	int save_nmsrs;
	bool guest_msrs_ready;
#ifdef CONFIG_X86_64
	u64 msr_host_kernel_gs_base;
	u64 msr_guest_kernel_gs_base;
#endif

	u64 spec_ctrl;
	u32 msr_ia32_umwait_control;

	u32 secondary_exec_control;

	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs vmcs01;
	struct loaded_vmcs *loaded_vmcs;

	struct msr_autoload {
		struct vmx_msrs guest;
		struct vmx_msrs host;
	} msr_autoload;

	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;

	u32 exit_reason;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;

	/* Dynamic PLE window. */
	unsigned int ple_window;
	bool ple_window_dirty;

	bool req_immediate_exit;

	/* Support for PML */
#define PML_ENTITY_NUM	512
	struct page *pml_pg;

	/* apic deadline value in host tsc */
	u64 hv_deadline_tsc;

	u64 current_tsc_ratio;

	u32 host_pkru;

	unsigned long host_debugctlmsr;

	/*
	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
	 * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
	 * in msr_ia32_feature_control_valid_bits.
	 */
	u64 msr_ia32_feature_control;
	u64 msr_ia32_feature_control_valid_bits;
	u64 ept_pointer;

	struct pt_desc pt_desc;
};

enum ept_pointers_status {
	EPT_POINTERS_CHECK = 0,
	EPT_POINTERS_MATCH = 1,
	EPT_POINTERS_MISMATCH = 2
};

struct kvm_vmx {
	struct kvm kvm;

	unsigned int tss_addr;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;

	enum ept_pointers_status ept_pointers_match;
	spinlock_t ept_pointer_lock;
};

bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu);
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
			unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
void update_exception_bitmap(struct kvm_vcpu *vcpu);
void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);

#define POSTED_INTR_ON  0
#define POSTED_INTR_SN  1

static inline bool pi_test_and_set_on(struct pi_desc *pi_desc)
{
	return test_and_set_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static inline bool pi_test_and_clear_on(struct pi_desc *pi_desc)
{
	return test_and_clear_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
{
	return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
}

static inline void pi_set_sn(struct pi_desc *pi_desc)
{
	set_bit(POSTED_INTR_SN,
		(unsigned long *)&pi_desc->control);
}

static inline void pi_set_on(struct pi_desc *pi_desc)
{
	set_bit(POSTED_INTR_ON,
		(unsigned long *)&pi_desc->control);
}

static inline void pi_clear_on(struct pi_desc *pi_desc)
{
	clear_bit(POSTED_INTR_ON,
		(unsigned long *)&pi_desc->control);
}

static inline int pi_test_on(struct pi_desc *pi_desc)
{
	return test_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static inline int pi_test_sn(struct pi_desc *pi_desc)
{
	return test_bit(POSTED_INTR_SN,
			(unsigned long *)&pi_desc->control);
}
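
/*
 * Usage sketch (illustrative only, not from the original header): a sender
 * typically records the vector in the PIR and then raises the Outstanding
 * Notification bit; a notification IPI is only needed when ON was previously
 * clear, e.g.:
 *
 *	pi_test_and_set_pir(vector, pi_desc);
 *	if (!pi_test_and_set_on(pi_desc))
 *		... send the notification IPI to the target CPU ...
 */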

static inline u8 vmx_get_rvi(void)
{
	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}

#define BUILD_CONTROLS_SHADOW(lname, uname) \
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val) \
{ \
	if (vmx->loaded_vmcs->controls_shadow.lname != val) { \
		vmcs_write32(uname, val); \
		vmx->loaded_vmcs->controls_shadow.lname = val; \
	} \
} \
static inline u32 lname##_controls_get(struct vcpu_vmx *vmx) \
{ \
	return vmx->loaded_vmcs->controls_shadow.lname; \
} \
static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val) \
{ \
	lname##_controls_set(vmx, lname##_controls_get(vmx) | val); \
} \
static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u32 val) \
{ \
	lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val); \
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL)
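
/*
 * Example (illustrative only): each BUILD_CONTROLS_SHADOW() invocation above
 * generates <name>_controls_get/set/setbit/clearbit() helpers, so callers can
 * toggle individual controls, e.g.:
 *
 *	pin_controls_setbit(vmx, PIN_BASED_EXT_INTR_MASK);
 *
 * and the underlying VMCS field is only rewritten when the cached shadow
 * value actually changes.
 */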

static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
	vmx->segment_cache.bitmask = 0;
}

static inline u32 vmx_vmentry_ctrl(void)
{
	u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;
	if (pt_mode == PT_MODE_SYSTEM)
		vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
				  VM_ENTRY_LOAD_IA32_RTIT_CTL);
	/* Loading of EFER and PERF_GLOBAL_CTRL is toggled dynamically */
	return vmentry_ctrl &
		~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_IA32_EFER);
}

static inline u32 vmx_vmexit_ctrl(void)
{
	u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;
	if (pt_mode == PT_MODE_SYSTEM)
		vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
				 VM_EXIT_CLEAR_IA32_RTIT_CTL);
	/* Loading of EFER and PERF_GLOBAL_CTRL is toggled dynamically */
	return vmexit_ctrl &
		~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
}

u32 vmx_exec_control(struct vcpu_vmx *vmx);
u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx);

static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_vmx, kvm);
}

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
{
	return &(to_vmx(vcpu)->pi_desc);
}

struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);

static inline struct vmcs *alloc_vmcs(bool shadow)
{
	return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
			      GFP_KERNEL_ACCOUNT);
}

u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);

static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
				bool invalidate_gpa)
{
	if (enable_ept && (invalidate_gpa || !enable_vpid)) {
		if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
			return;
		ept_sync_context(construct_eptp(vcpu,
						vcpu->arch.mmu->root_hpa));
	} else {
		vpid_sync_context(vpid);
	}
}

static inline void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
{
	__vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
}

static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
{
	vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
	vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
}

static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
	return vmx->secondary_exec_control &
		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
}

void dump_vmcs(void);

#endif /* __KVM_X86_VMX_H */