/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include <asm/mce.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"

void kvm_spurious_fault(void);

static __always_inline void kvm_guest_enter_irqoff(void)
{
	/*
	 * VMENTER enables interrupts (host state), but the kernel state is
	 * interrupts disabled when this is invoked. Also tell RCU about
	 * it. This is the same logic as for exit_to_user_mode().
	 *
	 * This ensures that e.g. latency analysis on the host observes
	 * guest mode as interrupt enabled.
	 *
	 * guest_enter_irqoff() informs context tracking about the
	 * transition to guest mode and if enabled adjusts RCU state
	 * accordingly.
	 */
	instrumentation_begin();
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	instrumentation_end();

	guest_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

static __always_inline void kvm_guest_exit_irqoff(void)
{
	/*
	 * VMEXIT disables interrupts (host state), but tracing and lockdep
	 * have them in state 'on' as recorded before entering guest mode.
	 * Same as enter_from_user_mode().
	 *
	 * context_tracking_guest_exit() restores host context and reinstates
	 * RCU if enabled and required.
	 *
	 * This needs to be done immediately after VM-Exit, before any code
	 * that might contain tracepoints or call out to the greater world,
	 * e.g. before x86_spec_ctrl_restore_host().
	 */
	lockdep_hardirqs_off(CALLER_ADDR0);
	context_tracking_guest_exit();

	instrumentation_begin();
	trace_hardirqs_off_finish();
	instrumentation_end();
}

#define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check)	\
({									\
	bool failed = (consistency_check);				\
	if (failed)							\
		trace_kvm_nested_vmenter_failed(#consistency_check, 0);	\
	failed;								\
})

#define KVM_DEFAULT_PLE_GAP		128
#define KVM_VMX_DEFAULT_PLE_WINDOW	4096
#define KVM_DEFAULT_PLE_WINDOW_GROW	2
#define KVM_DEFAULT_PLE_WINDOW_SHRINK	0
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX	UINT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX	USHRT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW	3000

static inline unsigned int __grow_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int max)
{
	u64 ret = val;

	if (modifier < 1)
		return base;

	if (modifier < base)
		ret *= modifier;
	else
		ret += modifier;

	return min(ret, (u64)max);
}

static inline unsigned int __shrink_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int min)
{
	if (modifier < 1)
		return base;

	if (modifier < base)
		val /= modifier;
	else
		val -= modifier;

	return max(val, min);
}

#define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL

int kvm_check_nested_events(struct kvm_vcpu *vcpu);

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
	vcpu->arch.exception.injected = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
	bool soft)
{
	vcpu->arch.interrupt.injected = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.injected = false;
}
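
/*
 * Illustrative note (not part of the original header): "pending" means an
 * exception still has to be evaluated/merged by KVM, while "injected" means
 * the event has already been written into the VMCS/VMCB for delivery.  If a
 * VM-Exit interrupts that delivery, the event must be reinjected as-is on
 * the next VM-Entry, so callers typically check something like
 *
 *	if (kvm_event_needs_reinjection(vcpu))
 *		... re-deliver the prior event instead of sampling new ones ...
 *
 * before evaluating new interrupt sources.
 */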

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
		vcpu->arch.nmi_injected;
}

static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	if (!is_long_mode(vcpu))
		return false;
	static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
	return cs_l;
}

static inline bool x86_exception_has_error_code(unsigned int vector)
{
	static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
			BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
			BIT(PF_VECTOR) | BIT(AC_VECTOR);

	return (1U << vector) & exception_has_error_code;
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	static_call(kvm_x86_tlb_flush_current)(vcpu);
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
{
	return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
}

static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
}

static inline u64 get_canonical(u64 la, u8 vaddr_bits)
{
	return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
}

static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
	return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
}

static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	u64 gen = kvm_memslots(vcpu->kvm)->generation;

	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
		return;

	/*
	 * If this is a shadow nested page table, the "GVA" is
	 * actually a nGPA.
	 */
	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
	vcpu->arch.mmio_access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = gen;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}
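
/*
 * Illustrative note (not part of the original header): the MMIO cache filled
 * by vcpu_cache_mmio_info() lets the emulator skip a fresh page-table walk on
 * back-to-back accesses to the same MMIO page, roughly:
 *
 *	if (vcpu_match_mmio_gva(vcpu, gva))
 *		... reuse vcpu->arch.mmio_gfn and vcpu->arch.mmio_access ...
 *
 * The generation check in vcpu_match_mmio_gen() keeps this safe across
 * memslot updates: a stale generation, or one taken while an update was in
 * progress, simply fails to match and the cached translation is ignored.
 */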

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	    vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	    vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
	unsigned long val = kvm_register_read_raw(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      int reg, unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write_raw(vcpu, reg, val);
}

static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}

static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu)
{
	return is_smm(vcpu) || static_call(kvm_x86_apic_init_signal_blocked)(vcpu);
}

void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs);
void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

u64 get_kvmclock_ns(struct kvm *kvm);

int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int handle_ud(struct kvm_vcpu *vcpu);

void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num);
bool kvm_vector_hashing_enabled(void);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
				    void *insn, int insn_len);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			    int emulation_type, void *insn, int insn_len);
fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);

extern u64 host_xcr0;
extern u64 supported_xcr0;
extern u64 host_xss;
extern u64 supported_xss;

static inline bool kvm_mpx_supported(void)
{
	return (supported_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
		== (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
}

extern unsigned int min_timer_period_us;

extern bool enable_vmware_backdoor;

extern int pi_inject_timer;

extern bool report_ignored_msrs;
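
/*
 * Illustrative note (not part of the original header): nsec_to_cycles()
 * below converts a nanosecond interval into guest TSC cycles.  The guest's
 * virtual TSC frequency is pre-factored into a pvclock-style fixed-point
 * (mult, shift) pair, so the conversion avoids a 64-bit division, e.g. when
 * translating the lapic timer advance period into TSC cycles.
 */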

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
#define do_shl32_div32(n, base)					\
	({							\
	    u32 __quot, __rem;					\
	    asm("divl %2" : "=a" (__quot), "=d" (__rem)		\
			  : "rm" (base), "0" (0), "1" ((u32) n));	\
	    n = __quot;						\
	    __rem;						\
	 })

static inline bool kvm_mwait_in_guest(struct kvm *kvm)
{
	return kvm->arch.mwait_in_guest;
}

static inline bool kvm_hlt_in_guest(struct kvm *kvm)
{
	return kvm->arch.hlt_in_guest;
}

static inline bool kvm_pause_in_guest(struct kvm *kvm)
{
	return kvm->arch.pause_in_guest;
}

static inline bool kvm_cstate_in_guest(struct kvm *kvm)
{
	return kvm->arch.cstate_in_guest;
}

DECLARE_PER_CPU(struct kvm_vcpu *, current_vcpu);

static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu)
{
	__this_cpu_write(current_vcpu, vcpu);
}

static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
{
	__this_cpu_write(current_vcpu, NULL);
}

static inline bool kvm_pat_valid(u64 data)
{
	if (data & 0xF8F8F8F8F8F8F8F8ull)
		return false;
	/* 0, 1, 4, 5, 6, 7 are valid values.  */
	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
}

static inline bool kvm_dr7_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

static inline bool kvm_dr6_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

/*
 * Trigger machine check on the host. We assume all the MSRs are already set up
 * by the CPU and that we still run on the same CPU as the MCE occurred on.
 * We pass a fake environment to the machine check handler because we want
 * the guest to be always treated like user space, no matter what context
 * it used internally.
 */
static inline void kvm_machine_check(void)
{
#if defined(CONFIG_X86_MCE)
	struct pt_regs regs = {
		.cs = 3, /* Fake ring 3 no matter what the guest ran on */
		.flags = X86_EFLAGS_IF,
	};

	do_machine_check(&regs);
#endif
}

void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
int kvm_spec_ctrl_test_value(u64 value);
bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
			      struct x86_exception *e);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);
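
/*
 * Illustrative note (not part of the original header): kvm_msr_allowed()
 * consults the MSR filter installed by userspace via KVM_X86_SET_MSR_FILTER.
 * An access denied by the filter is what surfaces as KVM_MSR_RET_FILTERED
 * below, as opposed to the generic KVM_MSR_RET_INVALID, e.g. a read path
 * would do something like
 *
 *	if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
 *		return KVM_MSR_RET_FILTERED;
 */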

/*
 * Internal error codes that are used to indicate that MSR emulation encountered
 * an error that should result in #GP in the guest, unless userspace
 * handles it.
 */
#define  KVM_MSR_RET_INVALID	2	/* in-kernel MSR emulation #GP condition */
#define  KVM_MSR_RET_FILTERED	3	/* #GP due to userspace MSR filter */

#define __cr4_reserved_bits(__cpu_has, __c)		\
({							\
	u64 __reserved_bits = CR4_RESERVED_BITS;	\
							\
	if (!__cpu_has(__c, X86_FEATURE_XSAVE))		\
		__reserved_bits |= X86_CR4_OSXSAVE;	\
	if (!__cpu_has(__c, X86_FEATURE_SMEP))		\
		__reserved_bits |= X86_CR4_SMEP;	\
	if (!__cpu_has(__c, X86_FEATURE_SMAP))		\
		__reserved_bits |= X86_CR4_SMAP;	\
	if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))	\
		__reserved_bits |= X86_CR4_FSGSBASE;	\
	if (!__cpu_has(__c, X86_FEATURE_PKU))		\
		__reserved_bits |= X86_CR4_PKE;		\
	if (!__cpu_has(__c, X86_FEATURE_LA57))		\
		__reserved_bits |= X86_CR4_LA57;	\
	if (!__cpu_has(__c, X86_FEATURE_UMIP))		\
		__reserved_bits |= X86_CR4_UMIP;	\
	if (!__cpu_has(__c, X86_FEATURE_VMX))		\
		__reserved_bits |= X86_CR4_VMXE;	\
	if (!__cpu_has(__c, X86_FEATURE_PCID))		\
		__reserved_bits |= X86_CR4_PCIDE;	\
	__reserved_bits;				\
})

int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			  void *dst);
int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			 void *dst);
int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
			 unsigned int port, void *data, unsigned int count,
			 int in);

#endif