/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include <asm/mce.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"

void kvm_spurious_fault(void);

#define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check)		\
({										\
	bool failed = (consistency_check);					\
	if (failed)								\
		trace_kvm_nested_vmenter_failed(#consistency_check, 0);	\
	failed;									\
})

#define KVM_DEFAULT_PLE_GAP		128
#define KVM_VMX_DEFAULT_PLE_WINDOW	4096
#define KVM_DEFAULT_PLE_WINDOW_GROW	2
#define KVM_DEFAULT_PLE_WINDOW_SHRINK	0
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX	UINT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX	USHRT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW	3000

static inline unsigned int __grow_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int max)
{
	u64 ret = val;

	if (modifier < 1)
		return base;

	if (modifier < base)
		ret *= modifier;
	else
		ret += modifier;

	return min(ret, (u64)max);
}

static inline unsigned int __shrink_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int min)
{
	if (modifier < 1)
		return base;

	if (modifier < base)
		val /= modifier;
	else
		val -= modifier;

	return max(val, min);
}
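
/*
 * Illustrative example, using only the defaults defined above: with the
 * VMX default window of 4096 and the default grow modifier of 2, the
 * modifier is smaller than the base and therefore acts as a multiplier:
 *
 *	__grow_ple_window(4096, 4096, 2, UINT_MAX) == 8192
 *
 * A modifier >= base would be added instead of multiplied. The default
 * shrink modifier of 0 takes the "modifier < 1" path, so
 * __shrink_ple_window() simply resets the window to the base value.
 */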

#define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL

void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
int kvm_check_nested_events(struct kvm_vcpu *vcpu);

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
	vcpu->arch.exception.injected = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
	bool soft)
{
	vcpu->arch.interrupt.injected = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.injected = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
		vcpu->arch.nmi_injected;
}

static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	WARN_ON_ONCE(vcpu->arch.guest_state_protected);

	if (!is_long_mode(vcpu))
		return false;
	static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
	return cs_l;
}

static inline bool is_64_bit_hypercall(struct kvm_vcpu *vcpu)
{
	/*
	 * If running with protected guest state, the CS register is not
	 * accessible. The hypercall register values will have had to be
	 * provided in 64-bit mode, so assume the guest is in 64-bit mode.
	 */
	return vcpu->arch.guest_state_protected || is_64_bit_mode(vcpu);
}

static inline bool x86_exception_has_error_code(unsigned int vector)
{
	static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
			BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
			BIT(PF_VECTOR) | BIT(AC_VECTOR);

	return (1U << vector) & exception_has_error_code;
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
{
	return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
}

static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
}

static inline u64 get_canonical(u64 la, u8 vaddr_bits)
{
	return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
}

static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
	return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
}
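
/*
 * Illustrative example of the canonical-address check above: with 48-bit
 * virtual addresses (CR4.LA57 clear), get_canonical() sign-extends bit 47
 * into the upper bits, so
 *
 *	get_canonical(0x0000800000000000, 48) == 0xffff800000000000
 *
 * The result differs from the input, so is_noncanonical_address() reports
 * that address as non-canonical, while 0xffff800000000000 passes the check.
 */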

static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	u64 gen = kvm_memslots(vcpu->kvm)->generation;

	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
		return;

	/*
	 * If this is a shadow nested page table, the "GVA" is
	 * actually a nGPA.
	 */
	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
	vcpu->arch.mmio_access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = gen;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	    vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	    vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
	unsigned long val = kvm_register_read_raw(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      int reg, unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write_raw(vcpu, reg, val);
}

static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}

static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu)
{
	return is_smm(vcpu) || static_call(kvm_x86_apic_init_signal_blocked)(vcpu);
}

void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

u64 get_kvmclock_ns(struct kvm *kvm);

int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int handle_ud(struct kvm_vcpu *vcpu);

void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num);
bool kvm_vector_hashing_enabled(void);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
				    void *insn, int insn_len);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			    int emulation_type, void *insn, int insn_len);
fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);

extern u64 host_xcr0;
extern u64 supported_xcr0;
extern u64 host_xss;
extern u64 supported_xss;
extern bool enable_pmu;

static inline bool kvm_mpx_supported(void)
{
	return (supported_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
		== (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
}

extern unsigned int min_timer_period_us;

extern bool enable_vmware_backdoor;

extern int pi_inject_timer;

extern bool report_ignored_msrs;

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
#define do_shl32_div32(n, base)					\
	({							\
	    u32 __quot, __rem;					\
	    asm("divl %2" : "=a" (__quot), "=d" (__rem)		\
			  : "rm" (base), "0" (0), "1" ((u32) n));	\
	    n = __quot;						\
	    __rem;						\
	 })
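
/*
 * Illustrative usage of do_shl32_div32(), assuming a local u32 variable
 * named "n" (the variable is not part of this file):
 *
 *	u32 n = 3;
 *	u32 rem = do_shl32_div32(n, 1000);
 *
 * This divides (3 << 32) = 12884901888 by 1000, leaving n == 12884901
 * and rem == 888. The quotient must fit in 32 bits, otherwise the divl
 * instruction raises a divide error.
 */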

static inline bool kvm_mwait_in_guest(struct kvm *kvm)
{
	return kvm->arch.mwait_in_guest;
}

static inline bool kvm_hlt_in_guest(struct kvm *kvm)
{
	return kvm->arch.hlt_in_guest;
}

static inline bool kvm_pause_in_guest(struct kvm *kvm)
{
	return kvm->arch.pause_in_guest;
}

static inline bool kvm_cstate_in_guest(struct kvm *kvm)
{
	return kvm->arch.cstate_in_guest;
}

enum kvm_intr_type {
	/* Values are arbitrary, but must be non-zero. */
	KVM_HANDLING_IRQ = 1,
	KVM_HANDLING_NMI,
};

static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu,
					enum kvm_intr_type intr)
{
	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, (u8)intr);
}

static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, 0);
}

static inline bool kvm_handling_nmi_from_guest(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.handling_intr_from_guest == KVM_HANDLING_NMI;
}

static inline bool kvm_pat_valid(u64 data)
{
	if (data & 0xF8F8F8F8F8F8F8F8ull)
		return false;
	/* 0, 1, 4, 5, 6, 7 are valid values.  */
	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
}
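
/*
 * Illustrative example for kvm_pat_valid(): once the high-bit mask check
 * has passed, each PAT byte must be one of the valid values listed above,
 * i.e. anything other than 2 or 3. A byte of 6 (WB) is accepted because
 * (6 | ((6 & 2) << 1)) == 6, while a byte of 2 fails because the same
 * expression evaluates to 6 != 2, so any PAT entry of type 2 or 3 is
 * rejected.
 */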

static inline bool kvm_dr7_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

static inline bool kvm_dr6_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

/*
 * Trigger machine check on the host. We assume all the MSRs are already set up
 * by the CPU and that we still run on the same CPU as the MCE occurred on.
 * We pass a fake environment to the machine check handler because we want
 * the guest to be always treated like user space, no matter what context
 * it used internally.
 */
static inline void kvm_machine_check(void)
{
#if defined(CONFIG_X86_MCE)
	struct pt_regs regs = {
		.cs = 3, /* Fake ring 3 no matter what the guest ran on */
		.flags = X86_EFLAGS_IF,
	};

	do_machine_check(&regs);
#endif
}

void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
int kvm_spec_ctrl_test_value(u64 value);
bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
			      struct x86_exception *e);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);

/*
 * Internal error codes that are used to indicate that MSR emulation encountered
 * an error that should result in #GP in the guest, unless userspace
 * handles it.
 */
#define  KVM_MSR_RET_INVALID	2	/* in-kernel MSR emulation #GP condition */
#define  KVM_MSR_RET_FILTERED	3	/* #GP due to userspace MSR filter */

#define __cr4_reserved_bits(__cpu_has, __c)             \
({                                                      \
	u64 __reserved_bits = CR4_RESERVED_BITS;        \
                                                        \
	if (!__cpu_has(__c, X86_FEATURE_XSAVE))         \
		__reserved_bits |= X86_CR4_OSXSAVE;     \
	if (!__cpu_has(__c, X86_FEATURE_SMEP))          \
		__reserved_bits |= X86_CR4_SMEP;        \
	if (!__cpu_has(__c, X86_FEATURE_SMAP))          \
		__reserved_bits |= X86_CR4_SMAP;        \
	if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))      \
		__reserved_bits |= X86_CR4_FSGSBASE;    \
	if (!__cpu_has(__c, X86_FEATURE_PKU))           \
		__reserved_bits |= X86_CR4_PKE;         \
	if (!__cpu_has(__c, X86_FEATURE_LA57))          \
		__reserved_bits |= X86_CR4_LA57;        \
	if (!__cpu_has(__c, X86_FEATURE_UMIP))          \
		__reserved_bits |= X86_CR4_UMIP;        \
	if (!__cpu_has(__c, X86_FEATURE_VMX))           \
		__reserved_bits |= X86_CR4_VMXE;        \
	if (!__cpu_has(__c, X86_FEATURE_PCID))          \
		__reserved_bits |= X86_CR4_PCIDE;       \
	__reserved_bits;                                \
})

int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			  void *dst);
int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			 void *dst);
int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
			 unsigned int port, void *data, unsigned int count,
			 int in);

#endif