#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/amd-iommu.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <linux/objtool.h>
#include <linux/psp-sev.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/rwsem.h>

#include <asm/apic.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
#include <asm/spec-ctrl.h>
#include <asm/cpu_device_id.h>
#include <asm/traps.h>

#include <asm/virtext.h>
#include "trace.h"

#include "svm.h"
#include "svm_ops.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#ifdef MODULE
static const struct x86_cpu_id svm_cpu_id[] = {
	X86_MATCH_FEATURE(X86_FEATURE_SVM, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
#endif

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_LBRV           (1 <<  1)
#define SVM_FEATURE_SVML           (1 <<  2)
#define SVM_FEATURE_TSC_RATE       (1 <<  4)
#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
#define SVM_FEATURE_PAUSE_FILTER   (1 << 10)

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

#define TSC_RATIO_RSVD		0xffffff0000000000ULL
#define TSC_RATIO_MIN		0x0000000000000001ULL
#define TSC_RATIO_MAX		0x000000ffffffffffULL

static bool erratum_383_found __read_mostly;

u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

/*
 * Set osvw_len to higher value when updated Revision Guides
 * are published and we know what the new status bits are
 */
static uint64_t osvw_len = 4, osvw_status;

static DEFINE_PER_CPU(u64, current_tsc_ratio);
#define TSC_RATIO_DEFAULT	0x0100000000ULL

static const struct svm_direct_access_msrs {
	u32 index;   /* Index of the MSR */
	bool always; /* True if intercept is initially cleared */
} direct_access_msrs[MAX_DIRECT_ACCESS_MSRS] = {
	{ .index = MSR_STAR,			.always = true  },
	{ .index = MSR_IA32_SYSENTER_CS,	.always = true  },
#ifdef CONFIG_X86_64
	{ .index = MSR_GS_BASE,			.always = true  },
	{ .index = MSR_FS_BASE,			.always = true  },
	{ .index = MSR_KERNEL_GS_BASE,		.always = true  },
	{ .index = MSR_LSTAR,			.always = true  },
	{ .index = MSR_CSTAR,			.always = true  },
	{ .index = MSR_SYSCALL_MASK,		.always = true  },
#endif
	{ .index = MSR_IA32_SPEC_CTRL,		.always = false },
	{ .index = MSR_IA32_PRED_CMD,		.always = false },
	{ .index = MSR_IA32_LASTBRANCHFROMIP,	.always = false },
	{ .index = MSR_IA32_LASTBRANCHTOIP,	.always = false },
	{ .index = MSR_IA32_LASTINTFROMIP,	.always = false },
	{ .index = MSR_IA32_LASTINTTOIP,	.always = false },
	{ .index = MSR_EFER,			.always = false },
	{ .index = MSR_IA32_CR_PAT,		.always = false },
	{ .index = MSR_AMD64_SEV_ES_GHCB,	.always = true  },
	{ .index = MSR_INVALID,			.always = false },
};
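
/*
 * Entries with .always == true get both their read and write intercepts
 * cleared for every vCPU in svm_vcpu_init_msrpm(), so the guest accesses
 * them directly from the start.  The remaining entries stay intercepted
 * until a feature clears them on demand via set_msr_interception(), e.g.
 * svm_enable_lbrv() below.
 */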
/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
bool npt_enabled = true;
#else
bool npt_enabled;
#endif

/*
 * These 2 parameters are used to config the controls for Pause-Loop Exiting:
 * pause_filter_count: On processors that support Pause filtering (indicated
 *	by CPUID Fn8000_000A_EDX), the VMCB provides a 16 bit pause filter
 *	count value. On VMRUN this value is loaded into an internal counter.
 *	Each time a pause instruction is executed, this counter is decremented
 *	until it reaches zero, at which time a #VMEXIT is generated if pause
 *	intercept is enabled. Refer to AMD APM Vol 2 Section 15.14.4 Pause
 *	Intercept Filtering for more details.
 *	This also indicates whether PLE logic is enabled.
 *
 * pause_filter_thresh: In addition, some processor families support advanced
 *	pause filtering (indicated by CPUID Fn8000_000A_EDX), which places an
 *	upper bound on the amount of time a guest is allowed to execute in a
 *	pause loop.
 *	In this mode, a 16-bit pause filter threshold field is added in the
 *	VMCB. The threshold value is a cycle count that is used to reset the
 *	pause counter. As with simple pause filtering, VMRUN loads the pause
 *	count value from VMCB into an internal counter. Then, on each pause
 *	instruction the hardware checks the elapsed number of cycles since
 *	the most recent pause instruction against the pause filter threshold.
 *	If the elapsed cycle count is greater than the pause filter threshold,
 *	then the internal pause count is reloaded from the VMCB and execution
 *	continues. If the elapsed cycle count is less than the pause filter
 *	threshold, then the internal pause count is decremented. If the count
 *	value is less than zero and PAUSE intercept is enabled, a #VMEXIT is
 *	triggered. If advanced pause filtering is supported and pause filter
 *	threshold field is set to zero, the filter will operate in the simpler,
 *	count only mode.
 */

static unsigned short pause_filter_thresh = KVM_DEFAULT_PLE_GAP;
module_param(pause_filter_thresh, ushort, 0444);

static unsigned short pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW;
module_param(pause_filter_count, ushort, 0444);

/* Default doubles per-vcpu window every exit. */
static unsigned short pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
module_param(pause_filter_count_grow, ushort, 0444);

/* Default resets per-vcpu window every exit to pause_filter_count. */
static unsigned short pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
module_param(pause_filter_count_shrink, ushort, 0444);

/* Default is to compute the maximum so we can never overflow. */
static unsigned short pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
module_param(pause_filter_count_max, ushort, 0444);

/* allow nested paging (virtualized MMU) for all guests */
static int npt = true;
module_param(npt, int, S_IRUGO);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, S_IRUGO);

/* enable/disable Next RIP Save */
static int nrips = true;
module_param(nrips, int, 0444);

/* enable/disable Virtual VMLOAD VMSAVE */
static int vls = true;
module_param(vls, int, 0444);

/* enable/disable Virtual GIF */
static int vgif = true;
module_param(vgif, int, 0444);

/* enable/disable SEV support */
int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
module_param(sev, int, 0444);

/* enable/disable SEV-ES support */
int sev_es = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
module_param(sev_es, int, 0444);

bool __read_mostly dump_invalid_vmcb;
module_param(dump_invalid_vmcb, bool, 0644);

static bool svm_gp_erratum_intercept = true;

static u8 rsm_ins_bytes[] = "\x0f\xaa";

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1:8, type:5, dpl:2, p:1;
	unsigned limit1:4, zero0:3, g:1, base2:8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
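
/*
 * Each MSR takes two consecutive bits (read, write) in the MSR permission
 * map.  Worked example for MSR_STAR (0xc0000081): range index 1, so the
 * u8 offset is 0x81 / 4 + 2048 = 2080 and the returned u32 offset is
 * 2080 / 4 = 520; the read/write bits within that u32 are then
 * 2 * (msr & 0x0f) = 2 and 3, as computed in msr_write_intercepted()
 * and set_msr_interception_bitmap() below.
 */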
u32 svm_msrpm_offset(u32 msr)
{
	u32 offset;
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr < msrpm_ranges[i] ||
		    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
			continue;

		offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8   */
		offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

		/* Now we have the u8 offset - but need the u32 offset */
		return offset / 4;
	}

	/* MSR not in any range */
	return MSR_INVALID;
}

#define MAX_INST_SIZE 15

static int get_max_npt_level(void)
{
#ifdef CONFIG_X86_64
	return PT64_ROOT_4LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 old_efer = vcpu->arch.efer;

	vcpu->arch.efer = efer;

	if (!npt_enabled) {
		/* Shadow paging assumes NX to be available. */
		efer |= EFER_NX;

		if (!(efer & EFER_LMA))
			efer &= ~EFER_LME;
	}

	if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) {
		if (!(efer & EFER_SVME)) {
			svm_leave_nested(svm);
			svm_set_gif(svm, true);
			/* #GP intercept is still needed for vmware backdoor */
			if (!enable_vmware_backdoor)
				clr_exception_intercept(svm, GP_VECTOR);

			/*
			 * Free the nested guest state, unless we are in SMM.
			 * In this case we will return to the nested guest
			 * as soon as we leave SMM.
			 */
			if (!is_smm(&svm->vcpu))
				svm_free_nested(svm);

		} else {
			int ret = svm_allocate_nested(svm);

			if (ret) {
				vcpu->arch.efer = old_efer;
				return ret;
			}

			if (svm_gp_erratum_intercept)
				set_exception_intercept(svm, GP_VECTOR);
		}
	}

	svm->vmcb->save.efer = efer | EFER_SVME;
	vmcb_mark_dirty(svm->vmcb, VMCB_CR);
	return 0;
}
static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ret = 0;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
	return ret;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (mask == 0)
		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
	else
		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;

}

static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * SEV-ES does not expose the next RIP. The RIP update is controlled by
	 * the type of exit and the #VC handler in the guest.
	 */
	if (sev_es_guest(vcpu->kvm))
		goto done;

	if (nrips && svm->vmcb->control.next_rip != 0) {
		WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
		svm->next_rip = svm->vmcb->control.next_rip;
	}

	if (!svm->next_rip) {
		if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
			return 0;
	} else {
		kvm_rip_write(vcpu, svm->next_rip);
	}

done:
	svm_set_interrupt_shadow(vcpu, 0);

	return 1;
}

static void svm_queue_exception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned nr = vcpu->arch.exception.nr;
	bool has_error_code = vcpu->arch.exception.has_error_code;
	u32 error_code = vcpu->arch.exception.error_code;

	kvm_deliver_exception_payload(&svm->vcpu);

	if (nr == BP_VECTOR && !nrips) {
		unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

		/*
		 * For guest debugging where we have to reinject #BP if some
		 * INT3 is guest-owned:
		 * Emulate nRIP by moving RIP forward. Will fail if injection
		 * raises a fault that is not intercepted. Still better than
		 * failing in all cases.
		 */
		(void)skip_emulated_instruction(&svm->vcpu);
		rip = kvm_rip_read(&svm->vcpu);
		svm->int3_rip = rip + svm->vmcb->save.cs.base;
		svm->int3_injected = rip - old_rip;
	}
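
	/*
	 * EVENTINJ encodes the vector in bits 7:0 and the event type in
	 * bits 10:8; SVM_EVTINJ_VALID (bit 31) arms the injection and
	 * SVM_EVTINJ_VALID_ERR (bit 11) tells the CPU to push an error
	 * code, which is supplied separately via event_inj_err.
	 */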
	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}

static void svm_init_erratum_383(void)
{
	u32 low, high;
	int err;
	u64 val;

	if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
		return;

	/* Use _safe variants to not break nested virtualization */
	val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
	if (err)
		return;

	val |= (1ULL << 47);

	low  = lower_32_bits(val);
	high = upper_32_bits(val);

	native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

	erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
	/*
	 * Guests should see errata 400 and 415 as fixed (assuming that
	 * HLT and IO instructions are intercepted).
	 */
	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
	vcpu->arch.osvw.status = osvw_status & ~(6ULL);

	/*
	 * By increasing VCPU's osvw.length to 3 we are telling the guest that
	 * all osvw.status bits inside that length, including bit 0 (which is
	 * reserved for erratum 298), are valid. However, if host processor's
	 * osvw_len is 0 then osvw_status[0] carries no information. We need to
	 * be conservative here and therefore we tell the guest that erratum 298
	 * is present (because we really don't know).
	 */
	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
		vcpu->arch.osvw.status |= 1;
}

static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	if (sev_active()) {
		pr_info("KVM is unsupported when running as an SEV guest\n");
		return 0;
	}

	return 1;
}

static void svm_hardware_disable(void)
{
	/* Make sure we clean up behind us */
	if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

	cpu_svm_disable();

	amd_pmu_disable_virt();
}

static int svm_hardware_enable(void)
{

	struct svm_cpu_data *sd;
	uint64_t efer;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME)
		return -EBUSY;

	if (!has_svm()) {
		pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
		return -EINVAL;
	}
	sd = per_cpu(svm_data, me);
	if (!sd) {
		pr_err("%s: svm_data is NULL on %d\n", __func__, me);
		return -EINVAL;
	}

	sd->asid_generation = 1;
	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	sd->next_asid = sd->max_asid + 1;
	sd->min_asid = max_sev_asid + 1;

	gdt = get_current_gdt_rw();
	sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA, __sme_page_pa(sd->save_area));

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
		__this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
	}


	/*
	 * Get OSVW bits.
	 *
	 * Note that it is possible to have a system with mixed processor
	 * revisions and therefore different OSVW bits. If bits are not the same
	 * on different processors then choose the worst case (i.e. if erratum
	 * is present on one processor and not on another then assume that the
	 * erratum is present everywhere).
	 */
516 */ 517 if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) { 518 uint64_t len, status = 0; 519 int err; 520 521 len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err); 522 if (!err) 523 status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS, 524 &err); 525 526 if (err) 527 osvw_status = osvw_len = 0; 528 else { 529 if (len < osvw_len) 530 osvw_len = len; 531 osvw_status |= status; 532 osvw_status &= (1ULL << osvw_len) - 1; 533 } 534 } else 535 osvw_status = osvw_len = 0; 536 537 svm_init_erratum_383(); 538 539 amd_pmu_enable_virt(); 540 541 return 0; 542 } 543 544 static void svm_cpu_uninit(int cpu) 545 { 546 struct svm_cpu_data *sd = per_cpu(svm_data, cpu); 547 548 if (!sd) 549 return; 550 551 per_cpu(svm_data, cpu) = NULL; 552 kfree(sd->sev_vmcbs); 553 __free_page(sd->save_area); 554 kfree(sd); 555 } 556 557 static int svm_cpu_init(int cpu) 558 { 559 struct svm_cpu_data *sd; 560 561 sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL); 562 if (!sd) 563 return -ENOMEM; 564 sd->cpu = cpu; 565 sd->save_area = alloc_page(GFP_KERNEL); 566 if (!sd->save_area) 567 goto free_cpu_data; 568 clear_page(page_address(sd->save_area)); 569 570 if (svm_sev_enabled()) { 571 sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1, 572 sizeof(void *), 573 GFP_KERNEL); 574 if (!sd->sev_vmcbs) 575 goto free_save_area; 576 } 577 578 per_cpu(svm_data, cpu) = sd; 579 580 return 0; 581 582 free_save_area: 583 __free_page(sd->save_area); 584 free_cpu_data: 585 kfree(sd); 586 return -ENOMEM; 587 588 } 589 590 static int direct_access_msr_slot(u32 msr) 591 { 592 u32 i; 593 594 for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) 595 if (direct_access_msrs[i].index == msr) 596 return i; 597 598 return -ENOENT; 599 } 600 601 static void set_shadow_msr_intercept(struct kvm_vcpu *vcpu, u32 msr, int read, 602 int write) 603 { 604 struct vcpu_svm *svm = to_svm(vcpu); 605 int slot = direct_access_msr_slot(msr); 606 607 if (slot == -ENOENT) 608 return; 609 610 /* Set the shadow bitmaps to the desired intercept states */ 611 if (read) 612 set_bit(slot, svm->shadow_msr_intercept.read); 613 else 614 clear_bit(slot, svm->shadow_msr_intercept.read); 615 616 if (write) 617 set_bit(slot, svm->shadow_msr_intercept.write); 618 else 619 clear_bit(slot, svm->shadow_msr_intercept.write); 620 } 621 622 static bool valid_msr_intercept(u32 index) 623 { 624 return direct_access_msr_slot(index) != -ENOENT; 625 } 626 627 static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr) 628 { 629 u8 bit_write; 630 unsigned long tmp; 631 u32 offset; 632 u32 *msrpm; 633 634 msrpm = is_guest_mode(vcpu) ? 
static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
{
	u8 bit_write;
	unsigned long tmp;
	u32 offset;
	u32 *msrpm;

	msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm:
				      to_svm(vcpu)->msrpm;

	offset    = svm_msrpm_offset(msr);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	BUG_ON(offset == MSR_INVALID);

	return !!test_bit(bit_write, &tmp);
}

static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm,
					u32 msr, int read, int write)
{
	u8 bit_read, bit_write;
	unsigned long tmp;
	u32 offset;

	/*
	 * If this warning triggers extend the direct_access_msrs list at the
	 * beginning of the file
	 */
	WARN_ON(!valid_msr_intercept(msr));

	/* Enforce non allowed MSRs to trap */
	if (read && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
		read = 0;

	if (write && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
		write = 0;

	offset    = svm_msrpm_offset(msr);
	bit_read  = 2 * (msr & 0x0f);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	BUG_ON(offset == MSR_INVALID);

	read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
	write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

	msrpm[offset] = tmp;
}

void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write)
{
	set_shadow_msr_intercept(vcpu, msr, read, write);
	set_msr_interception_bitmap(vcpu, msrpm, msr, read, write);
}

u32 *svm_vcpu_alloc_msrpm(void)
{
	struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER);
	u32 *msrpm;

	if (!pages)
		return NULL;

	msrpm = page_address(pages);
	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

	return msrpm;
}

void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm)
{
	int i;

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		if (!direct_access_msrs[i].always)
			continue;
		set_msr_interception(vcpu, msrpm, direct_access_msrs[i].index, 1, 1);
	}
}


void svm_vcpu_free_msrpm(u32 *msrpm)
{
	__free_pages(virt_to_page(msrpm), MSRPM_ALLOC_ORDER);
}

static void svm_msr_filter_changed(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 i;

	/*
	 * Set intercept permissions for all direct access MSRs again. They
	 * will automatically get filtered through the MSR filter, so we are
	 * back in sync after this.
	 */
	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		u32 msr = direct_access_msrs[i].index;
		u32 read = test_bit(i, svm->shadow_msr_intercept.read);
		u32 write = test_bit(i, svm->shadow_msr_intercept.write);

		set_msr_interception_bitmap(vcpu, svm->msrpm, msr, read, write);
	}
}

static void add_msr_offset(u32 offset)
{
	int i;

	for (i = 0; i < MSRPM_OFFSETS; ++i) {

		/* Offset already in list? */
		if (msrpm_offsets[i] == offset)
			return;

		/* Slot used by another offset? */
		if (msrpm_offsets[i] != MSR_INVALID)
			continue;

		/* Add offset to list */
		msrpm_offsets[i] = offset;

		return;
	}

	/*
	 * If this BUG triggers the msrpm_offsets table has an overflow. Just
	 * increase MSRPM_OFFSETS in this case.
	 */
759 */ 760 BUG(); 761 } 762 763 static void init_msrpm_offsets(void) 764 { 765 int i; 766 767 memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets)); 768 769 for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) { 770 u32 offset; 771 772 offset = svm_msrpm_offset(direct_access_msrs[i].index); 773 BUG_ON(offset == MSR_INVALID); 774 775 add_msr_offset(offset); 776 } 777 } 778 779 static void svm_enable_lbrv(struct kvm_vcpu *vcpu) 780 { 781 struct vcpu_svm *svm = to_svm(vcpu); 782 783 svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK; 784 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1); 785 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1); 786 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1); 787 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1); 788 } 789 790 static void svm_disable_lbrv(struct kvm_vcpu *vcpu) 791 { 792 struct vcpu_svm *svm = to_svm(vcpu); 793 794 svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK; 795 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0); 796 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0); 797 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 0, 0); 798 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 0, 0); 799 } 800 801 void disable_nmi_singlestep(struct vcpu_svm *svm) 802 { 803 svm->nmi_singlestep = false; 804 805 if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) { 806 /* Clear our flags if they were not set by the guest */ 807 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF)) 808 svm->vmcb->save.rflags &= ~X86_EFLAGS_TF; 809 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF)) 810 svm->vmcb->save.rflags &= ~X86_EFLAGS_RF; 811 } 812 } 813 814 static void grow_ple_window(struct kvm_vcpu *vcpu) 815 { 816 struct vcpu_svm *svm = to_svm(vcpu); 817 struct vmcb_control_area *control = &svm->vmcb->control; 818 int old = control->pause_filter_count; 819 820 control->pause_filter_count = __grow_ple_window(old, 821 pause_filter_count, 822 pause_filter_count_grow, 823 pause_filter_count_max); 824 825 if (control->pause_filter_count != old) { 826 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); 827 trace_kvm_ple_window_update(vcpu->vcpu_id, 828 control->pause_filter_count, old); 829 } 830 } 831 832 static void shrink_ple_window(struct kvm_vcpu *vcpu) 833 { 834 struct vcpu_svm *svm = to_svm(vcpu); 835 struct vmcb_control_area *control = &svm->vmcb->control; 836 int old = control->pause_filter_count; 837 838 control->pause_filter_count = 839 __shrink_ple_window(old, 840 pause_filter_count, 841 pause_filter_count_shrink, 842 pause_filter_count); 843 if (control->pause_filter_count != old) { 844 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); 845 trace_kvm_ple_window_update(vcpu->vcpu_id, 846 control->pause_filter_count, old); 847 } 848 } 849 850 /* 851 * The default MMIO mask is a single bit (excluding the present bit), 852 * which could conflict with the memory encryption bit. Check for 853 * memory encryption support and override the default MMIO mask if 854 * memory encryption is enabled. 
855 */ 856 static __init void svm_adjust_mmio_mask(void) 857 { 858 unsigned int enc_bit, mask_bit; 859 u64 msr, mask; 860 861 /* If there is no memory encryption support, use existing mask */ 862 if (cpuid_eax(0x80000000) < 0x8000001f) 863 return; 864 865 /* If memory encryption is not enabled, use existing mask */ 866 rdmsrl(MSR_K8_SYSCFG, msr); 867 if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT)) 868 return; 869 870 enc_bit = cpuid_ebx(0x8000001f) & 0x3f; 871 mask_bit = boot_cpu_data.x86_phys_bits; 872 873 /* Increment the mask bit if it is the same as the encryption bit */ 874 if (enc_bit == mask_bit) 875 mask_bit++; 876 877 /* 878 * If the mask bit location is below 52, then some bits above the 879 * physical addressing limit will always be reserved, so use the 880 * rsvd_bits() function to generate the mask. This mask, along with 881 * the present bit, will be used to generate a page fault with 882 * PFER.RSV = 1. 883 * 884 * If the mask bit location is 52 (or above), then clear the mask. 885 */ 886 mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0; 887 888 kvm_mmu_set_mmio_spte_mask(mask, PT_WRITABLE_MASK | PT_USER_MASK); 889 } 890 891 static void svm_hardware_teardown(void) 892 { 893 int cpu; 894 895 if (svm_sev_enabled()) 896 sev_hardware_teardown(); 897 898 for_each_possible_cpu(cpu) 899 svm_cpu_uninit(cpu); 900 901 __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER); 902 iopm_base = 0; 903 } 904 905 static __init void svm_set_cpu_caps(void) 906 { 907 kvm_set_cpu_caps(); 908 909 supported_xss = 0; 910 911 /* CPUID 0x80000001 and 0x8000000A (SVM features) */ 912 if (nested) { 913 kvm_cpu_cap_set(X86_FEATURE_SVM); 914 915 if (nrips) 916 kvm_cpu_cap_set(X86_FEATURE_NRIPS); 917 918 if (npt_enabled) 919 kvm_cpu_cap_set(X86_FEATURE_NPT); 920 921 /* Nested VM can receive #VMEXIT instead of triggering #GP */ 922 kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK); 923 } 924 925 /* CPUID 0x80000008 */ 926 if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) || 927 boot_cpu_has(X86_FEATURE_AMD_SSBD)) 928 kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD); 929 930 /* Enable INVPCID feature */ 931 kvm_cpu_cap_check_and_set(X86_FEATURE_INVPCID); 932 } 933 934 static __init int svm_hardware_setup(void) 935 { 936 int cpu; 937 struct page *iopm_pages; 938 void *iopm_va; 939 int r; 940 941 iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER); 942 943 if (!iopm_pages) 944 return -ENOMEM; 945 946 iopm_va = page_address(iopm_pages); 947 memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER)); 948 iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT; 949 950 init_msrpm_offsets(); 951 952 supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR); 953 954 if (boot_cpu_has(X86_FEATURE_NX)) 955 kvm_enable_efer_bits(EFER_NX); 956 957 if (boot_cpu_has(X86_FEATURE_FXSR_OPT)) 958 kvm_enable_efer_bits(EFER_FFXSR); 959 960 if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) { 961 kvm_has_tsc_control = true; 962 kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX; 963 kvm_tsc_scaling_ratio_frac_bits = 32; 964 } 965 966 /* Check for pause filtering support */ 967 if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) { 968 pause_filter_count = 0; 969 pause_filter_thresh = 0; 970 } else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) { 971 pause_filter_thresh = 0; 972 } 973 974 if (nested) { 975 printk(KERN_INFO "kvm: Nested Virtualization enabled\n"); 976 kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE); 977 } 978 979 if (IS_ENABLED(CONFIG_KVM_AMD_SEV) && sev) { 980 sev_hardware_setup(); 981 } else { 982 sev = false; 983 sev_es = false; 984 } 
	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	init_msrpm_offsets();

	supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
		kvm_enable_efer_bits(EFER_FFXSR);

	if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		kvm_has_tsc_control = true;
		kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
		kvm_tsc_scaling_ratio_frac_bits = 32;
	}

	/* Check for pause filtering support */
	if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
		pause_filter_count = 0;
		pause_filter_thresh = 0;
	} else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) {
		pause_filter_thresh = 0;
	}

	if (nested) {
		printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
		kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
	}

	if (IS_ENABLED(CONFIG_KVM_AMD_SEV) && sev) {
		sev_hardware_setup();
	} else {
		sev = false;
		sev_es = false;
	}

	svm_adjust_mmio_mask();

	for_each_possible_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	if (!boot_cpu_has(X86_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt)
		npt_enabled = false;

	kvm_configure_mmu(npt_enabled, get_max_npt_level(), PG_LEVEL_1G);
	pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis");

	if (nrips) {
		if (!boot_cpu_has(X86_FEATURE_NRIPS))
			nrips = false;
	}

	if (avic) {
		if (!npt_enabled ||
		    !boot_cpu_has(X86_FEATURE_AVIC) ||
		    !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
			avic = false;
		} else {
			pr_info("AVIC enabled\n");

			amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
		}
	}

	if (vls) {
		if (!npt_enabled ||
		    !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
		    !IS_ENABLED(CONFIG_X86_64)) {
			vls = false;
		} else {
			pr_info("Virtual VMLOAD VMSAVE supported\n");
		}
	}

	if (boot_cpu_has(X86_FEATURE_SVME_ADDR_CHK))
		svm_gp_erratum_intercept = false;

	if (vgif) {
		if (!boot_cpu_has(X86_FEATURE_VGIF))
			vgif = false;
		else
			pr_info("Virtual GIF supported\n");
	}

	svm_set_cpu_caps();

	/*
	 * It seems that on AMD processors PTE's accessed bit is
	 * being set by the CPU hardware before the NPF vmexit.
	 * This is not expected behaviour and our tests fail because
	 * of it.
	 * A workaround here is to disable support for
	 * GUEST_MAXPHYADDR < HOST_MAXPHYADDR if NPT is enabled.
	 * In this case userspace can know if there is support using
	 * KVM_CAP_SMALLER_MAXPHYADDR extension and decide how to handle
	 * it.
	 * If future AMD CPU models change the behaviour described above,
	 * this variable can be changed accordingly.
	 */
	allow_smaller_maxphyaddr = !npt_enabled;

	return 0;

err:
	svm_hardware_teardown();
	return r;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 g_tsc_offset = 0;

	if (is_guest_mode(vcpu)) {
		/* Write L1's TSC offset. */
		g_tsc_offset = svm->vmcb->control.tsc_offset -
			       svm->nested.hsave->control.tsc_offset;
		svm->nested.hsave->control.tsc_offset = offset;
	}

	trace_kvm_write_tsc_offset(vcpu->vcpu_id,
				   svm->vmcb->control.tsc_offset - g_tsc_offset,
				   offset);

	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;

	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
	return svm->vmcb->control.tsc_offset;
}

static void svm_check_invpcid(struct vcpu_svm *svm)
{
	/*
	 * Intercept INVPCID instruction only if shadow page table is
	 * enabled. Interception is not required with nested page table
	 * enabled.
	 */
1109 */ 1110 if (kvm_cpu_cap_has(X86_FEATURE_INVPCID)) { 1111 if (!npt_enabled) 1112 svm_set_intercept(svm, INTERCEPT_INVPCID); 1113 else 1114 svm_clr_intercept(svm, INTERCEPT_INVPCID); 1115 } 1116 } 1117 1118 static void init_vmcb(struct vcpu_svm *svm) 1119 { 1120 struct vmcb_control_area *control = &svm->vmcb->control; 1121 struct vmcb_save_area *save = &svm->vmcb->save; 1122 1123 svm->vcpu.arch.hflags = 0; 1124 1125 svm_set_intercept(svm, INTERCEPT_CR0_READ); 1126 svm_set_intercept(svm, INTERCEPT_CR3_READ); 1127 svm_set_intercept(svm, INTERCEPT_CR4_READ); 1128 svm_set_intercept(svm, INTERCEPT_CR0_WRITE); 1129 svm_set_intercept(svm, INTERCEPT_CR3_WRITE); 1130 svm_set_intercept(svm, INTERCEPT_CR4_WRITE); 1131 if (!kvm_vcpu_apicv_active(&svm->vcpu)) 1132 svm_set_intercept(svm, INTERCEPT_CR8_WRITE); 1133 1134 set_dr_intercepts(svm); 1135 1136 set_exception_intercept(svm, PF_VECTOR); 1137 set_exception_intercept(svm, UD_VECTOR); 1138 set_exception_intercept(svm, MC_VECTOR); 1139 set_exception_intercept(svm, AC_VECTOR); 1140 set_exception_intercept(svm, DB_VECTOR); 1141 /* 1142 * Guest access to VMware backdoor ports could legitimately 1143 * trigger #GP because of TSS I/O permission bitmap. 1144 * We intercept those #GP and allow access to them anyway 1145 * as VMware does. 1146 */ 1147 if (enable_vmware_backdoor) 1148 set_exception_intercept(svm, GP_VECTOR); 1149 1150 svm_set_intercept(svm, INTERCEPT_INTR); 1151 svm_set_intercept(svm, INTERCEPT_NMI); 1152 svm_set_intercept(svm, INTERCEPT_SMI); 1153 svm_set_intercept(svm, INTERCEPT_SELECTIVE_CR0); 1154 svm_set_intercept(svm, INTERCEPT_RDPMC); 1155 svm_set_intercept(svm, INTERCEPT_CPUID); 1156 svm_set_intercept(svm, INTERCEPT_INVD); 1157 svm_set_intercept(svm, INTERCEPT_INVLPG); 1158 svm_set_intercept(svm, INTERCEPT_INVLPGA); 1159 svm_set_intercept(svm, INTERCEPT_IOIO_PROT); 1160 svm_set_intercept(svm, INTERCEPT_MSR_PROT); 1161 svm_set_intercept(svm, INTERCEPT_TASK_SWITCH); 1162 svm_set_intercept(svm, INTERCEPT_SHUTDOWN); 1163 svm_set_intercept(svm, INTERCEPT_VMRUN); 1164 svm_set_intercept(svm, INTERCEPT_VMMCALL); 1165 svm_set_intercept(svm, INTERCEPT_VMLOAD); 1166 svm_set_intercept(svm, INTERCEPT_VMSAVE); 1167 svm_set_intercept(svm, INTERCEPT_STGI); 1168 svm_set_intercept(svm, INTERCEPT_CLGI); 1169 svm_set_intercept(svm, INTERCEPT_SKINIT); 1170 svm_set_intercept(svm, INTERCEPT_WBINVD); 1171 svm_set_intercept(svm, INTERCEPT_XSETBV); 1172 svm_set_intercept(svm, INTERCEPT_RDPRU); 1173 svm_set_intercept(svm, INTERCEPT_RSM); 1174 1175 if (!kvm_mwait_in_guest(svm->vcpu.kvm)) { 1176 svm_set_intercept(svm, INTERCEPT_MONITOR); 1177 svm_set_intercept(svm, INTERCEPT_MWAIT); 1178 } 1179 1180 if (!kvm_hlt_in_guest(svm->vcpu.kvm)) 1181 svm_set_intercept(svm, INTERCEPT_HLT); 1182 1183 control->iopm_base_pa = __sme_set(iopm_base); 1184 control->msrpm_base_pa = __sme_set(__pa(svm->msrpm)); 1185 control->int_ctl = V_INTR_MASKING_MASK; 1186 1187 init_seg(&save->es); 1188 init_seg(&save->ss); 1189 init_seg(&save->ds); 1190 init_seg(&save->fs); 1191 init_seg(&save->gs); 1192 1193 save->cs.selector = 0xf000; 1194 save->cs.base = 0xffff0000; 1195 /* Executable/Readable Code Segment */ 1196 save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK | 1197 SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK; 1198 save->cs.limit = 0xffff; 1199 1200 save->gdtr.limit = 0xffff; 1201 save->idtr.limit = 0xffff; 1202 1203 init_sys_seg(&save->ldtr, SEG_TYPE_LDT); 1204 init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16); 1205 1206 svm_set_efer(&svm->vcpu, 0); 1207 save->dr6 = 
	save->dr6 = 0xffff0ff0;
	kvm_set_rflags(&svm->vcpu, X86_EFLAGS_FIXED);
	save->rip = 0x0000fff0;
	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

	/*
	 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
	 * It also updates the guest-visible cr0 value.
	 */
	svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
	kvm_mmu_reset_context(&svm->vcpu);

	save->cr4 = X86_CR4_PAE;
	/* rdx = ?? */

	if (npt_enabled) {
		/* Setup VMCB for Nested Paging */
		control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
		svm_clr_intercept(svm, INTERCEPT_INVLPG);
		clr_exception_intercept(svm, PF_VECTOR);
		svm_clr_intercept(svm, INTERCEPT_CR3_READ);
		svm_clr_intercept(svm, INTERCEPT_CR3_WRITE);
		save->g_pat = svm->vcpu.arch.pat;
		save->cr3 = 0;
		save->cr4 = 0;
	}
	svm->asid_generation = 0;
	svm->asid = 0;

	svm->nested.vmcb12_gpa = 0;
	svm->vcpu.arch.hflags = 0;

	if (!kvm_pause_in_guest(svm->vcpu.kvm)) {
		control->pause_filter_count = pause_filter_count;
		if (pause_filter_thresh)
			control->pause_filter_thresh = pause_filter_thresh;
		svm_set_intercept(svm, INTERCEPT_PAUSE);
	} else {
		svm_clr_intercept(svm, INTERCEPT_PAUSE);
	}

	svm_check_invpcid(svm);

	if (kvm_vcpu_apicv_active(&svm->vcpu))
		avic_init_vmcb(svm);

	/*
	 * If hardware supports Virtual VMLOAD VMSAVE then enable it
	 * in VMCB and clear intercepts to avoid #VMEXIT.
	 */
	if (vls) {
		svm_clr_intercept(svm, INTERCEPT_VMLOAD);
		svm_clr_intercept(svm, INTERCEPT_VMSAVE);
		svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
	}

	if (vgif) {
		svm_clr_intercept(svm, INTERCEPT_STGI);
		svm_clr_intercept(svm, INTERCEPT_CLGI);
		svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
	}

	if (sev_guest(svm->vcpu.kvm)) {
		svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
		clr_exception_intercept(svm, UD_VECTOR);

		if (sev_es_guest(svm->vcpu.kvm)) {
			/* Perform SEV-ES specific VMCB updates */
			sev_es_init_vmcb(svm);
		}
	}

	vmcb_mark_all_dirty(svm->vmcb);

	enable_gif(svm);

}

static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 dummy;
	u32 eax = 1;

	svm->spec_ctrl = 0;
	svm->virt_spec_ctrl = 0;

	if (!init_event) {
		svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
					   MSR_IA32_APICBASE_ENABLE;
		if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
			svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
	}
	init_vmcb(svm);

	kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, false);
	kvm_rdx_write(vcpu, eax);

	if (kvm_vcpu_apicv_active(vcpu) && !init_event)
		avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE);
}

static int svm_create_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm;
	struct page *vmcb_page;
	struct page *vmsa_page = NULL;
	int err;

	BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);
	svm = to_svm(vcpu);

	err = -ENOMEM;
	vmcb_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!vmcb_page)
		goto out;

	if (sev_es_guest(svm->vcpu.kvm)) {
		/*
		 * SEV-ES guests require a separate VMSA page used to contain
		 * the encrypted register state of the guest.
		 */
1328 */ 1329 vmsa_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); 1330 if (!vmsa_page) 1331 goto error_free_vmcb_page; 1332 1333 /* 1334 * SEV-ES guests maintain an encrypted version of their FPU 1335 * state which is restored and saved on VMRUN and VMEXIT. 1336 * Free the fpu structure to prevent KVM from attempting to 1337 * access the FPU state. 1338 */ 1339 kvm_free_guest_fpu(vcpu); 1340 } 1341 1342 err = avic_init_vcpu(svm); 1343 if (err) 1344 goto error_free_vmsa_page; 1345 1346 /* We initialize this flag to true to make sure that the is_running 1347 * bit would be set the first time the vcpu is loaded. 1348 */ 1349 if (irqchip_in_kernel(vcpu->kvm) && kvm_apicv_activated(vcpu->kvm)) 1350 svm->avic_is_running = true; 1351 1352 svm->msrpm = svm_vcpu_alloc_msrpm(); 1353 if (!svm->msrpm) { 1354 err = -ENOMEM; 1355 goto error_free_vmsa_page; 1356 } 1357 1358 svm_vcpu_init_msrpm(vcpu, svm->msrpm); 1359 1360 svm->vmcb = page_address(vmcb_page); 1361 svm->vmcb_pa = __sme_set(page_to_pfn(vmcb_page) << PAGE_SHIFT); 1362 1363 if (vmsa_page) 1364 svm->vmsa = page_address(vmsa_page); 1365 1366 svm->asid_generation = 0; 1367 svm->guest_state_loaded = false; 1368 init_vmcb(svm); 1369 1370 svm_init_osvw(vcpu); 1371 vcpu->arch.microcode_version = 0x01000065; 1372 1373 if (sev_es_guest(svm->vcpu.kvm)) 1374 /* Perform SEV-ES specific VMCB creation updates */ 1375 sev_es_create_vcpu(svm); 1376 1377 return 0; 1378 1379 error_free_vmsa_page: 1380 if (vmsa_page) 1381 __free_page(vmsa_page); 1382 error_free_vmcb_page: 1383 __free_page(vmcb_page); 1384 out: 1385 return err; 1386 } 1387 1388 static void svm_clear_current_vmcb(struct vmcb *vmcb) 1389 { 1390 int i; 1391 1392 for_each_online_cpu(i) 1393 cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL); 1394 } 1395 1396 static void svm_free_vcpu(struct kvm_vcpu *vcpu) 1397 { 1398 struct vcpu_svm *svm = to_svm(vcpu); 1399 1400 /* 1401 * The vmcb page can be recycled, causing a false negative in 1402 * svm_vcpu_load(). So, ensure that no logical CPU has this 1403 * vmcb page recorded as its current vmcb. 1404 */ 1405 svm_clear_current_vmcb(svm->vmcb); 1406 1407 svm_free_nested(svm); 1408 1409 sev_free_vcpu(vcpu); 1410 1411 __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT)); 1412 __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER); 1413 } 1414 1415 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) 1416 { 1417 struct vcpu_svm *svm = to_svm(vcpu); 1418 struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu); 1419 unsigned int i; 1420 1421 if (svm->guest_state_loaded) 1422 return; 1423 1424 /* 1425 * Certain MSRs are restored on VMEXIT (sev-es), or vmload of host save 1426 * area (non-sev-es). Save ones that aren't so we can restore them 1427 * individually later. 1428 */ 1429 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) 1430 rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); 1431 1432 /* 1433 * Save additional host state that will be restored on VMEXIT (sev-es) 1434 * or subsequent vmload of host save area. 
1435 */ 1436 if (sev_es_guest(svm->vcpu.kvm)) { 1437 sev_es_prepare_guest_switch(svm, vcpu->cpu); 1438 } else { 1439 vmsave(__sme_page_pa(sd->save_area)); 1440 } 1441 1442 if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) { 1443 u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio; 1444 if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) { 1445 __this_cpu_write(current_tsc_ratio, tsc_ratio); 1446 wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio); 1447 } 1448 } 1449 1450 /* This assumes that the kernel never uses MSR_TSC_AUX */ 1451 if (static_cpu_has(X86_FEATURE_RDTSCP)) 1452 wrmsrl(MSR_TSC_AUX, svm->tsc_aux); 1453 1454 svm->guest_state_loaded = true; 1455 } 1456 1457 static void svm_prepare_host_switch(struct kvm_vcpu *vcpu) 1458 { 1459 struct vcpu_svm *svm = to_svm(vcpu); 1460 unsigned int i; 1461 1462 if (!svm->guest_state_loaded) 1463 return; 1464 1465 /* 1466 * Certain MSRs are restored on VMEXIT (sev-es), or vmload of host save 1467 * area (non-sev-es). Restore the ones that weren't. 1468 */ 1469 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) 1470 wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); 1471 1472 svm->guest_state_loaded = false; 1473 } 1474 1475 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 1476 { 1477 struct vcpu_svm *svm = to_svm(vcpu); 1478 struct svm_cpu_data *sd = per_cpu(svm_data, cpu); 1479 1480 if (unlikely(cpu != vcpu->cpu)) { 1481 svm->asid_generation = 0; 1482 vmcb_mark_all_dirty(svm->vmcb); 1483 } 1484 1485 if (sd->current_vmcb != svm->vmcb) { 1486 sd->current_vmcb = svm->vmcb; 1487 indirect_branch_prediction_barrier(); 1488 } 1489 avic_vcpu_load(vcpu, cpu); 1490 } 1491 1492 static void svm_vcpu_put(struct kvm_vcpu *vcpu) 1493 { 1494 avic_vcpu_put(vcpu); 1495 svm_prepare_host_switch(vcpu); 1496 1497 ++vcpu->stat.host_state_reload; 1498 } 1499 1500 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu) 1501 { 1502 struct vcpu_svm *svm = to_svm(vcpu); 1503 unsigned long rflags = svm->vmcb->save.rflags; 1504 1505 if (svm->nmi_singlestep) { 1506 /* Hide our flags if they were not set by the guest */ 1507 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF)) 1508 rflags &= ~X86_EFLAGS_TF; 1509 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF)) 1510 rflags &= ~X86_EFLAGS_RF; 1511 } 1512 return rflags; 1513 } 1514 1515 static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) 1516 { 1517 if (to_svm(vcpu)->nmi_singlestep) 1518 rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); 1519 1520 /* 1521 * Any change of EFLAGS.VM is accompanied by a reload of SS 1522 * (caused by either a task switch or an inter-privilege IRET), 1523 * so we do not need to update the CPL here. 1524 */ 1525 to_svm(vcpu)->vmcb->save.rflags = rflags; 1526 } 1527 1528 static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) 1529 { 1530 switch (reg) { 1531 case VCPU_EXREG_PDPTR: 1532 BUG_ON(!npt_enabled); 1533 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); 1534 break; 1535 default: 1536 WARN_ON_ONCE(1); 1537 } 1538 } 1539 1540 static void svm_set_vintr(struct vcpu_svm *svm) 1541 { 1542 struct vmcb_control_area *control; 1543 1544 /* The following fields are ignored when AVIC is enabled */ 1545 WARN_ON(kvm_vcpu_apicv_active(&svm->vcpu)); 1546 svm_set_intercept(svm, INTERCEPT_VINTR); 1547 1548 /* 1549 * This is just a dummy VINTR to actually cause a vmexit to happen. 1550 * Actual injection of virtual interrupts happens through EVENTINJ. 
1551 */ 1552 control = &svm->vmcb->control; 1553 control->int_vector = 0x0; 1554 control->int_ctl &= ~V_INTR_PRIO_MASK; 1555 control->int_ctl |= V_IRQ_MASK | 1556 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT); 1557 vmcb_mark_dirty(svm->vmcb, VMCB_INTR); 1558 } 1559 1560 static void svm_clear_vintr(struct vcpu_svm *svm) 1561 { 1562 const u32 mask = V_TPR_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK | V_INTR_MASKING_MASK; 1563 svm_clr_intercept(svm, INTERCEPT_VINTR); 1564 1565 /* Drop int_ctl fields related to VINTR injection. */ 1566 svm->vmcb->control.int_ctl &= mask; 1567 if (is_guest_mode(&svm->vcpu)) { 1568 svm->nested.hsave->control.int_ctl &= mask; 1569 1570 WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) != 1571 (svm->nested.ctl.int_ctl & V_TPR_MASK)); 1572 svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & ~mask; 1573 } 1574 1575 vmcb_mark_dirty(svm->vmcb, VMCB_INTR); 1576 } 1577 1578 static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg) 1579 { 1580 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; 1581 1582 switch (seg) { 1583 case VCPU_SREG_CS: return &save->cs; 1584 case VCPU_SREG_DS: return &save->ds; 1585 case VCPU_SREG_ES: return &save->es; 1586 case VCPU_SREG_FS: return &save->fs; 1587 case VCPU_SREG_GS: return &save->gs; 1588 case VCPU_SREG_SS: return &save->ss; 1589 case VCPU_SREG_TR: return &save->tr; 1590 case VCPU_SREG_LDTR: return &save->ldtr; 1591 } 1592 BUG(); 1593 return NULL; 1594 } 1595 1596 static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg) 1597 { 1598 struct vmcb_seg *s = svm_seg(vcpu, seg); 1599 1600 return s->base; 1601 } 1602 1603 static void svm_get_segment(struct kvm_vcpu *vcpu, 1604 struct kvm_segment *var, int seg) 1605 { 1606 struct vmcb_seg *s = svm_seg(vcpu, seg); 1607 1608 var->base = s->base; 1609 var->limit = s->limit; 1610 var->selector = s->selector; 1611 var->type = s->attrib & SVM_SELECTOR_TYPE_MASK; 1612 var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1; 1613 var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3; 1614 var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1; 1615 var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1; 1616 var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1; 1617 var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1; 1618 1619 /* 1620 * AMD CPUs circa 2014 track the G bit for all segments except CS. 1621 * However, the SVM spec states that the G bit is not observed by the 1622 * CPU, and some VMware virtual CPUs drop the G bit for all segments. 1623 * So let's synthesize a legal G bit for all segments, this helps 1624 * running KVM nested. It also helps cross-vendor migration, because 1625 * Intel's vmentry has a check on the 'G' bit. 1626 */ 1627 var->g = s->limit > 0xfffff; 1628 1629 /* 1630 * AMD's VMCB does not have an explicit unusable field, so emulate it 1631 * for cross vendor migration purposes by "not present" 1632 */ 1633 var->unusable = !var->present; 1634 1635 switch (seg) { 1636 case VCPU_SREG_TR: 1637 /* 1638 * Work around a bug where the busy flag in the tr selector 1639 * isn't exposed 1640 */ 1641 var->type |= 0x2; 1642 break; 1643 case VCPU_SREG_DS: 1644 case VCPU_SREG_ES: 1645 case VCPU_SREG_FS: 1646 case VCPU_SREG_GS: 1647 /* 1648 * The accessed bit must always be set in the segment 1649 * descriptor cache, although it can be cleared in the 1650 * descriptor, the cached bit always remains at 1. Since 1651 * Intel has a check on this, set it here to support 1652 * cross-vendor migration. 
1653 */ 1654 if (!var->unusable) 1655 var->type |= 0x1; 1656 break; 1657 case VCPU_SREG_SS: 1658 /* 1659 * On AMD CPUs sometimes the DB bit in the segment 1660 * descriptor is left as 1, although the whole segment has 1661 * been made unusable. Clear it here to pass an Intel VMX 1662 * entry check when cross vendor migrating. 1663 */ 1664 if (var->unusable) 1665 var->db = 0; 1666 /* This is symmetric with svm_set_segment() */ 1667 var->dpl = to_svm(vcpu)->vmcb->save.cpl; 1668 break; 1669 } 1670 } 1671 1672 static int svm_get_cpl(struct kvm_vcpu *vcpu) 1673 { 1674 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; 1675 1676 return save->cpl; 1677 } 1678 1679 static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 1680 { 1681 struct vcpu_svm *svm = to_svm(vcpu); 1682 1683 dt->size = svm->vmcb->save.idtr.limit; 1684 dt->address = svm->vmcb->save.idtr.base; 1685 } 1686 1687 static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 1688 { 1689 struct vcpu_svm *svm = to_svm(vcpu); 1690 1691 svm->vmcb->save.idtr.limit = dt->size; 1692 svm->vmcb->save.idtr.base = dt->address ; 1693 vmcb_mark_dirty(svm->vmcb, VMCB_DT); 1694 } 1695 1696 static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 1697 { 1698 struct vcpu_svm *svm = to_svm(vcpu); 1699 1700 dt->size = svm->vmcb->save.gdtr.limit; 1701 dt->address = svm->vmcb->save.gdtr.base; 1702 } 1703 1704 static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 1705 { 1706 struct vcpu_svm *svm = to_svm(vcpu); 1707 1708 svm->vmcb->save.gdtr.limit = dt->size; 1709 svm->vmcb->save.gdtr.base = dt->address ; 1710 vmcb_mark_dirty(svm->vmcb, VMCB_DT); 1711 } 1712 1713 static void update_cr0_intercept(struct vcpu_svm *svm) 1714 { 1715 ulong gcr0; 1716 u64 *hcr0; 1717 1718 /* 1719 * SEV-ES guests must always keep the CR intercepts cleared. CR 1720 * tracking is done using the CR write traps. 
1721 */ 1722 if (sev_es_guest(svm->vcpu.kvm)) 1723 return; 1724 1725 gcr0 = svm->vcpu.arch.cr0; 1726 hcr0 = &svm->vmcb->save.cr0; 1727 *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK) 1728 | (gcr0 & SVM_CR0_SELECTIVE_MASK); 1729 1730 vmcb_mark_dirty(svm->vmcb, VMCB_CR); 1731 1732 if (gcr0 == *hcr0) { 1733 svm_clr_intercept(svm, INTERCEPT_CR0_READ); 1734 svm_clr_intercept(svm, INTERCEPT_CR0_WRITE); 1735 } else { 1736 svm_set_intercept(svm, INTERCEPT_CR0_READ); 1737 svm_set_intercept(svm, INTERCEPT_CR0_WRITE); 1738 } 1739 } 1740 1741 void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) 1742 { 1743 struct vcpu_svm *svm = to_svm(vcpu); 1744 1745 #ifdef CONFIG_X86_64 1746 if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) { 1747 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { 1748 vcpu->arch.efer |= EFER_LMA; 1749 svm->vmcb->save.efer |= EFER_LMA | EFER_LME; 1750 } 1751 1752 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) { 1753 vcpu->arch.efer &= ~EFER_LMA; 1754 svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME); 1755 } 1756 } 1757 #endif 1758 vcpu->arch.cr0 = cr0; 1759 1760 if (!npt_enabled) 1761 cr0 |= X86_CR0_PG | X86_CR0_WP; 1762 1763 /* 1764 * re-enable caching here because the QEMU bios 1765 * does not do it - this results in some delay at 1766 * reboot 1767 */ 1768 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) 1769 cr0 &= ~(X86_CR0_CD | X86_CR0_NW); 1770 svm->vmcb->save.cr0 = cr0; 1771 vmcb_mark_dirty(svm->vmcb, VMCB_CR); 1772 update_cr0_intercept(svm); 1773 } 1774 1775 static bool svm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 1776 { 1777 return true; 1778 } 1779 1780 void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 1781 { 1782 unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE; 1783 unsigned long old_cr4 = vcpu->arch.cr4; 1784 1785 if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE)) 1786 svm_flush_tlb(vcpu); 1787 1788 vcpu->arch.cr4 = cr4; 1789 if (!npt_enabled) 1790 cr4 |= X86_CR4_PAE; 1791 cr4 |= host_cr4_mce; 1792 to_svm(vcpu)->vmcb->save.cr4 = cr4; 1793 vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR); 1794 1795 if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE)) 1796 kvm_update_cpuid_runtime(vcpu); 1797 } 1798 1799 static void svm_set_segment(struct kvm_vcpu *vcpu, 1800 struct kvm_segment *var, int seg) 1801 { 1802 struct vcpu_svm *svm = to_svm(vcpu); 1803 struct vmcb_seg *s = svm_seg(vcpu, seg); 1804 1805 s->base = var->base; 1806 s->limit = var->limit; 1807 s->selector = var->selector; 1808 s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK); 1809 s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT; 1810 s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT; 1811 s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT; 1812 s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT; 1813 s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT; 1814 s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT; 1815 s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT; 1816 1817 /* 1818 * This is always accurate, except if SYSRET returned to a segment 1819 * with SS.DPL != 3. Intel does not have this quirk, and always 1820 * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it 1821 * would entail passing the CPL to userspace and back. 
1822 */ 1823 if (seg == VCPU_SREG_SS) 1824 /* This is symmetric with svm_get_segment() */ 1825 svm->vmcb->save.cpl = (var->dpl & 3); 1826 1827 vmcb_mark_dirty(svm->vmcb, VMCB_SEG); 1828 } 1829 1830 static void svm_update_exception_bitmap(struct kvm_vcpu *vcpu) 1831 { 1832 struct vcpu_svm *svm = to_svm(vcpu); 1833 1834 clr_exception_intercept(svm, BP_VECTOR); 1835 1836 if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) { 1837 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) 1838 set_exception_intercept(svm, BP_VECTOR); 1839 } 1840 } 1841 1842 static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd) 1843 { 1844 if (sd->next_asid > sd->max_asid) { 1845 ++sd->asid_generation; 1846 sd->next_asid = sd->min_asid; 1847 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; 1848 vmcb_mark_dirty(svm->vmcb, VMCB_ASID); 1849 } 1850 1851 svm->asid_generation = sd->asid_generation; 1852 svm->asid = sd->next_asid++; 1853 } 1854 1855 static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value) 1856 { 1857 struct vmcb *vmcb = svm->vmcb; 1858 1859 if (svm->vcpu.arch.guest_state_protected) 1860 return; 1861 1862 if (unlikely(value != vmcb->save.dr6)) { 1863 vmcb->save.dr6 = value; 1864 vmcb_mark_dirty(vmcb, VMCB_DR); 1865 } 1866 } 1867 1868 static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) 1869 { 1870 struct vcpu_svm *svm = to_svm(vcpu); 1871 1872 if (vcpu->arch.guest_state_protected) 1873 return; 1874 1875 get_debugreg(vcpu->arch.db[0], 0); 1876 get_debugreg(vcpu->arch.db[1], 1); 1877 get_debugreg(vcpu->arch.db[2], 2); 1878 get_debugreg(vcpu->arch.db[3], 3); 1879 /* 1880 * We cannot reset svm->vmcb->save.dr6 to DR6_ACTIVE_LOW here, 1881 * because db_interception might need it. We can do it before vmentry. 1882 */ 1883 vcpu->arch.dr6 = svm->vmcb->save.dr6; 1884 vcpu->arch.dr7 = svm->vmcb->save.dr7; 1885 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; 1886 set_dr_intercepts(svm); 1887 } 1888 1889 static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value) 1890 { 1891 struct vcpu_svm *svm = to_svm(vcpu); 1892 1893 if (vcpu->arch.guest_state_protected) 1894 return; 1895 1896 svm->vmcb->save.dr7 = value; 1897 vmcb_mark_dirty(svm->vmcb, VMCB_DR); 1898 } 1899 1900 static int pf_interception(struct vcpu_svm *svm) 1901 { 1902 u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2); 1903 u64 error_code = svm->vmcb->control.exit_info_1; 1904 1905 return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address, 1906 static_cpu_has(X86_FEATURE_DECODEASSISTS) ? 1907 svm->vmcb->control.insn_bytes : NULL, 1908 svm->vmcb->control.insn_len); 1909 } 1910 1911 static int npf_interception(struct vcpu_svm *svm) 1912 { 1913 u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2); 1914 u64 error_code = svm->vmcb->control.exit_info_1; 1915 1916 trace_kvm_page_fault(fault_address, error_code); 1917 return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code, 1918 static_cpu_has(X86_FEATURE_DECODEASSISTS) ? 
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
{
	if (sd->next_asid > sd->max_asid) {
		++sd->asid_generation;
		sd->next_asid = sd->min_asid;
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
		vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
	}

	svm->asid_generation = sd->asid_generation;
	svm->asid = sd->next_asid++;
}

static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value)
{
	struct vmcb *vmcb = svm->vmcb;

	if (svm->vcpu.arch.guest_state_protected)
		return;

	if (unlikely(value != vmcb->save.dr6)) {
		vmcb->save.dr6 = value;
		vmcb_mark_dirty(vmcb, VMCB_DR);
	}
}

static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (vcpu->arch.guest_state_protected)
		return;

	get_debugreg(vcpu->arch.db[0], 0);
	get_debugreg(vcpu->arch.db[1], 1);
	get_debugreg(vcpu->arch.db[2], 2);
	get_debugreg(vcpu->arch.db[3], 3);
	/*
	 * We cannot reset svm->vmcb->save.dr6 to DR6_ACTIVE_LOW here,
	 * because db_interception might need it. We can do it before vmentry.
	 */
	vcpu->arch.dr6 = svm->vmcb->save.dr6;
	vcpu->arch.dr7 = svm->vmcb->save.dr7;
	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
	set_dr_intercepts(svm);
}

static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (vcpu->arch.guest_state_protected)
		return;

	svm->vmcb->save.dr7 = value;
	vmcb_mark_dirty(svm->vmcb, VMCB_DR);
}

static int pf_interception(struct vcpu_svm *svm)
{
	u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
	u64 error_code = svm->vmcb->control.exit_info_1;

	return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
			static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
			svm->vmcb->control.insn_bytes : NULL,
			svm->vmcb->control.insn_len);
}

static int npf_interception(struct vcpu_svm *svm)
{
	u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
	u64 error_code = svm->vmcb->control.exit_info_1;

	trace_kvm_page_fault(fault_address, error_code);
	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
			static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
			svm->vmcb->control.insn_bytes : NULL,
			svm->vmcb->control.insn_len);
}

static int db_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;
	struct kvm_vcpu *vcpu = &svm->vcpu;

	if (!(svm->vcpu.guest_debug &
	      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
		!svm->nmi_singlestep) {
		u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW;
		kvm_queue_exception_p(&svm->vcpu, DB_VECTOR, payload);
		return 1;
	}

	if (svm->nmi_singlestep) {
		disable_nmi_singlestep(svm);
		/* Make sure we check for pending NMIs upon entry */
		kvm_make_request(KVM_REQ_EVENT, vcpu);
	}

	if (svm->vcpu.guest_debug &
	    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
		kvm_run->exit_reason = KVM_EXIT_DEBUG;
		kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6;
		kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7;
		kvm_run->debug.arch.pc =
			svm->vmcb->save.cs.base + svm->vmcb->save.rip;
		kvm_run->debug.arch.exception = DB_VECTOR;
		return 0;
	}

	return 1;
}

static int bp_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;

	kvm_run->exit_reason = KVM_EXIT_DEBUG;
	kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
	kvm_run->debug.arch.exception = BP_VECTOR;
	return 0;
}

static int ud_interception(struct vcpu_svm *svm)
{
	return handle_ud(&svm->vcpu);
}

static int ac_interception(struct vcpu_svm *svm)
{
	kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
	return 1;
}

static bool is_erratum_383(void)
{
	int err, i;
	u64 value;

	if (!erratum_383_found)
		return false;

	value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
	if (err)
		return false;

	/* Bit 62 may or may not be set for this mce */
	value &= ~(1ULL << 62);

	if (value != 0xb600000000010015ULL)
		return false;

	/* Clear MCi_STATUS registers */
	for (i = 0; i < 6; ++i)
		native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);

	value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
	if (!err) {
		u32 low, high;

		value &= ~(1ULL << 2);
		low    = lower_32_bits(value);
		high   = upper_32_bits(value);

		native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
	}

	/* Flush tlb to evict multi-match entries */
	__flush_tlb_all();

	return true;
}

static void svm_handle_mce(struct vcpu_svm *svm)
{
	if (is_erratum_383()) {
		/*
		 * Erratum 383 triggered. Guest state is corrupt so kill the
		 * guest.
		 */
		pr_err("KVM: Guest triggered AMD Erratum 383\n");

		kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);

		return;
	}

	/*
	 * On an #MC intercept the MCE handler is not called automatically in
	 * the host. So do it by hand here.
	 */
	kvm_machine_check();
}

static int mc_interception(struct vcpu_svm *svm)
{
	return 1;
}

static int shutdown_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;

	/*
	 * The VM save area has already been encrypted so it
	 * cannot be reinitialized - just terminate.
	 */
	if (sev_es_guest(svm->vcpu.kvm))
		return -EINVAL;

	/*
	 * VMCB is undefined after a SHUTDOWN intercept
	 * so reinitialize it.
	 */
	clear_page(svm->vmcb);
	init_vmcb(svm);

	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}
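
/*
 * For IN/OUT intercepts, exit_info_1 encodes the port, operand size and
 * direction, while exit_info_2 holds the rIP of the instruction
 * following the IN/OUT; the latter becomes next_rip for the fast path.
 */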
static int mc_interception(struct vcpu_svm *svm)
{
	return 1;
}

static int shutdown_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;

	/*
	 * The VM save area has already been encrypted so it
	 * cannot be reinitialized - just terminate.
	 */
	if (sev_es_guest(svm->vcpu.kvm))
		return -EINVAL;

	/*
	 * VMCB is undefined after a SHUTDOWN intercept
	 * so reinitialize it.
	 */
	clear_page(svm->vmcb);
	init_vmcb(svm);

	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

static int io_interception(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
	int size, in, string;
	unsigned port;

	++svm->vcpu.stat.io_exits;
	string = (io_info & SVM_IOIO_STR_MASK) != 0;
	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
	port = io_info >> 16;
	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;

	if (string) {
		if (sev_es_guest(vcpu->kvm))
			return sev_es_string_io(svm, size, port, in);
		else
			return kvm_emulate_instruction(vcpu, 0);
	}

	svm->next_rip = svm->vmcb->control.exit_info_2;

	return kvm_fast_pio(&svm->vcpu, size, port, in);
}

static int nmi_interception(struct vcpu_svm *svm)
{
	return 1;
}

static int intr_interception(struct vcpu_svm *svm)
{
	++svm->vcpu.stat.irq_exits;
	return 1;
}

static int nop_on_interception(struct vcpu_svm *svm)
{
	return 1;
}

static int halt_interception(struct vcpu_svm *svm)
{
	return kvm_emulate_halt(&svm->vcpu);
}

static int vmmcall_interception(struct vcpu_svm *svm)
{
	return kvm_emulate_hypercall(&svm->vcpu);
}

static int vmload_interception(struct vcpu_svm *svm)
{
	struct vmcb *nested_vmcb;
	struct kvm_host_map map;
	int ret;

	if (nested_svm_check_permissions(svm))
		return 1;

	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
	if (ret) {
		if (ret == -EINVAL)
			kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	nested_vmcb = map.hva;

	ret = kvm_skip_emulated_instruction(&svm->vcpu);

	nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	return ret;
}

static int vmsave_interception(struct vcpu_svm *svm)
{
	struct vmcb *nested_vmcb;
	struct kvm_host_map map;
	int ret;

	if (nested_svm_check_permissions(svm))
		return 1;

	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
	if (ret) {
		if (ret == -EINVAL)
			kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	nested_vmcb = map.hva;

	ret = kvm_skip_emulated_instruction(&svm->vcpu);

	nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	return ret;
}

static int vmrun_interception(struct vcpu_svm *svm)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	return nested_svm_vmrun(svm);
}

enum {
	NONE_SVM_INSTR,
	SVM_INSTR_VMRUN,
	SVM_INSTR_VMLOAD,
	SVM_INSTR_VMSAVE,
};

/* Return NONE_SVM_INSTR if not SVM instrs, otherwise return decode result */
static int svm_instr_opcode(struct kvm_vcpu *vcpu)
{
	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;

	if (ctxt->b != 0x1 || ctxt->opcode_len != 2)
		return NONE_SVM_INSTR;

	switch (ctxt->modrm) {
	case 0xd8: /* VMRUN */
		return SVM_INSTR_VMRUN;
	case 0xda: /* VMLOAD */
		return SVM_INSTR_VMLOAD;
	case 0xdb: /* VMSAVE */
		return SVM_INSTR_VMSAVE;
	default:
		break;
	}

	return NONE_SVM_INSTR;
}
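
/*
 * Emulate an SVM instruction that took a spurious #GP (see the erratum
 * description in the comment above gp_interception()): in guest mode,
 * reflect the corresponding #VMEXIT to L1; otherwise invoke the regular
 * intercept handler.
 */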
static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode)
{
	const int guest_mode_exit_codes[] = {
		[SVM_INSTR_VMRUN] = SVM_EXIT_VMRUN,
		[SVM_INSTR_VMLOAD] = SVM_EXIT_VMLOAD,
		[SVM_INSTR_VMSAVE] = SVM_EXIT_VMSAVE,
	};
	int (*const svm_instr_handlers[])(struct vcpu_svm *svm) = {
		[SVM_INSTR_VMRUN] = vmrun_interception,
		[SVM_INSTR_VMLOAD] = vmload_interception,
		[SVM_INSTR_VMSAVE] = vmsave_interception,
	};
	struct vcpu_svm *svm = to_svm(vcpu);

	if (is_guest_mode(vcpu)) {
		svm->vmcb->control.exit_code = guest_mode_exit_codes[opcode];
		svm->vmcb->control.exit_info_1 = 0;
		svm->vmcb->control.exit_info_2 = 0;

		return nested_svm_vmexit(svm);
	} else
		return svm_instr_handlers[opcode](svm);
}

/*
 * #GP handling code. Note that #GP can be triggered under the following two
 * cases:
 *   1) SVM VM-related instructions (VMRUN/VMSAVE/VMLOAD) that trigger #GP on
 *      some AMD CPUs when the EAX of these instructions is in a reserved
 *      memory region (e.g. SMM memory on the host).
 *   2) VMware backdoor
 */
static int gp_interception(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 error_code = svm->vmcb->control.exit_info_1;
	int opcode;

	/* Both #GP cases have zero error_code */
	if (error_code)
		goto reinject;

	/* Decode the instruction for usage later */
	if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
		goto reinject;

	opcode = svm_instr_opcode(vcpu);

	if (opcode == NONE_SVM_INSTR) {
		if (!enable_vmware_backdoor)
			goto reinject;

		/*
		 * VMware backdoor emulation on #GP interception only handles
		 * IN{S}, OUT{S}, and RDPMC.
		 */
		if (!is_guest_mode(vcpu))
			return kvm_emulate_instruction(vcpu,
				EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
	} else
		return emulate_svm_instr(vcpu, opcode);

reinject:
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
	return 1;
}

void svm_set_gif(struct vcpu_svm *svm, bool value)
{
	if (value) {
		/*
		 * If VGIF is enabled, the STGI intercept is only added to
		 * detect the opening of the SMI/NMI window; remove it now.
		 * Likewise, clear the VINTR intercept, we will set it
		 * again while processing KVM_REQ_EVENT if needed.
		 */
		if (vgif_enabled(svm))
			svm_clr_intercept(svm, INTERCEPT_STGI);
		if (svm_is_intercept(svm, INTERCEPT_VINTR))
			svm_clear_vintr(svm);

		enable_gif(svm);
		if (svm->vcpu.arch.smi_pending ||
		    svm->vcpu.arch.nmi_pending ||
		    kvm_cpu_has_injectable_intr(&svm->vcpu))
			kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
	} else {
		disable_gif(svm);

		/*
		 * After a CLGI no interrupts should come. But if vGIF is
		 * in use, we still rely on the VINTR intercept (rather than
		 * STGI) to detect an open interrupt window.
		 */
		if (!vgif_enabled(svm))
			svm_clear_vintr(svm);
	}
}
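
/*
 * STGI and CLGI are valid only if nested SVM is enabled (EFER.SVME set)
 * and the vCPU is at CPL 0; nested_svm_check_permissions() injects the
 * appropriate fault otherwise.
 */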
2297 */ 2298 if (!vgif_enabled(svm)) 2299 svm_clear_vintr(svm); 2300 } 2301 } 2302 2303 static int stgi_interception(struct vcpu_svm *svm) 2304 { 2305 int ret; 2306 2307 if (nested_svm_check_permissions(svm)) 2308 return 1; 2309 2310 ret = kvm_skip_emulated_instruction(&svm->vcpu); 2311 svm_set_gif(svm, true); 2312 return ret; 2313 } 2314 2315 static int clgi_interception(struct vcpu_svm *svm) 2316 { 2317 int ret; 2318 2319 if (nested_svm_check_permissions(svm)) 2320 return 1; 2321 2322 ret = kvm_skip_emulated_instruction(&svm->vcpu); 2323 svm_set_gif(svm, false); 2324 return ret; 2325 } 2326 2327 static int invlpga_interception(struct vcpu_svm *svm) 2328 { 2329 struct kvm_vcpu *vcpu = &svm->vcpu; 2330 2331 trace_kvm_invlpga(svm->vmcb->save.rip, kvm_rcx_read(&svm->vcpu), 2332 kvm_rax_read(&svm->vcpu)); 2333 2334 /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */ 2335 kvm_mmu_invlpg(vcpu, kvm_rax_read(&svm->vcpu)); 2336 2337 return kvm_skip_emulated_instruction(&svm->vcpu); 2338 } 2339 2340 static int skinit_interception(struct vcpu_svm *svm) 2341 { 2342 trace_kvm_skinit(svm->vmcb->save.rip, kvm_rax_read(&svm->vcpu)); 2343 2344 kvm_queue_exception(&svm->vcpu, UD_VECTOR); 2345 return 1; 2346 } 2347 2348 static int wbinvd_interception(struct vcpu_svm *svm) 2349 { 2350 return kvm_emulate_wbinvd(&svm->vcpu); 2351 } 2352 2353 static int xsetbv_interception(struct vcpu_svm *svm) 2354 { 2355 u64 new_bv = kvm_read_edx_eax(&svm->vcpu); 2356 u32 index = kvm_rcx_read(&svm->vcpu); 2357 2358 int err = kvm_set_xcr(&svm->vcpu, index, new_bv); 2359 return kvm_complete_insn_gp(&svm->vcpu, err); 2360 } 2361 2362 static int rdpru_interception(struct vcpu_svm *svm) 2363 { 2364 kvm_queue_exception(&svm->vcpu, UD_VECTOR); 2365 return 1; 2366 } 2367 2368 static int task_switch_interception(struct vcpu_svm *svm) 2369 { 2370 u16 tss_selector; 2371 int reason; 2372 int int_type = svm->vmcb->control.exit_int_info & 2373 SVM_EXITINTINFO_TYPE_MASK; 2374 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK; 2375 uint32_t type = 2376 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK; 2377 uint32_t idt_v = 2378 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID; 2379 bool has_error_code = false; 2380 u32 error_code = 0; 2381 2382 tss_selector = (u16)svm->vmcb->control.exit_info_1; 2383 2384 if (svm->vmcb->control.exit_info_2 & 2385 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET)) 2386 reason = TASK_SWITCH_IRET; 2387 else if (svm->vmcb->control.exit_info_2 & 2388 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP)) 2389 reason = TASK_SWITCH_JMP; 2390 else if (idt_v) 2391 reason = TASK_SWITCH_GATE; 2392 else 2393 reason = TASK_SWITCH_CALL; 2394 2395 if (reason == TASK_SWITCH_GATE) { 2396 switch (type) { 2397 case SVM_EXITINTINFO_TYPE_NMI: 2398 svm->vcpu.arch.nmi_injected = false; 2399 break; 2400 case SVM_EXITINTINFO_TYPE_EXEPT: 2401 if (svm->vmcb->control.exit_info_2 & 2402 (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) { 2403 has_error_code = true; 2404 error_code = 2405 (u32)svm->vmcb->control.exit_info_2; 2406 } 2407 kvm_clear_exception_queue(&svm->vcpu); 2408 break; 2409 case SVM_EXITINTINFO_TYPE_INTR: 2410 kvm_clear_interrupt_queue(&svm->vcpu); 2411 break; 2412 default: 2413 break; 2414 } 2415 } 2416 2417 if (reason != TASK_SWITCH_GATE || 2418 int_type == SVM_EXITINTINFO_TYPE_SOFT || 2419 (int_type == SVM_EXITINTINFO_TYPE_EXEPT && 2420 (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) { 2421 if (!skip_emulated_instruction(&svm->vcpu)) 2422 return 0; 2423 } 2424 2425 if (int_type != 
static int task_switch_interception(struct vcpu_svm *svm)
{
	u16 tss_selector;
	int reason;
	int int_type = svm->vmcb->control.exit_int_info &
		SVM_EXITINTINFO_TYPE_MASK;
	int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
	uint32_t type =
		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
	uint32_t idt_v =
		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
	bool has_error_code = false;
	u32 error_code = 0;

	tss_selector = (u16)svm->vmcb->control.exit_info_1;

	if (svm->vmcb->control.exit_info_2 &
	    (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
		reason = TASK_SWITCH_IRET;
	else if (svm->vmcb->control.exit_info_2 &
		 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
		reason = TASK_SWITCH_JMP;
	else if (idt_v)
		reason = TASK_SWITCH_GATE;
	else
		reason = TASK_SWITCH_CALL;

	if (reason == TASK_SWITCH_GATE) {
		switch (type) {
		case SVM_EXITINTINFO_TYPE_NMI:
			svm->vcpu.arch.nmi_injected = false;
			break;
		case SVM_EXITINTINFO_TYPE_EXEPT:
			if (svm->vmcb->control.exit_info_2 &
			    (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
				has_error_code = true;
				error_code =
					(u32)svm->vmcb->control.exit_info_2;
			}
			kvm_clear_exception_queue(&svm->vcpu);
			break;
		case SVM_EXITINTINFO_TYPE_INTR:
			kvm_clear_interrupt_queue(&svm->vcpu);
			break;
		default:
			break;
		}
	}

	if (reason != TASK_SWITCH_GATE ||
	    int_type == SVM_EXITINTINFO_TYPE_SOFT ||
	    (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
	     (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) {
		if (!skip_emulated_instruction(&svm->vcpu))
			return 0;
	}

	if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
		int_vec = -1;

	return kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
			       has_error_code, error_code);
}

static int cpuid_interception(struct vcpu_svm *svm)
{
	return kvm_emulate_cpuid(&svm->vcpu);
}

static int iret_interception(struct vcpu_svm *svm)
{
	++svm->vcpu.stat.nmi_window_exits;
	svm->vcpu.arch.hflags |= HF_IRET_MASK;
	if (!sev_es_guest(svm->vcpu.kvm)) {
		svm_clr_intercept(svm, INTERCEPT_IRET);
		svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
	}
	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
	return 1;
}

static int invd_interception(struct vcpu_svm *svm)
{
	/* Treat an INVD instruction as a NOP and just skip it. */
	return kvm_skip_emulated_instruction(&svm->vcpu);
}

static int invlpg_interception(struct vcpu_svm *svm)
{
	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
		return kvm_emulate_instruction(&svm->vcpu, 0);

	kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
	return kvm_skip_emulated_instruction(&svm->vcpu);
}

static int emulate_on_interception(struct vcpu_svm *svm)
{
	return kvm_emulate_instruction(&svm->vcpu, 0);
}

static int rsm_interception(struct vcpu_svm *svm)
{
	return kvm_emulate_instruction_from_buffer(&svm->vcpu, rsm_ins_bytes, 2);
}

static int rdpmc_interception(struct vcpu_svm *svm)
{
	int err;

	if (!nrips)
		return emulate_on_interception(svm);

	err = kvm_rdpmc(&svm->vcpu);
	return kvm_complete_insn_gp(&svm->vcpu, err);
}

static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
					    unsigned long val)
{
	unsigned long cr0 = svm->vcpu.arch.cr0;
	bool ret = false;

	if (!is_guest_mode(&svm->vcpu) ||
	    (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0))))
		return false;

	cr0 &= ~SVM_CR0_SELECTIVE_MASK;
	val &= ~SVM_CR0_SELECTIVE_MASK;

	if (cr0 ^ val) {
		svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
		ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
	}

	return ret;
}
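
/*
 * CR accesses are decoded here only when decode assists are supported
 * and the exit supplies a valid CR number (CR_VALID); anything else
 * falls back to full instruction emulation.
 */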
#define CR_VALID (1ULL << 63)

static int cr_interception(struct vcpu_svm *svm)
{
	int reg, cr;
	unsigned long val;
	int err;

	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
		return emulate_on_interception(svm);

	if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
		return emulate_on_interception(svm);

	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
	if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
		cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
	else
		cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;

	err = 0;
	if (cr >= 16) { /* mov to cr */
		cr -= 16;
		val = kvm_register_read(&svm->vcpu, reg);
		trace_kvm_cr_write(cr, val);
		switch (cr) {
		case 0:
			if (!check_selective_cr0_intercepted(svm, val))
				err = kvm_set_cr0(&svm->vcpu, val);
			else
				return 1;

			break;
		case 3:
			err = kvm_set_cr3(&svm->vcpu, val);
			break;
		case 4:
			err = kvm_set_cr4(&svm->vcpu, val);
			break;
		case 8:
			err = kvm_set_cr8(&svm->vcpu, val);
			break;
		default:
			WARN(1, "unhandled write to CR%d", cr);
			kvm_queue_exception(&svm->vcpu, UD_VECTOR);
			return 1;
		}
	} else { /* mov from cr */
		switch (cr) {
		case 0:
			val = kvm_read_cr0(&svm->vcpu);
			break;
		case 2:
			val = svm->vcpu.arch.cr2;
			break;
		case 3:
			val = kvm_read_cr3(&svm->vcpu);
			break;
		case 4:
			val = kvm_read_cr4(&svm->vcpu);
			break;
		case 8:
			val = kvm_get_cr8(&svm->vcpu);
			break;
		default:
			WARN(1, "unhandled read from CR%d", cr);
			kvm_queue_exception(&svm->vcpu, UD_VECTOR);
			return 1;
		}
		kvm_register_write(&svm->vcpu, reg, val);
		trace_kvm_cr_read(cr, val);
	}
	return kvm_complete_insn_gp(&svm->vcpu, err);
}

static int cr_trap(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	unsigned long old_value, new_value;
	unsigned int cr;
	int ret = 0;

	new_value = (unsigned long)svm->vmcb->control.exit_info_1;

	cr = svm->vmcb->control.exit_code - SVM_EXIT_CR0_WRITE_TRAP;
	switch (cr) {
	case 0:
		old_value = kvm_read_cr0(vcpu);
		svm_set_cr0(vcpu, new_value);

		kvm_post_set_cr0(vcpu, old_value, new_value);
		break;
	case 4:
		old_value = kvm_read_cr4(vcpu);
		svm_set_cr4(vcpu, new_value);

		kvm_post_set_cr4(vcpu, old_value, new_value);
		break;
	case 8:
		ret = kvm_set_cr8(&svm->vcpu, new_value);
		break;
	default:
		WARN(1, "unhandled CR%d write trap", cr);
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	return kvm_complete_insn_gp(vcpu, ret);
}

static int dr_interception(struct vcpu_svm *svm)
{
	int reg, dr;
	unsigned long val;
	int err = 0;

	if (svm->vcpu.guest_debug == 0) {
		/*
		 * No more DR vmexits; force a reload of the debug registers
		 * and reenter on this instruction. The next vmexit will
		 * retrieve the full state of the debug registers.
		 */
		clr_dr_intercepts(svm);
		svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
		return 1;
	}

	if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
		return emulate_on_interception(svm);

	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
	dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
	if (dr >= 16) { /* mov to DRn */
		dr -= 16;
		val = kvm_register_read(&svm->vcpu, reg);
		err = kvm_set_dr(&svm->vcpu, dr, val);
	} else {
		kvm_get_dr(&svm->vcpu, dr, &val);
		kvm_register_write(&svm->vcpu, reg, val);
	}

	return kvm_complete_insn_gp(&svm->vcpu, err);
}

static int cr8_write_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;
	int r;

	u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
	/* instruction emulation calls kvm_set_cr8() */
	r = cr_interception(svm);
	if (lapic_in_kernel(&svm->vcpu))
		return r;
	if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
		return r;
	kvm_run->exit_reason = KVM_EXIT_SET_TPR;
	return 0;
}
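
/*
 * EFER write trap, used by SEV-ES guests in place of the EFER write
 * intercept: the write has already completed and exit_info_1 holds the
 * new value, so just push it through the common MSR path.
 */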
static int efer_trap(struct vcpu_svm *svm)
{
	struct msr_data msr_info;
	int ret;

	/*
	 * Clear the EFER_SVME bit from EFER. The SVM code always sets this
	 * bit in svm_set_efer(), but __kvm_valid_efer() checks it against
	 * whether the guest has X86_FEATURE_SVM - this avoids a failure if
	 * the guest doesn't have X86_FEATURE_SVM.
	 */
	msr_info.host_initiated = false;
	msr_info.index = MSR_EFER;
	msr_info.data = svm->vmcb->control.exit_info_1 & ~EFER_SVME;
	ret = kvm_set_msr_common(&svm->vcpu, &msr_info);

	return kvm_complete_insn_gp(&svm->vcpu, ret);
}

static int svm_get_msr_feature(struct kvm_msr_entry *msr)
{
	msr->data = 0;

	switch (msr->index) {
	case MSR_F10H_DECFG:
		if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
			msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
		break;
	case MSR_IA32_PERF_CAPABILITIES:
		return 0;
	default:
		return KVM_MSR_RET_INVALID;
	}

	return 0;
}
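
/*
 * RDMSR intercept: satisfy reads from VMCB state or values cached in
 * struct vcpu_svm where possible; everything else is deferred to
 * kvm_get_msr_common().
 */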
static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (msr_info->index) {
	case MSR_STAR:
		msr_info->data = svm->vmcb->save.star;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		msr_info->data = svm->vmcb->save.lstar;
		break;
	case MSR_CSTAR:
		msr_info->data = svm->vmcb->save.cstar;
		break;
	case MSR_KERNEL_GS_BASE:
		msr_info->data = svm->vmcb->save.kernel_gs_base;
		break;
	case MSR_SYSCALL_MASK:
		msr_info->data = svm->vmcb->save.sfmask;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		msr_info->data = svm->vmcb->save.sysenter_cs;
		break;
	case MSR_IA32_SYSENTER_EIP:
		msr_info->data = svm->sysenter_eip;
		break;
	case MSR_IA32_SYSENTER_ESP:
		msr_info->data = svm->sysenter_esp;
		break;
	case MSR_TSC_AUX:
		if (!boot_cpu_has(X86_FEATURE_RDTSCP))
			return 1;
		msr_info->data = svm->tsc_aux;
		break;
	/*
	 * Nobody will change the following 5 values in the VMCB so we can
	 * safely return them on rdmsr. They will always be 0 until LBRV is
	 * implemented.
	 */
	case MSR_IA32_DEBUGCTLMSR:
		msr_info->data = svm->vmcb->save.dbgctl;
		break;
	case MSR_IA32_LASTBRANCHFROMIP:
		msr_info->data = svm->vmcb->save.br_from;
		break;
	case MSR_IA32_LASTBRANCHTOIP:
		msr_info->data = svm->vmcb->save.br_to;
		break;
	case MSR_IA32_LASTINTFROMIP:
		msr_info->data = svm->vmcb->save.last_excp_from;
		break;
	case MSR_IA32_LASTINTTOIP:
		msr_info->data = svm->vmcb->save.last_excp_to;
		break;
	case MSR_VM_HSAVE_PA:
		msr_info->data = svm->nested.hsave_msr;
		break;
	case MSR_VM_CR:
		msr_info->data = svm->nested.vm_cr_msr;
		break;
	case MSR_IA32_SPEC_CTRL:
		if (!msr_info->host_initiated &&
		    !guest_has_spec_ctrl_msr(vcpu))
			return 1;

		msr_info->data = svm->spec_ctrl;
		break;
	case MSR_AMD64_VIRT_SPEC_CTRL:
		if (!msr_info->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
			return 1;

		msr_info->data = svm->virt_spec_ctrl;
		break;
	case MSR_F15H_IC_CFG: {

		int family, model;

		family = guest_cpuid_family(vcpu);
		model = guest_cpuid_model(vcpu);

		if (family < 0 || model < 0)
			return kvm_get_msr_common(vcpu, msr_info);

		msr_info->data = 0;

		if (family == 0x15 &&
		    (model >= 0x2 && model < 0x20))
			msr_info->data = 0x1E;
		}
		break;
	case MSR_F10H_DECFG:
		msr_info->data = svm->msr_decfg;
		break;
	default:
		return kvm_get_msr_common(vcpu, msr_info);
	}
	return 0;
}

static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	if (!sev_es_guest(svm->vcpu.kvm) || !err)
		return kvm_complete_insn_gp(&svm->vcpu, err);

	ghcb_set_sw_exit_info_1(svm->ghcb, 1);
	ghcb_set_sw_exit_info_2(svm->ghcb,
				X86_TRAP_GP |
				SVM_EVTINJ_TYPE_EXEPT |
				SVM_EVTINJ_VALID);
	return 1;
}

static int rdmsr_interception(struct vcpu_svm *svm)
{
	return kvm_emulate_rdmsr(&svm->vcpu);
}

static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int svm_dis, chg_mask;

	if (data & ~SVM_VM_CR_VALID_MASK)
		return 1;

	chg_mask = SVM_VM_CR_VALID_MASK;

	if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
		chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);

	svm->nested.vm_cr_msr &= ~chg_mask;
	svm->nested.vm_cr_msr |= (data & chg_mask);

	svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;

	/* check for svm_disable while efer.svme is set */
	if (svm_dis && (vcpu->arch.efer & EFER_SVME))
		return 1;

	return 0;
}
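
/*
 * WRMSR intercept: mirror writes into the VMCB and cached fields while
 * applying the same validity checks real hardware would.
 */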
static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	u32 ecx = msr->index;
	u64 data = msr->data;
	switch (ecx) {
	case MSR_IA32_CR_PAT:
		if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
			return 1;
		vcpu->arch.pat = data;
		svm->vmcb->save.g_pat = data;
		vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
		break;
	case MSR_IA32_SPEC_CTRL:
		if (!msr->host_initiated &&
		    !guest_has_spec_ctrl_msr(vcpu))
			return 1;

		if (kvm_spec_ctrl_test_value(data))
			return 1;

		svm->spec_ctrl = data;
		if (!data)
			break;

		/*
		 * For non-nested:
		 * When it's written (to non-zero) for the first time, pass
		 * it through.
		 *
		 * For nested:
		 * The handling of the MSR bitmap for L2 guests is done in
		 * nested_svm_vmrun_msrpm.
		 * We update the L1 MSR bit as well since it will end up
		 * touching the MSR anyway now.
		 */
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
		break;
	case MSR_IA32_PRED_CMD:
		if (!msr->host_initiated &&
		    !guest_has_pred_cmd_msr(vcpu))
			return 1;

		if (data & ~PRED_CMD_IBPB)
			return 1;
		if (!boot_cpu_has(X86_FEATURE_IBPB))
			return 1;
		if (!data)
			break;

		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
		break;
	case MSR_AMD64_VIRT_SPEC_CTRL:
		if (!msr->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
			return 1;

		if (data & ~SPEC_CTRL_SSBD)
			return 1;

		svm->virt_spec_ctrl = data;
		break;
	case MSR_STAR:
		svm->vmcb->save.star = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		svm->vmcb->save.lstar = data;
		break;
	case MSR_CSTAR:
		svm->vmcb->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
		svm->vmcb->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
		svm->vmcb->save.sfmask = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		svm->vmcb->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		svm->sysenter_eip = data;
		svm->vmcb->save.sysenter_eip = data;
		break;
	case MSR_IA32_SYSENTER_ESP:
		svm->sysenter_esp = data;
		svm->vmcb->save.sysenter_esp = data;
		break;
	case MSR_TSC_AUX:
		if (!boot_cpu_has(X86_FEATURE_RDTSCP))
			return 1;

		/*
		 * This is rare, so we update the MSR here instead of using
		 * direct_access_msrs. Doing that would require a rdmsr in
		 * svm_vcpu_put.
		 */
		svm->tsc_aux = data;
		wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!boot_cpu_has(X86_FEATURE_LBRV)) {
			vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
				    __func__, data);
			break;
		}
		if (data & DEBUGCTL_RESERVED_BITS)
			return 1;

		svm->vmcb->save.dbgctl = data;
		vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
		if (data & (1ULL<<0))
			svm_enable_lbrv(vcpu);
		else
			svm_disable_lbrv(vcpu);
		break;
	case MSR_VM_HSAVE_PA:
		svm->nested.hsave_msr = data;
		break;
	case MSR_VM_CR:
		return svm_set_vm_cr(vcpu, data);
	case MSR_VM_IGNNE:
		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
		break;
	case MSR_F10H_DECFG: {
		struct kvm_msr_entry msr_entry;

		msr_entry.index = msr->index;
		if (svm_get_msr_feature(&msr_entry))
			return 1;

		/* Check the supported bits */
		if (data & ~msr_entry.data)
			return 1;

		/* Don't allow the guest to change a bit, #GP */
		if (!msr->host_initiated && (data ^ msr_entry.data))
			return 1;

		svm->msr_decfg = data;
		break;
	}
	case MSR_IA32_APICBASE:
		if (kvm_vcpu_apicv_active(vcpu))
			avic_update_vapic_bar(to_svm(vcpu), data);
		fallthrough;
	default:
		return kvm_set_msr_common(vcpu, msr);
	}
	return 0;
}

static int wrmsr_interception(struct vcpu_svm *svm)
{
	return kvm_emulate_wrmsr(&svm->vcpu);
}

static int msr_interception(struct vcpu_svm *svm)
{
	if (svm->vmcb->control.exit_info_1)
		return wrmsr_interception(svm);
	else
		return rdmsr_interception(svm);
}

static int interrupt_window_interception(struct vcpu_svm *svm)
{
	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
	svm_clear_vintr(svm);

	/*
	 * For AVIC, the only reason to end up here is ExtINTs.
	 * In this case AVIC was temporarily disabled for
	 * requesting the IRQ window and we have to re-enable it.
	 */
	svm_toggle_avic_for_irq_window(&svm->vcpu, true);

	++svm->vcpu.stat.irq_window_exits;
	return 1;
}
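
/*
 * A PAUSE-loop exit suggests the vCPU is spinning on a lock; grow the
 * PLE window and let kvm_vcpu_on_spin() yield to another vCPU of the
 * same VM.
 */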
static int pause_interception(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	bool in_kernel;

	/*
	 * CPL is not made available for an SEV-ES guest, therefore
	 * vcpu->arch.preempted_in_kernel can never be true. Just
	 * set in_kernel to false as well.
	 */
	in_kernel = !sev_es_guest(svm->vcpu.kvm) && svm_get_cpl(vcpu) == 0;

	if (!kvm_pause_in_guest(vcpu->kvm))
		grow_ple_window(vcpu);

	kvm_vcpu_on_spin(vcpu, in_kernel);
	return 1;
}

static int nop_interception(struct vcpu_svm *svm)
{
	return kvm_skip_emulated_instruction(&(svm->vcpu));
}

static int monitor_interception(struct vcpu_svm *svm)
{
	printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
	return nop_interception(svm);
}

static int mwait_interception(struct vcpu_svm *svm)
{
	printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
	return nop_interception(svm);
}

static int invpcid_interception(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	unsigned long type;
	gva_t gva;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	/*
	 * For an INVPCID intercept:
	 * EXITINFO1 provides the linear address of the memory operand.
	 * EXITINFO2 provides the contents of the register operand.
	 */
	type = svm->vmcb->control.exit_info_2;
	gva = svm->vmcb->control.exit_info_1;

	if (type > 3) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	return kvm_handle_invpcid(vcpu, type, gva);
}
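
/*
 * Exit codes index this table directly; entries left NULL are funneled
 * through svm_handle_invalid_exit().
 */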
static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
	[SVM_EXIT_READ_CR0]			= cr_interception,
	[SVM_EXIT_READ_CR3]			= cr_interception,
	[SVM_EXIT_READ_CR4]			= cr_interception,
	[SVM_EXIT_READ_CR8]			= cr_interception,
	[SVM_EXIT_CR0_SEL_WRITE]		= cr_interception,
	[SVM_EXIT_WRITE_CR0]			= cr_interception,
	[SVM_EXIT_WRITE_CR3]			= cr_interception,
	[SVM_EXIT_WRITE_CR4]			= cr_interception,
	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
	[SVM_EXIT_READ_DR0]			= dr_interception,
	[SVM_EXIT_READ_DR1]			= dr_interception,
	[SVM_EXIT_READ_DR2]			= dr_interception,
	[SVM_EXIT_READ_DR3]			= dr_interception,
	[SVM_EXIT_READ_DR4]			= dr_interception,
	[SVM_EXIT_READ_DR5]			= dr_interception,
	[SVM_EXIT_READ_DR6]			= dr_interception,
	[SVM_EXIT_READ_DR7]			= dr_interception,
	[SVM_EXIT_WRITE_DR0]			= dr_interception,
	[SVM_EXIT_WRITE_DR1]			= dr_interception,
	[SVM_EXIT_WRITE_DR2]			= dr_interception,
	[SVM_EXIT_WRITE_DR3]			= dr_interception,
	[SVM_EXIT_WRITE_DR4]			= dr_interception,
	[SVM_EXIT_WRITE_DR5]			= dr_interception,
	[SVM_EXIT_WRITE_DR6]			= dr_interception,
	[SVM_EXIT_WRITE_DR7]			= dr_interception,
	[SVM_EXIT_EXCP_BASE + DB_VECTOR]	= db_interception,
	[SVM_EXIT_EXCP_BASE + BP_VECTOR]	= bp_interception,
	[SVM_EXIT_EXCP_BASE + UD_VECTOR]	= ud_interception,
	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
	[SVM_EXIT_EXCP_BASE + MC_VECTOR]	= mc_interception,
	[SVM_EXIT_EXCP_BASE + AC_VECTOR]	= ac_interception,
	[SVM_EXIT_EXCP_BASE + GP_VECTOR]	= gp_interception,
	[SVM_EXIT_INTR]				= intr_interception,
	[SVM_EXIT_NMI]				= nmi_interception,
	[SVM_EXIT_SMI]				= nop_on_interception,
	[SVM_EXIT_INIT]				= nop_on_interception,
	[SVM_EXIT_VINTR]			= interrupt_window_interception,
	[SVM_EXIT_RDPMC]			= rdpmc_interception,
	[SVM_EXIT_CPUID]			= cpuid_interception,
	[SVM_EXIT_IRET]				= iret_interception,
	[SVM_EXIT_INVD]				= invd_interception,
	[SVM_EXIT_PAUSE]			= pause_interception,
	[SVM_EXIT_HLT]				= halt_interception,
	[SVM_EXIT_INVLPG]			= invlpg_interception,
	[SVM_EXIT_INVLPGA]			= invlpga_interception,
	[SVM_EXIT_IOIO]				= io_interception,
	[SVM_EXIT_MSR]				= msr_interception,
	[SVM_EXIT_TASK_SWITCH]			= task_switch_interception,
	[SVM_EXIT_SHUTDOWN]			= shutdown_interception,
	[SVM_EXIT_VMRUN]			= vmrun_interception,
	[SVM_EXIT_VMMCALL]			= vmmcall_interception,
	[SVM_EXIT_VMLOAD]			= vmload_interception,
	[SVM_EXIT_VMSAVE]			= vmsave_interception,
	[SVM_EXIT_STGI]				= stgi_interception,
	[SVM_EXIT_CLGI]				= clgi_interception,
	[SVM_EXIT_SKINIT]			= skinit_interception,
	[SVM_EXIT_WBINVD]			= wbinvd_interception,
	[SVM_EXIT_MONITOR]			= monitor_interception,
	[SVM_EXIT_MWAIT]			= mwait_interception,
	[SVM_EXIT_XSETBV]			= xsetbv_interception,
	[SVM_EXIT_RDPRU]			= rdpru_interception,
	[SVM_EXIT_EFER_WRITE_TRAP]		= efer_trap,
	[SVM_EXIT_CR0_WRITE_TRAP]		= cr_trap,
	[SVM_EXIT_CR4_WRITE_TRAP]		= cr_trap,
	[SVM_EXIT_CR8_WRITE_TRAP]		= cr_trap,
	[SVM_EXIT_INVPCID]			= invpcid_interception,
	[SVM_EXIT_NPF]				= npf_interception,
	[SVM_EXIT_RSM]				= rsm_interception,
	[SVM_EXIT_AVIC_INCOMPLETE_IPI]		= avic_incomplete_ipi_interception,
	[SVM_EXIT_AVIC_UNACCELERATED_ACCESS]	= avic_unaccelerated_access_interception,
	[SVM_EXIT_VMGEXIT]			= sev_handle_vmgexit,
};

static void dump_vmcb(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	if (!dump_invalid_vmcb) {
		pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
		return;
	}

	pr_err("VMCB Control Area:\n");
	pr_err("%-20s%04x\n", "cr_read:", control->intercepts[INTERCEPT_CR] & 0xffff);
	pr_err("%-20s%04x\n", "cr_write:", control->intercepts[INTERCEPT_CR] >> 16);
	pr_err("%-20s%04x\n", "dr_read:", control->intercepts[INTERCEPT_DR] & 0xffff);
	pr_err("%-20s%04x\n", "dr_write:", control->intercepts[INTERCEPT_DR] >> 16);
	pr_err("%-20s%08x\n", "exceptions:", control->intercepts[INTERCEPT_EXCEPTION]);
	pr_err("%-20s%08x %08x\n", "intercepts:",
	       control->intercepts[INTERCEPT_WORD3],
	       control->intercepts[INTERCEPT_WORD4]);
	pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
	pr_err("%-20s%d\n", "pause filter threshold:",
	       control->pause_filter_thresh);
	pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
	pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
	pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
	pr_err("%-20s%d\n", "asid:", control->asid);
	pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
	pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
	pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
	pr_err("%-20s%08x\n", "int_state:", control->int_state);
	pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
	pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
	pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
	pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
	pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
	pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
	pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
	pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
	pr_err("%-20s%016llx\n", "ghcb:", control->ghcb_gpa);
	pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
	pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
	pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
	pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
	pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
	pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
	pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
	pr_err("%-20s%016llx\n", "vmsa_pa:", control->vmsa_pa);
	pr_err("VMCB State Save Area:\n");
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "es:",
	       save->es.selector, save->es.attrib,
	       save->es.limit, save->es.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "cs:",
	       save->cs.selector, save->cs.attrib,
	       save->cs.limit, save->cs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ss:",
	       save->ss.selector, save->ss.attrib,
	       save->ss.limit, save->ss.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ds:",
	       save->ds.selector, save->ds.attrib,
	       save->ds.limit, save->ds.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "fs:",
	       save->fs.selector, save->fs.attrib,
	       save->fs.limit, save->fs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "gs:",
	       save->gs.selector, save->gs.attrib,
	       save->gs.limit, save->gs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "gdtr:",
	       save->gdtr.selector, save->gdtr.attrib,
	       save->gdtr.limit, save->gdtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ldtr:",
	       save->ldtr.selector, save->ldtr.attrib,
	       save->ldtr.limit, save->ldtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "idtr:",
	       save->idtr.selector, save->idtr.attrib,
	       save->idtr.limit, save->idtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "tr:",
	       save->tr.selector, save->tr.attrib,
	       save->tr.limit, save->tr.base);
	pr_err("cpl: %d efer: %016llx\n",
	       save->cpl, save->efer);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cr0:", save->cr0, "cr2:", save->cr2);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cr3:", save->cr3, "cr4:", save->cr4);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "dr6:", save->dr6, "dr7:", save->dr7);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "rip:", save->rip, "rflags:", save->rflags);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "rsp:", save->rsp, "rax:", save->rax);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "star:", save->star, "lstar:", save->lstar);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cstar:", save->cstar, "sfmask:", save->sfmask);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "kernel_gs_base:", save->kernel_gs_base,
	       "sysenter_cs:", save->sysenter_cs);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "sysenter_esp:", save->sysenter_esp,
	       "sysenter_eip:", save->sysenter_eip);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "br_from:", save->br_from, "br_to:", save->br_to);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "excp_from:", save->last_excp_from,
	       "excp_to:", save->last_excp_to);
}

static int svm_handle_invalid_exit(struct kvm_vcpu *vcpu, u64 exit_code)
{
	if (exit_code < ARRAY_SIZE(svm_exit_handlers) &&
	    svm_exit_handlers[exit_code])
		return 0;

	vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%llx\n", exit_code);
	dump_vmcb(vcpu);
	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
	vcpu->run->internal.ndata = 2;
	vcpu->run->internal.data[0] = exit_code;
	vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;

	return -EINVAL;
}

int svm_invoke_exit_handler(struct vcpu_svm *svm, u64 exit_code)
{
	if (svm_handle_invalid_exit(&svm->vcpu, exit_code))
		return 0;

#ifdef CONFIG_RETPOLINE
	if (exit_code == SVM_EXIT_MSR)
		return msr_interception(svm);
	else if (exit_code == SVM_EXIT_VINTR)
		return interrupt_window_interception(svm);
	else if (exit_code == SVM_EXIT_INTR)
		return intr_interception(svm);
	else if (exit_code == SVM_EXIT_HLT)
		return halt_interception(svm);
	else if (exit_code == SVM_EXIT_NPF)
		return npf_interception(svm);
#endif
	return svm_exit_handlers[exit_code](svm);
}

static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2,
			      u32 *intr_info, u32 *error_code)
{
	struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;

	*info1 = control->exit_info_1;
	*info2 = control->exit_info_2;
	*intr_info = control->exit_int_info;
	if ((*intr_info & SVM_EXITINTINFO_VALID) &&
	    (*intr_info & SVM_EXITINTINFO_VALID_ERR))
		*error_code = control->exit_int_info_err;
	else
		*error_code = 0;
}
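
/*
 * Top-level exit dispatcher: sync cached register state back from the
 * VMCB, give L1 a chance to claim the exit when running nested, then
 * hand off to the handler table above.
 */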
static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_run *kvm_run = vcpu->run;
	u32 exit_code = svm->vmcb->control.exit_code;

	trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);

	/* SEV-ES guests must use the CR write traps to track CR registers. */
	if (!sev_es_guest(vcpu->kvm)) {
		if (!svm_is_intercept(svm, INTERCEPT_CR0_WRITE))
			vcpu->arch.cr0 = svm->vmcb->save.cr0;
		if (npt_enabled)
			vcpu->arch.cr3 = svm->vmcb->save.cr3;
	}

	if (is_guest_mode(vcpu)) {
		int vmexit;

		trace_kvm_nested_vmexit(exit_code, vcpu, KVM_ISA_SVM);

		vmexit = nested_svm_exit_special(svm);

		if (vmexit == NESTED_EXIT_CONTINUE)
			vmexit = nested_svm_exit_handled(svm);

		if (vmexit == NESTED_EXIT_DONE)
			return 1;
	}

	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= svm->vmcb->control.exit_code;
		kvm_run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
		dump_vmcb(vcpu);
		return 0;
	}

	if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
	    exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
	    exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
		printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
		       "exit_code 0x%x\n",
		       __func__, svm->vmcb->control.exit_int_info,
		       exit_code);

	if (exit_fastpath != EXIT_FASTPATH_NONE)
		return 1;

	return svm_invoke_exit_handler(svm, exit_code);
}

static void reload_tss(struct kvm_vcpu *vcpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);

	sd->tss_desc->type = 9; /* available 32/64-bit TSS */
	load_TR_desc();
}

static void pre_svm_run(struct vcpu_svm *svm)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, svm->vcpu.cpu);

	if (sev_guest(svm->vcpu.kvm))
		return pre_sev_run(svm, svm->vcpu.cpu);

	/* FIXME: handle wraparound of asid_generation */
	if (svm->asid_generation != sd->asid_generation)
		new_asid(svm, sd);
}

static void svm_inject_nmi(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
	vcpu->arch.hflags |= HF_NMI_MASK;
	if (!sev_es_guest(svm->vcpu.kvm))
		svm_set_intercept(svm, INTERCEPT_IRET);
	++vcpu->stat.nmi_injections;
}

static void svm_set_irq(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	BUG_ON(!(gif_set(svm)));

	trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
	++vcpu->stat.irq_injections;

	svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
		SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
}

static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * SEV-ES guests must always keep the CR intercepts cleared. CR
	 * tracking is done using the CR write traps.
	 */
	if (sev_es_guest(vcpu->kvm))
		return;

	if (nested_svm_virtualize_tpr(vcpu))
		return;

	svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);

	if (irr == -1)
		return;

	if (tpr >= irr)
		svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
}

bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	bool ret;

	if (!gif_set(svm))
		return true;

	if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
		return false;

	ret = (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
	      (svm->vcpu.arch.hflags & HF_NMI_MASK);

	return ret;
}

static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	if (svm->nested.nested_run_pending)
		return -EBUSY;

	/* An NMI must not be injected into L2 if it's supposed to VM-Exit. */
	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
		return -EBUSY;

	return !svm_nmi_blocked(vcpu);
}

static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
}

static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (masked) {
		svm->vcpu.arch.hflags |= HF_NMI_MASK;
		if (!sev_es_guest(svm->vcpu.kvm))
			svm_set_intercept(svm, INTERCEPT_IRET);
	} else {
		svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
		if (!sev_es_guest(svm->vcpu.kvm))
			svm_clr_intercept(svm, INTERCEPT_IRET);
	}
}

bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;

	if (!gif_set(svm))
		return true;

	if (sev_es_guest(svm->vcpu.kvm)) {
		/*
		 * SEV-ES guests do not expose RFLAGS. Use the VMCB interrupt
		 * mask bit to determine the state of the IF flag.
		 */
		if (!(vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK))
			return true;
	} else if (is_guest_mode(vcpu)) {
		/* As long as interrupts are being delivered...  */
		if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
		    ? !(svm->nested.hsave->save.rflags & X86_EFLAGS_IF)
		    : !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
			return true;

		/* ... vmexits aren't blocked by the interrupt shadow */
		if (nested_exit_on_intr(svm))
			return false;
	} else {
		if (!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
			return true;
	}

	return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK);
}

static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	if (svm->nested.nested_run_pending)
		return -EBUSY;

	/*
	 * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
	 * e.g. if the IRQ arrived asynchronously after checking nested events.
	 */
	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm))
		return -EBUSY;

	return !svm_interrupt_blocked(vcpu);
}

static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
	 * 1, because that's a separate STGI/VMRUN intercept. The next time
	 * we get that intercept, this function will be called again though
	 * and we'll get the vintr intercept. However, if the vGIF feature is
	 * enabled, the STGI interception will not occur. Enable the irq
	 * window under the assumption that the hardware will set the GIF.
	 */
	if (vgif_enabled(svm) || gif_set(svm)) {
		/*
		 * IRQ window is not needed when AVIC is enabled,
		 * unless we have pending ExtINT since it cannot be injected
		 * via AVIC. In such case, we need to temporarily disable AVIC,
		 * and fallback to injecting IRQ via V_IRQ.
		 */
		svm_toggle_avic_for_irq_window(vcpu, false);
		svm_set_vintr(svm);
	}
}

static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
	    == HF_NMI_MASK)
		return; /* IRET will cause a vm exit */

	if (!gif_set(svm)) {
		if (vgif_enabled(svm))
			svm_set_intercept(svm, INTERCEPT_STGI);
		return; /* STGI will cause a vm exit */
	}

	/*
	 * Something prevents NMI from being injected. Single step over
	 * possible problem (IRET or exception injection or interrupt shadow)
	 */
	svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
	svm->nmi_singlestep = true;
	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
}

static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
	return 0;
}

static int svm_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
{
	return 0;
}

void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * Flush only the current ASID even if the TLB flush was invoked via
	 * kvm_flush_remote_tlbs(). Although flushing remote TLBs requires all
	 * ASIDs to be flushed, KVM uses a single ASID for L1 and L2, and
	 * unconditionally does a TLB flush on both nested VM-Enter and nested
	 * VM-Exit (via kvm_mmu_reset_context()).
	 */
	if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
	else
		svm->asid_generation--;
}

static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	invlpga(gva, svm->vmcb->control.asid);
}

static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (nested_svm_virtualize_tpr(vcpu))
		return;

	if (!svm_is_intercept(svm, INTERCEPT_CR8_WRITE)) {
		int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
		kvm_set_cr8(vcpu, cr8);
	}
}

static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr8;

	if (nested_svm_virtualize_tpr(vcpu) ||
	    kvm_vcpu_apicv_active(vcpu))
		return;

	cr8 = kvm_get_cr8(vcpu);
	svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
	svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
}
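
/*
 * Fold exit_int_info back into KVM's event bookkeeping so that an event
 * whose delivery was interrupted by this #VMEXIT is re-queued.
 */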
static void svm_complete_interrupts(struct vcpu_svm *svm)
{
	u8 vector;
	int type;
	u32 exitintinfo = svm->vmcb->control.exit_int_info;
	unsigned int3_injected = svm->int3_injected;

	svm->int3_injected = 0;

	/*
	 * If we've made progress since setting HF_IRET_MASK, we've
	 * executed an IRET and can allow NMI injection.
	 */
	if ((svm->vcpu.arch.hflags & HF_IRET_MASK) &&
	    (sev_es_guest(svm->vcpu.kvm) ||
	     kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip)) {
		svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
		kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
	}

	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	if (!(exitintinfo & SVM_EXITINTINFO_VALID))
		return;

	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);

	vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
	type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;

	switch (type) {
	case SVM_EXITINTINFO_TYPE_NMI:
		svm->vcpu.arch.nmi_injected = true;
		break;
	case SVM_EXITINTINFO_TYPE_EXEPT:
		/*
		 * Never re-inject a #VC exception.
		 */
		if (vector == X86_TRAP_VC)
			break;

		/*
		 * In case of software exceptions, do not reinject the vector,
		 * but re-execute the instruction instead. Rewind RIP first
		 * if we emulated INT3 before.
		 */
		if (kvm_exception_is_soft(vector)) {
			if (vector == BP_VECTOR && int3_injected &&
			    kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
				kvm_rip_write(&svm->vcpu,
					      kvm_rip_read(&svm->vcpu) -
					      int3_injected);
			break;
		}
		if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
			u32 err = svm->vmcb->control.exit_int_info_err;
			kvm_requeue_exception_e(&svm->vcpu, vector, err);

		} else
			kvm_requeue_exception(&svm->vcpu, vector);
		break;
	case SVM_EXITINTINFO_TYPE_INTR:
		kvm_queue_interrupt(&svm->vcpu, vector, false);
		break;
	default:
		break;
	}
}

static void svm_cancel_injection(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;

	control->exit_int_info = control->event_inj;
	control->exit_int_info_err = control->event_inj_err;
	control->event_inj = 0;
	svm_complete_interrupts(svm);
}

static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
{
	if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
	    to_svm(vcpu)->vmcb->control.exit_info_1)
		return handle_fastpath_set_msr_irqoff(vcpu);

	return EXIT_FASTPATH_NONE;
}
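
/*
 * noinstr region around the actual world switch: between
 * guest_enter_irqoff() and guest_exit_irqoff() only explicitly
 * annotated, non-instrumentable code may run.
 */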
static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
					struct vcpu_svm *svm)
{
	/*
	 * VMENTER enables interrupts (host state), but the kernel state is
	 * interrupts disabled when this is invoked. Also tell RCU about
	 * it. This is the same logic as for exit_to_user_mode().
	 *
	 * This ensures that e.g. latency analysis on the host observes
	 * guest mode as interrupt enabled.
	 *
	 * guest_enter_irqoff() informs context tracking about the
	 * transition to guest mode and if enabled adjusts RCU state
	 * accordingly.
	 */
	instrumentation_begin();
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	instrumentation_end();

	guest_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);

	if (sev_es_guest(svm->vcpu.kvm)) {
		__svm_sev_es_vcpu_run(svm->vmcb_pa);
	} else {
		struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);

		__svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);

		vmload(__sme_page_pa(sd->save_area));
	}

	/*
	 * VMEXIT disables interrupts (host state), but tracing and lockdep
	 * have them in state 'on' as recorded before entering guest mode.
	 * Same as enter_from_user_mode().
	 *
	 * guest_exit_irqoff() restores host context and reinstates RCU if
	 * enabled and required.
	 *
	 * This needs to be done before the below as native_read_msr()
	 * contains a tracepoint and x86_spec_ctrl_restore_host() calls
	 * into world and some more.
	 */
	lockdep_hardirqs_off(CALLER_ADDR0);
	guest_exit_irqoff();

	instrumentation_begin();
	trace_hardirqs_off_finish();
	instrumentation_end();
}
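
/*
 * One VMRUN round trip: stage guest state into the VMCB, enter the
 * guest, then restore host state and complete any event injection that
 * the exit interrupted.
 */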
3884 * 3885 * For non-nested case: 3886 * If the L01 MSR bitmap does not intercept the MSR, then we need to 3887 * save it. 3888 * 3889 * For nested case: 3890 * If the L02 MSR bitmap does not intercept the MSR, then we need to 3891 * save it. 3892 */ 3893 if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) 3894 svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); 3895 3896 if (!sev_es_guest(svm->vcpu.kvm)) 3897 reload_tss(vcpu); 3898 3899 x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl); 3900 3901 if (!sev_es_guest(svm->vcpu.kvm)) { 3902 vcpu->arch.cr2 = svm->vmcb->save.cr2; 3903 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; 3904 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; 3905 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; 3906 } 3907 3908 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) 3909 kvm_before_interrupt(&svm->vcpu); 3910 3911 kvm_load_host_xsave_state(vcpu); 3912 stgi(); 3913 3914 /* Any pending NMI will happen here */ 3915 3916 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) 3917 kvm_after_interrupt(&svm->vcpu); 3918 3919 sync_cr8_to_lapic(vcpu); 3920 3921 svm->next_rip = 0; 3922 if (is_guest_mode(&svm->vcpu)) { 3923 sync_nested_vmcb_control(svm); 3924 svm->nested.nested_run_pending = 0; 3925 } 3926 3927 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; 3928 vmcb_mark_all_clean(svm->vmcb); 3929 3930 /* if exit due to PF check for async PF */ 3931 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) 3932 svm->vcpu.arch.apf.host_apf_flags = 3933 kvm_read_and_reset_apf_flags(); 3934 3935 if (npt_enabled) { 3936 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR); 3937 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR); 3938 } 3939 3940 /* 3941 * We need to handle MC intercepts here before the vcpu has a chance to 3942 * change the physical cpu 3943 */ 3944 if (unlikely(svm->vmcb->control.exit_code == 3945 SVM_EXIT_EXCP_BASE + MC_VECTOR)) 3946 svm_handle_mce(svm); 3947 3948 svm_complete_interrupts(svm); 3949 3950 if (is_guest_mode(vcpu)) 3951 return EXIT_FASTPATH_NONE; 3952 3953 return svm_exit_handlers_fastpath(vcpu); 3954 } 3955 3956 static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long root, 3957 int root_level) 3958 { 3959 struct vcpu_svm *svm = to_svm(vcpu); 3960 unsigned long cr3; 3961 3962 cr3 = __sme_set(root); 3963 if (npt_enabled) { 3964 svm->vmcb->control.nested_cr3 = cr3; 3965 vmcb_mark_dirty(svm->vmcb, VMCB_NPT); 3966 3967 /* Loading L2's CR3 is handled by enter_svm_guest_mode. */ 3968 if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) 3969 return; 3970 cr3 = vcpu->arch.cr3; 3971 } 3972 3973 svm->vmcb->save.cr3 = cr3; 3974 vmcb_mark_dirty(svm->vmcb, VMCB_CR); 3975 } 3976 3977 static int is_disabled(void) 3978 { 3979 u64 vm_cr; 3980 3981 rdmsrl(MSR_VM_CR, vm_cr); 3982 if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE)) 3983 return 1; 3984 3985 return 0; 3986 } 3987 3988 static void 3989 svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) 3990 { 3991 /* 3992 * Patch in the VMMCALL instruction: 3993 */ 3994 hypercall[0] = 0x0f; 3995 hypercall[1] = 0x01; 3996 hypercall[2] = 0xd9; 3997 } 3998 3999 static int __init svm_check_processor_compat(void) 4000 { 4001 return 0; 4002 } 4003 4004 static bool svm_cpu_has_accelerated_tpr(void) 4005 { 4006 return false; 4007 } 4008 4009 /* 4010 * The kvm parameter can be NULL (module initialization, or invocation before 4011 * VM creation). Be sure to check the kvm parameter before using it. 
4012 */ 4013 static bool svm_has_emulated_msr(struct kvm *kvm, u32 index) 4014 { 4015 switch (index) { 4016 case MSR_IA32_MCG_EXT_CTL: 4017 case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: 4018 return false; 4019 case MSR_IA32_SMBASE: 4020 /* SEV-ES guests do not support SMM, so report false */ 4021 if (kvm && sev_es_guest(kvm)) 4022 return false; 4023 break; 4024 default: 4025 break; 4026 } 4027 4028 return true; 4029 } 4030 4031 static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) 4032 { 4033 return 0; 4034 } 4035 4036 static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) 4037 { 4038 struct vcpu_svm *svm = to_svm(vcpu); 4039 struct kvm_cpuid_entry2 *best; 4040 4041 vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && 4042 boot_cpu_has(X86_FEATURE_XSAVE) && 4043 boot_cpu_has(X86_FEATURE_XSAVES); 4044 4045 /* Update nrips enabled cache */ 4046 svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) && 4047 guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS); 4048 4049 /* Check again if INVPCID interception if required */ 4050 svm_check_invpcid(svm); 4051 4052 /* For sev guests, the memory encryption bit is not reserved in CR3. */ 4053 if (sev_guest(vcpu->kvm)) { 4054 best = kvm_find_cpuid_entry(vcpu, 0x8000001F, 0); 4055 if (best) 4056 vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f)); 4057 } 4058 4059 if (!kvm_vcpu_apicv_active(vcpu)) 4060 return; 4061 4062 /* 4063 * AVIC does not work with an x2APIC mode guest. If the X2APIC feature 4064 * is exposed to the guest, disable AVIC. 4065 */ 4066 if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC)) 4067 kvm_request_apicv_update(vcpu->kvm, false, 4068 APICV_INHIBIT_REASON_X2APIC); 4069 4070 /* 4071 * Currently, AVIC does not work with nested virtualization. 4072 * So, we disable AVIC when cpuid for SVM is set in the L1 guest. 
4073 */ 4074 if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM)) 4075 kvm_request_apicv_update(vcpu->kvm, false, 4076 APICV_INHIBIT_REASON_NESTED); 4077 } 4078 4079 static bool svm_has_wbinvd_exit(void) 4080 { 4081 return true; 4082 } 4083 4084 #define PRE_EX(exit) { .exit_code = (exit), \ 4085 .stage = X86_ICPT_PRE_EXCEPT, } 4086 #define POST_EX(exit) { .exit_code = (exit), \ 4087 .stage = X86_ICPT_POST_EXCEPT, } 4088 #define POST_MEM(exit) { .exit_code = (exit), \ 4089 .stage = X86_ICPT_POST_MEMACCESS, } 4090 4091 static const struct __x86_intercept { 4092 u32 exit_code; 4093 enum x86_intercept_stage stage; 4094 } x86_intercept_map[] = { 4095 [x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0), 4096 [x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0), 4097 [x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0), 4098 [x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0), 4099 [x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0), 4100 [x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0), 4101 [x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0), 4102 [x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ), 4103 [x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ), 4104 [x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE), 4105 [x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE), 4106 [x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ), 4107 [x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ), 4108 [x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE), 4109 [x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE), 4110 [x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN), 4111 [x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL), 4112 [x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD), 4113 [x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE), 4114 [x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI), 4115 [x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI), 4116 [x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT), 4117 [x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA), 4118 [x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP), 4119 [x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR), 4120 [x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT), 4121 [x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG), 4122 [x86_intercept_invd] = POST_EX(SVM_EXIT_INVD), 4123 [x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD), 4124 [x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR), 4125 [x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC), 4126 [x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR), 4127 [x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC), 4128 [x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID), 4129 [x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM), 4130 [x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE), 4131 [x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF), 4132 [x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF), 4133 [x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT), 4134 [x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET), 4135 [x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP), 4136 [x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT), 4137 [x86_intercept_in] = POST_EX(SVM_EXIT_IOIO), 4138 [x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO), 4139 [x86_intercept_out] = POST_EX(SVM_EXIT_IOIO), 4140 [x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO), 4141 [x86_intercept_xsetbv] = PRE_EX(SVM_EXIT_XSETBV), 4142 }; 4143 4144 #undef PRE_EX 4145 #undef POST_EX 4146 #undef POST_MEM 4147 4148 static int svm_check_intercept(struct kvm_vcpu *vcpu, 4149 struct x86_instruction_info *info, 4150 enum x86_intercept_stage stage, 4151 struct x86_exception *exception) 4152 { 4153 struct vcpu_svm *svm = to_svm(vcpu); 4154 int vmexit, ret = 

static int svm_check_intercept(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage,
			       struct x86_exception *exception)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int vmexit, ret = X86EMUL_CONTINUE;
	struct __x86_intercept icpt_info;
	struct vmcb *vmcb = svm->vmcb;

	if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
		goto out;

	icpt_info = x86_intercept_map[info->intercept];

	if (stage != icpt_info.stage)
		goto out;

	switch (icpt_info.exit_code) {
	case SVM_EXIT_READ_CR0:
		if (info->intercept == x86_intercept_cr_read)
			icpt_info.exit_code += info->modrm_reg;
		break;
	case SVM_EXIT_WRITE_CR0: {
		unsigned long cr0, val;

		if (info->intercept == x86_intercept_cr_write)
			icpt_info.exit_code += info->modrm_reg;

		if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
		    info->intercept == x86_intercept_clts)
			break;

		if (!(vmcb_is_intercept(&svm->nested.ctl,
					INTERCEPT_SELECTIVE_CR0)))
			break;

		cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
		val = info->src_val  & ~SVM_CR0_SELECTIVE_MASK;

		if (info->intercept == x86_intercept_lmsw) {
			cr0 &= 0xfUL;
			val &= 0xfUL;
			/* lmsw can't clear PE - catch this here */
			if (cr0 & X86_CR0_PE)
				val |= X86_CR0_PE;
		}

		if (cr0 ^ val)
			icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;

		break;
	}
	case SVM_EXIT_READ_DR0:
	case SVM_EXIT_WRITE_DR0:
		icpt_info.exit_code += info->modrm_reg;
		break;
	case SVM_EXIT_MSR:
		if (info->intercept == x86_intercept_wrmsr)
			vmcb->control.exit_info_1 = 1;
		else
			vmcb->control.exit_info_1 = 0;
		break;
	case SVM_EXIT_PAUSE:
		/*
		 * PAUSE is encoded as REP NOP, so it reaches the emulator as
		 * a NOP; check for the REP prefix here.
		 */
		if (info->rep_prefix != REPE_PREFIX)
			goto out;
		break;
	case SVM_EXIT_IOIO: {
		u64 exit_info;
		u32 bytes;

		if (info->intercept == x86_intercept_in ||
		    info->intercept == x86_intercept_ins) {
			exit_info = ((info->src_val & 0xffff) << 16) |
				SVM_IOIO_TYPE_MASK;
			bytes = info->dst_bytes;
		} else {
			exit_info = (info->dst_val & 0xffff) << 16;
			bytes = info->src_bytes;
		}

		if (info->intercept == x86_intercept_outs ||
		    info->intercept == x86_intercept_ins)
			exit_info |= SVM_IOIO_STR_MASK;

		if (info->rep_prefix)
			exit_info |= SVM_IOIO_REP_MASK;

		bytes = min(bytes, 4u);

		exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;

		exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);

		vmcb->control.exit_info_1 = exit_info;
		vmcb->control.exit_info_2 = info->next_rip;

		break;
	}
	default:
		break;
	}

	/* TODO: Advertise NRIPS to guest hypervisor unconditionally */
	if (static_cpu_has(X86_FEATURE_NRIPS))
		vmcb->control.next_rip = info->next_rip;
	vmcb->control.exit_code = icpt_info.exit_code;
	vmexit = nested_svm_exit_handled(svm);

	ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
					   : X86EMUL_CONTINUE;

out:
	return ret;
}
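
/*
 * A worked example of the EXITINFO1 encoding built in the SVM_EXIT_IOIO
 * case above (bit positions follow the SVM_IOIO_* masks; the port number
 * and address size are made up for illustration):
 *
 *	rep outsw to port 0x3f8 with a 32-bit address size
 *	  TYPE  (bit 0)      = 0		output
 *	  STR   (bit 2)      = 1		string instruction
 *	  REP   (bit 3)      = 1		REP prefix present
 *	  SIZE  (bits 4-6)   = 2 << 4		two operand bytes
 *	  ASIZE (bits 7-9)   = 4 << 6		32-bit address size
 *	  PORT  (bits 16-31) = 0x3f8 << 16
 *	  => exit_info_1 = 0x03f8012c
 *
 * matching the layout the APM specifies for a hardware IOIO exit.
 */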

static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
{
}

static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
	if (!kvm_pause_in_guest(vcpu->kvm))
		shrink_ple_window(vcpu);
}

static void svm_setup_mce(struct kvm_vcpu *vcpu)
{
	/* MCG_CAP[63:9] are reserved; keep only the bank count and MCG_CTL_P. */
	vcpu->arch.mcg_cap &= 0x1ff;
}

bool svm_smi_blocked(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/* Per APM Vol.2 15.22.2 "Response to SMI" */
	if (!gif_set(svm))
		return true;

	return is_smm(vcpu);
}

static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->nested.nested_run_pending)
		return -EBUSY;

	/* An SMI must not be injected into L2 if it's supposed to VM-Exit. */
	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm))
		return -EBUSY;

	return !svm_smi_blocked(vcpu);
}

static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;

	if (is_guest_mode(vcpu)) {
		/* FED8h - SVM Guest */
		put_smstate(u64, smstate, 0x7ed8, 1);
		/* FEE0h - SVM Guest VMCB Physical Address */
		put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);

		svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
		svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
		svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

		ret = nested_svm_vmexit(svm);
		if (ret)
			return ret;
	}
	return 0;
}

static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_host_map map;
	int ret = 0;

	if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
		u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
		u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8);
		u64 vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);

		if (guest) {
			if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
				return 1;

			if (!(saved_efer & EFER_SVME))
				return 1;

			if (kvm_vcpu_map(&svm->vcpu,
					 gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
				return 1;

			if (svm_allocate_nested(svm))
				return 1;

			ret = enter_svm_guest_mode(svm, vmcb12_gpa, map.hva);
			kvm_vcpu_unmap(&svm->vcpu, &map, true);
		}
	}

	return ret;
}

static void svm_enable_smi_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!gif_set(svm)) {
		if (vgif_enabled(svm))
			svm_set_intercept(svm, INTERCEPT_STGI);
		/* STGI will cause a vm exit */
	} else {
		/* We must be in SMM; RSM will cause a vmexit anyway. */
	}
}
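
/*
 * How the 0x7exx offsets above map onto SMRAM, as a sketch (assuming the
 * usual KVM definition of put_smstate()/GET_SMSTATE(), which subtract
 * 0x7e00 and index into the state-save area that lives at SMBASE + 0xFE00):
 *
 *	put_smstate(u64, smstate, 0x7ed8, 1)
 *	  writes at smstate + (0x7ed8 - 0x7e00) = smstate + 0xd8,
 *	  i.e. the slot the AMD SMRAM layout documents at FED8h
 *	  ("SVM Guest"), which is SMBASE + 0xFED8.
 *
 * So 0x7ed0, 0x7ed8 and 0x7ee0 correspond to the architected EFER,
 * SVM Guest and SVM Guest VMCB Physical Address save slots, respectively.
 */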

static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int insn_len)
{
	bool smep, smap, is_user;
	unsigned long cr4;

	/*
	 * When the guest is an SEV-ES guest, emulation is not possible.
	 */
	if (sev_es_guest(vcpu->kvm))
		return false;

	/*
	 * Detect and work around Erratum 1096 (Fam 17h, models 00h-0Fh).
	 *
	 * Erratum:
	 * When the CPU raises a #NPF on a guest data access and vCPU
	 * CR4.SMAP=1, it is possible that the CPU microcode implementing
	 * DecodeAssist will fail to read the bytes of the instruction which
	 * caused the #NPF. In this case, the GuestIntrBytes field of the
	 * VMCB on a VMEXIT will incorrectly return 0 instead of the correct
	 * guest instruction bytes.
	 *
	 * This happens because the microcode that reads the instruction
	 * bytes uses a special opcode which attempts to read data with CPL=0
	 * privileges. The microcode reads CS:RIP and, if it hits a SMAP
	 * fault, gives up and returns no instruction bytes.
	 *
	 * Detection:
	 * We reach here when the CPU supports DecodeAssist, raised a #NPF
	 * and returned 0 in the GuestIntrBytes field of the VMCB.
	 * First, the erratum can only be triggered when vCPU CR4.SMAP=1.
	 * Second, if vCPU CR4.SMEP=1, the erratum can only be triggered when
	 * vCPU CPL==3, because otherwise the guest would have taken a SMEP
	 * fault instead of a #NPF. With vCPU CR4.SMEP=0, the erratum can be
	 * triggered at any vCPU CPL. Since most guests that enable SMAP also
	 * enable SMEP, this logic minimizes false positives while keeping
	 * every case semantically correct.
	 *
	 * Workaround:
	 * To determine which instruction the guest was executing, the
	 * hypervisor has to decode the instruction at the instruction
	 * pointer.
	 *
	 * For a non-SEV guest, the hypervisor can read guest memory to
	 * decode the instruction when insn_len is zero, so we return true
	 * to indicate that decoding is possible.
	 *
	 * For an SEV guest, guest memory is encrypted with the guest
	 * specific key and the hypervisor cannot decode the instruction,
	 * so the erratum cannot be worked around: print an error and
	 * request that the guest be killed.
	 */
	if (likely(!insn || insn_len))
		return true;

	/*
	 * If RIP is invalid, go ahead with emulation which will cause an
	 * internal error exit.
	 */
	if (!kvm_vcpu_gfn_to_memslot(vcpu, kvm_rip_read(vcpu) >> PAGE_SHIFT))
		return true;

	cr4 = kvm_read_cr4(vcpu);
	smep = cr4 & X86_CR4_SMEP;
	smap = cr4 & X86_CR4_SMAP;
	is_user = svm_get_cpl(vcpu) == 3;
	if (smap && (!smep || is_user)) {
		if (!sev_guest(vcpu->kvm))
			return true;

		pr_err_ratelimited("KVM: SEV Guest triggered AMD Erratum 1096\n");
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
	}

	return false;
}
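
/*
 * The detection logic above, condensed into a table (this restates the
 * comment and the final checks; it introduces no new invariant):
 *
 *	CR4.SMAP  CR4.SMEP  CPL==3  erratum suspected?
 *	   0         -        -     no
 *	   1         0        -     yes
 *	   1         1        0     no  (SMEP would have faulted first)
 *	   1         1        1     yes
 *
 * When the erratum is suspected and the guest is not SEV, return true so
 * the instruction is re-fetched from guest memory and emulated; in every
 * other case return false, and for an SEV guest additionally request a
 * triple fault, since the workaround is impossible there.
 */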
4462 */ 4463 return !gif_set(svm) || 4464 (vmcb_is_intercept(&svm->vmcb->control, INTERCEPT_INIT)); 4465 } 4466 4467 static void svm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) 4468 { 4469 if (!sev_es_guest(vcpu->kvm)) 4470 return kvm_vcpu_deliver_sipi_vector(vcpu, vector); 4471 4472 sev_vcpu_deliver_sipi_vector(vcpu, vector); 4473 } 4474 4475 static void svm_vm_destroy(struct kvm *kvm) 4476 { 4477 avic_vm_destroy(kvm); 4478 sev_vm_destroy(kvm); 4479 } 4480 4481 static int svm_vm_init(struct kvm *kvm) 4482 { 4483 if (!pause_filter_count || !pause_filter_thresh) 4484 kvm->arch.pause_in_guest = true; 4485 4486 if (avic) { 4487 int ret = avic_vm_init(kvm); 4488 if (ret) 4489 return ret; 4490 } 4491 4492 kvm_apicv_init(kvm, avic); 4493 return 0; 4494 } 4495 4496 static struct kvm_x86_ops svm_x86_ops __initdata = { 4497 .hardware_unsetup = svm_hardware_teardown, 4498 .hardware_enable = svm_hardware_enable, 4499 .hardware_disable = svm_hardware_disable, 4500 .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr, 4501 .has_emulated_msr = svm_has_emulated_msr, 4502 4503 .vcpu_create = svm_create_vcpu, 4504 .vcpu_free = svm_free_vcpu, 4505 .vcpu_reset = svm_vcpu_reset, 4506 4507 .vm_size = sizeof(struct kvm_svm), 4508 .vm_init = svm_vm_init, 4509 .vm_destroy = svm_vm_destroy, 4510 4511 .prepare_guest_switch = svm_prepare_guest_switch, 4512 .vcpu_load = svm_vcpu_load, 4513 .vcpu_put = svm_vcpu_put, 4514 .vcpu_blocking = svm_vcpu_blocking, 4515 .vcpu_unblocking = svm_vcpu_unblocking, 4516 4517 .update_exception_bitmap = svm_update_exception_bitmap, 4518 .get_msr_feature = svm_get_msr_feature, 4519 .get_msr = svm_get_msr, 4520 .set_msr = svm_set_msr, 4521 .get_segment_base = svm_get_segment_base, 4522 .get_segment = svm_get_segment, 4523 .set_segment = svm_set_segment, 4524 .get_cpl = svm_get_cpl, 4525 .get_cs_db_l_bits = kvm_get_cs_db_l_bits, 4526 .set_cr0 = svm_set_cr0, 4527 .is_valid_cr4 = svm_is_valid_cr4, 4528 .set_cr4 = svm_set_cr4, 4529 .set_efer = svm_set_efer, 4530 .get_idt = svm_get_idt, 4531 .set_idt = svm_set_idt, 4532 .get_gdt = svm_get_gdt, 4533 .set_gdt = svm_set_gdt, 4534 .set_dr7 = svm_set_dr7, 4535 .sync_dirty_debug_regs = svm_sync_dirty_debug_regs, 4536 .cache_reg = svm_cache_reg, 4537 .get_rflags = svm_get_rflags, 4538 .set_rflags = svm_set_rflags, 4539 4540 .tlb_flush_all = svm_flush_tlb, 4541 .tlb_flush_current = svm_flush_tlb, 4542 .tlb_flush_gva = svm_flush_tlb_gva, 4543 .tlb_flush_guest = svm_flush_tlb, 4544 4545 .run = svm_vcpu_run, 4546 .handle_exit = handle_exit, 4547 .skip_emulated_instruction = skip_emulated_instruction, 4548 .update_emulated_instruction = NULL, 4549 .set_interrupt_shadow = svm_set_interrupt_shadow, 4550 .get_interrupt_shadow = svm_get_interrupt_shadow, 4551 .patch_hypercall = svm_patch_hypercall, 4552 .set_irq = svm_set_irq, 4553 .set_nmi = svm_inject_nmi, 4554 .queue_exception = svm_queue_exception, 4555 .cancel_injection = svm_cancel_injection, 4556 .interrupt_allowed = svm_interrupt_allowed, 4557 .nmi_allowed = svm_nmi_allowed, 4558 .get_nmi_mask = svm_get_nmi_mask, 4559 .set_nmi_mask = svm_set_nmi_mask, 4560 .enable_nmi_window = svm_enable_nmi_window, 4561 .enable_irq_window = svm_enable_irq_window, 4562 .update_cr8_intercept = svm_update_cr8_intercept, 4563 .set_virtual_apic_mode = svm_set_virtual_apic_mode, 4564 .refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl, 4565 .check_apicv_inhibit_reasons = svm_check_apicv_inhibit_reasons, 4566 .pre_update_apicv_exec_ctrl = svm_pre_update_apicv_exec_ctrl, 4567 .load_eoi_exitmap = 

static struct kvm_x86_ops svm_x86_ops __initdata = {
	.hardware_unsetup = svm_hardware_teardown,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,
	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
	.has_emulated_msr = svm_has_emulated_msr,

	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,
	.vcpu_reset = svm_vcpu_reset,

	.vm_size = sizeof(struct kvm_svm),
	.vm_init = svm_vm_init,
	.vm_destroy = svm_vm_destroy,

	.prepare_guest_switch = svm_prepare_guest_switch,
	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,
	.vcpu_blocking = svm_vcpu_blocking,
	.vcpu_unblocking = svm_vcpu_unblocking,

	.update_exception_bitmap = svm_update_exception_bitmap,
	.get_msr_feature = svm_get_msr_feature,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cpl = svm_get_cpl,
	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
	.set_cr0 = svm_set_cr0,
	.is_valid_cr4 = svm_is_valid_cr4,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.set_dr7 = svm_set_dr7,
	.sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
	.cache_reg = svm_cache_reg,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,

	.tlb_flush_all = svm_flush_tlb,
	.tlb_flush_current = svm_flush_tlb,
	.tlb_flush_gva = svm_flush_tlb_gva,
	.tlb_flush_guest = svm_flush_tlb,

	.run = svm_vcpu_run,
	.handle_exit = handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.update_emulated_instruction = NULL,
	.set_interrupt_shadow = svm_set_interrupt_shadow,
	.get_interrupt_shadow = svm_get_interrupt_shadow,
	.patch_hypercall = svm_patch_hypercall,
	.set_irq = svm_set_irq,
	.set_nmi = svm_inject_nmi,
	.queue_exception = svm_queue_exception,
	.cancel_injection = svm_cancel_injection,
	.interrupt_allowed = svm_interrupt_allowed,
	.nmi_allowed = svm_nmi_allowed,
	.get_nmi_mask = svm_get_nmi_mask,
	.set_nmi_mask = svm_set_nmi_mask,
	.enable_nmi_window = svm_enable_nmi_window,
	.enable_irq_window = svm_enable_irq_window,
	.update_cr8_intercept = svm_update_cr8_intercept,
	.set_virtual_apic_mode = svm_set_virtual_apic_mode,
	.refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
	.check_apicv_inhibit_reasons = svm_check_apicv_inhibit_reasons,
	.pre_update_apicv_exec_ctrl = svm_pre_update_apicv_exec_ctrl,
	.load_eoi_exitmap = svm_load_eoi_exitmap,
	.hwapic_irr_update = svm_hwapic_irr_update,
	.hwapic_isr_update = svm_hwapic_isr_update,
	.sync_pir_to_irr = kvm_lapic_find_highest_irr,
	.apicv_post_state_restore = avic_post_state_restore,

	.set_tss_addr = svm_set_tss_addr,
	.set_identity_map_addr = svm_set_identity_map_addr,
	.get_mt_mask = svm_get_mt_mask,

	.get_exit_info = svm_get_exit_info,

	.vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid,

	.has_wbinvd_exit = svm_has_wbinvd_exit,

	.write_l1_tsc_offset = svm_write_l1_tsc_offset,

	.load_mmu_pgd = svm_load_mmu_pgd,

	.check_intercept = svm_check_intercept,
	.handle_exit_irqoff = svm_handle_exit_irqoff,

	.request_immediate_exit = __kvm_request_immediate_exit,

	.sched_in = svm_sched_in,

	.pmu_ops = &amd_pmu_ops,
	.nested_ops = &svm_nested_ops,

	.deliver_posted_interrupt = svm_deliver_avic_intr,
	.dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
	.update_pi_irte = svm_update_pi_irte,
	.setup_mce = svm_setup_mce,

	.smi_allowed = svm_smi_allowed,
	.pre_enter_smm = svm_pre_enter_smm,
	.pre_leave_smm = svm_pre_leave_smm,
	.enable_smi_window = svm_enable_smi_window,

	.mem_enc_op = svm_mem_enc_op,
	.mem_enc_reg_region = svm_register_enc_region,
	.mem_enc_unreg_region = svm_unregister_enc_region,

	.can_emulate_instruction = svm_can_emulate_instruction,

	.apic_init_signal_blocked = svm_apic_init_signal_blocked,

	.msr_filter_changed = svm_msr_filter_changed,
	.complete_emulated_msr = svm_complete_emulated_msr,

	.vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
};

static struct kvm_x86_init_ops svm_init_ops __initdata = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.check_processor_compatibility = svm_check_processor_compat,

	.runtime_ops = &svm_x86_ops,
};

static int __init svm_init(void)
{
	__unused_size_checks();

	return kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
			__alignof__(struct vcpu_svm), THIS_MODULE);
}

static void __exit svm_exit(void)
{
	kvm_exit();
}

module_init(svm_init)
module_exit(svm_exit)