1 /* 2 * Kernel-based Virtual Machine driver for Linux 3 * 4 * derived from drivers/kvm/kvm_main.c 5 * 6 * Copyright (C) 2006 Qumranet, Inc. 7 * Copyright (C) 2008 Qumranet, Inc. 8 * Copyright IBM Corporation, 2008 9 * Copyright 2010 Red Hat, Inc. and/or its affiliates. 10 * 11 * Authors: 12 * Avi Kivity <avi@qumranet.com> 13 * Yaniv Kamay <yaniv@qumranet.com> 14 * Amit Shah <amit.shah@qumranet.com> 15 * Ben-Ami Yassour <benami@il.ibm.com> 16 * 17 * This work is licensed under the terms of the GNU GPL, version 2. See 18 * the COPYING file in the top-level directory. 19 * 20 */ 21 22 #include <linux/kvm_host.h> 23 #include "irq.h" 24 #include "mmu.h" 25 #include "i8254.h" 26 #include "tss.h" 27 #include "kvm_cache_regs.h" 28 #include "x86.h" 29 #include "cpuid.h" 30 #include "assigned-dev.h" 31 #include "pmu.h" 32 33 #include <linux/clocksource.h> 34 #include <linux/interrupt.h> 35 #include <linux/kvm.h> 36 #include <linux/fs.h> 37 #include <linux/vmalloc.h> 38 #include <linux/module.h> 39 #include <linux/mman.h> 40 #include <linux/highmem.h> 41 #include <linux/iommu.h> 42 #include <linux/intel-iommu.h> 43 #include <linux/cpufreq.h> 44 #include <linux/user-return-notifier.h> 45 #include <linux/srcu.h> 46 #include <linux/slab.h> 47 #include <linux/perf_event.h> 48 #include <linux/uaccess.h> 49 #include <linux/hash.h> 50 #include <linux/pci.h> 51 #include <linux/timekeeper_internal.h> 52 #include <linux/pvclock_gtod.h> 53 #include <trace/events/kvm.h> 54 55 #define CREATE_TRACE_POINTS 56 #include "trace.h" 57 58 #include <asm/debugreg.h> 59 #include <asm/msr.h> 60 #include <asm/desc.h> 61 #include <asm/mce.h> 62 #include <linux/kernel_stat.h> 63 #include <asm/fpu/internal.h> /* Ugh! */ 64 #include <asm/pvclock.h> 65 #include <asm/div64.h> 66 67 #define MAX_IO_MSRS 256 68 #define KVM_MAX_MCE_BANKS 32 69 #define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P) 70 71 #define emul_to_vcpu(ctxt) \ 72 container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt) 73 74 /* EFER defaults: 75 * - enable syscall per default because its emulated by KVM 76 * - enable LME and LMA per default on 64 bit KVM 77 */ 78 #ifdef CONFIG_X86_64 79 static 80 u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA)); 81 #else 82 static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE); 83 #endif 84 85 #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM 86 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU 87 88 static void update_cr8_intercept(struct kvm_vcpu *vcpu); 89 static void process_nmi(struct kvm_vcpu *vcpu); 90 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); 91 92 struct kvm_x86_ops *kvm_x86_ops; 93 EXPORT_SYMBOL_GPL(kvm_x86_ops); 94 95 static bool ignore_msrs = 0; 96 module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR); 97 98 unsigned int min_timer_period_us = 500; 99 module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR); 100 101 static bool __read_mostly kvmclock_periodic_sync = true; 102 module_param(kvmclock_periodic_sync, bool, S_IRUGO); 103 104 bool kvm_has_tsc_control; 105 EXPORT_SYMBOL_GPL(kvm_has_tsc_control); 106 u32 kvm_max_guest_tsc_khz; 107 EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz); 108 109 /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */ 110 static u32 tsc_tolerance_ppm = 250; 111 module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR); 112 113 /* lapic timer advance (tscdeadline mode only) in nanoseconds */ 114 unsigned int lapic_timer_advance_ns = 0; 115 
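/*
 * Illustrative usage note (not part of the original source): the module
 * parameters declared in this file can typically be set when the kvm
 * module is loaded, e.g.
 *
 *	modprobe kvm ignore_msrs=1 lapic_timer_advance_ns=1000
 *
 * and the parameters registered with S_IWUSR can usually be adjusted at
 * runtime through /sys/module/kvm/parameters/.  The values above are
 * examples only, not recommendations.
 */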
module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR); 116 117 static bool backwards_tsc_observed = false; 118 119 #define KVM_NR_SHARED_MSRS 16 120 121 struct kvm_shared_msrs_global { 122 int nr; 123 u32 msrs[KVM_NR_SHARED_MSRS]; 124 }; 125 126 struct kvm_shared_msrs { 127 struct user_return_notifier urn; 128 bool registered; 129 struct kvm_shared_msr_values { 130 u64 host; 131 u64 curr; 132 } values[KVM_NR_SHARED_MSRS]; 133 }; 134 135 static struct kvm_shared_msrs_global __read_mostly shared_msrs_global; 136 static struct kvm_shared_msrs __percpu *shared_msrs; 137 138 struct kvm_stats_debugfs_item debugfs_entries[] = { 139 { "pf_fixed", VCPU_STAT(pf_fixed) }, 140 { "pf_guest", VCPU_STAT(pf_guest) }, 141 { "tlb_flush", VCPU_STAT(tlb_flush) }, 142 { "invlpg", VCPU_STAT(invlpg) }, 143 { "exits", VCPU_STAT(exits) }, 144 { "io_exits", VCPU_STAT(io_exits) }, 145 { "mmio_exits", VCPU_STAT(mmio_exits) }, 146 { "signal_exits", VCPU_STAT(signal_exits) }, 147 { "irq_window", VCPU_STAT(irq_window_exits) }, 148 { "nmi_window", VCPU_STAT(nmi_window_exits) }, 149 { "halt_exits", VCPU_STAT(halt_exits) }, 150 { "halt_successful_poll", VCPU_STAT(halt_successful_poll) }, 151 { "halt_wakeup", VCPU_STAT(halt_wakeup) }, 152 { "hypercalls", VCPU_STAT(hypercalls) }, 153 { "request_irq", VCPU_STAT(request_irq_exits) }, 154 { "irq_exits", VCPU_STAT(irq_exits) }, 155 { "host_state_reload", VCPU_STAT(host_state_reload) }, 156 { "efer_reload", VCPU_STAT(efer_reload) }, 157 { "fpu_reload", VCPU_STAT(fpu_reload) }, 158 { "insn_emulation", VCPU_STAT(insn_emulation) }, 159 { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) }, 160 { "irq_injections", VCPU_STAT(irq_injections) }, 161 { "nmi_injections", VCPU_STAT(nmi_injections) }, 162 { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) }, 163 { "mmu_pte_write", VM_STAT(mmu_pte_write) }, 164 { "mmu_pte_updated", VM_STAT(mmu_pte_updated) }, 165 { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) }, 166 { "mmu_flooded", VM_STAT(mmu_flooded) }, 167 { "mmu_recycled", VM_STAT(mmu_recycled) }, 168 { "mmu_cache_miss", VM_STAT(mmu_cache_miss) }, 169 { "mmu_unsync", VM_STAT(mmu_unsync) }, 170 { "remote_tlb_flush", VM_STAT(remote_tlb_flush) }, 171 { "largepages", VM_STAT(lpages) }, 172 { NULL } 173 }; 174 175 u64 __read_mostly host_xcr0; 176 177 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt); 178 179 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu) 180 { 181 int i; 182 for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++) 183 vcpu->arch.apf.gfns[i] = ~0; 184 } 185 186 static void kvm_on_user_return(struct user_return_notifier *urn) 187 { 188 unsigned slot; 189 struct kvm_shared_msrs *locals 190 = container_of(urn, struct kvm_shared_msrs, urn); 191 struct kvm_shared_msr_values *values; 192 193 for (slot = 0; slot < shared_msrs_global.nr; ++slot) { 194 values = &locals->values[slot]; 195 if (values->host != values->curr) { 196 wrmsrl(shared_msrs_global.msrs[slot], values->host); 197 values->curr = values->host; 198 } 199 } 200 locals->registered = false; 201 user_return_notifier_unregister(urn); 202 } 203 204 static void shared_msr_update(unsigned slot, u32 msr) 205 { 206 u64 value; 207 unsigned int cpu = smp_processor_id(); 208 struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); 209 210 /* only read, and nobody should modify it at this time, 211 * so don't need lock */ 212 if (slot >= shared_msrs_global.nr) { 213 printk(KERN_ERR "kvm: invalid MSR slot!"); 214 return; 215 } 216 rdmsrl_safe(msr, &value); 217 smsr->values[slot].host 
		= value;
	smsr->values[slot].curr = value;
}

void kvm_define_shared_msr(unsigned slot, u32 msr)
{
	BUG_ON(slot >= KVM_NR_SHARED_MSRS);
	if (slot >= shared_msrs_global.nr)
		shared_msrs_global.nr = slot + 1;
	shared_msrs_global.msrs[slot] = msr;
	/* make sure the update to shared_msrs_global is visible before use */
	smp_wmb();
}
EXPORT_SYMBOL_GPL(kvm_define_shared_msr);

static void kvm_shared_msr_cpu_online(void)
{
	unsigned i;

	for (i = 0; i < shared_msrs_global.nr; ++i)
		shared_msr_update(i, shared_msrs_global.msrs[i]);
}

int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
	int err;

	if (((value ^ smsr->values[slot].curr) & mask) == 0)
		return 0;
	smsr->values[slot].curr = value;
	err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
	if (err)
		return 1;

	if (!smsr->registered) {
		smsr->urn.on_user_return = kvm_on_user_return;
		user_return_notifier_register(&smsr->urn);
		smsr->registered = true;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_shared_msr);

static void drop_user_return_notifiers(void)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);

	if (smsr->registered)
		kvm_on_user_return(&smsr->urn);
}

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	u64 old_state = vcpu->arch.apic_base &
		(MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
	u64 new_state = msr_info->data &
		(MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
	u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) |
		0x2ff | (guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE);

	if (!msr_info->host_initiated &&
	    ((msr_info->data & reserved_bits) != 0 ||
	     new_state == X2APIC_ENABLE ||
	     (new_state == MSR_IA32_APICBASE_ENABLE &&
	      old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) ||
	     (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) &&
	      old_state == 0)))
		return 1;

	kvm_lapic_set_base(vcpu, msr_info->data);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

asmlinkage __visible void kvm_spurious_fault(void)
{
	/* Fault while not rebooting.  We want the trace.
*/ 303 BUG(); 304 } 305 EXPORT_SYMBOL_GPL(kvm_spurious_fault); 306 307 #define EXCPT_BENIGN 0 308 #define EXCPT_CONTRIBUTORY 1 309 #define EXCPT_PF 2 310 311 static int exception_class(int vector) 312 { 313 switch (vector) { 314 case PF_VECTOR: 315 return EXCPT_PF; 316 case DE_VECTOR: 317 case TS_VECTOR: 318 case NP_VECTOR: 319 case SS_VECTOR: 320 case GP_VECTOR: 321 return EXCPT_CONTRIBUTORY; 322 default: 323 break; 324 } 325 return EXCPT_BENIGN; 326 } 327 328 #define EXCPT_FAULT 0 329 #define EXCPT_TRAP 1 330 #define EXCPT_ABORT 2 331 #define EXCPT_INTERRUPT 3 332 333 static int exception_type(int vector) 334 { 335 unsigned int mask; 336 337 if (WARN_ON(vector > 31 || vector == NMI_VECTOR)) 338 return EXCPT_INTERRUPT; 339 340 mask = 1 << vector; 341 342 /* #DB is trap, as instruction watchpoints are handled elsewhere */ 343 if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR))) 344 return EXCPT_TRAP; 345 346 if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR))) 347 return EXCPT_ABORT; 348 349 /* Reserved exceptions will result in fault */ 350 return EXCPT_FAULT; 351 } 352 353 static void kvm_multiple_exception(struct kvm_vcpu *vcpu, 354 unsigned nr, bool has_error, u32 error_code, 355 bool reinject) 356 { 357 u32 prev_nr; 358 int class1, class2; 359 360 kvm_make_request(KVM_REQ_EVENT, vcpu); 361 362 if (!vcpu->arch.exception.pending) { 363 queue: 364 if (has_error && !is_protmode(vcpu)) 365 has_error = false; 366 vcpu->arch.exception.pending = true; 367 vcpu->arch.exception.has_error_code = has_error; 368 vcpu->arch.exception.nr = nr; 369 vcpu->arch.exception.error_code = error_code; 370 vcpu->arch.exception.reinject = reinject; 371 return; 372 } 373 374 /* to check exception */ 375 prev_nr = vcpu->arch.exception.nr; 376 if (prev_nr == DF_VECTOR) { 377 /* triple fault -> shutdown */ 378 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 379 return; 380 } 381 class1 = exception_class(prev_nr); 382 class2 = exception_class(nr); 383 if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY) 384 || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) { 385 /* generate double fault per SDM Table 5-5 */ 386 vcpu->arch.exception.pending = true; 387 vcpu->arch.exception.has_error_code = true; 388 vcpu->arch.exception.nr = DF_VECTOR; 389 vcpu->arch.exception.error_code = 0; 390 } else 391 /* replace previous exception with a new one in a hope 392 that instruction re-execution will regenerate lost 393 exception */ 394 goto queue; 395 } 396 397 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr) 398 { 399 kvm_multiple_exception(vcpu, nr, false, 0, false); 400 } 401 EXPORT_SYMBOL_GPL(kvm_queue_exception); 402 403 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr) 404 { 405 kvm_multiple_exception(vcpu, nr, false, 0, true); 406 } 407 EXPORT_SYMBOL_GPL(kvm_requeue_exception); 408 409 void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err) 410 { 411 if (err) 412 kvm_inject_gp(vcpu, 0); 413 else 414 kvm_x86_ops->skip_emulated_instruction(vcpu); 415 } 416 EXPORT_SYMBOL_GPL(kvm_complete_insn_gp); 417 418 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) 419 { 420 ++vcpu->stat.pf_guest; 421 vcpu->arch.cr2 = fault->address; 422 kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code); 423 } 424 EXPORT_SYMBOL_GPL(kvm_inject_page_fault); 425 426 static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) 427 { 428 if (mmu_is_nested(vcpu) && !fault->nested_page_fault) 429 vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault); 
	else
		vcpu->arch.mmu.inject_page_fault(vcpu, fault);

	return fault->nested_page_fault;
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	atomic_inc(&vcpu->arch.nmi_queued);
	kvm_make_request(KVM_REQ_NMI, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);

/*
 * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
 * a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
	if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
		return true;
	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);

bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
{
	if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
		return true;

	kvm_queue_exception(vcpu, UD_VECTOR);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_dr);

/*
 * This function is used to read from the physical memory of the currently
 * running guest.  The difference from kvm_vcpu_read_guest_page is that this
 * function can read from guest physical memory or from the guest's guest
 * physical memory.
 */
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gfn_t ngfn, void *data, int offset, int len,
			    u32 access)
{
	struct x86_exception exception;
	gfn_t real_gfn;
	gpa_t ngpa;

	ngpa = gfn_to_gpa(ngfn);
	real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception);
	if (real_gfn == UNMAPPED_GVA)
		return -EFAULT;

	real_gfn = gpa_to_gfn(real_gfn);

	return kvm_vcpu_read_guest_page(vcpu, real_gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);

static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
				      void *data, int offset, int len, u32 access)
{
	return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
				       data, offset, len, access);
}

/*
 * Load the pae pdptrs.  Return true if they are all valid.
511 */ 512 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) 513 { 514 gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT; 515 unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2; 516 int i; 517 int ret; 518 u64 pdpte[ARRAY_SIZE(mmu->pdptrs)]; 519 520 ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte, 521 offset * sizeof(u64), sizeof(pdpte), 522 PFERR_USER_MASK|PFERR_WRITE_MASK); 523 if (ret < 0) { 524 ret = 0; 525 goto out; 526 } 527 for (i = 0; i < ARRAY_SIZE(pdpte); ++i) { 528 if (is_present_gpte(pdpte[i]) && 529 (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) { 530 ret = 0; 531 goto out; 532 } 533 } 534 ret = 1; 535 536 memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)); 537 __set_bit(VCPU_EXREG_PDPTR, 538 (unsigned long *)&vcpu->arch.regs_avail); 539 __set_bit(VCPU_EXREG_PDPTR, 540 (unsigned long *)&vcpu->arch.regs_dirty); 541 out: 542 543 return ret; 544 } 545 EXPORT_SYMBOL_GPL(load_pdptrs); 546 547 static bool pdptrs_changed(struct kvm_vcpu *vcpu) 548 { 549 u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)]; 550 bool changed = true; 551 int offset; 552 gfn_t gfn; 553 int r; 554 555 if (is_long_mode(vcpu) || !is_pae(vcpu)) 556 return false; 557 558 if (!test_bit(VCPU_EXREG_PDPTR, 559 (unsigned long *)&vcpu->arch.regs_avail)) 560 return true; 561 562 gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT; 563 offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1); 564 r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte), 565 PFERR_USER_MASK | PFERR_WRITE_MASK); 566 if (r < 0) 567 goto out; 568 changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0; 569 out: 570 571 return changed; 572 } 573 574 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) 575 { 576 unsigned long old_cr0 = kvm_read_cr0(vcpu); 577 unsigned long update_bits = X86_CR0_PG | X86_CR0_WP; 578 579 cr0 |= X86_CR0_ET; 580 581 #ifdef CONFIG_X86_64 582 if (cr0 & 0xffffffff00000000UL) 583 return 1; 584 #endif 585 586 cr0 &= ~CR0_RESERVED_BITS; 587 588 if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) 589 return 1; 590 591 if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) 592 return 1; 593 594 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { 595 #ifdef CONFIG_X86_64 596 if ((vcpu->arch.efer & EFER_LME)) { 597 int cs_db, cs_l; 598 599 if (!is_pae(vcpu)) 600 return 1; 601 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); 602 if (cs_l) 603 return 1; 604 } else 605 #endif 606 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, 607 kvm_read_cr3(vcpu))) 608 return 1; 609 } 610 611 if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) 612 return 1; 613 614 kvm_x86_ops->set_cr0(vcpu, cr0); 615 616 if ((cr0 ^ old_cr0) & X86_CR0_PG) { 617 kvm_clear_async_pf_completion_queue(vcpu); 618 kvm_async_pf_hash_reset(vcpu); 619 } 620 621 if ((cr0 ^ old_cr0) & update_bits) 622 kvm_mmu_reset_context(vcpu); 623 624 if ((cr0 ^ old_cr0) & X86_CR0_CD) 625 kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL); 626 627 return 0; 628 } 629 EXPORT_SYMBOL_GPL(kvm_set_cr0); 630 631 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) 632 { 633 (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f)); 634 } 635 EXPORT_SYMBOL_GPL(kvm_lmsw); 636 637 static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) 638 { 639 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && 640 !vcpu->guest_xcr0_loaded) { 641 /* kvm_set_xcr() also depends on this */ 642 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); 643 vcpu->guest_xcr0_loaded = 1; 644 } 645 } 646 647 static void kvm_put_guest_xcr0(struct kvm_vcpu 
*vcpu) 648 { 649 if (vcpu->guest_xcr0_loaded) { 650 if (vcpu->arch.xcr0 != host_xcr0) 651 xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0); 652 vcpu->guest_xcr0_loaded = 0; 653 } 654 } 655 656 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) 657 { 658 u64 xcr0 = xcr; 659 u64 old_xcr0 = vcpu->arch.xcr0; 660 u64 valid_bits; 661 662 /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */ 663 if (index != XCR_XFEATURE_ENABLED_MASK) 664 return 1; 665 if (!(xcr0 & XSTATE_FP)) 666 return 1; 667 if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE)) 668 return 1; 669 670 /* 671 * Do not allow the guest to set bits that we do not support 672 * saving. However, xcr0 bit 0 is always set, even if the 673 * emulated CPU does not support XSAVE (see fx_init). 674 */ 675 valid_bits = vcpu->arch.guest_supported_xcr0 | XSTATE_FP; 676 if (xcr0 & ~valid_bits) 677 return 1; 678 679 if ((!(xcr0 & XSTATE_BNDREGS)) != (!(xcr0 & XSTATE_BNDCSR))) 680 return 1; 681 682 if (xcr0 & XSTATE_AVX512) { 683 if (!(xcr0 & XSTATE_YMM)) 684 return 1; 685 if ((xcr0 & XSTATE_AVX512) != XSTATE_AVX512) 686 return 1; 687 } 688 kvm_put_guest_xcr0(vcpu); 689 vcpu->arch.xcr0 = xcr0; 690 691 if ((xcr0 ^ old_xcr0) & XSTATE_EXTEND_MASK) 692 kvm_update_cpuid(vcpu); 693 return 0; 694 } 695 696 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) 697 { 698 if (kvm_x86_ops->get_cpl(vcpu) != 0 || 699 __kvm_set_xcr(vcpu, index, xcr)) { 700 kvm_inject_gp(vcpu, 0); 701 return 1; 702 } 703 return 0; 704 } 705 EXPORT_SYMBOL_GPL(kvm_set_xcr); 706 707 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 708 { 709 unsigned long old_cr4 = kvm_read_cr4(vcpu); 710 unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | 711 X86_CR4_SMEP | X86_CR4_SMAP; 712 713 if (cr4 & CR4_RESERVED_BITS) 714 return 1; 715 716 if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE)) 717 return 1; 718 719 if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP)) 720 return 1; 721 722 if (!guest_cpuid_has_smap(vcpu) && (cr4 & X86_CR4_SMAP)) 723 return 1; 724 725 if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE)) 726 return 1; 727 728 if (is_long_mode(vcpu)) { 729 if (!(cr4 & X86_CR4_PAE)) 730 return 1; 731 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) 732 && ((cr4 ^ old_cr4) & pdptr_bits) 733 && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, 734 kvm_read_cr3(vcpu))) 735 return 1; 736 737 if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) { 738 if (!guest_cpuid_has_pcid(vcpu)) 739 return 1; 740 741 /* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */ 742 if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu)) 743 return 1; 744 } 745 746 if (kvm_x86_ops->set_cr4(vcpu, cr4)) 747 return 1; 748 749 if (((cr4 ^ old_cr4) & pdptr_bits) || 750 (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE))) 751 kvm_mmu_reset_context(vcpu); 752 753 if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE) 754 kvm_update_cpuid(vcpu); 755 756 return 0; 757 } 758 EXPORT_SYMBOL_GPL(kvm_set_cr4); 759 760 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) 761 { 762 #ifdef CONFIG_X86_64 763 cr3 &= ~CR3_PCID_INVD; 764 #endif 765 766 if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) { 767 kvm_mmu_sync_roots(vcpu); 768 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 769 return 0; 770 } 771 772 if (is_long_mode(vcpu)) { 773 if (cr3 & CR3_L_MODE_RESERVED_BITS) 774 return 1; 775 } else if (is_pae(vcpu) && is_paging(vcpu) && 776 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) 777 return 1; 778 779 vcpu->arch.cr3 = cr3; 780 
__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); 781 kvm_mmu_new_cr3(vcpu); 782 return 0; 783 } 784 EXPORT_SYMBOL_GPL(kvm_set_cr3); 785 786 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) 787 { 788 if (cr8 & CR8_RESERVED_BITS) 789 return 1; 790 if (irqchip_in_kernel(vcpu->kvm)) 791 kvm_lapic_set_tpr(vcpu, cr8); 792 else 793 vcpu->arch.cr8 = cr8; 794 return 0; 795 } 796 EXPORT_SYMBOL_GPL(kvm_set_cr8); 797 798 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu) 799 { 800 if (irqchip_in_kernel(vcpu->kvm)) 801 return kvm_lapic_get_cr8(vcpu); 802 else 803 return vcpu->arch.cr8; 804 } 805 EXPORT_SYMBOL_GPL(kvm_get_cr8); 806 807 static void kvm_update_dr0123(struct kvm_vcpu *vcpu) 808 { 809 int i; 810 811 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { 812 for (i = 0; i < KVM_NR_DB_REGS; i++) 813 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; 814 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD; 815 } 816 } 817 818 static void kvm_update_dr6(struct kvm_vcpu *vcpu) 819 { 820 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) 821 kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6); 822 } 823 824 static void kvm_update_dr7(struct kvm_vcpu *vcpu) 825 { 826 unsigned long dr7; 827 828 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) 829 dr7 = vcpu->arch.guest_debug_dr7; 830 else 831 dr7 = vcpu->arch.dr7; 832 kvm_x86_ops->set_dr7(vcpu, dr7); 833 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED; 834 if (dr7 & DR7_BP_EN_MASK) 835 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; 836 } 837 838 static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu) 839 { 840 u64 fixed = DR6_FIXED_1; 841 842 if (!guest_cpuid_has_rtm(vcpu)) 843 fixed |= DR6_RTM; 844 return fixed; 845 } 846 847 static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) 848 { 849 switch (dr) { 850 case 0 ... 3: 851 vcpu->arch.db[dr] = val; 852 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) 853 vcpu->arch.eff_db[dr] = val; 854 break; 855 case 4: 856 /* fall through */ 857 case 6: 858 if (val & 0xffffffff00000000ULL) 859 return -1; /* #GP */ 860 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); 861 kvm_update_dr6(vcpu); 862 break; 863 case 5: 864 /* fall through */ 865 default: /* 7 */ 866 if (val & 0xffffffff00000000ULL) 867 return -1; /* #GP */ 868 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; 869 kvm_update_dr7(vcpu); 870 break; 871 } 872 873 return 0; 874 } 875 876 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) 877 { 878 if (__kvm_set_dr(vcpu, dr, val)) { 879 kvm_inject_gp(vcpu, 0); 880 return 1; 881 } 882 return 0; 883 } 884 EXPORT_SYMBOL_GPL(kvm_set_dr); 885 886 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) 887 { 888 switch (dr) { 889 case 0 ... 
3: 890 *val = vcpu->arch.db[dr]; 891 break; 892 case 4: 893 /* fall through */ 894 case 6: 895 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) 896 *val = vcpu->arch.dr6; 897 else 898 *val = kvm_x86_ops->get_dr6(vcpu); 899 break; 900 case 5: 901 /* fall through */ 902 default: /* 7 */ 903 *val = vcpu->arch.dr7; 904 break; 905 } 906 return 0; 907 } 908 EXPORT_SYMBOL_GPL(kvm_get_dr); 909 910 bool kvm_rdpmc(struct kvm_vcpu *vcpu) 911 { 912 u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX); 913 u64 data; 914 int err; 915 916 err = kvm_pmu_rdpmc(vcpu, ecx, &data); 917 if (err) 918 return err; 919 kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data); 920 kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32); 921 return err; 922 } 923 EXPORT_SYMBOL_GPL(kvm_rdpmc); 924 925 /* 926 * List of msr numbers which we expose to userspace through KVM_GET_MSRS 927 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. 928 * 929 * This list is modified at module load time to reflect the 930 * capabilities of the host cpu. This capabilities test skips MSRs that are 931 * kvm-specific. Those are put in emulated_msrs; filtering of emulated_msrs 932 * may depend on host virtualization features rather than host cpu features. 933 */ 934 935 static u32 msrs_to_save[] = { 936 MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, 937 MSR_STAR, 938 #ifdef CONFIG_X86_64 939 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, 940 #endif 941 MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA, 942 MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS 943 }; 944 945 static unsigned num_msrs_to_save; 946 947 static u32 emulated_msrs[] = { 948 MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, 949 MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW, 950 HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL, 951 HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC, 952 HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME, 953 MSR_KVM_PV_EOI_EN, 954 955 MSR_IA32_TSC_ADJUST, 956 MSR_IA32_TSCDEADLINE, 957 MSR_IA32_MISC_ENABLE, 958 MSR_IA32_MCG_STATUS, 959 MSR_IA32_MCG_CTL, 960 MSR_IA32_SMBASE, 961 }; 962 963 static unsigned num_emulated_msrs; 964 965 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) 966 { 967 if (efer & efer_reserved_bits) 968 return false; 969 970 if (efer & EFER_FFXSR) { 971 struct kvm_cpuid_entry2 *feat; 972 973 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); 974 if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) 975 return false; 976 } 977 978 if (efer & EFER_SVME) { 979 struct kvm_cpuid_entry2 *feat; 980 981 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); 982 if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) 983 return false; 984 } 985 986 return true; 987 } 988 EXPORT_SYMBOL_GPL(kvm_valid_efer); 989 990 static int set_efer(struct kvm_vcpu *vcpu, u64 efer) 991 { 992 u64 old_efer = vcpu->arch.efer; 993 994 if (!kvm_valid_efer(vcpu, efer)) 995 return 1; 996 997 if (is_paging(vcpu) 998 && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) 999 return 1; 1000 1001 efer &= ~EFER_LMA; 1002 efer |= vcpu->arch.efer & EFER_LMA; 1003 1004 kvm_x86_ops->set_efer(vcpu, efer); 1005 1006 /* Update reserved bits */ 1007 if ((efer ^ old_efer) & EFER_NX) 1008 kvm_mmu_reset_context(vcpu); 1009 1010 return 0; 1011 } 1012 1013 void kvm_enable_efer_bits(u64 mask) 1014 { 1015 efer_reserved_bits &= ~mask; 1016 } 1017 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits); 1018 1019 /* 1020 * Writes msr value into into the appropriate "register". 1021 * Returns 0 on success, non-0 otherwise. 1022 * Assumes vcpu_load() was already called. 
1023 */ 1024 int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) 1025 { 1026 switch (msr->index) { 1027 case MSR_FS_BASE: 1028 case MSR_GS_BASE: 1029 case MSR_KERNEL_GS_BASE: 1030 case MSR_CSTAR: 1031 case MSR_LSTAR: 1032 if (is_noncanonical_address(msr->data)) 1033 return 1; 1034 break; 1035 case MSR_IA32_SYSENTER_EIP: 1036 case MSR_IA32_SYSENTER_ESP: 1037 /* 1038 * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if 1039 * non-canonical address is written on Intel but not on 1040 * AMD (which ignores the top 32-bits, because it does 1041 * not implement 64-bit SYSENTER). 1042 * 1043 * 64-bit code should hence be able to write a non-canonical 1044 * value on AMD. Making the address canonical ensures that 1045 * vmentry does not fail on Intel after writing a non-canonical 1046 * value, and that something deterministic happens if the guest 1047 * invokes 64-bit SYSENTER. 1048 */ 1049 msr->data = get_canonical(msr->data); 1050 } 1051 return kvm_x86_ops->set_msr(vcpu, msr); 1052 } 1053 EXPORT_SYMBOL_GPL(kvm_set_msr); 1054 1055 /* 1056 * Adapt set_msr() to msr_io()'s calling convention 1057 */ 1058 static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) 1059 { 1060 struct msr_data msr; 1061 int r; 1062 1063 msr.index = index; 1064 msr.host_initiated = true; 1065 r = kvm_get_msr(vcpu, &msr); 1066 if (r) 1067 return r; 1068 1069 *data = msr.data; 1070 return 0; 1071 } 1072 1073 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) 1074 { 1075 struct msr_data msr; 1076 1077 msr.data = *data; 1078 msr.index = index; 1079 msr.host_initiated = true; 1080 return kvm_set_msr(vcpu, &msr); 1081 } 1082 1083 #ifdef CONFIG_X86_64 1084 struct pvclock_gtod_data { 1085 seqcount_t seq; 1086 1087 struct { /* extract of a clocksource struct */ 1088 int vclock_mode; 1089 cycle_t cycle_last; 1090 cycle_t mask; 1091 u32 mult; 1092 u32 shift; 1093 } clock; 1094 1095 u64 boot_ns; 1096 u64 nsec_base; 1097 }; 1098 1099 static struct pvclock_gtod_data pvclock_gtod_data; 1100 1101 static void update_pvclock_gtod(struct timekeeper *tk) 1102 { 1103 struct pvclock_gtod_data *vdata = &pvclock_gtod_data; 1104 u64 boot_ns; 1105 1106 boot_ns = ktime_to_ns(ktime_add(tk->tkr_mono.base, tk->offs_boot)); 1107 1108 write_seqcount_begin(&vdata->seq); 1109 1110 /* copy pvclock gtod data */ 1111 vdata->clock.vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode; 1112 vdata->clock.cycle_last = tk->tkr_mono.cycle_last; 1113 vdata->clock.mask = tk->tkr_mono.mask; 1114 vdata->clock.mult = tk->tkr_mono.mult; 1115 vdata->clock.shift = tk->tkr_mono.shift; 1116 1117 vdata->boot_ns = boot_ns; 1118 vdata->nsec_base = tk->tkr_mono.xtime_nsec; 1119 1120 write_seqcount_end(&vdata->seq); 1121 } 1122 #endif 1123 1124 void kvm_set_pending_timer(struct kvm_vcpu *vcpu) 1125 { 1126 /* 1127 * Note: KVM_REQ_PENDING_TIMER is implicitly checked in 1128 * vcpu_enter_guest. This function is only called from 1129 * the physical CPU that is running vcpu. 
1130 */ 1131 kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu); 1132 } 1133 1134 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) 1135 { 1136 int version; 1137 int r; 1138 struct pvclock_wall_clock wc; 1139 struct timespec boot; 1140 1141 if (!wall_clock) 1142 return; 1143 1144 r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version)); 1145 if (r) 1146 return; 1147 1148 if (version & 1) 1149 ++version; /* first time write, random junk */ 1150 1151 ++version; 1152 1153 kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); 1154 1155 /* 1156 * The guest calculates current wall clock time by adding 1157 * system time (updated by kvm_guest_time_update below) to the 1158 * wall clock specified here. guest system time equals host 1159 * system time for us, thus we must fill in host boot time here. 1160 */ 1161 getboottime(&boot); 1162 1163 if (kvm->arch.kvmclock_offset) { 1164 struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset); 1165 boot = timespec_sub(boot, ts); 1166 } 1167 wc.sec = boot.tv_sec; 1168 wc.nsec = boot.tv_nsec; 1169 wc.version = version; 1170 1171 kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc)); 1172 1173 version++; 1174 kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); 1175 } 1176 1177 static uint32_t div_frac(uint32_t dividend, uint32_t divisor) 1178 { 1179 uint32_t quotient, remainder; 1180 1181 /* Don't try to replace with do_div(), this one calculates 1182 * "(dividend << 32) / divisor" */ 1183 __asm__ ( "divl %4" 1184 : "=a" (quotient), "=d" (remainder) 1185 : "0" (0), "1" (dividend), "r" (divisor) ); 1186 return quotient; 1187 } 1188 1189 static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz, 1190 s8 *pshift, u32 *pmultiplier) 1191 { 1192 uint64_t scaled64; 1193 int32_t shift = 0; 1194 uint64_t tps64; 1195 uint32_t tps32; 1196 1197 tps64 = base_khz * 1000LL; 1198 scaled64 = scaled_khz * 1000LL; 1199 while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) { 1200 tps64 >>= 1; 1201 shift--; 1202 } 1203 1204 tps32 = (uint32_t)tps64; 1205 while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) { 1206 if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000) 1207 scaled64 >>= 1; 1208 else 1209 tps32 <<= 1; 1210 shift++; 1211 } 1212 1213 *pshift = shift; 1214 *pmultiplier = div_frac(scaled64, tps32); 1215 1216 pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n", 1217 __func__, base_khz, scaled_khz, shift, *pmultiplier); 1218 } 1219 1220 static inline u64 get_kernel_ns(void) 1221 { 1222 return ktime_get_boot_ns(); 1223 } 1224 1225 #ifdef CONFIG_X86_64 1226 static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0); 1227 #endif 1228 1229 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz); 1230 static unsigned long max_tsc_khz; 1231 1232 static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec) 1233 { 1234 return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult, 1235 vcpu->arch.virtual_tsc_shift); 1236 } 1237 1238 static u32 adjust_tsc_khz(u32 khz, s32 ppm) 1239 { 1240 u64 v = (u64)khz * (1000000 + ppm); 1241 do_div(v, 1000000); 1242 return v; 1243 } 1244 1245 static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz) 1246 { 1247 u32 thresh_lo, thresh_hi; 1248 int use_scaling = 0; 1249 1250 /* tsc_khz can be zero if TSC calibration fails */ 1251 if (this_tsc_khz == 0) 1252 return; 1253 1254 /* Compute a scale to convert nanoseconds in TSC cycles */ 1255 kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000, 1256 &vcpu->arch.virtual_tsc_shift, 1257 &vcpu->arch.virtual_tsc_mult); 1258 
vcpu->arch.virtual_tsc_khz = this_tsc_khz; 1259 1260 /* 1261 * Compute the variation in TSC rate which is acceptable 1262 * within the range of tolerance and decide if the 1263 * rate being applied is within that bounds of the hardware 1264 * rate. If so, no scaling or compensation need be done. 1265 */ 1266 thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm); 1267 thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm); 1268 if (this_tsc_khz < thresh_lo || this_tsc_khz > thresh_hi) { 1269 pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi); 1270 use_scaling = 1; 1271 } 1272 kvm_x86_ops->set_tsc_khz(vcpu, this_tsc_khz, use_scaling); 1273 } 1274 1275 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns) 1276 { 1277 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, 1278 vcpu->arch.virtual_tsc_mult, 1279 vcpu->arch.virtual_tsc_shift); 1280 tsc += vcpu->arch.this_tsc_write; 1281 return tsc; 1282 } 1283 1284 static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) 1285 { 1286 #ifdef CONFIG_X86_64 1287 bool vcpus_matched; 1288 struct kvm_arch *ka = &vcpu->kvm->arch; 1289 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 1290 1291 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == 1292 atomic_read(&vcpu->kvm->online_vcpus)); 1293 1294 /* 1295 * Once the masterclock is enabled, always perform request in 1296 * order to update it. 1297 * 1298 * In order to enable masterclock, the host clocksource must be TSC 1299 * and the vcpus need to have matched TSCs. When that happens, 1300 * perform request to enable masterclock. 1301 */ 1302 if (ka->use_master_clock || 1303 (gtod->clock.vclock_mode == VCLOCK_TSC && vcpus_matched)) 1304 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); 1305 1306 trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, 1307 atomic_read(&vcpu->kvm->online_vcpus), 1308 ka->use_master_clock, gtod->clock.vclock_mode); 1309 #endif 1310 } 1311 1312 static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset) 1313 { 1314 u64 curr_offset = kvm_x86_ops->read_tsc_offset(vcpu); 1315 vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset; 1316 } 1317 1318 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr) 1319 { 1320 struct kvm *kvm = vcpu->kvm; 1321 u64 offset, ns, elapsed; 1322 unsigned long flags; 1323 s64 usdiff; 1324 bool matched; 1325 bool already_matched; 1326 u64 data = msr->data; 1327 1328 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); 1329 offset = kvm_x86_ops->compute_tsc_offset(vcpu, data); 1330 ns = get_kernel_ns(); 1331 elapsed = ns - kvm->arch.last_tsc_nsec; 1332 1333 if (vcpu->arch.virtual_tsc_khz) { 1334 int faulted = 0; 1335 1336 /* n.b - signed multiplication and division required */ 1337 usdiff = data - kvm->arch.last_tsc_write; 1338 #ifdef CONFIG_X86_64 1339 usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz; 1340 #else 1341 /* do_div() only does unsigned */ 1342 asm("1: idivl %[divisor]\n" 1343 "2: xor %%edx, %%edx\n" 1344 " movl $0, %[faulted]\n" 1345 "3:\n" 1346 ".section .fixup,\"ax\"\n" 1347 "4: movl $1, %[faulted]\n" 1348 " jmp 3b\n" 1349 ".previous\n" 1350 1351 _ASM_EXTABLE(1b, 4b) 1352 1353 : "=A"(usdiff), [faulted] "=r" (faulted) 1354 : "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz)); 1355 1356 #endif 1357 do_div(elapsed, 1000); 1358 usdiff -= elapsed; 1359 if (usdiff < 0) 1360 usdiff = -usdiff; 1361 1362 /* idivl overflow => difference is larger than USEC_PER_SEC */ 1363 if (faulted) 1364 usdiff = 
USEC_PER_SEC; 1365 } else 1366 usdiff = USEC_PER_SEC; /* disable TSC match window below */ 1367 1368 /* 1369 * Special case: TSC write with a small delta (1 second) of virtual 1370 * cycle time against real time is interpreted as an attempt to 1371 * synchronize the CPU. 1372 * 1373 * For a reliable TSC, we can match TSC offsets, and for an unstable 1374 * TSC, we add elapsed time in this computation. We could let the 1375 * compensation code attempt to catch up if we fall behind, but 1376 * it's better to try to match offsets from the beginning. 1377 */ 1378 if (usdiff < USEC_PER_SEC && 1379 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { 1380 if (!check_tsc_unstable()) { 1381 offset = kvm->arch.cur_tsc_offset; 1382 pr_debug("kvm: matched tsc offset for %llu\n", data); 1383 } else { 1384 u64 delta = nsec_to_cycles(vcpu, elapsed); 1385 data += delta; 1386 offset = kvm_x86_ops->compute_tsc_offset(vcpu, data); 1387 pr_debug("kvm: adjusted tsc offset by %llu\n", delta); 1388 } 1389 matched = true; 1390 already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation); 1391 } else { 1392 /* 1393 * We split periods of matched TSC writes into generations. 1394 * For each generation, we track the original measured 1395 * nanosecond time, offset, and write, so if TSCs are in 1396 * sync, we can match exact offset, and if not, we can match 1397 * exact software computation in compute_guest_tsc() 1398 * 1399 * These values are tracked in kvm->arch.cur_xxx variables. 1400 */ 1401 kvm->arch.cur_tsc_generation++; 1402 kvm->arch.cur_tsc_nsec = ns; 1403 kvm->arch.cur_tsc_write = data; 1404 kvm->arch.cur_tsc_offset = offset; 1405 matched = false; 1406 pr_debug("kvm: new tsc generation %llu, clock %llu\n", 1407 kvm->arch.cur_tsc_generation, data); 1408 } 1409 1410 /* 1411 * We also track th most recent recorded KHZ, write and time to 1412 * allow the matching interval to be extended at each write. 1413 */ 1414 kvm->arch.last_tsc_nsec = ns; 1415 kvm->arch.last_tsc_write = data; 1416 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; 1417 1418 vcpu->arch.last_guest_tsc = data; 1419 1420 /* Keep track of which generation this VCPU has synchronized to */ 1421 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; 1422 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; 1423 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; 1424 1425 if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated) 1426 update_ia32_tsc_adjust_msr(vcpu, offset); 1427 kvm_x86_ops->write_tsc_offset(vcpu, offset); 1428 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); 1429 1430 spin_lock(&kvm->arch.pvclock_gtod_sync_lock); 1431 if (!matched) { 1432 kvm->arch.nr_vcpus_matched_tsc = 0; 1433 } else if (!already_matched) { 1434 kvm->arch.nr_vcpus_matched_tsc++; 1435 } 1436 1437 kvm_track_tsc_matching(vcpu); 1438 spin_unlock(&kvm->arch.pvclock_gtod_sync_lock); 1439 } 1440 1441 EXPORT_SYMBOL_GPL(kvm_write_tsc); 1442 1443 #ifdef CONFIG_X86_64 1444 1445 static cycle_t read_tsc(void) 1446 { 1447 cycle_t ret; 1448 u64 last; 1449 1450 /* 1451 * Empirically, a fence (of type that depends on the CPU) 1452 * before rdtsc is enough to ensure that rdtsc is ordered 1453 * with respect to loads. The various CPU manuals are unclear 1454 * as to whether rdtsc can be reordered with later loads, 1455 * but no one has ever seen it happen. 
	 */
	rdtsc_barrier();
	ret = (cycle_t)vget_cycles();

	last = pvclock_gtod_data.clock.cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead.  I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");
	return last;
}

static inline u64 vgettsc(cycle_t *cycle_now)
{
	long v;
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;

	*cycle_now = read_tsc();

	v = (*cycle_now - gtod->clock.cycle_last) & gtod->clock.mask;
	return v * gtod->clock.mult;
}

static int do_monotonic_boot(s64 *t, cycle_t *cycle_now)
{
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
	unsigned long seq;
	int mode;
	u64 ns;

	do {
		seq = read_seqcount_begin(&gtod->seq);
		mode = gtod->clock.vclock_mode;
		ns = gtod->nsec_base;
		ns += vgettsc(cycle_now);
		ns >>= gtod->clock.shift;
		ns += gtod->boot_ns;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
	*t = ns;

	return mode;
}

/* returns true if host is using tsc clocksource */
static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now)
{
	/* checked again under seqlock below */
	if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
		return false;

	return do_monotonic_boot(kernel_ns, cycle_now) == VCLOCK_TSC;
}
#endif

/*
 *
 * Assuming a stable TSC across physical CPUs, and a stable TSC
 * across virtual CPUs, the following condition is possible.
 * Each numbered line represents an event visible to both
 * CPUs at the next numbered event.
 *
 * "timespecX" represents host monotonic time. "tscX" represents
 * RDTSC value.
 *
 *		VCPU0 on CPU0			|	VCPU1 on CPU1
 *
 * 1.  read timespec0,tsc0
 * 2.						| timespec1 = timespec0 + N
 *						| tsc1 = tsc0 + M
 * 3. transition to guest			| transition to guest
 * 4. ret0 = timespec0 + (rdtsc - tsc0)		|
 * 5.						| ret1 = timespec1 + (rdtsc - tsc1)
 *						| ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
 *
 * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
 *
 *	- ret0 < ret1
 *	- timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
 *		...
 *	- 0 < N - M => M < N
 *
 * That is, when timespec0 != timespec1, M < N. Unfortunately that is not
 * always the case (the difference between two distinct xtime instances
 * might be smaller than the difference between corresponding TSC reads,
 * when updating guest vcpus pvclock areas).
 *
 * To avoid that problem, do not allow visibility of distinct
 * system_timestamp/tsc_timestamp values simultaneously: use a master
 * copy of host monotonic time values. Update that master copy
 * in lockstep.
 *
 * Rely on synchronization of host TSCs and guest TSCs for monotonicity.
1557 * 1558 */ 1559 1560 static void pvclock_update_vm_gtod_copy(struct kvm *kvm) 1561 { 1562 #ifdef CONFIG_X86_64 1563 struct kvm_arch *ka = &kvm->arch; 1564 int vclock_mode; 1565 bool host_tsc_clocksource, vcpus_matched; 1566 1567 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == 1568 atomic_read(&kvm->online_vcpus)); 1569 1570 /* 1571 * If the host uses TSC clock, then passthrough TSC as stable 1572 * to the guest. 1573 */ 1574 host_tsc_clocksource = kvm_get_time_and_clockread( 1575 &ka->master_kernel_ns, 1576 &ka->master_cycle_now); 1577 1578 ka->use_master_clock = host_tsc_clocksource && vcpus_matched 1579 && !backwards_tsc_observed 1580 && !ka->boot_vcpu_runs_old_kvmclock; 1581 1582 if (ka->use_master_clock) 1583 atomic_set(&kvm_guest_has_master_clock, 1); 1584 1585 vclock_mode = pvclock_gtod_data.clock.vclock_mode; 1586 trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode, 1587 vcpus_matched); 1588 #endif 1589 } 1590 1591 static void kvm_gen_update_masterclock(struct kvm *kvm) 1592 { 1593 #ifdef CONFIG_X86_64 1594 int i; 1595 struct kvm_vcpu *vcpu; 1596 struct kvm_arch *ka = &kvm->arch; 1597 1598 spin_lock(&ka->pvclock_gtod_sync_lock); 1599 kvm_make_mclock_inprogress_request(kvm); 1600 /* no guest entries from this point */ 1601 pvclock_update_vm_gtod_copy(kvm); 1602 1603 kvm_for_each_vcpu(i, vcpu, kvm) 1604 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 1605 1606 /* guest entries allowed */ 1607 kvm_for_each_vcpu(i, vcpu, kvm) 1608 clear_bit(KVM_REQ_MCLOCK_INPROGRESS, &vcpu->requests); 1609 1610 spin_unlock(&ka->pvclock_gtod_sync_lock); 1611 #endif 1612 } 1613 1614 static int kvm_guest_time_update(struct kvm_vcpu *v) 1615 { 1616 unsigned long flags, this_tsc_khz; 1617 struct kvm_vcpu_arch *vcpu = &v->arch; 1618 struct kvm_arch *ka = &v->kvm->arch; 1619 s64 kernel_ns; 1620 u64 tsc_timestamp, host_tsc; 1621 struct pvclock_vcpu_time_info guest_hv_clock; 1622 u8 pvclock_flags; 1623 bool use_master_clock; 1624 1625 kernel_ns = 0; 1626 host_tsc = 0; 1627 1628 /* 1629 * If the host uses TSC clock, then passthrough TSC as stable 1630 * to the guest. 1631 */ 1632 spin_lock(&ka->pvclock_gtod_sync_lock); 1633 use_master_clock = ka->use_master_clock; 1634 if (use_master_clock) { 1635 host_tsc = ka->master_cycle_now; 1636 kernel_ns = ka->master_kernel_ns; 1637 } 1638 spin_unlock(&ka->pvclock_gtod_sync_lock); 1639 1640 /* Keep irq disabled to prevent changes to the clock */ 1641 local_irq_save(flags); 1642 this_tsc_khz = __this_cpu_read(cpu_tsc_khz); 1643 if (unlikely(this_tsc_khz == 0)) { 1644 local_irq_restore(flags); 1645 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); 1646 return 1; 1647 } 1648 if (!use_master_clock) { 1649 host_tsc = native_read_tsc(); 1650 kernel_ns = get_kernel_ns(); 1651 } 1652 1653 tsc_timestamp = kvm_x86_ops->read_l1_tsc(v, host_tsc); 1654 1655 /* 1656 * We may have to catch up the TSC to match elapsed wall clock 1657 * time for two reasons, even if kvmclock is used. 1658 * 1) CPU could have been running below the maximum TSC rate 1659 * 2) Broken TSC compensation resets the base at each VCPU 1660 * entry to avoid unknown leaps of TSC even when running 1661 * again on the same CPU. This may cause apparent elapsed 1662 * time to disappear, and the guest to stand still or run 1663 * very slowly. 
1664 */ 1665 if (vcpu->tsc_catchup) { 1666 u64 tsc = compute_guest_tsc(v, kernel_ns); 1667 if (tsc > tsc_timestamp) { 1668 adjust_tsc_offset_guest(v, tsc - tsc_timestamp); 1669 tsc_timestamp = tsc; 1670 } 1671 } 1672 1673 local_irq_restore(flags); 1674 1675 if (!vcpu->pv_time_enabled) 1676 return 0; 1677 1678 if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) { 1679 kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz, 1680 &vcpu->hv_clock.tsc_shift, 1681 &vcpu->hv_clock.tsc_to_system_mul); 1682 vcpu->hw_tsc_khz = this_tsc_khz; 1683 } 1684 1685 /* With all the info we got, fill in the values */ 1686 vcpu->hv_clock.tsc_timestamp = tsc_timestamp; 1687 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; 1688 vcpu->last_guest_tsc = tsc_timestamp; 1689 1690 if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time, 1691 &guest_hv_clock, sizeof(guest_hv_clock)))) 1692 return 0; 1693 1694 /* This VCPU is paused, but it's legal for a guest to read another 1695 * VCPU's kvmclock, so we really have to follow the specification where 1696 * it says that version is odd if data is being modified, and even after 1697 * it is consistent. 1698 * 1699 * Version field updates must be kept separate. This is because 1700 * kvm_write_guest_cached might use a "rep movs" instruction, and 1701 * writes within a string instruction are weakly ordered. So there 1702 * are three writes overall. 1703 * 1704 * As a small optimization, only write the version field in the first 1705 * and third write. The vcpu->pv_time cache is still valid, because the 1706 * version field is the first in the struct. 1707 */ 1708 BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0); 1709 1710 vcpu->hv_clock.version = guest_hv_clock.version + 1; 1711 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, 1712 &vcpu->hv_clock, 1713 sizeof(vcpu->hv_clock.version)); 1714 1715 smp_wmb(); 1716 1717 /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ 1718 pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED); 1719 1720 if (vcpu->pvclock_set_guest_stopped_request) { 1721 pvclock_flags |= PVCLOCK_GUEST_STOPPED; 1722 vcpu->pvclock_set_guest_stopped_request = false; 1723 } 1724 1725 pvclock_flags |= PVCLOCK_COUNTS_FROM_ZERO; 1726 1727 /* If the host uses TSC clocksource, then it is stable */ 1728 if (use_master_clock) 1729 pvclock_flags |= PVCLOCK_TSC_STABLE_BIT; 1730 1731 vcpu->hv_clock.flags = pvclock_flags; 1732 1733 trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock); 1734 1735 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, 1736 &vcpu->hv_clock, 1737 sizeof(vcpu->hv_clock)); 1738 1739 smp_wmb(); 1740 1741 vcpu->hv_clock.version++; 1742 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, 1743 &vcpu->hv_clock, 1744 sizeof(vcpu->hv_clock.version)); 1745 return 0; 1746 } 1747 1748 /* 1749 * kvmclock updates which are isolated to a given vcpu, such as 1750 * vcpu->cpu migration, should not allow system_timestamp from 1751 * the rest of the vcpus to remain static. Otherwise ntp frequency 1752 * correction applies to one vcpu's system_timestamp but not 1753 * the others. 1754 * 1755 * So in those cases, request a kvmclock update for all vcpus. 1756 * We need to rate-limit these requests though, as they can 1757 * considerably slow guests that have a large number of vcpus. 1758 * The time for a remote vcpu to update its kvmclock is bound 1759 * by the delay we use to rate-limit the updates. 
1760 */ 1761 1762 #define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100) 1763 1764 static void kvmclock_update_fn(struct work_struct *work) 1765 { 1766 int i; 1767 struct delayed_work *dwork = to_delayed_work(work); 1768 struct kvm_arch *ka = container_of(dwork, struct kvm_arch, 1769 kvmclock_update_work); 1770 struct kvm *kvm = container_of(ka, struct kvm, arch); 1771 struct kvm_vcpu *vcpu; 1772 1773 kvm_for_each_vcpu(i, vcpu, kvm) { 1774 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 1775 kvm_vcpu_kick(vcpu); 1776 } 1777 } 1778 1779 static void kvm_gen_kvmclock_update(struct kvm_vcpu *v) 1780 { 1781 struct kvm *kvm = v->kvm; 1782 1783 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); 1784 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 1785 KVMCLOCK_UPDATE_DELAY); 1786 } 1787 1788 #define KVMCLOCK_SYNC_PERIOD (300 * HZ) 1789 1790 static void kvmclock_sync_fn(struct work_struct *work) 1791 { 1792 struct delayed_work *dwork = to_delayed_work(work); 1793 struct kvm_arch *ka = container_of(dwork, struct kvm_arch, 1794 kvmclock_sync_work); 1795 struct kvm *kvm = container_of(ka, struct kvm, arch); 1796 1797 if (!kvmclock_periodic_sync) 1798 return; 1799 1800 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0); 1801 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, 1802 KVMCLOCK_SYNC_PERIOD); 1803 } 1804 1805 static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data) 1806 { 1807 u64 mcg_cap = vcpu->arch.mcg_cap; 1808 unsigned bank_num = mcg_cap & 0xff; 1809 1810 switch (msr) { 1811 case MSR_IA32_MCG_STATUS: 1812 vcpu->arch.mcg_status = data; 1813 break; 1814 case MSR_IA32_MCG_CTL: 1815 if (!(mcg_cap & MCG_CTL_P)) 1816 return 1; 1817 if (data != 0 && data != ~(u64)0) 1818 return -1; 1819 vcpu->arch.mcg_ctl = data; 1820 break; 1821 default: 1822 if (msr >= MSR_IA32_MC0_CTL && 1823 msr < MSR_IA32_MCx_CTL(bank_num)) { 1824 u32 offset = msr - MSR_IA32_MC0_CTL; 1825 /* only 0 or all 1s can be written to IA32_MCi_CTL 1826 * some Linux kernels though clear bit 10 in bank 4 to 1827 * workaround a BIOS/GART TBL issue on AMD K8s, ignore 1828 * this to avoid an uncatched #GP in the guest 1829 */ 1830 if ((offset & 0x3) == 0 && 1831 data != 0 && (data | (1 << 10)) != ~(u64)0) 1832 return -1; 1833 vcpu->arch.mce_banks[offset] = data; 1834 break; 1835 } 1836 return 1; 1837 } 1838 return 0; 1839 } 1840 1841 static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) 1842 { 1843 struct kvm *kvm = vcpu->kvm; 1844 int lm = is_long_mode(vcpu); 1845 u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64 1846 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32; 1847 u8 blob_size = lm ? 
kvm->arch.xen_hvm_config.blob_size_64 1848 : kvm->arch.xen_hvm_config.blob_size_32; 1849 u32 page_num = data & ~PAGE_MASK; 1850 u64 page_addr = data & PAGE_MASK; 1851 u8 *page; 1852 int r; 1853 1854 r = -E2BIG; 1855 if (page_num >= blob_size) 1856 goto out; 1857 r = -ENOMEM; 1858 page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE); 1859 if (IS_ERR(page)) { 1860 r = PTR_ERR(page); 1861 goto out; 1862 } 1863 if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) 1864 goto out_free; 1865 r = 0; 1866 out_free: 1867 kfree(page); 1868 out: 1869 return r; 1870 } 1871 1872 static bool kvm_hv_hypercall_enabled(struct kvm *kvm) 1873 { 1874 return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE; 1875 } 1876 1877 static bool kvm_hv_msr_partition_wide(u32 msr) 1878 { 1879 bool r = false; 1880 switch (msr) { 1881 case HV_X64_MSR_GUEST_OS_ID: 1882 case HV_X64_MSR_HYPERCALL: 1883 case HV_X64_MSR_REFERENCE_TSC: 1884 case HV_X64_MSR_TIME_REF_COUNT: 1885 r = true; 1886 break; 1887 } 1888 1889 return r; 1890 } 1891 1892 static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data) 1893 { 1894 struct kvm *kvm = vcpu->kvm; 1895 1896 switch (msr) { 1897 case HV_X64_MSR_GUEST_OS_ID: 1898 kvm->arch.hv_guest_os_id = data; 1899 /* setting guest os id to zero disables hypercall page */ 1900 if (!kvm->arch.hv_guest_os_id) 1901 kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE; 1902 break; 1903 case HV_X64_MSR_HYPERCALL: { 1904 u64 gfn; 1905 unsigned long addr; 1906 u8 instructions[4]; 1907 1908 /* if guest os id is not set hypercall should remain disabled */ 1909 if (!kvm->arch.hv_guest_os_id) 1910 break; 1911 if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) { 1912 kvm->arch.hv_hypercall = data; 1913 break; 1914 } 1915 gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT; 1916 addr = gfn_to_hva(kvm, gfn); 1917 if (kvm_is_error_hva(addr)) 1918 return 1; 1919 kvm_x86_ops->patch_hypercall(vcpu, instructions); 1920 ((unsigned char *)instructions)[3] = 0xc3; /* ret */ 1921 if (__copy_to_user((void __user *)addr, instructions, 4)) 1922 return 1; 1923 kvm->arch.hv_hypercall = data; 1924 mark_page_dirty(kvm, gfn); 1925 break; 1926 } 1927 case HV_X64_MSR_REFERENCE_TSC: { 1928 u64 gfn; 1929 HV_REFERENCE_TSC_PAGE tsc_ref; 1930 memset(&tsc_ref, 0, sizeof(tsc_ref)); 1931 kvm->arch.hv_tsc_page = data; 1932 if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE)) 1933 break; 1934 gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT; 1935 if (kvm_write_guest(kvm, gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT, 1936 &tsc_ref, sizeof(tsc_ref))) 1937 return 1; 1938 mark_page_dirty(kvm, gfn); 1939 break; 1940 } 1941 default: 1942 vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " 1943 "data 0x%llx\n", msr, data); 1944 return 1; 1945 } 1946 return 0; 1947 } 1948 1949 static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data) 1950 { 1951 switch (msr) { 1952 case HV_X64_MSR_APIC_ASSIST_PAGE: { 1953 u64 gfn; 1954 unsigned long addr; 1955 1956 if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) { 1957 vcpu->arch.hv_vapic = data; 1958 if (kvm_lapic_enable_pv_eoi(vcpu, 0)) 1959 return 1; 1960 break; 1961 } 1962 gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT; 1963 addr = kvm_vcpu_gfn_to_hva(vcpu, gfn); 1964 if (kvm_is_error_hva(addr)) 1965 return 1; 1966 if (__clear_user((void __user *)addr, PAGE_SIZE)) 1967 return 1; 1968 vcpu->arch.hv_vapic = data; 1969 kvm_vcpu_mark_page_dirty(vcpu, gfn); 1970 if (kvm_lapic_enable_pv_eoi(vcpu, gfn_to_gpa(gfn) | KVM_MSR_ENABLED)) 1971 return 1; 1972 break; 1973 } 1974 
case HV_X64_MSR_EOI: 1975 return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data); 1976 case HV_X64_MSR_ICR: 1977 return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data); 1978 case HV_X64_MSR_TPR: 1979 return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data); 1980 default: 1981 vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " 1982 "data 0x%llx\n", msr, data); 1983 return 1; 1984 } 1985 1986 return 0; 1987 } 1988 1989 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) 1990 { 1991 gpa_t gpa = data & ~0x3f; 1992 1993 /* Bits 2:5 are reserved, Should be zero */ 1994 if (data & 0x3c) 1995 return 1; 1996 1997 vcpu->arch.apf.msr_val = data; 1998 1999 if (!(data & KVM_ASYNC_PF_ENABLED)) { 2000 kvm_clear_async_pf_completion_queue(vcpu); 2001 kvm_async_pf_hash_reset(vcpu); 2002 return 0; 2003 } 2004 2005 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, 2006 sizeof(u32))) 2007 return 1; 2008 2009 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); 2010 kvm_async_pf_wakeup_all(vcpu); 2011 return 0; 2012 } 2013 2014 static void kvmclock_reset(struct kvm_vcpu *vcpu) 2015 { 2016 vcpu->arch.pv_time_enabled = false; 2017 } 2018 2019 static void accumulate_steal_time(struct kvm_vcpu *vcpu) 2020 { 2021 u64 delta; 2022 2023 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) 2024 return; 2025 2026 delta = current->sched_info.run_delay - vcpu->arch.st.last_steal; 2027 vcpu->arch.st.last_steal = current->sched_info.run_delay; 2028 vcpu->arch.st.accum_steal = delta; 2029 } 2030 2031 static void record_steal_time(struct kvm_vcpu *vcpu) 2032 { 2033 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) 2034 return; 2035 2036 if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, 2037 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)))) 2038 return; 2039 2040 vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal; 2041 vcpu->arch.st.steal.version += 2; 2042 vcpu->arch.st.accum_steal = 0; 2043 2044 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, 2045 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)); 2046 } 2047 2048 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 2049 { 2050 bool pr = false; 2051 u32 msr = msr_info->index; 2052 u64 data = msr_info->data; 2053 2054 switch (msr) { 2055 case MSR_AMD64_NB_CFG: 2056 case MSR_IA32_UCODE_REV: 2057 case MSR_IA32_UCODE_WRITE: 2058 case MSR_VM_HSAVE_PA: 2059 case MSR_AMD64_PATCH_LOADER: 2060 case MSR_AMD64_BU_CFG2: 2061 break; 2062 2063 case MSR_EFER: 2064 return set_efer(vcpu, data); 2065 case MSR_K7_HWCR: 2066 data &= ~(u64)0x40; /* ignore flush filter disable */ 2067 data &= ~(u64)0x100; /* ignore ignne emulation enable */ 2068 data &= ~(u64)0x8; /* ignore TLB cache disable */ 2069 data &= ~(u64)0x40000; /* ignore Mc status write enable */ 2070 if (data != 0) { 2071 vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n", 2072 data); 2073 return 1; 2074 } 2075 break; 2076 case MSR_FAM10H_MMIO_CONF_BASE: 2077 if (data != 0) { 2078 vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: " 2079 "0x%llx\n", data); 2080 return 1; 2081 } 2082 break; 2083 case MSR_IA32_DEBUGCTLMSR: 2084 if (!data) { 2085 /* We support the non-activated case already */ 2086 break; 2087 } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) { 2088 /* Values other than LBR and BTF are vendor-specific, 2089 thus reserved and should throw a #GP */ 2090 return 1; 2091 } 2092 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n", 2093 __func__, data); 2094 break; 2095 case 0x200 ... 
0x2ff: 2096 return kvm_mtrr_set_msr(vcpu, msr, data); 2097 case MSR_IA32_APICBASE: 2098 return kvm_set_apic_base(vcpu, msr_info); 2099 case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: 2100 return kvm_x2apic_msr_write(vcpu, msr, data); 2101 case MSR_IA32_TSCDEADLINE: 2102 kvm_set_lapic_tscdeadline_msr(vcpu, data); 2103 break; 2104 case MSR_IA32_TSC_ADJUST: 2105 if (guest_cpuid_has_tsc_adjust(vcpu)) { 2106 if (!msr_info->host_initiated) { 2107 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; 2108 kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true); 2109 } 2110 vcpu->arch.ia32_tsc_adjust_msr = data; 2111 } 2112 break; 2113 case MSR_IA32_MISC_ENABLE: 2114 vcpu->arch.ia32_misc_enable_msr = data; 2115 break; 2116 case MSR_IA32_SMBASE: 2117 if (!msr_info->host_initiated) 2118 return 1; 2119 vcpu->arch.smbase = data; 2120 break; 2121 case MSR_KVM_WALL_CLOCK_NEW: 2122 case MSR_KVM_WALL_CLOCK: 2123 vcpu->kvm->arch.wall_clock = data; 2124 kvm_write_wall_clock(vcpu->kvm, data); 2125 break; 2126 case MSR_KVM_SYSTEM_TIME_NEW: 2127 case MSR_KVM_SYSTEM_TIME: { 2128 u64 gpa_offset; 2129 struct kvm_arch *ka = &vcpu->kvm->arch; 2130 2131 kvmclock_reset(vcpu); 2132 2133 if (vcpu->vcpu_id == 0 && !msr_info->host_initiated) { 2134 bool tmp = (msr == MSR_KVM_SYSTEM_TIME); 2135 2136 if (ka->boot_vcpu_runs_old_kvmclock != tmp) 2137 set_bit(KVM_REQ_MASTERCLOCK_UPDATE, 2138 &vcpu->requests); 2139 2140 ka->boot_vcpu_runs_old_kvmclock = tmp; 2141 2142 ka->kvmclock_offset = -get_kernel_ns(); 2143 } 2144 2145 vcpu->arch.time = data; 2146 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); 2147 2148 /* we verify if the enable bit is set... */ 2149 if (!(data & 1)) 2150 break; 2151 2152 gpa_offset = data & ~(PAGE_MASK | 1); 2153 2154 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, 2155 &vcpu->arch.pv_time, data & ~1ULL, 2156 sizeof(struct pvclock_vcpu_time_info))) 2157 vcpu->arch.pv_time_enabled = false; 2158 else 2159 vcpu->arch.pv_time_enabled = true; 2160 2161 break; 2162 } 2163 case MSR_KVM_ASYNC_PF_EN: 2164 if (kvm_pv_enable_async_pf(vcpu, data)) 2165 return 1; 2166 break; 2167 case MSR_KVM_STEAL_TIME: 2168 2169 if (unlikely(!sched_info_on())) 2170 return 1; 2171 2172 if (data & KVM_STEAL_RESERVED_MASK) 2173 return 1; 2174 2175 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, 2176 data & KVM_STEAL_VALID_BITS, 2177 sizeof(struct kvm_steal_time))) 2178 return 1; 2179 2180 vcpu->arch.st.msr_val = data; 2181 2182 if (!(data & KVM_MSR_ENABLED)) 2183 break; 2184 2185 vcpu->arch.st.last_steal = current->sched_info.run_delay; 2186 2187 preempt_disable(); 2188 accumulate_steal_time(vcpu); 2189 preempt_enable(); 2190 2191 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); 2192 2193 break; 2194 case MSR_KVM_PV_EOI_EN: 2195 if (kvm_lapic_enable_pv_eoi(vcpu, data)) 2196 return 1; 2197 break; 2198 2199 case MSR_IA32_MCG_CTL: 2200 case MSR_IA32_MCG_STATUS: 2201 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: 2202 return set_msr_mce(vcpu, msr, data); 2203 2204 case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3: 2205 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1: 2206 pr = true; /* fall through */ 2207 case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3: 2208 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1: 2209 if (kvm_pmu_is_valid_msr(vcpu, msr)) 2210 return kvm_pmu_set_msr(vcpu, msr_info); 2211 2212 if (pr || data != 0) 2213 vcpu_unimpl(vcpu, "disabled perfctr wrmsr: " 2214 "0x%x data 0x%llx\n", msr, data); 2215 break; 2216 case MSR_K7_CLK_CTL: 2217 /* 2218 * Ignore all writes to this no longer documented MSR. 
2219 * Writes are only relevant for old K7 processors, 2220 * all pre-dating SVM, but a recommended workaround from 2221 * AMD for these chips. It is possible to specify the 2222 * affected processor models on the command line, hence 2223 * the need to ignore the workaround. 2224 */ 2225 break; 2226 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: 2227 if (kvm_hv_msr_partition_wide(msr)) { 2228 int r; 2229 mutex_lock(&vcpu->kvm->lock); 2230 r = set_msr_hyperv_pw(vcpu, msr, data); 2231 mutex_unlock(&vcpu->kvm->lock); 2232 return r; 2233 } else 2234 return set_msr_hyperv(vcpu, msr, data); 2235 break; 2236 case MSR_IA32_BBL_CR_CTL3: 2237 /* Drop writes to this legacy MSR -- see rdmsr 2238 * counterpart for further detail. 2239 */ 2240 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data); 2241 break; 2242 case MSR_AMD64_OSVW_ID_LENGTH: 2243 if (!guest_cpuid_has_osvw(vcpu)) 2244 return 1; 2245 vcpu->arch.osvw.length = data; 2246 break; 2247 case MSR_AMD64_OSVW_STATUS: 2248 if (!guest_cpuid_has_osvw(vcpu)) 2249 return 1; 2250 vcpu->arch.osvw.status = data; 2251 break; 2252 default: 2253 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr)) 2254 return xen_hvm_config(vcpu, data); 2255 if (kvm_pmu_is_valid_msr(vcpu, msr)) 2256 return kvm_pmu_set_msr(vcpu, msr_info); 2257 if (!ignore_msrs) { 2258 vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", 2259 msr, data); 2260 return 1; 2261 } else { 2262 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", 2263 msr, data); 2264 break; 2265 } 2266 } 2267 return 0; 2268 } 2269 EXPORT_SYMBOL_GPL(kvm_set_msr_common); 2270 2271 2272 /* 2273 * Reads an msr value (of 'msr_index') into 'pdata'. 2274 * Returns 0 on success, non-0 otherwise. 2275 * Assumes vcpu_load() was already called. 2276 */ 2277 int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) 2278 { 2279 return kvm_x86_ops->get_msr(vcpu, msr); 2280 } 2281 EXPORT_SYMBOL_GPL(kvm_get_msr); 2282 2283 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) 2284 { 2285 u64 data; 2286 u64 mcg_cap = vcpu->arch.mcg_cap; 2287 unsigned bank_num = mcg_cap & 0xff; 2288 2289 switch (msr) { 2290 case MSR_IA32_P5_MC_ADDR: 2291 case MSR_IA32_P5_MC_TYPE: 2292 data = 0; 2293 break; 2294 case MSR_IA32_MCG_CAP: 2295 data = vcpu->arch.mcg_cap; 2296 break; 2297 case MSR_IA32_MCG_CTL: 2298 if (!(mcg_cap & MCG_CTL_P)) 2299 return 1; 2300 data = vcpu->arch.mcg_ctl; 2301 break; 2302 case MSR_IA32_MCG_STATUS: 2303 data = vcpu->arch.mcg_status; 2304 break; 2305 default: 2306 if (msr >= MSR_IA32_MC0_CTL && 2307 msr < MSR_IA32_MCx_CTL(bank_num)) { 2308 u32 offset = msr - MSR_IA32_MC0_CTL; 2309 data = vcpu->arch.mce_banks[offset]; 2310 break; 2311 } 2312 return 1; 2313 } 2314 *pdata = data; 2315 return 0; 2316 } 2317 2318 static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) 2319 { 2320 u64 data = 0; 2321 struct kvm *kvm = vcpu->kvm; 2322 2323 switch (msr) { 2324 case HV_X64_MSR_GUEST_OS_ID: 2325 data = kvm->arch.hv_guest_os_id; 2326 break; 2327 case HV_X64_MSR_HYPERCALL: 2328 data = kvm->arch.hv_hypercall; 2329 break; 2330 case HV_X64_MSR_TIME_REF_COUNT: { 2331 data = 2332 div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100); 2333 break; 2334 } 2335 case HV_X64_MSR_REFERENCE_TSC: 2336 data = kvm->arch.hv_tsc_page; 2337 break; 2338 default: 2339 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); 2340 return 1; 2341 } 2342 2343 *pdata = data; 2344 return 0; 2345 } 2346 2347 static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) 2348 { 2349 u64 data = 0; 
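	/*
	 * Per-vCPU Hyper-V MSRs; the partition-wide ones are routed to
	 * get_msr_hyperv_pw() under kvm->lock by kvm_get_msr_common().
	 * HV_X64_MSR_VP_INDEX below is simply the vcpu's position in the
	 * kvm_for_each_vcpu() walk.
	 */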
2350 2351 switch (msr) { 2352 case HV_X64_MSR_VP_INDEX: { 2353 int r; 2354 struct kvm_vcpu *v; 2355 kvm_for_each_vcpu(r, v, vcpu->kvm) { 2356 if (v == vcpu) { 2357 data = r; 2358 break; 2359 } 2360 } 2361 break; 2362 } 2363 case HV_X64_MSR_EOI: 2364 return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata); 2365 case HV_X64_MSR_ICR: 2366 return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata); 2367 case HV_X64_MSR_TPR: 2368 return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata); 2369 case HV_X64_MSR_APIC_ASSIST_PAGE: 2370 data = vcpu->arch.hv_vapic; 2371 break; 2372 default: 2373 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); 2374 return 1; 2375 } 2376 *pdata = data; 2377 return 0; 2378 } 2379 2380 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 2381 { 2382 switch (msr_info->index) { 2383 case MSR_IA32_PLATFORM_ID: 2384 case MSR_IA32_EBL_CR_POWERON: 2385 case MSR_IA32_DEBUGCTLMSR: 2386 case MSR_IA32_LASTBRANCHFROMIP: 2387 case MSR_IA32_LASTBRANCHTOIP: 2388 case MSR_IA32_LASTINTFROMIP: 2389 case MSR_IA32_LASTINTTOIP: 2390 case MSR_K8_SYSCFG: 2391 case MSR_K7_HWCR: 2392 case MSR_VM_HSAVE_PA: 2393 case MSR_K8_INT_PENDING_MSG: 2394 case MSR_AMD64_NB_CFG: 2395 case MSR_FAM10H_MMIO_CONF_BASE: 2396 case MSR_AMD64_BU_CFG2: 2397 msr_info->data = 0; 2398 break; 2399 case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3: 2400 case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3: 2401 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1: 2402 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1: 2403 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) 2404 return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data); 2405 msr_info->data = 0; 2406 break; 2407 case MSR_IA32_UCODE_REV: 2408 msr_info->data = 0x100000000ULL; 2409 break; 2410 case MSR_MTRRcap: 2411 case 0x200 ... 0x2ff: 2412 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); 2413 case 0xcd: /* fsb frequency */ 2414 msr_info->data = 3; 2415 break; 2416 /* 2417 * MSR_EBC_FREQUENCY_ID 2418 * Conservative value valid for even the basic CPU models. 2419 * Models 0,1: 000 in bits 23:21 indicating a bus speed of 2420 * 100MHz, model 2 000 in bits 18:16 indicating 100MHz, 2421 * and 266MHz for model 3, or 4. Set Core Clock 2422 * Frequency to System Bus Frequency Ratio to 1 (bits 2423 * 31:24) even though these are only valid for CPU 2424 * models > 2, however guests may end up dividing or 2425 * multiplying by zero otherwise. 2426 */ 2427 case MSR_EBC_FREQUENCY_ID: 2428 msr_info->data = 1 << 24; 2429 break; 2430 case MSR_IA32_APICBASE: 2431 msr_info->data = kvm_get_apic_base(vcpu); 2432 break; 2433 case APIC_BASE_MSR ... 
APIC_BASE_MSR + 0x3ff: 2434 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data); 2435 break; 2436 case MSR_IA32_TSCDEADLINE: 2437 msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu); 2438 break; 2439 case MSR_IA32_TSC_ADJUST: 2440 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr; 2441 break; 2442 case MSR_IA32_MISC_ENABLE: 2443 msr_info->data = vcpu->arch.ia32_misc_enable_msr; 2444 break; 2445 case MSR_IA32_SMBASE: 2446 if (!msr_info->host_initiated) 2447 return 1; 2448 msr_info->data = vcpu->arch.smbase; 2449 break; 2450 case MSR_IA32_PERF_STATUS: 2451 /* TSC increment by tick */ 2452 msr_info->data = 1000ULL; 2453 /* CPU multiplier */ 2454 msr_info->data |= (((uint64_t)4ULL) << 40); 2455 break; 2456 case MSR_EFER: 2457 msr_info->data = vcpu->arch.efer; 2458 break; 2459 case MSR_KVM_WALL_CLOCK: 2460 case MSR_KVM_WALL_CLOCK_NEW: 2461 msr_info->data = vcpu->kvm->arch.wall_clock; 2462 break; 2463 case MSR_KVM_SYSTEM_TIME: 2464 case MSR_KVM_SYSTEM_TIME_NEW: 2465 msr_info->data = vcpu->arch.time; 2466 break; 2467 case MSR_KVM_ASYNC_PF_EN: 2468 msr_info->data = vcpu->arch.apf.msr_val; 2469 break; 2470 case MSR_KVM_STEAL_TIME: 2471 msr_info->data = vcpu->arch.st.msr_val; 2472 break; 2473 case MSR_KVM_PV_EOI_EN: 2474 msr_info->data = vcpu->arch.pv_eoi.msr_val; 2475 break; 2476 case MSR_IA32_P5_MC_ADDR: 2477 case MSR_IA32_P5_MC_TYPE: 2478 case MSR_IA32_MCG_CAP: 2479 case MSR_IA32_MCG_CTL: 2480 case MSR_IA32_MCG_STATUS: 2481 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: 2482 return get_msr_mce(vcpu, msr_info->index, &msr_info->data); 2483 case MSR_K7_CLK_CTL: 2484 /* 2485 * Provide expected ramp-up count for K7. All other 2486 * are set to zero, indicating minimum divisors for 2487 * every field. 2488 * 2489 * This prevents guest kernels on AMD host with CPU 2490 * type 6, model 8 and higher from exploding due to 2491 * the rdmsr failing. 2492 */ 2493 msr_info->data = 0x20000000; 2494 break; 2495 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: 2496 if (kvm_hv_msr_partition_wide(msr_info->index)) { 2497 int r; 2498 mutex_lock(&vcpu->kvm->lock); 2499 r = get_msr_hyperv_pw(vcpu, msr_info->index, &msr_info->data); 2500 mutex_unlock(&vcpu->kvm->lock); 2501 return r; 2502 } else 2503 return get_msr_hyperv(vcpu, msr_info->index, &msr_info->data); 2504 break; 2505 case MSR_IA32_BBL_CR_CTL3: 2506 /* This legacy MSR exists but isn't fully documented in current 2507 * silicon. It is however accessed by winxp in very narrow 2508 * scenarios where it sets bit #19, itself documented as 2509 * a "reserved" bit. 
Best effort attempt to source coherent 2510 * read data here should the balance of the register be 2511 * interpreted by the guest: 2512 * 2513 * L2 cache control register 3: 64GB range, 256KB size, 2514 * enabled, latency 0x1, configured 2515 */ 2516 msr_info->data = 0xbe702111; 2517 break; 2518 case MSR_AMD64_OSVW_ID_LENGTH: 2519 if (!guest_cpuid_has_osvw(vcpu)) 2520 return 1; 2521 msr_info->data = vcpu->arch.osvw.length; 2522 break; 2523 case MSR_AMD64_OSVW_STATUS: 2524 if (!guest_cpuid_has_osvw(vcpu)) 2525 return 1; 2526 msr_info->data = vcpu->arch.osvw.status; 2527 break; 2528 default: 2529 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) 2530 return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data); 2531 if (!ignore_msrs) { 2532 vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr_info->index); 2533 return 1; 2534 } else { 2535 vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr_info->index); 2536 msr_info->data = 0; 2537 } 2538 break; 2539 } 2540 return 0; 2541 } 2542 EXPORT_SYMBOL_GPL(kvm_get_msr_common); 2543 2544 /* 2545 * Read or write a bunch of msrs. All parameters are kernel addresses. 2546 * 2547 * @return number of msrs set successfully. 2548 */ 2549 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, 2550 struct kvm_msr_entry *entries, 2551 int (*do_msr)(struct kvm_vcpu *vcpu, 2552 unsigned index, u64 *data)) 2553 { 2554 int i, idx; 2555 2556 idx = srcu_read_lock(&vcpu->kvm->srcu); 2557 for (i = 0; i < msrs->nmsrs; ++i) 2558 if (do_msr(vcpu, entries[i].index, &entries[i].data)) 2559 break; 2560 srcu_read_unlock(&vcpu->kvm->srcu, idx); 2561 2562 return i; 2563 } 2564 2565 /* 2566 * Read or write a bunch of msrs. Parameters are user addresses. 2567 * 2568 * @return number of msrs set successfully. 2569 */ 2570 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, 2571 int (*do_msr)(struct kvm_vcpu *vcpu, 2572 unsigned index, u64 *data), 2573 int writeback) 2574 { 2575 struct kvm_msrs msrs; 2576 struct kvm_msr_entry *entries; 2577 int r, n; 2578 unsigned size; 2579 2580 r = -EFAULT; 2581 if (copy_from_user(&msrs, user_msrs, sizeof msrs)) 2582 goto out; 2583 2584 r = -E2BIG; 2585 if (msrs.nmsrs >= MAX_IO_MSRS) 2586 goto out; 2587 2588 size = sizeof(struct kvm_msr_entry) * msrs.nmsrs; 2589 entries = memdup_user(user_msrs->entries, size); 2590 if (IS_ERR(entries)) { 2591 r = PTR_ERR(entries); 2592 goto out; 2593 } 2594 2595 r = n = __msr_io(vcpu, &msrs, entries, do_msr); 2596 if (r < 0) 2597 goto out_free; 2598 2599 r = -EFAULT; 2600 if (writeback && copy_to_user(user_msrs->entries, entries, size)) 2601 goto out_free; 2602 2603 r = n; 2604 2605 out_free: 2606 kfree(entries); 2607 out: 2608 return r; 2609 } 2610 2611 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) 2612 { 2613 int r; 2614 2615 switch (ext) { 2616 case KVM_CAP_IRQCHIP: 2617 case KVM_CAP_HLT: 2618 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL: 2619 case KVM_CAP_SET_TSS_ADDR: 2620 case KVM_CAP_EXT_CPUID: 2621 case KVM_CAP_EXT_EMUL_CPUID: 2622 case KVM_CAP_CLOCKSOURCE: 2623 case KVM_CAP_PIT: 2624 case KVM_CAP_NOP_IO_DELAY: 2625 case KVM_CAP_MP_STATE: 2626 case KVM_CAP_SYNC_MMU: 2627 case KVM_CAP_USER_NMI: 2628 case KVM_CAP_REINJECT_CONTROL: 2629 case KVM_CAP_IRQ_INJECT_STATUS: 2630 case KVM_CAP_IOEVENTFD: 2631 case KVM_CAP_IOEVENTFD_NO_LENGTH: 2632 case KVM_CAP_PIT2: 2633 case KVM_CAP_PIT_STATE2: 2634 case KVM_CAP_SET_IDENTITY_MAP_ADDR: 2635 case KVM_CAP_XEN_HVM: 2636 case KVM_CAP_ADJUST_CLOCK: 2637 case KVM_CAP_VCPU_EVENTS: 2638 case KVM_CAP_HYPERV: 2639 case KVM_CAP_HYPERV_VAPIC: 
2640 case KVM_CAP_HYPERV_SPIN: 2641 case KVM_CAP_PCI_SEGMENT: 2642 case KVM_CAP_DEBUGREGS: 2643 case KVM_CAP_X86_ROBUST_SINGLESTEP: 2644 case KVM_CAP_XSAVE: 2645 case KVM_CAP_ASYNC_PF: 2646 case KVM_CAP_GET_TSC_KHZ: 2647 case KVM_CAP_KVMCLOCK_CTRL: 2648 case KVM_CAP_READONLY_MEM: 2649 case KVM_CAP_HYPERV_TIME: 2650 case KVM_CAP_IOAPIC_POLARITY_IGNORED: 2651 case KVM_CAP_TSC_DEADLINE_TIMER: 2652 case KVM_CAP_ENABLE_CAP_VM: 2653 case KVM_CAP_DISABLE_QUIRKS: 2654 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT 2655 case KVM_CAP_ASSIGN_DEV_IRQ: 2656 case KVM_CAP_PCI_2_3: 2657 #endif 2658 r = 1; 2659 break; 2660 case KVM_CAP_X86_SMM: 2661 /* SMBASE is usually relocated above 1M on modern chipsets, 2662 * and SMM handlers might indeed rely on 4G segment limits, 2663 * so do not report SMM to be available if real mode is 2664 * emulated via vm86 mode. Still, do not go to great lengths 2665 * to avoid userspace's usage of the feature, because it is a 2666 * fringe case that is not enabled except via specific settings 2667 * of the module parameters. 2668 */ 2669 r = kvm_x86_ops->cpu_has_high_real_mode_segbase(); 2670 break; 2671 case KVM_CAP_COALESCED_MMIO: 2672 r = KVM_COALESCED_MMIO_PAGE_OFFSET; 2673 break; 2674 case KVM_CAP_VAPIC: 2675 r = !kvm_x86_ops->cpu_has_accelerated_tpr(); 2676 break; 2677 case KVM_CAP_NR_VCPUS: 2678 r = KVM_SOFT_MAX_VCPUS; 2679 break; 2680 case KVM_CAP_MAX_VCPUS: 2681 r = KVM_MAX_VCPUS; 2682 break; 2683 case KVM_CAP_NR_MEMSLOTS: 2684 r = KVM_USER_MEM_SLOTS; 2685 break; 2686 case KVM_CAP_PV_MMU: /* obsolete */ 2687 r = 0; 2688 break; 2689 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT 2690 case KVM_CAP_IOMMU: 2691 r = iommu_present(&pci_bus_type); 2692 break; 2693 #endif 2694 case KVM_CAP_MCE: 2695 r = KVM_MAX_MCE_BANKS; 2696 break; 2697 case KVM_CAP_XCRS: 2698 r = cpu_has_xsave; 2699 break; 2700 case KVM_CAP_TSC_CONTROL: 2701 r = kvm_has_tsc_control; 2702 break; 2703 default: 2704 r = 0; 2705 break; 2706 } 2707 return r; 2708 2709 } 2710 2711 long kvm_arch_dev_ioctl(struct file *filp, 2712 unsigned int ioctl, unsigned long arg) 2713 { 2714 void __user *argp = (void __user *)arg; 2715 long r; 2716 2717 switch (ioctl) { 2718 case KVM_GET_MSR_INDEX_LIST: { 2719 struct kvm_msr_list __user *user_msr_list = argp; 2720 struct kvm_msr_list msr_list; 2721 unsigned n; 2722 2723 r = -EFAULT; 2724 if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list)) 2725 goto out; 2726 n = msr_list.nmsrs; 2727 msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs; 2728 if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list)) 2729 goto out; 2730 r = -E2BIG; 2731 if (n < msr_list.nmsrs) 2732 goto out; 2733 r = -EFAULT; 2734 if (copy_to_user(user_msr_list->indices, &msrs_to_save, 2735 num_msrs_to_save * sizeof(u32))) 2736 goto out; 2737 if (copy_to_user(user_msr_list->indices + num_msrs_to_save, 2738 &emulated_msrs, 2739 num_emulated_msrs * sizeof(u32))) 2740 goto out; 2741 r = 0; 2742 break; 2743 } 2744 case KVM_GET_SUPPORTED_CPUID: 2745 case KVM_GET_EMULATED_CPUID: { 2746 struct kvm_cpuid2 __user *cpuid_arg = argp; 2747 struct kvm_cpuid2 cpuid; 2748 2749 r = -EFAULT; 2750 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) 2751 goto out; 2752 2753 r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries, 2754 ioctl); 2755 if (r) 2756 goto out; 2757 2758 r = -EFAULT; 2759 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) 2760 goto out; 2761 r = 0; 2762 break; 2763 } 2764 case KVM_X86_GET_MCE_CAP_SUPPORTED: { 2765 u64 mce_cap; 2766 2767 mce_cap = KVM_MCE_CAP_SUPPORTED; 2768 r = -EFAULT; 2769 if 
(copy_to_user(argp, &mce_cap, sizeof mce_cap)) 2770 goto out; 2771 r = 0; 2772 break; 2773 } 2774 default: 2775 r = -EINVAL; 2776 } 2777 out: 2778 return r; 2779 } 2780 2781 static void wbinvd_ipi(void *garbage) 2782 { 2783 wbinvd(); 2784 } 2785 2786 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu) 2787 { 2788 return kvm_arch_has_noncoherent_dma(vcpu->kvm); 2789 } 2790 2791 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 2792 { 2793 /* Address WBINVD may be executed by guest */ 2794 if (need_emulate_wbinvd(vcpu)) { 2795 if (kvm_x86_ops->has_wbinvd_exit()) 2796 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); 2797 else if (vcpu->cpu != -1 && vcpu->cpu != cpu) 2798 smp_call_function_single(vcpu->cpu, 2799 wbinvd_ipi, NULL, 1); 2800 } 2801 2802 kvm_x86_ops->vcpu_load(vcpu, cpu); 2803 2804 /* Apply any externally detected TSC adjustments (due to suspend) */ 2805 if (unlikely(vcpu->arch.tsc_offset_adjustment)) { 2806 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); 2807 vcpu->arch.tsc_offset_adjustment = 0; 2808 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 2809 } 2810 2811 if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) { 2812 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 : 2813 native_read_tsc() - vcpu->arch.last_host_tsc; 2814 if (tsc_delta < 0) 2815 mark_tsc_unstable("KVM discovered backwards TSC"); 2816 if (check_tsc_unstable()) { 2817 u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu, 2818 vcpu->arch.last_guest_tsc); 2819 kvm_x86_ops->write_tsc_offset(vcpu, offset); 2820 vcpu->arch.tsc_catchup = 1; 2821 } 2822 /* 2823 * On a host with synchronized TSC, there is no need to update 2824 * kvmclock on vcpu->cpu migration 2825 */ 2826 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) 2827 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); 2828 if (vcpu->cpu != cpu) 2829 kvm_migrate_timers(vcpu); 2830 vcpu->cpu = cpu; 2831 } 2832 2833 accumulate_steal_time(vcpu); 2834 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); 2835 } 2836 2837 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 2838 { 2839 kvm_x86_ops->vcpu_put(vcpu); 2840 kvm_put_guest_fpu(vcpu); 2841 vcpu->arch.last_host_tsc = native_read_tsc(); 2842 } 2843 2844 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, 2845 struct kvm_lapic_state *s) 2846 { 2847 kvm_x86_ops->sync_pir_to_irr(vcpu); 2848 memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s); 2849 2850 return 0; 2851 } 2852 2853 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, 2854 struct kvm_lapic_state *s) 2855 { 2856 kvm_apic_post_state_restore(vcpu, s); 2857 update_cr8_intercept(vcpu); 2858 2859 return 0; 2860 } 2861 2862 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, 2863 struct kvm_interrupt *irq) 2864 { 2865 if (irq->irq >= KVM_NR_INTERRUPTS) 2866 return -EINVAL; 2867 if (irqchip_in_kernel(vcpu->kvm)) 2868 return -ENXIO; 2869 2870 kvm_queue_interrupt(vcpu, irq->irq, false); 2871 kvm_make_request(KVM_REQ_EVENT, vcpu); 2872 2873 return 0; 2874 } 2875 2876 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu) 2877 { 2878 kvm_inject_nmi(vcpu); 2879 2880 return 0; 2881 } 2882 2883 static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu) 2884 { 2885 kvm_make_request(KVM_REQ_SMI, vcpu); 2886 2887 return 0; 2888 } 2889 2890 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, 2891 struct kvm_tpr_access_ctl *tac) 2892 { 2893 if (tac->flags) 2894 return -EINVAL; 2895 vcpu->arch.tpr_access_reporting = !!tac->enabled; 2896 return 0; 2897 } 2898 2899 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, 
2900 u64 mcg_cap) 2901 { 2902 int r; 2903 unsigned bank_num = mcg_cap & 0xff, bank; 2904 2905 r = -EINVAL; 2906 if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS) 2907 goto out; 2908 if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000)) 2909 goto out; 2910 r = 0; 2911 vcpu->arch.mcg_cap = mcg_cap; 2912 /* Init IA32_MCG_CTL to all 1s */ 2913 if (mcg_cap & MCG_CTL_P) 2914 vcpu->arch.mcg_ctl = ~(u64)0; 2915 /* Init IA32_MCi_CTL to all 1s */ 2916 for (bank = 0; bank < bank_num; bank++) 2917 vcpu->arch.mce_banks[bank*4] = ~(u64)0; 2918 out: 2919 return r; 2920 } 2921 2922 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu, 2923 struct kvm_x86_mce *mce) 2924 { 2925 u64 mcg_cap = vcpu->arch.mcg_cap; 2926 unsigned bank_num = mcg_cap & 0xff; 2927 u64 *banks = vcpu->arch.mce_banks; 2928 2929 if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL)) 2930 return -EINVAL; 2931 /* 2932 * if IA32_MCG_CTL is not all 1s, the uncorrected error 2933 * reporting is disabled 2934 */ 2935 if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) && 2936 vcpu->arch.mcg_ctl != ~(u64)0) 2937 return 0; 2938 banks += 4 * mce->bank; 2939 /* 2940 * if IA32_MCi_CTL is not all 1s, the uncorrected error 2941 * reporting is disabled for the bank 2942 */ 2943 if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0) 2944 return 0; 2945 if (mce->status & MCI_STATUS_UC) { 2946 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || 2947 !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) { 2948 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 2949 return 0; 2950 } 2951 if (banks[1] & MCI_STATUS_VAL) 2952 mce->status |= MCI_STATUS_OVER; 2953 banks[2] = mce->addr; 2954 banks[3] = mce->misc; 2955 vcpu->arch.mcg_status = mce->mcg_status; 2956 banks[1] = mce->status; 2957 kvm_queue_exception(vcpu, MC_VECTOR); 2958 } else if (!(banks[1] & MCI_STATUS_VAL) 2959 || !(banks[1] & MCI_STATUS_UC)) { 2960 if (banks[1] & MCI_STATUS_VAL) 2961 mce->status |= MCI_STATUS_OVER; 2962 banks[2] = mce->addr; 2963 banks[3] = mce->misc; 2964 banks[1] = mce->status; 2965 } else 2966 banks[1] |= MCI_STATUS_OVER; 2967 return 0; 2968 } 2969 2970 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, 2971 struct kvm_vcpu_events *events) 2972 { 2973 process_nmi(vcpu); 2974 events->exception.injected = 2975 vcpu->arch.exception.pending && 2976 !kvm_exception_is_soft(vcpu->arch.exception.nr); 2977 events->exception.nr = vcpu->arch.exception.nr; 2978 events->exception.has_error_code = vcpu->arch.exception.has_error_code; 2979 events->exception.pad = 0; 2980 events->exception.error_code = vcpu->arch.exception.error_code; 2981 2982 events->interrupt.injected = 2983 vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft; 2984 events->interrupt.nr = vcpu->arch.interrupt.nr; 2985 events->interrupt.soft = 0; 2986 events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu); 2987 2988 events->nmi.injected = vcpu->arch.nmi_injected; 2989 events->nmi.pending = vcpu->arch.nmi_pending != 0; 2990 events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); 2991 events->nmi.pad = 0; 2992 2993 events->sipi_vector = 0; /* never valid when reporting to user space */ 2994 2995 events->smi.smm = is_smm(vcpu); 2996 events->smi.pending = vcpu->arch.smi_pending; 2997 events->smi.smm_inside_nmi = 2998 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK); 2999 events->smi.latched_init = kvm_lapic_latched_init(vcpu); 3000 3001 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING 3002 | KVM_VCPUEVENT_VALID_SHADOW 3003 | KVM_VCPUEVENT_VALID_SMM); 3004 memset(&events->reserved, 0, 
sizeof(events->reserved)); 3005 } 3006 3007 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, 3008 struct kvm_vcpu_events *events) 3009 { 3010 if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING 3011 | KVM_VCPUEVENT_VALID_SIPI_VECTOR 3012 | KVM_VCPUEVENT_VALID_SHADOW 3013 | KVM_VCPUEVENT_VALID_SMM)) 3014 return -EINVAL; 3015 3016 process_nmi(vcpu); 3017 vcpu->arch.exception.pending = events->exception.injected; 3018 vcpu->arch.exception.nr = events->exception.nr; 3019 vcpu->arch.exception.has_error_code = events->exception.has_error_code; 3020 vcpu->arch.exception.error_code = events->exception.error_code; 3021 3022 vcpu->arch.interrupt.pending = events->interrupt.injected; 3023 vcpu->arch.interrupt.nr = events->interrupt.nr; 3024 vcpu->arch.interrupt.soft = events->interrupt.soft; 3025 if (events->flags & KVM_VCPUEVENT_VALID_SHADOW) 3026 kvm_x86_ops->set_interrupt_shadow(vcpu, 3027 events->interrupt.shadow); 3028 3029 vcpu->arch.nmi_injected = events->nmi.injected; 3030 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) 3031 vcpu->arch.nmi_pending = events->nmi.pending; 3032 kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked); 3033 3034 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR && 3035 kvm_vcpu_has_lapic(vcpu)) 3036 vcpu->arch.apic->sipi_vector = events->sipi_vector; 3037 3038 if (events->flags & KVM_VCPUEVENT_VALID_SMM) { 3039 if (events->smi.smm) 3040 vcpu->arch.hflags |= HF_SMM_MASK; 3041 else 3042 vcpu->arch.hflags &= ~HF_SMM_MASK; 3043 vcpu->arch.smi_pending = events->smi.pending; 3044 if (events->smi.smm_inside_nmi) 3045 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; 3046 else 3047 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK; 3048 if (kvm_vcpu_has_lapic(vcpu)) { 3049 if (events->smi.latched_init) 3050 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); 3051 else 3052 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); 3053 } 3054 } 3055 3056 kvm_make_request(KVM_REQ_EVENT, vcpu); 3057 3058 return 0; 3059 } 3060 3061 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, 3062 struct kvm_debugregs *dbgregs) 3063 { 3064 unsigned long val; 3065 3066 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); 3067 kvm_get_dr(vcpu, 6, &val); 3068 dbgregs->dr6 = val; 3069 dbgregs->dr7 = vcpu->arch.dr7; 3070 dbgregs->flags = 0; 3071 memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved)); 3072 } 3073 3074 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, 3075 struct kvm_debugregs *dbgregs) 3076 { 3077 if (dbgregs->flags) 3078 return -EINVAL; 3079 3080 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); 3081 kvm_update_dr0123(vcpu); 3082 vcpu->arch.dr6 = dbgregs->dr6; 3083 kvm_update_dr6(vcpu); 3084 vcpu->arch.dr7 = dbgregs->dr7; 3085 kvm_update_dr7(vcpu); 3086 3087 return 0; 3088 } 3089 3090 #define XSTATE_COMPACTION_ENABLED (1ULL << 63) 3091 3092 static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu) 3093 { 3094 struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave; 3095 u64 xstate_bv = xsave->header.xfeatures; 3096 u64 valid; 3097 3098 /* 3099 * Copy legacy XSAVE area, to avoid complications with CPUID 3100 * leaves 0 and 1 in the loop below. 3101 */ 3102 memcpy(dest, xsave, XSAVE_HDR_OFFSET); 3103 3104 /* Set XSTATE_BV */ 3105 *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv; 3106 3107 /* 3108 * Copy each region from the possibly compacted offset to the 3109 * non-compacted offset. 
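	 * The loop below isolates one feature at a time: "valid & -valid"
	 * picks the lowest set bit of xstate_bv, fls64() turns it into the
	 * CPUID sub-leaf index, and cpuid_count(XSTATE_CPUID, index) supplies
	 * that feature's size and its standard (non-compacted) offset.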
3110 */ 3111 valid = xstate_bv & ~XSTATE_FPSSE; 3112 while (valid) { 3113 u64 feature = valid & -valid; 3114 int index = fls64(feature) - 1; 3115 void *src = get_xsave_addr(xsave, feature); 3116 3117 if (src) { 3118 u32 size, offset, ecx, edx; 3119 cpuid_count(XSTATE_CPUID, index, 3120 &size, &offset, &ecx, &edx); 3121 memcpy(dest + offset, src, size); 3122 } 3123 3124 valid -= feature; 3125 } 3126 } 3127 3128 static void load_xsave(struct kvm_vcpu *vcpu, u8 *src) 3129 { 3130 struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave; 3131 u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET); 3132 u64 valid; 3133 3134 /* 3135 * Copy legacy XSAVE area, to avoid complications with CPUID 3136 * leaves 0 and 1 in the loop below. 3137 */ 3138 memcpy(xsave, src, XSAVE_HDR_OFFSET); 3139 3140 /* Set XSTATE_BV and possibly XCOMP_BV. */ 3141 xsave->header.xfeatures = xstate_bv; 3142 if (cpu_has_xsaves) 3143 xsave->header.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED; 3144 3145 /* 3146 * Copy each region from the non-compacted offset to the 3147 * possibly compacted offset. 3148 */ 3149 valid = xstate_bv & ~XSTATE_FPSSE; 3150 while (valid) { 3151 u64 feature = valid & -valid; 3152 int index = fls64(feature) - 1; 3153 void *dest = get_xsave_addr(xsave, feature); 3154 3155 if (dest) { 3156 u32 size, offset, ecx, edx; 3157 cpuid_count(XSTATE_CPUID, index, 3158 &size, &offset, &ecx, &edx); 3159 memcpy(dest, src + offset, size); 3160 } else 3161 WARN_ON_ONCE(1); 3162 3163 valid -= feature; 3164 } 3165 } 3166 3167 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, 3168 struct kvm_xsave *guest_xsave) 3169 { 3170 if (cpu_has_xsave) { 3171 memset(guest_xsave, 0, sizeof(struct kvm_xsave)); 3172 fill_xsave((u8 *) guest_xsave->region, vcpu); 3173 } else { 3174 memcpy(guest_xsave->region, 3175 &vcpu->arch.guest_fpu.state.fxsave, 3176 sizeof(struct fxregs_state)); 3177 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] = 3178 XSTATE_FPSSE; 3179 } 3180 } 3181 3182 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, 3183 struct kvm_xsave *guest_xsave) 3184 { 3185 u64 xstate_bv = 3186 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)]; 3187 3188 if (cpu_has_xsave) { 3189 /* 3190 * Here we allow setting states that are not present in 3191 * CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility 3192 * with old userspace. 
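	 * The mask is still bounded by kvm_supported_xcr0() just below, so
	 * feature bits that KVM cannot save/restore are rejected with -EINVAL.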
3193 */ 3194 if (xstate_bv & ~kvm_supported_xcr0()) 3195 return -EINVAL; 3196 load_xsave(vcpu, (u8 *)guest_xsave->region); 3197 } else { 3198 if (xstate_bv & ~XSTATE_FPSSE) 3199 return -EINVAL; 3200 memcpy(&vcpu->arch.guest_fpu.state.fxsave, 3201 guest_xsave->region, sizeof(struct fxregs_state)); 3202 } 3203 return 0; 3204 } 3205 3206 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu, 3207 struct kvm_xcrs *guest_xcrs) 3208 { 3209 if (!cpu_has_xsave) { 3210 guest_xcrs->nr_xcrs = 0; 3211 return; 3212 } 3213 3214 guest_xcrs->nr_xcrs = 1; 3215 guest_xcrs->flags = 0; 3216 guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK; 3217 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; 3218 } 3219 3220 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, 3221 struct kvm_xcrs *guest_xcrs) 3222 { 3223 int i, r = 0; 3224 3225 if (!cpu_has_xsave) 3226 return -EINVAL; 3227 3228 if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags) 3229 return -EINVAL; 3230 3231 for (i = 0; i < guest_xcrs->nr_xcrs; i++) 3232 /* Only support XCR0 currently */ 3233 if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) { 3234 r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK, 3235 guest_xcrs->xcrs[i].value); 3236 break; 3237 } 3238 if (r) 3239 r = -EINVAL; 3240 return r; 3241 } 3242 3243 /* 3244 * kvm_set_guest_paused() indicates to the guest kernel that it has been 3245 * stopped by the hypervisor. This function will be called from the host only. 3246 * EINVAL is returned when the host attempts to set the flag for a guest that 3247 * does not support pv clocks. 3248 */ 3249 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) 3250 { 3251 if (!vcpu->arch.pv_time_enabled) 3252 return -EINVAL; 3253 vcpu->arch.pvclock_set_guest_stopped_request = true; 3254 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 3255 return 0; 3256 } 3257 3258 long kvm_arch_vcpu_ioctl(struct file *filp, 3259 unsigned int ioctl, unsigned long arg) 3260 { 3261 struct kvm_vcpu *vcpu = filp->private_data; 3262 void __user *argp = (void __user *)arg; 3263 int r; 3264 union { 3265 struct kvm_lapic_state *lapic; 3266 struct kvm_xsave *xsave; 3267 struct kvm_xcrs *xcrs; 3268 void *buffer; 3269 } u; 3270 3271 u.buffer = NULL; 3272 switch (ioctl) { 3273 case KVM_GET_LAPIC: { 3274 r = -EINVAL; 3275 if (!vcpu->arch.apic) 3276 goto out; 3277 u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL); 3278 3279 r = -ENOMEM; 3280 if (!u.lapic) 3281 goto out; 3282 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic); 3283 if (r) 3284 goto out; 3285 r = -EFAULT; 3286 if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state))) 3287 goto out; 3288 r = 0; 3289 break; 3290 } 3291 case KVM_SET_LAPIC: { 3292 r = -EINVAL; 3293 if (!vcpu->arch.apic) 3294 goto out; 3295 u.lapic = memdup_user(argp, sizeof(*u.lapic)); 3296 if (IS_ERR(u.lapic)) 3297 return PTR_ERR(u.lapic); 3298 3299 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic); 3300 break; 3301 } 3302 case KVM_INTERRUPT: { 3303 struct kvm_interrupt irq; 3304 3305 r = -EFAULT; 3306 if (copy_from_user(&irq, argp, sizeof irq)) 3307 goto out; 3308 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); 3309 break; 3310 } 3311 case KVM_NMI: { 3312 r = kvm_vcpu_ioctl_nmi(vcpu); 3313 break; 3314 } 3315 case KVM_SMI: { 3316 r = kvm_vcpu_ioctl_smi(vcpu); 3317 break; 3318 } 3319 case KVM_SET_CPUID: { 3320 struct kvm_cpuid __user *cpuid_arg = argp; 3321 struct kvm_cpuid cpuid; 3322 3323 r = -EFAULT; 3324 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) 3325 goto out; 3326 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, 
cpuid_arg->entries); 3327 break; 3328 } 3329 case KVM_SET_CPUID2: { 3330 struct kvm_cpuid2 __user *cpuid_arg = argp; 3331 struct kvm_cpuid2 cpuid; 3332 3333 r = -EFAULT; 3334 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) 3335 goto out; 3336 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, 3337 cpuid_arg->entries); 3338 break; 3339 } 3340 case KVM_GET_CPUID2: { 3341 struct kvm_cpuid2 __user *cpuid_arg = argp; 3342 struct kvm_cpuid2 cpuid; 3343 3344 r = -EFAULT; 3345 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) 3346 goto out; 3347 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, 3348 cpuid_arg->entries); 3349 if (r) 3350 goto out; 3351 r = -EFAULT; 3352 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) 3353 goto out; 3354 r = 0; 3355 break; 3356 } 3357 case KVM_GET_MSRS: 3358 r = msr_io(vcpu, argp, do_get_msr, 1); 3359 break; 3360 case KVM_SET_MSRS: 3361 r = msr_io(vcpu, argp, do_set_msr, 0); 3362 break; 3363 case KVM_TPR_ACCESS_REPORTING: { 3364 struct kvm_tpr_access_ctl tac; 3365 3366 r = -EFAULT; 3367 if (copy_from_user(&tac, argp, sizeof tac)) 3368 goto out; 3369 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac); 3370 if (r) 3371 goto out; 3372 r = -EFAULT; 3373 if (copy_to_user(argp, &tac, sizeof tac)) 3374 goto out; 3375 r = 0; 3376 break; 3377 }; 3378 case KVM_SET_VAPIC_ADDR: { 3379 struct kvm_vapic_addr va; 3380 3381 r = -EINVAL; 3382 if (!irqchip_in_kernel(vcpu->kvm)) 3383 goto out; 3384 r = -EFAULT; 3385 if (copy_from_user(&va, argp, sizeof va)) 3386 goto out; 3387 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); 3388 break; 3389 } 3390 case KVM_X86_SETUP_MCE: { 3391 u64 mcg_cap; 3392 3393 r = -EFAULT; 3394 if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap)) 3395 goto out; 3396 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap); 3397 break; 3398 } 3399 case KVM_X86_SET_MCE: { 3400 struct kvm_x86_mce mce; 3401 3402 r = -EFAULT; 3403 if (copy_from_user(&mce, argp, sizeof mce)) 3404 goto out; 3405 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); 3406 break; 3407 } 3408 case KVM_GET_VCPU_EVENTS: { 3409 struct kvm_vcpu_events events; 3410 3411 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events); 3412 3413 r = -EFAULT; 3414 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events))) 3415 break; 3416 r = 0; 3417 break; 3418 } 3419 case KVM_SET_VCPU_EVENTS: { 3420 struct kvm_vcpu_events events; 3421 3422 r = -EFAULT; 3423 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events))) 3424 break; 3425 3426 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events); 3427 break; 3428 } 3429 case KVM_GET_DEBUGREGS: { 3430 struct kvm_debugregs dbgregs; 3431 3432 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs); 3433 3434 r = -EFAULT; 3435 if (copy_to_user(argp, &dbgregs, 3436 sizeof(struct kvm_debugregs))) 3437 break; 3438 r = 0; 3439 break; 3440 } 3441 case KVM_SET_DEBUGREGS: { 3442 struct kvm_debugregs dbgregs; 3443 3444 r = -EFAULT; 3445 if (copy_from_user(&dbgregs, argp, 3446 sizeof(struct kvm_debugregs))) 3447 break; 3448 3449 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs); 3450 break; 3451 } 3452 case KVM_GET_XSAVE: { 3453 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL); 3454 r = -ENOMEM; 3455 if (!u.xsave) 3456 break; 3457 3458 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave); 3459 3460 r = -EFAULT; 3461 if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave))) 3462 break; 3463 r = 0; 3464 break; 3465 } 3466 case KVM_SET_XSAVE: { 3467 u.xsave = memdup_user(argp, sizeof(*u.xsave)); 3468 if (IS_ERR(u.xsave)) 3469 return PTR_ERR(u.xsave); 3470 3471 r = 
kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave); 3472 break; 3473 } 3474 case KVM_GET_XCRS: { 3475 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL); 3476 r = -ENOMEM; 3477 if (!u.xcrs) 3478 break; 3479 3480 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs); 3481 3482 r = -EFAULT; 3483 if (copy_to_user(argp, u.xcrs, 3484 sizeof(struct kvm_xcrs))) 3485 break; 3486 r = 0; 3487 break; 3488 } 3489 case KVM_SET_XCRS: { 3490 u.xcrs = memdup_user(argp, sizeof(*u.xcrs)); 3491 if (IS_ERR(u.xcrs)) 3492 return PTR_ERR(u.xcrs); 3493 3494 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs); 3495 break; 3496 } 3497 case KVM_SET_TSC_KHZ: { 3498 u32 user_tsc_khz; 3499 3500 r = -EINVAL; 3501 user_tsc_khz = (u32)arg; 3502 3503 if (user_tsc_khz >= kvm_max_guest_tsc_khz) 3504 goto out; 3505 3506 if (user_tsc_khz == 0) 3507 user_tsc_khz = tsc_khz; 3508 3509 kvm_set_tsc_khz(vcpu, user_tsc_khz); 3510 3511 r = 0; 3512 goto out; 3513 } 3514 case KVM_GET_TSC_KHZ: { 3515 r = vcpu->arch.virtual_tsc_khz; 3516 goto out; 3517 } 3518 case KVM_KVMCLOCK_CTRL: { 3519 r = kvm_set_guest_paused(vcpu); 3520 goto out; 3521 } 3522 default: 3523 r = -EINVAL; 3524 } 3525 out: 3526 kfree(u.buffer); 3527 return r; 3528 } 3529 3530 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) 3531 { 3532 return VM_FAULT_SIGBUS; 3533 } 3534 3535 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr) 3536 { 3537 int ret; 3538 3539 if (addr > (unsigned int)(-3 * PAGE_SIZE)) 3540 return -EINVAL; 3541 ret = kvm_x86_ops->set_tss_addr(kvm, addr); 3542 return ret; 3543 } 3544 3545 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm, 3546 u64 ident_addr) 3547 { 3548 kvm->arch.ept_identity_map_addr = ident_addr; 3549 return 0; 3550 } 3551 3552 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, 3553 u32 kvm_nr_mmu_pages) 3554 { 3555 if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES) 3556 return -EINVAL; 3557 3558 mutex_lock(&kvm->slots_lock); 3559 3560 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages); 3561 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; 3562 3563 mutex_unlock(&kvm->slots_lock); 3564 return 0; 3565 } 3566 3567 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm) 3568 { 3569 return kvm->arch.n_max_mmu_pages; 3570 } 3571 3572 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) 3573 { 3574 int r; 3575 3576 r = 0; 3577 switch (chip->chip_id) { 3578 case KVM_IRQCHIP_PIC_MASTER: 3579 memcpy(&chip->chip.pic, 3580 &pic_irqchip(kvm)->pics[0], 3581 sizeof(struct kvm_pic_state)); 3582 break; 3583 case KVM_IRQCHIP_PIC_SLAVE: 3584 memcpy(&chip->chip.pic, 3585 &pic_irqchip(kvm)->pics[1], 3586 sizeof(struct kvm_pic_state)); 3587 break; 3588 case KVM_IRQCHIP_IOAPIC: 3589 r = kvm_get_ioapic(kvm, &chip->chip.ioapic); 3590 break; 3591 default: 3592 r = -EINVAL; 3593 break; 3594 } 3595 return r; 3596 } 3597 3598 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) 3599 { 3600 int r; 3601 3602 r = 0; 3603 switch (chip->chip_id) { 3604 case KVM_IRQCHIP_PIC_MASTER: 3605 spin_lock(&pic_irqchip(kvm)->lock); 3606 memcpy(&pic_irqchip(kvm)->pics[0], 3607 &chip->chip.pic, 3608 sizeof(struct kvm_pic_state)); 3609 spin_unlock(&pic_irqchip(kvm)->lock); 3610 break; 3611 case KVM_IRQCHIP_PIC_SLAVE: 3612 spin_lock(&pic_irqchip(kvm)->lock); 3613 memcpy(&pic_irqchip(kvm)->pics[1], 3614 &chip->chip.pic, 3615 sizeof(struct kvm_pic_state)); 3616 spin_unlock(&pic_irqchip(kvm)->lock); 3617 break; 3618 case KVM_IRQCHIP_IOAPIC: 3619 r = kvm_set_ioapic(kvm, &chip->chip.ioapic); 3620 
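		/*
		 * Unlike the PIC cases above, which copy state here under
		 * pic_irqchip(kvm)->lock, the ioapic copy is delegated to
		 * kvm_set_ioapic().
		 */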
break; 3621 default: 3622 r = -EINVAL; 3623 break; 3624 } 3625 kvm_pic_update_irq(pic_irqchip(kvm)); 3626 return r; 3627 } 3628 3629 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps) 3630 { 3631 int r = 0; 3632 3633 mutex_lock(&kvm->arch.vpit->pit_state.lock); 3634 memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state)); 3635 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 3636 return r; 3637 } 3638 3639 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps) 3640 { 3641 int r = 0; 3642 3643 mutex_lock(&kvm->arch.vpit->pit_state.lock); 3644 memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state)); 3645 kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0); 3646 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 3647 return r; 3648 } 3649 3650 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) 3651 { 3652 int r = 0; 3653 3654 mutex_lock(&kvm->arch.vpit->pit_state.lock); 3655 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels, 3656 sizeof(ps->channels)); 3657 ps->flags = kvm->arch.vpit->pit_state.flags; 3658 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 3659 memset(&ps->reserved, 0, sizeof(ps->reserved)); 3660 return r; 3661 } 3662 3663 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) 3664 { 3665 int r = 0, start = 0; 3666 u32 prev_legacy, cur_legacy; 3667 mutex_lock(&kvm->arch.vpit->pit_state.lock); 3668 prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; 3669 cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY; 3670 if (!prev_legacy && cur_legacy) 3671 start = 1; 3672 memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels, 3673 sizeof(kvm->arch.vpit->pit_state.channels)); 3674 kvm->arch.vpit->pit_state.flags = ps->flags; 3675 kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start); 3676 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 3677 return r; 3678 } 3679 3680 static int kvm_vm_ioctl_reinject(struct kvm *kvm, 3681 struct kvm_reinject_control *control) 3682 { 3683 if (!kvm->arch.vpit) 3684 return -ENXIO; 3685 mutex_lock(&kvm->arch.vpit->pit_state.lock); 3686 kvm->arch.vpit->pit_state.reinject = control->pit_reinject; 3687 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 3688 return 0; 3689 } 3690 3691 /** 3692 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot 3693 * @kvm: kvm instance 3694 * @log: slot id and address to which we copy the log 3695 * 3696 * Steps 1-4 below provide general overview of dirty page logging. See 3697 * kvm_get_dirty_log_protect() function description for additional details. 3698 * 3699 * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we 3700 * always flush the TLB (step 4) even if previous step failed and the dirty 3701 * bitmap may be corrupt. Regardless of previous outcome the KVM logging API 3702 * does not preclude user space subsequent dirty log read. Flushing TLB ensures 3703 * writes will be marked dirty for next log read. 3704 * 3705 * 1. Take a snapshot of the bit and clear it if needed. 3706 * 2. Write protect the corresponding page. 3707 * 3. Copy the snapshot to the userspace. 3708 * 4. Flush TLB's if needed. 3709 */ 3710 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) 3711 { 3712 bool is_dirty = false; 3713 int r; 3714 3715 mutex_lock(&kvm->slots_lock); 3716 3717 /* 3718 * Flush potentially hardware-cached dirty pages to dirty_bitmap. 
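	 * The optional flush_log_dirty() hook gives the vendor module a
	 * chance to drain any dirty GFNs it has buffered in hardware into
	 * the memslot dirty_bitmap before the bitmap is copied to userspace
	 * below.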
3719 */ 3720 if (kvm_x86_ops->flush_log_dirty) 3721 kvm_x86_ops->flush_log_dirty(kvm); 3722 3723 r = kvm_get_dirty_log_protect(kvm, log, &is_dirty); 3724 3725 /* 3726 * All the TLBs can be flushed out of mmu lock, see the comments in 3727 * kvm_mmu_slot_remove_write_access(). 3728 */ 3729 lockdep_assert_held(&kvm->slots_lock); 3730 if (is_dirty) 3731 kvm_flush_remote_tlbs(kvm); 3732 3733 mutex_unlock(&kvm->slots_lock); 3734 return r; 3735 } 3736 3737 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, 3738 bool line_status) 3739 { 3740 if (!irqchip_in_kernel(kvm)) 3741 return -ENXIO; 3742 3743 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 3744 irq_event->irq, irq_event->level, 3745 line_status); 3746 return 0; 3747 } 3748 3749 static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, 3750 struct kvm_enable_cap *cap) 3751 { 3752 int r; 3753 3754 if (cap->flags) 3755 return -EINVAL; 3756 3757 switch (cap->cap) { 3758 case KVM_CAP_DISABLE_QUIRKS: 3759 kvm->arch.disabled_quirks = cap->args[0]; 3760 r = 0; 3761 break; 3762 default: 3763 r = -EINVAL; 3764 break; 3765 } 3766 return r; 3767 } 3768 3769 long kvm_arch_vm_ioctl(struct file *filp, 3770 unsigned int ioctl, unsigned long arg) 3771 { 3772 struct kvm *kvm = filp->private_data; 3773 void __user *argp = (void __user *)arg; 3774 int r = -ENOTTY; 3775 /* 3776 * This union makes it completely explicit to gcc-3.x 3777 * that these two variables' stack usage should be 3778 * combined, not added together. 3779 */ 3780 union { 3781 struct kvm_pit_state ps; 3782 struct kvm_pit_state2 ps2; 3783 struct kvm_pit_config pit_config; 3784 } u; 3785 3786 switch (ioctl) { 3787 case KVM_SET_TSS_ADDR: 3788 r = kvm_vm_ioctl_set_tss_addr(kvm, arg); 3789 break; 3790 case KVM_SET_IDENTITY_MAP_ADDR: { 3791 u64 ident_addr; 3792 3793 r = -EFAULT; 3794 if (copy_from_user(&ident_addr, argp, sizeof ident_addr)) 3795 goto out; 3796 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr); 3797 break; 3798 } 3799 case KVM_SET_NR_MMU_PAGES: 3800 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg); 3801 break; 3802 case KVM_GET_NR_MMU_PAGES: 3803 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm); 3804 break; 3805 case KVM_CREATE_IRQCHIP: { 3806 struct kvm_pic *vpic; 3807 3808 mutex_lock(&kvm->lock); 3809 r = -EEXIST; 3810 if (kvm->arch.vpic) 3811 goto create_irqchip_unlock; 3812 r = -EINVAL; 3813 if (atomic_read(&kvm->online_vcpus)) 3814 goto create_irqchip_unlock; 3815 r = -ENOMEM; 3816 vpic = kvm_create_pic(kvm); 3817 if (vpic) { 3818 r = kvm_ioapic_init(kvm); 3819 if (r) { 3820 mutex_lock(&kvm->slots_lock); 3821 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, 3822 &vpic->dev_master); 3823 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, 3824 &vpic->dev_slave); 3825 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, 3826 &vpic->dev_eclr); 3827 mutex_unlock(&kvm->slots_lock); 3828 kfree(vpic); 3829 goto create_irqchip_unlock; 3830 } 3831 } else 3832 goto create_irqchip_unlock; 3833 smp_wmb(); 3834 kvm->arch.vpic = vpic; 3835 smp_wmb(); 3836 r = kvm_setup_default_irq_routing(kvm); 3837 if (r) { 3838 mutex_lock(&kvm->slots_lock); 3839 mutex_lock(&kvm->irq_lock); 3840 kvm_ioapic_destroy(kvm); 3841 kvm_destroy_pic(kvm); 3842 mutex_unlock(&kvm->irq_lock); 3843 mutex_unlock(&kvm->slots_lock); 3844 } 3845 create_irqchip_unlock: 3846 mutex_unlock(&kvm->lock); 3847 break; 3848 } 3849 case KVM_CREATE_PIT: 3850 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY; 3851 goto create_pit; 3852 case KVM_CREATE_PIT2: 3853 r = -EFAULT; 3854 if (copy_from_user(&u.pit_config, argp, 3855 
sizeof(struct kvm_pit_config))) 3856 goto out; 3857 create_pit: 3858 mutex_lock(&kvm->slots_lock); 3859 r = -EEXIST; 3860 if (kvm->arch.vpit) 3861 goto create_pit_unlock; 3862 r = -ENOMEM; 3863 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); 3864 if (kvm->arch.vpit) 3865 r = 0; 3866 create_pit_unlock: 3867 mutex_unlock(&kvm->slots_lock); 3868 break; 3869 case KVM_GET_IRQCHIP: { 3870 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ 3871 struct kvm_irqchip *chip; 3872 3873 chip = memdup_user(argp, sizeof(*chip)); 3874 if (IS_ERR(chip)) { 3875 r = PTR_ERR(chip); 3876 goto out; 3877 } 3878 3879 r = -ENXIO; 3880 if (!irqchip_in_kernel(kvm)) 3881 goto get_irqchip_out; 3882 r = kvm_vm_ioctl_get_irqchip(kvm, chip); 3883 if (r) 3884 goto get_irqchip_out; 3885 r = -EFAULT; 3886 if (copy_to_user(argp, chip, sizeof *chip)) 3887 goto get_irqchip_out; 3888 r = 0; 3889 get_irqchip_out: 3890 kfree(chip); 3891 break; 3892 } 3893 case KVM_SET_IRQCHIP: { 3894 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ 3895 struct kvm_irqchip *chip; 3896 3897 chip = memdup_user(argp, sizeof(*chip)); 3898 if (IS_ERR(chip)) { 3899 r = PTR_ERR(chip); 3900 goto out; 3901 } 3902 3903 r = -ENXIO; 3904 if (!irqchip_in_kernel(kvm)) 3905 goto set_irqchip_out; 3906 r = kvm_vm_ioctl_set_irqchip(kvm, chip); 3907 if (r) 3908 goto set_irqchip_out; 3909 r = 0; 3910 set_irqchip_out: 3911 kfree(chip); 3912 break; 3913 } 3914 case KVM_GET_PIT: { 3915 r = -EFAULT; 3916 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state))) 3917 goto out; 3918 r = -ENXIO; 3919 if (!kvm->arch.vpit) 3920 goto out; 3921 r = kvm_vm_ioctl_get_pit(kvm, &u.ps); 3922 if (r) 3923 goto out; 3924 r = -EFAULT; 3925 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state))) 3926 goto out; 3927 r = 0; 3928 break; 3929 } 3930 case KVM_SET_PIT: { 3931 r = -EFAULT; 3932 if (copy_from_user(&u.ps, argp, sizeof u.ps)) 3933 goto out; 3934 r = -ENXIO; 3935 if (!kvm->arch.vpit) 3936 goto out; 3937 r = kvm_vm_ioctl_set_pit(kvm, &u.ps); 3938 break; 3939 } 3940 case KVM_GET_PIT2: { 3941 r = -ENXIO; 3942 if (!kvm->arch.vpit) 3943 goto out; 3944 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2); 3945 if (r) 3946 goto out; 3947 r = -EFAULT; 3948 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2))) 3949 goto out; 3950 r = 0; 3951 break; 3952 } 3953 case KVM_SET_PIT2: { 3954 r = -EFAULT; 3955 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2))) 3956 goto out; 3957 r = -ENXIO; 3958 if (!kvm->arch.vpit) 3959 goto out; 3960 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2); 3961 break; 3962 } 3963 case KVM_REINJECT_CONTROL: { 3964 struct kvm_reinject_control control; 3965 r = -EFAULT; 3966 if (copy_from_user(&control, argp, sizeof(control))) 3967 goto out; 3968 r = kvm_vm_ioctl_reinject(kvm, &control); 3969 break; 3970 } 3971 case KVM_XEN_HVM_CONFIG: { 3972 r = -EFAULT; 3973 if (copy_from_user(&kvm->arch.xen_hvm_config, argp, 3974 sizeof(struct kvm_xen_hvm_config))) 3975 goto out; 3976 r = -EINVAL; 3977 if (kvm->arch.xen_hvm_config.flags) 3978 goto out; 3979 r = 0; 3980 break; 3981 } 3982 case KVM_SET_CLOCK: { 3983 struct kvm_clock_data user_ns; 3984 u64 now_ns; 3985 s64 delta; 3986 3987 r = -EFAULT; 3988 if (copy_from_user(&user_ns, argp, sizeof(user_ns))) 3989 goto out; 3990 3991 r = -EINVAL; 3992 if (user_ns.flags) 3993 goto out; 3994 3995 r = 0; 3996 local_irq_disable(); 3997 now_ns = get_kernel_ns(); 3998 delta = user_ns.clock - now_ns; 3999 local_irq_enable(); 4000 kvm->arch.kvmclock_offset = delta; 4001 kvm_gen_update_masterclock(kvm); 4002 break; 4003 } 4004 case KVM_GET_CLOCK: { 4005 struct 
kvm_clock_data user_ns; 4006 u64 now_ns; 4007 4008 local_irq_disable(); 4009 now_ns = get_kernel_ns(); 4010 user_ns.clock = kvm->arch.kvmclock_offset + now_ns; 4011 local_irq_enable(); 4012 user_ns.flags = 0; 4013 memset(&user_ns.pad, 0, sizeof(user_ns.pad)); 4014 4015 r = -EFAULT; 4016 if (copy_to_user(argp, &user_ns, sizeof(user_ns))) 4017 goto out; 4018 r = 0; 4019 break; 4020 } 4021 case KVM_ENABLE_CAP: { 4022 struct kvm_enable_cap cap; 4023 4024 r = -EFAULT; 4025 if (copy_from_user(&cap, argp, sizeof(cap))) 4026 goto out; 4027 r = kvm_vm_ioctl_enable_cap(kvm, &cap); 4028 break; 4029 } 4030 default: 4031 r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg); 4032 } 4033 out: 4034 return r; 4035 } 4036 4037 static void kvm_init_msr_list(void) 4038 { 4039 u32 dummy[2]; 4040 unsigned i, j; 4041 4042 for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) { 4043 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0) 4044 continue; 4045 4046 /* 4047 * Even MSRs that are valid in the host may not be exposed 4048 * to the guests in some cases. We could work around this 4049 * in VMX with the generic MSR save/load machinery, but it 4050 * is not really worthwhile since it will really only 4051 * happen with nested virtualization. 4052 */ 4053 switch (msrs_to_save[i]) { 4054 case MSR_IA32_BNDCFGS: 4055 if (!kvm_x86_ops->mpx_supported()) 4056 continue; 4057 break; 4058 default: 4059 break; 4060 } 4061 4062 if (j < i) 4063 msrs_to_save[j] = msrs_to_save[i]; 4064 j++; 4065 } 4066 num_msrs_to_save = j; 4067 4068 for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) { 4069 switch (emulated_msrs[i]) { 4070 case MSR_IA32_SMBASE: 4071 if (!kvm_x86_ops->cpu_has_high_real_mode_segbase()) 4072 continue; 4073 break; 4074 default: 4075 break; 4076 } 4077 4078 if (j < i) 4079 emulated_msrs[j] = emulated_msrs[i]; 4080 j++; 4081 } 4082 num_emulated_msrs = j; 4083 } 4084 4085 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, 4086 const void *v) 4087 { 4088 int handled = 0; 4089 int n; 4090 4091 do { 4092 n = min(len, 8); 4093 if (!(vcpu->arch.apic && 4094 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v)) 4095 && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v)) 4096 break; 4097 handled += n; 4098 addr += n; 4099 len -= n; 4100 v += n; 4101 } while (len); 4102 4103 return handled; 4104 } 4105 4106 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) 4107 { 4108 int handled = 0; 4109 int n; 4110 4111 do { 4112 n = min(len, 8); 4113 if (!(vcpu->arch.apic && 4114 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev, 4115 addr, n, v)) 4116 && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v)) 4117 break; 4118 trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v); 4119 handled += n; 4120 addr += n; 4121 len -= n; 4122 v += n; 4123 } while (len); 4124 4125 return handled; 4126 } 4127 4128 static void kvm_set_segment(struct kvm_vcpu *vcpu, 4129 struct kvm_segment *var, int seg) 4130 { 4131 kvm_x86_ops->set_segment(vcpu, var, seg); 4132 } 4133 4134 void kvm_get_segment(struct kvm_vcpu *vcpu, 4135 struct kvm_segment *var, int seg) 4136 { 4137 kvm_x86_ops->get_segment(vcpu, var, seg); 4138 } 4139 4140 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, 4141 struct x86_exception *exception) 4142 { 4143 gpa_t t_gpa; 4144 4145 BUG_ON(!mmu_is_nested(vcpu)); 4146 4147 /* NPT walks are always user-walks */ 4148 access |= PFERR_USER_MASK; 4149 t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception); 4150 4151 return t_gpa; 4152 } 4153 4154 gpa_t 
kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, 4155 struct x86_exception *exception) 4156 { 4157 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; 4158 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); 4159 } 4160 4161 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, 4162 struct x86_exception *exception) 4163 { 4164 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; 4165 access |= PFERR_FETCH_MASK; 4166 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); 4167 } 4168 4169 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, 4170 struct x86_exception *exception) 4171 { 4172 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; 4173 access |= PFERR_WRITE_MASK; 4174 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); 4175 } 4176 4177 /* uses this to access any guest's mapped memory without checking CPL */ 4178 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, 4179 struct x86_exception *exception) 4180 { 4181 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception); 4182 } 4183 4184 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, 4185 struct kvm_vcpu *vcpu, u32 access, 4186 struct x86_exception *exception) 4187 { 4188 void *data = val; 4189 int r = X86EMUL_CONTINUE; 4190 4191 while (bytes) { 4192 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access, 4193 exception); 4194 unsigned offset = addr & (PAGE_SIZE-1); 4195 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); 4196 int ret; 4197 4198 if (gpa == UNMAPPED_GVA) 4199 return X86EMUL_PROPAGATE_FAULT; 4200 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data, 4201 offset, toread); 4202 if (ret < 0) { 4203 r = X86EMUL_IO_NEEDED; 4204 goto out; 4205 } 4206 4207 bytes -= toread; 4208 data += toread; 4209 addr += toread; 4210 } 4211 out: 4212 return r; 4213 } 4214 4215 /* used for instruction fetching */ 4216 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, 4217 gva_t addr, void *val, unsigned int bytes, 4218 struct x86_exception *exception) 4219 { 4220 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4221 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; 4222 unsigned offset; 4223 int ret; 4224 4225 /* Inline kvm_read_guest_virt_helper for speed. */ 4226 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK, 4227 exception); 4228 if (unlikely(gpa == UNMAPPED_GVA)) 4229 return X86EMUL_PROPAGATE_FAULT; 4230 4231 offset = addr & (PAGE_SIZE-1); 4232 if (WARN_ON(offset + bytes > PAGE_SIZE)) 4233 bytes = (unsigned)PAGE_SIZE - offset; 4234 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val, 4235 offset, bytes); 4236 if (unlikely(ret < 0)) 4237 return X86EMUL_IO_NEEDED; 4238 4239 return X86EMUL_CONTINUE; 4240 } 4241 4242 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, 4243 gva_t addr, void *val, unsigned int bytes, 4244 struct x86_exception *exception) 4245 { 4246 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4247 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? 
PFERR_USER_MASK : 0; 4248 4249 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, 4250 exception); 4251 } 4252 EXPORT_SYMBOL_GPL(kvm_read_guest_virt); 4253 4254 static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt, 4255 gva_t addr, void *val, unsigned int bytes, 4256 struct x86_exception *exception) 4257 { 4258 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4259 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception); 4260 } 4261 4262 int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, 4263 gva_t addr, void *val, 4264 unsigned int bytes, 4265 struct x86_exception *exception) 4266 { 4267 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4268 void *data = val; 4269 int r = X86EMUL_CONTINUE; 4270 4271 while (bytes) { 4272 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, 4273 PFERR_WRITE_MASK, 4274 exception); 4275 unsigned offset = addr & (PAGE_SIZE-1); 4276 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); 4277 int ret; 4278 4279 if (gpa == UNMAPPED_GVA) 4280 return X86EMUL_PROPAGATE_FAULT; 4281 ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite); 4282 if (ret < 0) { 4283 r = X86EMUL_IO_NEEDED; 4284 goto out; 4285 } 4286 4287 bytes -= towrite; 4288 data += towrite; 4289 addr += towrite; 4290 } 4291 out: 4292 return r; 4293 } 4294 EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system); 4295 4296 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, 4297 gpa_t *gpa, struct x86_exception *exception, 4298 bool write) 4299 { 4300 u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0) 4301 | (write ? PFERR_WRITE_MASK : 0); 4302 4303 if (vcpu_match_mmio_gva(vcpu, gva) 4304 && !permission_fault(vcpu, vcpu->arch.walk_mmu, 4305 vcpu->arch.access, access)) { 4306 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | 4307 (gva & (PAGE_SIZE - 1)); 4308 trace_vcpu_match_mmio(gva, *gpa, write, false); 4309 return 1; 4310 } 4311 4312 *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); 4313 4314 if (*gpa == UNMAPPED_GVA) 4315 return -1; 4316 4317 /* For APIC access vmexit */ 4318 if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 4319 return 1; 4320 4321 if (vcpu_match_mmio_gpa(vcpu, *gpa)) { 4322 trace_vcpu_match_mmio(gva, *gpa, write, true); 4323 return 1; 4324 } 4325 4326 return 0; 4327 } 4328 4329 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, 4330 const void *val, int bytes) 4331 { 4332 int ret; 4333 4334 ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes); 4335 if (ret < 0) 4336 return 0; 4337 kvm_mmu_pte_write(vcpu, gpa, val, bytes); 4338 return 1; 4339 } 4340 4341 struct read_write_emulator_ops { 4342 int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val, 4343 int bytes); 4344 int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa, 4345 void *val, int bytes); 4346 int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, 4347 int bytes, void *val); 4348 int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, 4349 void *val, int bytes); 4350 bool write; 4351 }; 4352 4353 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) 4354 { 4355 if (vcpu->mmio_read_completed) { 4356 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, 4357 vcpu->mmio_fragments[0].gpa, *(u64 *)val); 4358 vcpu->mmio_read_completed = 0; 4359 return 1; 4360 } 4361 4362 return 0; 4363 } 4364 4365 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, 4366 void *val, int bytes) 4367 { 4368 return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes); 4369 } 4370 4371 static int write_emulate(struct kvm_vcpu *vcpu, 
gpa_t gpa, 4372 void *val, int bytes) 4373 { 4374 return emulator_write_phys(vcpu, gpa, val, bytes); 4375 } 4376 4377 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val) 4378 { 4379 trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val); 4380 return vcpu_mmio_write(vcpu, gpa, bytes, val); 4381 } 4382 4383 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, 4384 void *val, int bytes) 4385 { 4386 trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0); 4387 return X86EMUL_IO_NEEDED; 4388 } 4389 4390 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, 4391 void *val, int bytes) 4392 { 4393 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; 4394 4395 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); 4396 return X86EMUL_CONTINUE; 4397 } 4398 4399 static const struct read_write_emulator_ops read_emultor = { 4400 .read_write_prepare = read_prepare, 4401 .read_write_emulate = read_emulate, 4402 .read_write_mmio = vcpu_mmio_read, 4403 .read_write_exit_mmio = read_exit_mmio, 4404 }; 4405 4406 static const struct read_write_emulator_ops write_emultor = { 4407 .read_write_emulate = write_emulate, 4408 .read_write_mmio = write_mmio, 4409 .read_write_exit_mmio = write_exit_mmio, 4410 .write = true, 4411 }; 4412 4413 static int emulator_read_write_onepage(unsigned long addr, void *val, 4414 unsigned int bytes, 4415 struct x86_exception *exception, 4416 struct kvm_vcpu *vcpu, 4417 const struct read_write_emulator_ops *ops) 4418 { 4419 gpa_t gpa; 4420 int handled, ret; 4421 bool write = ops->write; 4422 struct kvm_mmio_fragment *frag; 4423 4424 ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write); 4425 4426 if (ret < 0) 4427 return X86EMUL_PROPAGATE_FAULT; 4428 4429 /* For APIC access vmexit */ 4430 if (ret) 4431 goto mmio; 4432 4433 if (ops->read_write_emulate(vcpu, gpa, val, bytes)) 4434 return X86EMUL_CONTINUE; 4435 4436 mmio: 4437 /* 4438 * Is this MMIO handled locally? 4439 */ 4440 handled = ops->read_write_mmio(vcpu, gpa, bytes, val); 4441 if (handled == bytes) 4442 return X86EMUL_CONTINUE; 4443 4444 gpa += handled; 4445 bytes -= handled; 4446 val += handled; 4447 4448 WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS); 4449 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; 4450 frag->gpa = gpa; 4451 frag->data = val; 4452 frag->len = bytes; 4453 return X86EMUL_CONTINUE; 4454 } 4455 4456 static int emulator_read_write(struct x86_emulate_ctxt *ctxt, 4457 unsigned long addr, 4458 void *val, unsigned int bytes, 4459 struct x86_exception *exception, 4460 const struct read_write_emulator_ops *ops) 4461 { 4462 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4463 gpa_t gpa; 4464 int rc; 4465 4466 if (ops->read_write_prepare && 4467 ops->read_write_prepare(vcpu, val, bytes)) 4468 return X86EMUL_CONTINUE; 4469 4470 vcpu->mmio_nr_fragments = 0; 4471 4472 /* Crossing a page boundary? 
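 * If so, split the access: the first emulator_read_write_onepage() call
 * below covers the bytes left on the current page (now = -addr & ~PAGE_MASK),
 * and the second call handles the remainder on the following page.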
*/ 4473 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) { 4474 int now; 4475 4476 now = -addr & ~PAGE_MASK; 4477 rc = emulator_read_write_onepage(addr, val, now, exception, 4478 vcpu, ops); 4479 4480 if (rc != X86EMUL_CONTINUE) 4481 return rc; 4482 addr += now; 4483 if (ctxt->mode != X86EMUL_MODE_PROT64) 4484 addr = (u32)addr; 4485 val += now; 4486 bytes -= now; 4487 } 4488 4489 rc = emulator_read_write_onepage(addr, val, bytes, exception, 4490 vcpu, ops); 4491 if (rc != X86EMUL_CONTINUE) 4492 return rc; 4493 4494 if (!vcpu->mmio_nr_fragments) 4495 return rc; 4496 4497 gpa = vcpu->mmio_fragments[0].gpa; 4498 4499 vcpu->mmio_needed = 1; 4500 vcpu->mmio_cur_fragment = 0; 4501 4502 vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); 4503 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; 4504 vcpu->run->exit_reason = KVM_EXIT_MMIO; 4505 vcpu->run->mmio.phys_addr = gpa; 4506 4507 return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); 4508 } 4509 4510 static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt, 4511 unsigned long addr, 4512 void *val, 4513 unsigned int bytes, 4514 struct x86_exception *exception) 4515 { 4516 return emulator_read_write(ctxt, addr, val, bytes, 4517 exception, &read_emultor); 4518 } 4519 4520 static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt, 4521 unsigned long addr, 4522 const void *val, 4523 unsigned int bytes, 4524 struct x86_exception *exception) 4525 { 4526 return emulator_read_write(ctxt, addr, (void *)val, bytes, 4527 exception, &write_emultor); 4528 } 4529 4530 #define CMPXCHG_TYPE(t, ptr, old, new) \ 4531 (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old)) 4532 4533 #ifdef CONFIG_X86_64 4534 # define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new) 4535 #else 4536 # define CMPXCHG64(ptr, old, new) \ 4537 (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old)) 4538 #endif 4539 4540 static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, 4541 unsigned long addr, 4542 const void *old, 4543 const void *new, 4544 unsigned int bytes, 4545 struct x86_exception *exception) 4546 { 4547 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4548 gpa_t gpa; 4549 struct page *page; 4550 char *kaddr; 4551 bool exchanged; 4552 4553 /* guests cmpxchg8b have to be emulated atomically */ 4554 if (bytes > 8 || (bytes & (bytes - 1))) 4555 goto emul_write; 4556 4557 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL); 4558 4559 if (gpa == UNMAPPED_GVA || 4560 (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 4561 goto emul_write; 4562 4563 if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK)) 4564 goto emul_write; 4565 4566 page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT); 4567 if (is_error_page(page)) 4568 goto emul_write; 4569 4570 kaddr = kmap_atomic(page); 4571 kaddr += offset_in_page(gpa); 4572 switch (bytes) { 4573 case 1: 4574 exchanged = CMPXCHG_TYPE(u8, kaddr, old, new); 4575 break; 4576 case 2: 4577 exchanged = CMPXCHG_TYPE(u16, kaddr, old, new); 4578 break; 4579 case 4: 4580 exchanged = CMPXCHG_TYPE(u32, kaddr, old, new); 4581 break; 4582 case 8: 4583 exchanged = CMPXCHG64(kaddr, old, new); 4584 break; 4585 default: 4586 BUG(); 4587 } 4588 kunmap_atomic(kaddr); 4589 kvm_release_page_dirty(page); 4590 4591 if (!exchanged) 4592 return X86EMUL_CMPXCHG_FAILED; 4593 4594 kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT); 4595 kvm_mmu_pte_write(vcpu, gpa, new, bytes); 4596 4597 return X86EMUL_CONTINUE; 4598 4599 emul_write: 4600 printk_once(KERN_WARNING "kvm: emulating exchange as write\n"); 4601 
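/*
 * Fallback for everything the atomic path above cannot handle (operand
 * larger than 8 bytes or not a power of two, an access that crosses a
 * page boundary, an MMIO/APIC address, or a failed page lookup): emulate
 * the exchange as a plain, non-atomic write of the new value.
 */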
4602 return emulator_write_emulated(ctxt, addr, new, bytes, exception); 4603 } 4604 4605 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd) 4606 { 4607 /* TODO: String I/O for in kernel device */ 4608 int r; 4609 4610 if (vcpu->arch.pio.in) 4611 r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port, 4612 vcpu->arch.pio.size, pd); 4613 else 4614 r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, 4615 vcpu->arch.pio.port, vcpu->arch.pio.size, 4616 pd); 4617 return r; 4618 } 4619 4620 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size, 4621 unsigned short port, void *val, 4622 unsigned int count, bool in) 4623 { 4624 vcpu->arch.pio.port = port; 4625 vcpu->arch.pio.in = in; 4626 vcpu->arch.pio.count = count; 4627 vcpu->arch.pio.size = size; 4628 4629 if (!kernel_pio(vcpu, vcpu->arch.pio_data)) { 4630 vcpu->arch.pio.count = 0; 4631 return 1; 4632 } 4633 4634 vcpu->run->exit_reason = KVM_EXIT_IO; 4635 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; 4636 vcpu->run->io.size = size; 4637 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; 4638 vcpu->run->io.count = count; 4639 vcpu->run->io.port = port; 4640 4641 return 0; 4642 } 4643 4644 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt, 4645 int size, unsigned short port, void *val, 4646 unsigned int count) 4647 { 4648 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4649 int ret; 4650 4651 if (vcpu->arch.pio.count) 4652 goto data_avail; 4653 4654 ret = emulator_pio_in_out(vcpu, size, port, val, count, true); 4655 if (ret) { 4656 data_avail: 4657 memcpy(val, vcpu->arch.pio_data, size * count); 4658 trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data); 4659 vcpu->arch.pio.count = 0; 4660 return 1; 4661 } 4662 4663 return 0; 4664 } 4665 4666 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt, 4667 int size, unsigned short port, 4668 const void *val, unsigned int count) 4669 { 4670 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4671 4672 memcpy(vcpu->arch.pio_data, val, size * count); 4673 trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data); 4674 return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false); 4675 } 4676 4677 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) 4678 { 4679 return kvm_x86_ops->get_segment_base(vcpu, seg); 4680 } 4681 4682 static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address) 4683 { 4684 kvm_mmu_invlpg(emul_to_vcpu(ctxt), address); 4685 } 4686 4687 int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu) 4688 { 4689 if (!need_emulate_wbinvd(vcpu)) 4690 return X86EMUL_CONTINUE; 4691 4692 if (kvm_x86_ops->has_wbinvd_exit()) { 4693 int cpu = get_cpu(); 4694 4695 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); 4696 smp_call_function_many(vcpu->arch.wbinvd_dirty_mask, 4697 wbinvd_ipi, NULL, 1); 4698 put_cpu(); 4699 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); 4700 } else 4701 wbinvd(); 4702 return X86EMUL_CONTINUE; 4703 } 4704 4705 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) 4706 { 4707 kvm_x86_ops->skip_emulated_instruction(vcpu); 4708 return kvm_emulate_wbinvd_noskip(vcpu); 4709 } 4710 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd); 4711 4712 4713 4714 static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt) 4715 { 4716 kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt)); 4717 } 4718 4719 static int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, 4720 unsigned long *dest) 4721 { 4722 return kvm_get_dr(emul_to_vcpu(ctxt), dr, dest); 4723 } 4724 4725 static int emulator_set_dr(struct 
x86_emulate_ctxt *ctxt, int dr, 4726 unsigned long value) 4727 { 4728 4729 return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value); 4730 } 4731 4732 static u64 mk_cr_64(u64 curr_cr, u32 new_val) 4733 { 4734 return (curr_cr & ~((1ULL << 32) - 1)) | new_val; 4735 } 4736 4737 static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr) 4738 { 4739 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4740 unsigned long value; 4741 4742 switch (cr) { 4743 case 0: 4744 value = kvm_read_cr0(vcpu); 4745 break; 4746 case 2: 4747 value = vcpu->arch.cr2; 4748 break; 4749 case 3: 4750 value = kvm_read_cr3(vcpu); 4751 break; 4752 case 4: 4753 value = kvm_read_cr4(vcpu); 4754 break; 4755 case 8: 4756 value = kvm_get_cr8(vcpu); 4757 break; 4758 default: 4759 kvm_err("%s: unexpected cr %u\n", __func__, cr); 4760 return 0; 4761 } 4762 4763 return value; 4764 } 4765 4766 static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val) 4767 { 4768 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4769 int res = 0; 4770 4771 switch (cr) { 4772 case 0: 4773 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val)); 4774 break; 4775 case 2: 4776 vcpu->arch.cr2 = val; 4777 break; 4778 case 3: 4779 res = kvm_set_cr3(vcpu, val); 4780 break; 4781 case 4: 4782 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val)); 4783 break; 4784 case 8: 4785 res = kvm_set_cr8(vcpu, val); 4786 break; 4787 default: 4788 kvm_err("%s: unexpected cr %u\n", __func__, cr); 4789 res = -1; 4790 } 4791 4792 return res; 4793 } 4794 4795 static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt) 4796 { 4797 return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt)); 4798 } 4799 4800 static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 4801 { 4802 kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt); 4803 } 4804 4805 static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 4806 { 4807 kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt); 4808 } 4809 4810 static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 4811 { 4812 kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt); 4813 } 4814 4815 static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 4816 { 4817 kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt); 4818 } 4819 4820 static unsigned long emulator_get_cached_segment_base( 4821 struct x86_emulate_ctxt *ctxt, int seg) 4822 { 4823 return get_segment_base(emul_to_vcpu(ctxt), seg); 4824 } 4825 4826 static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector, 4827 struct desc_struct *desc, u32 *base3, 4828 int seg) 4829 { 4830 struct kvm_segment var; 4831 4832 kvm_get_segment(emul_to_vcpu(ctxt), &var, seg); 4833 *selector = var.selector; 4834 4835 if (var.unusable) { 4836 memset(desc, 0, sizeof(*desc)); 4837 return false; 4838 } 4839 4840 if (var.g) 4841 var.limit >>= 12; 4842 set_desc_limit(desc, var.limit); 4843 set_desc_base(desc, (unsigned long)var.base); 4844 #ifdef CONFIG_X86_64 4845 if (base3) 4846 *base3 = var.base >> 32; 4847 #endif 4848 desc->type = var.type; 4849 desc->s = var.s; 4850 desc->dpl = var.dpl; 4851 desc->p = var.present; 4852 desc->avl = var.avl; 4853 desc->l = var.l; 4854 desc->d = var.db; 4855 desc->g = var.g; 4856 4857 return true; 4858 } 4859 4860 static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector, 4861 struct desc_struct *desc, u32 base3, 4862 int seg) 4863 { 4864 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4865 struct kvm_segment var; 4866 4867 var.selector = selector; 4868 var.base = 
get_desc_base(desc); 4869 #ifdef CONFIG_X86_64 4870 var.base |= ((u64)base3) << 32; 4871 #endif 4872 var.limit = get_desc_limit(desc); 4873 if (desc->g) 4874 var.limit = (var.limit << 12) | 0xfff; 4875 var.type = desc->type; 4876 var.dpl = desc->dpl; 4877 var.db = desc->d; 4878 var.s = desc->s; 4879 var.l = desc->l; 4880 var.g = desc->g; 4881 var.avl = desc->avl; 4882 var.present = desc->p; 4883 var.unusable = !var.present; 4884 var.padding = 0; 4885 4886 kvm_set_segment(vcpu, &var, seg); 4887 return; 4888 } 4889 4890 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt, 4891 u32 msr_index, u64 *pdata) 4892 { 4893 struct msr_data msr; 4894 int r; 4895 4896 msr.index = msr_index; 4897 msr.host_initiated = false; 4898 r = kvm_get_msr(emul_to_vcpu(ctxt), &msr); 4899 if (r) 4900 return r; 4901 4902 *pdata = msr.data; 4903 return 0; 4904 } 4905 4906 static int emulator_set_msr(struct x86_emulate_ctxt *ctxt, 4907 u32 msr_index, u64 data) 4908 { 4909 struct msr_data msr; 4910 4911 msr.data = data; 4912 msr.index = msr_index; 4913 msr.host_initiated = false; 4914 return kvm_set_msr(emul_to_vcpu(ctxt), &msr); 4915 } 4916 4917 static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt) 4918 { 4919 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4920 4921 return vcpu->arch.smbase; 4922 } 4923 4924 static void emulator_set_smbase(struct x86_emulate_ctxt *ctxt, u64 smbase) 4925 { 4926 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 4927 4928 vcpu->arch.smbase = smbase; 4929 } 4930 4931 static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt, 4932 u32 pmc) 4933 { 4934 return kvm_pmu_is_valid_msr_idx(emul_to_vcpu(ctxt), pmc); 4935 } 4936 4937 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt, 4938 u32 pmc, u64 *pdata) 4939 { 4940 return kvm_pmu_rdpmc(emul_to_vcpu(ctxt), pmc, pdata); 4941 } 4942 4943 static void emulator_halt(struct x86_emulate_ctxt *ctxt) 4944 { 4945 emul_to_vcpu(ctxt)->arch.halt_request = 1; 4946 } 4947 4948 static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt) 4949 { 4950 preempt_disable(); 4951 kvm_load_guest_fpu(emul_to_vcpu(ctxt)); 4952 /* 4953 * CR0.TS may reference the host fpu state, not the guest fpu state, 4954 * so it may be clear at this point. 
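 * clts() below makes sure TS is cleared, so the FPU accesses done on
 * behalf of the emulated instruction cannot fault with #NM.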
4955 */ 4956 clts(); 4957 } 4958 4959 static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt) 4960 { 4961 preempt_enable(); 4962 } 4963 4964 static int emulator_intercept(struct x86_emulate_ctxt *ctxt, 4965 struct x86_instruction_info *info, 4966 enum x86_intercept_stage stage) 4967 { 4968 return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage); 4969 } 4970 4971 static void emulator_get_cpuid(struct x86_emulate_ctxt *ctxt, 4972 u32 *eax, u32 *ebx, u32 *ecx, u32 *edx) 4973 { 4974 kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx); 4975 } 4976 4977 static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg) 4978 { 4979 return kvm_register_read(emul_to_vcpu(ctxt), reg); 4980 } 4981 4982 static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val) 4983 { 4984 kvm_register_write(emul_to_vcpu(ctxt), reg, val); 4985 } 4986 4987 static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked) 4988 { 4989 kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked); 4990 } 4991 4992 static const struct x86_emulate_ops emulate_ops = { 4993 .read_gpr = emulator_read_gpr, 4994 .write_gpr = emulator_write_gpr, 4995 .read_std = kvm_read_guest_virt_system, 4996 .write_std = kvm_write_guest_virt_system, 4997 .fetch = kvm_fetch_guest_virt, 4998 .read_emulated = emulator_read_emulated, 4999 .write_emulated = emulator_write_emulated, 5000 .cmpxchg_emulated = emulator_cmpxchg_emulated, 5001 .invlpg = emulator_invlpg, 5002 .pio_in_emulated = emulator_pio_in_emulated, 5003 .pio_out_emulated = emulator_pio_out_emulated, 5004 .get_segment = emulator_get_segment, 5005 .set_segment = emulator_set_segment, 5006 .get_cached_segment_base = emulator_get_cached_segment_base, 5007 .get_gdt = emulator_get_gdt, 5008 .get_idt = emulator_get_idt, 5009 .set_gdt = emulator_set_gdt, 5010 .set_idt = emulator_set_idt, 5011 .get_cr = emulator_get_cr, 5012 .set_cr = emulator_set_cr, 5013 .cpl = emulator_get_cpl, 5014 .get_dr = emulator_get_dr, 5015 .set_dr = emulator_set_dr, 5016 .get_smbase = emulator_get_smbase, 5017 .set_smbase = emulator_set_smbase, 5018 .set_msr = emulator_set_msr, 5019 .get_msr = emulator_get_msr, 5020 .check_pmc = emulator_check_pmc, 5021 .read_pmc = emulator_read_pmc, 5022 .halt = emulator_halt, 5023 .wbinvd = emulator_wbinvd, 5024 .fix_hypercall = emulator_fix_hypercall, 5025 .get_fpu = emulator_get_fpu, 5026 .put_fpu = emulator_put_fpu, 5027 .intercept = emulator_intercept, 5028 .get_cpuid = emulator_get_cpuid, 5029 .set_nmi_mask = emulator_set_nmi_mask, 5030 }; 5031 5032 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) 5033 { 5034 u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu); 5035 /* 5036 * an sti; sti; sequence only disable interrupts for the first 5037 * instruction. So, if the last instruction, be it emulated or 5038 * not, left the system with the INT_STI flag enabled, it 5039 * means that the last instruction is an sti. We should not 5040 * leave the flag on in this case. 
The same goes for mov ss 5041 */ 5042 if (int_shadow & mask) 5043 mask = 0; 5044 if (unlikely(int_shadow || mask)) { 5045 kvm_x86_ops->set_interrupt_shadow(vcpu, mask); 5046 if (!mask) 5047 kvm_make_request(KVM_REQ_EVENT, vcpu); 5048 } 5049 } 5050 5051 static bool inject_emulated_exception(struct kvm_vcpu *vcpu) 5052 { 5053 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; 5054 if (ctxt->exception.vector == PF_VECTOR) 5055 return kvm_propagate_fault(vcpu, &ctxt->exception); 5056 5057 if (ctxt->exception.error_code_valid) 5058 kvm_queue_exception_e(vcpu, ctxt->exception.vector, 5059 ctxt->exception.error_code); 5060 else 5061 kvm_queue_exception(vcpu, ctxt->exception.vector); 5062 return false; 5063 } 5064 5065 static void init_emulate_ctxt(struct kvm_vcpu *vcpu) 5066 { 5067 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; 5068 int cs_db, cs_l; 5069 5070 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); 5071 5072 ctxt->eflags = kvm_get_rflags(vcpu); 5073 ctxt->eip = kvm_rip_read(vcpu); 5074 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : 5075 (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : 5076 (cs_l && is_long_mode(vcpu)) ? X86EMUL_MODE_PROT64 : 5077 cs_db ? X86EMUL_MODE_PROT32 : 5078 X86EMUL_MODE_PROT16; 5079 BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK); 5080 BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK); 5081 BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK); 5082 ctxt->emul_flags = vcpu->arch.hflags; 5083 5084 init_decode_cache(ctxt); 5085 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; 5086 } 5087 5088 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip) 5089 { 5090 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; 5091 int ret; 5092 5093 init_emulate_ctxt(vcpu); 5094 5095 ctxt->op_bytes = 2; 5096 ctxt->ad_bytes = 2; 5097 ctxt->_eip = ctxt->eip + inc_eip; 5098 ret = emulate_int_real(ctxt, irq); 5099 5100 if (ret != X86EMUL_CONTINUE) 5101 return EMULATE_FAIL; 5102 5103 ctxt->eip = ctxt->_eip; 5104 kvm_rip_write(vcpu, ctxt->eip); 5105 kvm_set_rflags(vcpu, ctxt->eflags); 5106 5107 if (irq == NMI_VECTOR) 5108 vcpu->arch.nmi_pending = 0; 5109 else 5110 vcpu->arch.interrupt.pending = false; 5111 5112 return EMULATE_DONE; 5113 } 5114 EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt); 5115 5116 static int handle_emulation_failure(struct kvm_vcpu *vcpu) 5117 { 5118 int r = EMULATE_DONE; 5119 5120 ++vcpu->stat.insn_emulation_fail; 5121 trace_kvm_emulate_insn_failed(vcpu); 5122 if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) { 5123 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 5124 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; 5125 vcpu->run->internal.ndata = 0; 5126 r = EMULATE_FAIL; 5127 } 5128 kvm_queue_exception(vcpu, UD_VECTOR); 5129 5130 return r; 5131 } 5132 5133 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2, 5134 bool write_fault_to_shadow_pgtable, 5135 int emulation_type) 5136 { 5137 gpa_t gpa = cr2; 5138 pfn_t pfn; 5139 5140 if (emulation_type & EMULTYPE_NO_REEXECUTE) 5141 return false; 5142 5143 if (!vcpu->arch.mmu.direct_map) { 5144 /* 5145 * Write permission should be allowed since only 5146 * write access need to be emulated. 5147 */ 5148 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL); 5149 5150 /* 5151 * If the mapping is invalid in guest, let cpu retry 5152 * it to generate fault. 
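 * Returning true makes the caller re-enter the guest; the access will
 * fault again there and the #PF is delivered to the guest directly.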
5153 */ 5154 if (gpa == UNMAPPED_GVA) 5155 return true; 5156 } 5157 5158 /* 5159 * Do not retry the unhandleable instruction if it faults on the 5160 * readonly host memory, otherwise it will goto a infinite loop: 5161 * retry instruction -> write #PF -> emulation fail -> retry 5162 * instruction -> ... 5163 */ 5164 pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); 5165 5166 /* 5167 * If the instruction failed on the error pfn, it can not be fixed, 5168 * report the error to userspace. 5169 */ 5170 if (is_error_noslot_pfn(pfn)) 5171 return false; 5172 5173 kvm_release_pfn_clean(pfn); 5174 5175 /* The instructions are well-emulated on direct mmu. */ 5176 if (vcpu->arch.mmu.direct_map) { 5177 unsigned int indirect_shadow_pages; 5178 5179 spin_lock(&vcpu->kvm->mmu_lock); 5180 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; 5181 spin_unlock(&vcpu->kvm->mmu_lock); 5182 5183 if (indirect_shadow_pages) 5184 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); 5185 5186 return true; 5187 } 5188 5189 /* 5190 * if emulation was due to access to shadowed page table 5191 * and it failed try to unshadow page and re-enter the 5192 * guest to let CPU execute the instruction. 5193 */ 5194 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); 5195 5196 /* 5197 * If the access faults on its page table, it can not 5198 * be fixed by unprotecting shadow page and it should 5199 * be reported to userspace. 5200 */ 5201 return !write_fault_to_shadow_pgtable; 5202 } 5203 5204 static bool retry_instruction(struct x86_emulate_ctxt *ctxt, 5205 unsigned long cr2, int emulation_type) 5206 { 5207 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 5208 unsigned long last_retry_eip, last_retry_addr, gpa = cr2; 5209 5210 last_retry_eip = vcpu->arch.last_retry_eip; 5211 last_retry_addr = vcpu->arch.last_retry_addr; 5212 5213 /* 5214 * If the emulation is caused by #PF and it is non-page_table 5215 * writing instruction, it means the VM-EXIT is caused by shadow 5216 * page protected, we can zap the shadow page and retry this 5217 * instruction directly. 5218 * 5219 * Note: if the guest uses a non-page-table modifying instruction 5220 * on the PDE that points to the instruction, then we will unmap 5221 * the instruction and go to an infinite loop. So, we cache the 5222 * last retried eip and the last fault address, if we meet the eip 5223 * and the address again, we can break out of the potential infinite 5224 * loop. 5225 */ 5226 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; 5227 5228 if (!(emulation_type & EMULTYPE_RETRY)) 5229 return false; 5230 5231 if (x86_page_table_writing_insn(ctxt)) 5232 return false; 5233 5234 if (ctxt->eip == last_retry_eip && last_retry_addr == cr2) 5235 return false; 5236 5237 vcpu->arch.last_retry_eip = ctxt->eip; 5238 vcpu->arch.last_retry_addr = cr2; 5239 5240 if (!vcpu->arch.mmu.direct_map) 5241 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL); 5242 5243 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); 5244 5245 return true; 5246 } 5247 5248 static int complete_emulated_mmio(struct kvm_vcpu *vcpu); 5249 static int complete_emulated_pio(struct kvm_vcpu *vcpu); 5250 5251 static void kvm_smm_changed(struct kvm_vcpu *vcpu) 5252 { 5253 if (!(vcpu->arch.hflags & HF_SMM_MASK)) { 5254 /* This is a good place to trace that we are exiting SMM. 
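 * The same tracepoint is used for entering and leaving SMM; the false
 * argument below marks this as an exit.
 */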
*/ 5255 trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false); 5256 5257 if (unlikely(vcpu->arch.smi_pending)) { 5258 kvm_make_request(KVM_REQ_SMI, vcpu); 5259 vcpu->arch.smi_pending = 0; 5260 } else { 5261 /* Process a latched INIT, if any. */ 5262 kvm_make_request(KVM_REQ_EVENT, vcpu); 5263 } 5264 } 5265 5266 kvm_mmu_reset_context(vcpu); 5267 } 5268 5269 static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags) 5270 { 5271 unsigned changed = vcpu->arch.hflags ^ emul_flags; 5272 5273 vcpu->arch.hflags = emul_flags; 5274 5275 if (changed & HF_SMM_MASK) 5276 kvm_smm_changed(vcpu); 5277 } 5278 5279 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7, 5280 unsigned long *db) 5281 { 5282 u32 dr6 = 0; 5283 int i; 5284 u32 enable, rwlen; 5285 5286 enable = dr7; 5287 rwlen = dr7 >> 16; 5288 for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4) 5289 if ((enable & 3) && (rwlen & 15) == type && db[i] == addr) 5290 dr6 |= (1 << i); 5291 return dr6; 5292 } 5293 5294 static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r) 5295 { 5296 struct kvm_run *kvm_run = vcpu->run; 5297 5298 /* 5299 * rflags is the old, "raw" value of the flags. The new value has 5300 * not been saved yet. 5301 * 5302 * This is correct even for TF set by the guest, because "the 5303 * processor will not generate this exception after the instruction 5304 * that sets the TF flag". 5305 */ 5306 if (unlikely(rflags & X86_EFLAGS_TF)) { 5307 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { 5308 kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | 5309 DR6_RTM; 5310 kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; 5311 kvm_run->debug.arch.exception = DB_VECTOR; 5312 kvm_run->exit_reason = KVM_EXIT_DEBUG; 5313 *r = EMULATE_USER_EXIT; 5314 } else { 5315 vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF; 5316 /* 5317 * "Certain debug exceptions may clear bit 0-3. The 5318 * remaining contents of the DR6 register are never 5319 * cleared by the processor". 
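 * Accordingly, only bits 3:0 are cleared below before DR6_BS and
 * DR6_RTM are ORed in.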
5320 */ 5321 vcpu->arch.dr6 &= ~15; 5322 vcpu->arch.dr6 |= DR6_BS | DR6_RTM; 5323 kvm_queue_exception(vcpu, DB_VECTOR); 5324 } 5325 } 5326 } 5327 5328 static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r) 5329 { 5330 if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && 5331 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { 5332 struct kvm_run *kvm_run = vcpu->run; 5333 unsigned long eip = kvm_get_linear_rip(vcpu); 5334 u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0, 5335 vcpu->arch.guest_debug_dr7, 5336 vcpu->arch.eff_db); 5337 5338 if (dr6 != 0) { 5339 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM; 5340 kvm_run->debug.arch.pc = eip; 5341 kvm_run->debug.arch.exception = DB_VECTOR; 5342 kvm_run->exit_reason = KVM_EXIT_DEBUG; 5343 *r = EMULATE_USER_EXIT; 5344 return true; 5345 } 5346 } 5347 5348 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) && 5349 !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) { 5350 unsigned long eip = kvm_get_linear_rip(vcpu); 5351 u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0, 5352 vcpu->arch.dr7, 5353 vcpu->arch.db); 5354 5355 if (dr6 != 0) { 5356 vcpu->arch.dr6 &= ~15; 5357 vcpu->arch.dr6 |= dr6 | DR6_RTM; 5358 kvm_queue_exception(vcpu, DB_VECTOR); 5359 *r = EMULATE_DONE; 5360 return true; 5361 } 5362 } 5363 5364 return false; 5365 } 5366 5367 int x86_emulate_instruction(struct kvm_vcpu *vcpu, 5368 unsigned long cr2, 5369 int emulation_type, 5370 void *insn, 5371 int insn_len) 5372 { 5373 int r; 5374 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; 5375 bool writeback = true; 5376 bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; 5377 5378 /* 5379 * Clear write_fault_to_shadow_pgtable here to ensure it is 5380 * never reused. 5381 */ 5382 vcpu->arch.write_fault_to_shadow_pgtable = false; 5383 kvm_clear_exception_queue(vcpu); 5384 5385 if (!(emulation_type & EMULTYPE_NO_DECODE)) { 5386 init_emulate_ctxt(vcpu); 5387 5388 /* 5389 * We will reenter on the same instruction since 5390 * we do not set complete_userspace_io. This does not 5391 * handle watchpoints yet, those would be handled in 5392 * the emulate_ops. 
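 * kvm_vcpu_check_breakpoint() below only matches execution breakpoints
 * (it passes type 0 to kvm_vcpu_check_hw_bp()); data watchpoints trigger
 * while the instruction is emulated and would have to be reported from
 * the emulator callbacks.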
5393 */ 5394 if (kvm_vcpu_check_breakpoint(vcpu, &r)) 5395 return r; 5396 5397 ctxt->interruptibility = 0; 5398 ctxt->have_exception = false; 5399 ctxt->exception.vector = -1; 5400 ctxt->perm_ok = false; 5401 5402 ctxt->ud = emulation_type & EMULTYPE_TRAP_UD; 5403 5404 r = x86_decode_insn(ctxt, insn, insn_len); 5405 5406 trace_kvm_emulate_insn_start(vcpu); 5407 ++vcpu->stat.insn_emulation; 5408 if (r != EMULATION_OK) { 5409 if (emulation_type & EMULTYPE_TRAP_UD) 5410 return EMULATE_FAIL; 5411 if (reexecute_instruction(vcpu, cr2, write_fault_to_spt, 5412 emulation_type)) 5413 return EMULATE_DONE; 5414 if (emulation_type & EMULTYPE_SKIP) 5415 return EMULATE_FAIL; 5416 return handle_emulation_failure(vcpu); 5417 } 5418 } 5419 5420 if (emulation_type & EMULTYPE_SKIP) { 5421 kvm_rip_write(vcpu, ctxt->_eip); 5422 if (ctxt->eflags & X86_EFLAGS_RF) 5423 kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF); 5424 return EMULATE_DONE; 5425 } 5426 5427 if (retry_instruction(ctxt, cr2, emulation_type)) 5428 return EMULATE_DONE; 5429 5430 /* this is needed for vmware backdoor interface to work since it 5431 changes registers values during IO operation */ 5432 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { 5433 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; 5434 emulator_invalidate_register_cache(ctxt); 5435 } 5436 5437 restart: 5438 r = x86_emulate_insn(ctxt); 5439 5440 if (r == EMULATION_INTERCEPTED) 5441 return EMULATE_DONE; 5442 5443 if (r == EMULATION_FAILED) { 5444 if (reexecute_instruction(vcpu, cr2, write_fault_to_spt, 5445 emulation_type)) 5446 return EMULATE_DONE; 5447 5448 return handle_emulation_failure(vcpu); 5449 } 5450 5451 if (ctxt->have_exception) { 5452 r = EMULATE_DONE; 5453 if (inject_emulated_exception(vcpu)) 5454 return r; 5455 } else if (vcpu->arch.pio.count) { 5456 if (!vcpu->arch.pio.in) { 5457 /* FIXME: return into emulator if single-stepping. */ 5458 vcpu->arch.pio.count = 0; 5459 } else { 5460 writeback = false; 5461 vcpu->arch.complete_userspace_io = complete_emulated_pio; 5462 } 5463 r = EMULATE_USER_EXIT; 5464 } else if (vcpu->mmio_needed) { 5465 if (!vcpu->mmio_is_write) 5466 writeback = false; 5467 r = EMULATE_USER_EXIT; 5468 vcpu->arch.complete_userspace_io = complete_emulated_mmio; 5469 } else if (r == EMULATION_RESTART) 5470 goto restart; 5471 else 5472 r = EMULATE_DONE; 5473 5474 if (writeback) { 5475 unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); 5476 toggle_interruptibility(vcpu, ctxt->interruptibility); 5477 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 5478 if (vcpu->arch.hflags != ctxt->emul_flags) 5479 kvm_set_hflags(vcpu, ctxt->emul_flags); 5480 kvm_rip_write(vcpu, ctxt->eip); 5481 if (r == EMULATE_DONE) 5482 kvm_vcpu_check_singlestep(vcpu, rflags, &r); 5483 if (!ctxt->have_exception || 5484 exception_type(ctxt->exception.vector) == EXCPT_TRAP) 5485 __kvm_set_rflags(vcpu, ctxt->eflags); 5486 5487 /* 5488 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will 5489 * do nothing, and it will be requested again as soon as 5490 * the shadow expires. But we still need to check here, 5491 * because POPF has no interrupt shadow. 
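 * The check below therefore requests an event-injection pass whenever the
 * emulated instruction turned IF from 0 to 1, which is exactly what
 * (ctxt->eflags & ~rflags) & X86_EFLAGS_IF detects (rflags holds the old
 * value).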
5492 */ 5493 if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF)) 5494 kvm_make_request(KVM_REQ_EVENT, vcpu); 5495 } else 5496 vcpu->arch.emulate_regs_need_sync_to_vcpu = true; 5497 5498 return r; 5499 } 5500 EXPORT_SYMBOL_GPL(x86_emulate_instruction); 5501 5502 int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port) 5503 { 5504 unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX); 5505 int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt, 5506 size, port, &val, 1); 5507 /* do not return to emulator after return from userspace */ 5508 vcpu->arch.pio.count = 0; 5509 return ret; 5510 } 5511 EXPORT_SYMBOL_GPL(kvm_fast_pio_out); 5512 5513 static void tsc_bad(void *info) 5514 { 5515 __this_cpu_write(cpu_tsc_khz, 0); 5516 } 5517 5518 static void tsc_khz_changed(void *data) 5519 { 5520 struct cpufreq_freqs *freq = data; 5521 unsigned long khz = 0; 5522 5523 if (data) 5524 khz = freq->new; 5525 else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 5526 khz = cpufreq_quick_get(raw_smp_processor_id()); 5527 if (!khz) 5528 khz = tsc_khz; 5529 __this_cpu_write(cpu_tsc_khz, khz); 5530 } 5531 5532 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, 5533 void *data) 5534 { 5535 struct cpufreq_freqs *freq = data; 5536 struct kvm *kvm; 5537 struct kvm_vcpu *vcpu; 5538 int i, send_ipi = 0; 5539 5540 /* 5541 * We allow guests to temporarily run on slowing clocks, 5542 * provided we notify them after, or to run on accelerating 5543 * clocks, provided we notify them before. Thus time never 5544 * goes backwards. 5545 * 5546 * However, we have a problem. We can't atomically update 5547 * the frequency of a given CPU from this function; it is 5548 * merely a notifier, which can be called from any CPU. 5549 * Changing the TSC frequency at arbitrary points in time 5550 * requires a recomputation of local variables related to 5551 * the TSC for each VCPU. We must flag these local variables 5552 * to be updated and be sure the update takes place with the 5553 * new frequency before any guests proceed. 5554 * 5555 * Unfortunately, the combination of hotplug CPU and frequency 5556 * change creates an intractable locking scenario; the order 5557 * of when these callouts happen is undefined with respect to 5558 * CPU hotplug, and they can race with each other. As such, 5559 * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is 5560 * undefined; you can actually have a CPU frequency change take 5561 * place in between the computation of X and the setting of the 5562 * variable. To protect against this problem, all updates of 5563 * the per_cpu tsc_khz variable are done in an interrupt 5564 * protected IPI, and all callers wishing to update the value 5565 * must wait for a synchronous IPI to complete (which is trivial 5566 * if the caller is on the CPU already). This establishes the 5567 * necessary total order on variable updates. 5568 * 5569 * Note that because a guest time update may take place 5570 * anytime after the setting of the VCPU's request bit, the 5571 * correct TSC value must be set before the request. However, 5572 * to ensure the update actually makes it to any guest which 5573 * starts running in hardware virtualization between the set 5574 * and the acquisition of the spinlock, we must also ping the 5575 * CPU after setting the request bit. 
5576 * 5577 */ 5578 5579 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new) 5580 return 0; 5581 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new) 5582 return 0; 5583 5584 smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1); 5585 5586 spin_lock(&kvm_lock); 5587 list_for_each_entry(kvm, &vm_list, vm_list) { 5588 kvm_for_each_vcpu(i, vcpu, kvm) { 5589 if (vcpu->cpu != freq->cpu) 5590 continue; 5591 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 5592 if (vcpu->cpu != smp_processor_id()) 5593 send_ipi = 1; 5594 } 5595 } 5596 spin_unlock(&kvm_lock); 5597 5598 if (freq->old < freq->new && send_ipi) { 5599 /* 5600 * We upscale the frequency. Must make the guest 5601 * doesn't see old kvmclock values while running with 5602 * the new frequency, otherwise we risk the guest sees 5603 * time go backwards. 5604 * 5605 * In case we update the frequency for another cpu 5606 * (which might be in guest context) send an interrupt 5607 * to kick the cpu out of guest context. Next time 5608 * guest context is entered kvmclock will be updated, 5609 * so the guest will not see stale values. 5610 */ 5611 smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1); 5612 } 5613 return 0; 5614 } 5615 5616 static struct notifier_block kvmclock_cpufreq_notifier_block = { 5617 .notifier_call = kvmclock_cpufreq_notifier 5618 }; 5619 5620 static int kvmclock_cpu_notifier(struct notifier_block *nfb, 5621 unsigned long action, void *hcpu) 5622 { 5623 unsigned int cpu = (unsigned long)hcpu; 5624 5625 switch (action) { 5626 case CPU_ONLINE: 5627 case CPU_DOWN_FAILED: 5628 smp_call_function_single(cpu, tsc_khz_changed, NULL, 1); 5629 break; 5630 case CPU_DOWN_PREPARE: 5631 smp_call_function_single(cpu, tsc_bad, NULL, 1); 5632 break; 5633 } 5634 return NOTIFY_OK; 5635 } 5636 5637 static struct notifier_block kvmclock_cpu_notifier_block = { 5638 .notifier_call = kvmclock_cpu_notifier, 5639 .priority = -INT_MAX 5640 }; 5641 5642 static void kvm_timer_init(void) 5643 { 5644 int cpu; 5645 5646 max_tsc_khz = tsc_khz; 5647 5648 cpu_notifier_register_begin(); 5649 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { 5650 #ifdef CONFIG_CPU_FREQ 5651 struct cpufreq_policy policy; 5652 memset(&policy, 0, sizeof(policy)); 5653 cpu = get_cpu(); 5654 cpufreq_get_policy(&policy, cpu); 5655 if (policy.cpuinfo.max_freq) 5656 max_tsc_khz = policy.cpuinfo.max_freq; 5657 put_cpu(); 5658 #endif 5659 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block, 5660 CPUFREQ_TRANSITION_NOTIFIER); 5661 } 5662 pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz); 5663 for_each_online_cpu(cpu) 5664 smp_call_function_single(cpu, tsc_khz_changed, NULL, 1); 5665 5666 __register_hotcpu_notifier(&kvmclock_cpu_notifier_block); 5667 cpu_notifier_register_done(); 5668 5669 } 5670 5671 static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu); 5672 5673 int kvm_is_in_guest(void) 5674 { 5675 return __this_cpu_read(current_vcpu) != NULL; 5676 } 5677 5678 static int kvm_is_user_mode(void) 5679 { 5680 int user_mode = 3; 5681 5682 if (__this_cpu_read(current_vcpu)) 5683 user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu)); 5684 5685 return user_mode != 0; 5686 } 5687 5688 static unsigned long kvm_get_guest_ip(void) 5689 { 5690 unsigned long ip = 0; 5691 5692 if (__this_cpu_read(current_vcpu)) 5693 ip = kvm_rip_read(__this_cpu_read(current_vcpu)); 5694 5695 return ip; 5696 } 5697 5698 static struct perf_guest_info_callbacks kvm_guest_cbs = { 5699 .is_in_guest = kvm_is_in_guest, 5700 .is_user_mode = kvm_is_user_mode, 5701 .get_guest_ip = 
kvm_get_guest_ip, 5702 }; 5703 5704 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu) 5705 { 5706 __this_cpu_write(current_vcpu, vcpu); 5707 } 5708 EXPORT_SYMBOL_GPL(kvm_before_handle_nmi); 5709 5710 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu) 5711 { 5712 __this_cpu_write(current_vcpu, NULL); 5713 } 5714 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi); 5715 5716 static void kvm_set_mmio_spte_mask(void) 5717 { 5718 u64 mask; 5719 int maxphyaddr = boot_cpu_data.x86_phys_bits; 5720 5721 /* 5722 * Set the reserved bits and the present bit of an paging-structure 5723 * entry to generate page fault with PFER.RSV = 1. 5724 */ 5725 /* Mask the reserved physical address bits. */ 5726 mask = rsvd_bits(maxphyaddr, 51); 5727 5728 /* Bit 62 is always reserved for 32bit host. */ 5729 mask |= 0x3ull << 62; 5730 5731 /* Set the present bit. */ 5732 mask |= 1ull; 5733 5734 #ifdef CONFIG_X86_64 5735 /* 5736 * If reserved bit is not supported, clear the present bit to disable 5737 * mmio page fault. 5738 */ 5739 if (maxphyaddr == 52) 5740 mask &= ~1ull; 5741 #endif 5742 5743 kvm_mmu_set_mmio_spte_mask(mask); 5744 } 5745 5746 #ifdef CONFIG_X86_64 5747 static void pvclock_gtod_update_fn(struct work_struct *work) 5748 { 5749 struct kvm *kvm; 5750 5751 struct kvm_vcpu *vcpu; 5752 int i; 5753 5754 spin_lock(&kvm_lock); 5755 list_for_each_entry(kvm, &vm_list, vm_list) 5756 kvm_for_each_vcpu(i, vcpu, kvm) 5757 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); 5758 atomic_set(&kvm_guest_has_master_clock, 0); 5759 spin_unlock(&kvm_lock); 5760 } 5761 5762 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn); 5763 5764 /* 5765 * Notification about pvclock gtod data update. 5766 */ 5767 static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused, 5768 void *priv) 5769 { 5770 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 5771 struct timekeeper *tk = priv; 5772 5773 update_pvclock_gtod(tk); 5774 5775 /* disable master clock if host does not trust, or does not 5776 * use, TSC clocksource 5777 */ 5778 if (gtod->clock.vclock_mode != VCLOCK_TSC && 5779 atomic_read(&kvm_guest_has_master_clock) != 0) 5780 queue_work(system_long_wq, &pvclock_gtod_work); 5781 5782 return 0; 5783 } 5784 5785 static struct notifier_block pvclock_gtod_notifier = { 5786 .notifier_call = pvclock_gtod_notify, 5787 }; 5788 #endif 5789 5790 int kvm_arch_init(void *opaque) 5791 { 5792 int r; 5793 struct kvm_x86_ops *ops = opaque; 5794 5795 if (kvm_x86_ops) { 5796 printk(KERN_ERR "kvm: already loaded the other module\n"); 5797 r = -EEXIST; 5798 goto out; 5799 } 5800 5801 if (!ops->cpu_has_kvm_support()) { 5802 printk(KERN_ERR "kvm: no hardware support\n"); 5803 r = -EOPNOTSUPP; 5804 goto out; 5805 } 5806 if (ops->disabled_by_bios()) { 5807 printk(KERN_ERR "kvm: disabled by bios\n"); 5808 r = -EOPNOTSUPP; 5809 goto out; 5810 } 5811 5812 r = -ENOMEM; 5813 shared_msrs = alloc_percpu(struct kvm_shared_msrs); 5814 if (!shared_msrs) { 5815 printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n"); 5816 goto out; 5817 } 5818 5819 r = kvm_mmu_module_init(); 5820 if (r) 5821 goto out_free_percpu; 5822 5823 kvm_set_mmio_spte_mask(); 5824 5825 kvm_x86_ops = ops; 5826 5827 kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK, 5828 PT_DIRTY_MASK, PT64_NX_MASK, 0); 5829 5830 kvm_timer_init(); 5831 5832 perf_register_guest_info_callbacks(&kvm_guest_cbs); 5833 5834 if (cpu_has_xsave) 5835 host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); 5836 5837 kvm_lapic_init(); 5838 #ifdef CONFIG_X86_64 5839 
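/*
 * Hook host timekeeping updates so that pvclock_gtod_notify() above can
 * schedule a masterclock update whenever the host stops using the TSC
 * clocksource.
 */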
pvclock_gtod_register_notifier(&pvclock_gtod_notifier); 5840 #endif 5841 5842 return 0; 5843 5844 out_free_percpu: 5845 free_percpu(shared_msrs); 5846 out: 5847 return r; 5848 } 5849 5850 void kvm_arch_exit(void) 5851 { 5852 perf_unregister_guest_info_callbacks(&kvm_guest_cbs); 5853 5854 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 5855 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block, 5856 CPUFREQ_TRANSITION_NOTIFIER); 5857 unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block); 5858 #ifdef CONFIG_X86_64 5859 pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier); 5860 #endif 5861 kvm_x86_ops = NULL; 5862 kvm_mmu_module_exit(); 5863 free_percpu(shared_msrs); 5864 } 5865 5866 int kvm_vcpu_halt(struct kvm_vcpu *vcpu) 5867 { 5868 ++vcpu->stat.halt_exits; 5869 if (irqchip_in_kernel(vcpu->kvm)) { 5870 vcpu->arch.mp_state = KVM_MP_STATE_HALTED; 5871 return 1; 5872 } else { 5873 vcpu->run->exit_reason = KVM_EXIT_HLT; 5874 return 0; 5875 } 5876 } 5877 EXPORT_SYMBOL_GPL(kvm_vcpu_halt); 5878 5879 int kvm_emulate_halt(struct kvm_vcpu *vcpu) 5880 { 5881 kvm_x86_ops->skip_emulated_instruction(vcpu); 5882 return kvm_vcpu_halt(vcpu); 5883 } 5884 EXPORT_SYMBOL_GPL(kvm_emulate_halt); 5885 5886 int kvm_hv_hypercall(struct kvm_vcpu *vcpu) 5887 { 5888 u64 param, ingpa, outgpa, ret; 5889 uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0; 5890 bool fast, longmode; 5891 5892 /* 5893 * hypercall generates UD from non zero cpl and real mode 5894 * per HYPER-V spec 5895 */ 5896 if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) { 5897 kvm_queue_exception(vcpu, UD_VECTOR); 5898 return 0; 5899 } 5900 5901 longmode = is_64_bit_mode(vcpu); 5902 5903 if (!longmode) { 5904 param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) | 5905 (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff); 5906 ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) | 5907 (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff); 5908 outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) | 5909 (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff); 5910 } 5911 #ifdef CONFIG_X86_64 5912 else { 5913 param = kvm_register_read(vcpu, VCPU_REGS_RCX); 5914 ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX); 5915 outgpa = kvm_register_read(vcpu, VCPU_REGS_R8); 5916 } 5917 #endif 5918 5919 code = param & 0xffff; 5920 fast = (param >> 16) & 0x1; 5921 rep_cnt = (param >> 32) & 0xfff; 5922 rep_idx = (param >> 48) & 0xfff; 5923 5924 trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa); 5925 5926 switch (code) { 5927 case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT: 5928 kvm_vcpu_on_spin(vcpu); 5929 break; 5930 default: 5931 res = HV_STATUS_INVALID_HYPERCALL_CODE; 5932 break; 5933 } 5934 5935 ret = res | (((u64)rep_done & 0xfff) << 32); 5936 if (longmode) { 5937 kvm_register_write(vcpu, VCPU_REGS_RAX, ret); 5938 } else { 5939 kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32); 5940 kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff); 5941 } 5942 5943 return 1; 5944 } 5945 5946 /* 5947 * kvm_pv_kick_cpu_op: Kick a vcpu. 5948 * 5949 * @apicid - apicid of vcpu to be kicked. 
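 * The kick is sent as an APIC_DM_REMRD IPI, which KVM's local APIC
 * emulation treats as a PV unhalt/wakeup of the target vcpu.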
5950 */ 5951 static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid) 5952 { 5953 struct kvm_lapic_irq lapic_irq; 5954 5955 lapic_irq.shorthand = 0; 5956 lapic_irq.dest_mode = 0; 5957 lapic_irq.dest_id = apicid; 5958 lapic_irq.msi_redir_hint = false; 5959 5960 lapic_irq.delivery_mode = APIC_DM_REMRD; 5961 kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL); 5962 } 5963 5964 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) 5965 { 5966 unsigned long nr, a0, a1, a2, a3, ret; 5967 int op_64_bit, r = 1; 5968 5969 kvm_x86_ops->skip_emulated_instruction(vcpu); 5970 5971 if (kvm_hv_hypercall_enabled(vcpu->kvm)) 5972 return kvm_hv_hypercall(vcpu); 5973 5974 nr = kvm_register_read(vcpu, VCPU_REGS_RAX); 5975 a0 = kvm_register_read(vcpu, VCPU_REGS_RBX); 5976 a1 = kvm_register_read(vcpu, VCPU_REGS_RCX); 5977 a2 = kvm_register_read(vcpu, VCPU_REGS_RDX); 5978 a3 = kvm_register_read(vcpu, VCPU_REGS_RSI); 5979 5980 trace_kvm_hypercall(nr, a0, a1, a2, a3); 5981 5982 op_64_bit = is_64_bit_mode(vcpu); 5983 if (!op_64_bit) { 5984 nr &= 0xFFFFFFFF; 5985 a0 &= 0xFFFFFFFF; 5986 a1 &= 0xFFFFFFFF; 5987 a2 &= 0xFFFFFFFF; 5988 a3 &= 0xFFFFFFFF; 5989 } 5990 5991 if (kvm_x86_ops->get_cpl(vcpu) != 0) { 5992 ret = -KVM_EPERM; 5993 goto out; 5994 } 5995 5996 switch (nr) { 5997 case KVM_HC_VAPIC_POLL_IRQ: 5998 ret = 0; 5999 break; 6000 case KVM_HC_KICK_CPU: 6001 kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1); 6002 ret = 0; 6003 break; 6004 default: 6005 ret = -KVM_ENOSYS; 6006 break; 6007 } 6008 out: 6009 if (!op_64_bit) 6010 ret = (u32)ret; 6011 kvm_register_write(vcpu, VCPU_REGS_RAX, ret); 6012 ++vcpu->stat.hypercalls; 6013 return r; 6014 } 6015 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall); 6016 6017 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) 6018 { 6019 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 6020 char instruction[3]; 6021 unsigned long rip = kvm_rip_read(vcpu); 6022 6023 kvm_x86_ops->patch_hypercall(vcpu, instruction); 6024 6025 return emulator_write_emulated(ctxt, rip, instruction, 3, NULL); 6026 } 6027 6028 /* 6029 * Check if userspace requested an interrupt window, and that the 6030 * interrupt window is open. 6031 * 6032 * No need to exit to userspace if we already have an interrupt queued. 6033 */ 6034 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) 6035 { 6036 return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) && 6037 vcpu->run->request_interrupt_window && 6038 kvm_arch_interrupt_allowed(vcpu)); 6039 } 6040 6041 static void post_kvm_run_save(struct kvm_vcpu *vcpu) 6042 { 6043 struct kvm_run *kvm_run = vcpu->run; 6044 6045 kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0; 6046 kvm_run->flags = is_smm(vcpu) ? 
KVM_RUN_X86_SMM : 0; 6047 kvm_run->cr8 = kvm_get_cr8(vcpu); 6048 kvm_run->apic_base = kvm_get_apic_base(vcpu); 6049 if (irqchip_in_kernel(vcpu->kvm)) 6050 kvm_run->ready_for_interrupt_injection = 1; 6051 else 6052 kvm_run->ready_for_interrupt_injection = 6053 kvm_arch_interrupt_allowed(vcpu) && 6054 !kvm_cpu_has_interrupt(vcpu) && 6055 !kvm_event_needs_reinjection(vcpu); 6056 } 6057 6058 static void update_cr8_intercept(struct kvm_vcpu *vcpu) 6059 { 6060 int max_irr, tpr; 6061 6062 if (!kvm_x86_ops->update_cr8_intercept) 6063 return; 6064 6065 if (!vcpu->arch.apic) 6066 return; 6067 6068 if (!vcpu->arch.apic->vapic_addr) 6069 max_irr = kvm_lapic_find_highest_irr(vcpu); 6070 else 6071 max_irr = -1; 6072 6073 if (max_irr != -1) 6074 max_irr >>= 4; 6075 6076 tpr = kvm_lapic_get_cr8(vcpu); 6077 6078 kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr); 6079 } 6080 6081 static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) 6082 { 6083 int r; 6084 6085 /* try to reinject previous events if any */ 6086 if (vcpu->arch.exception.pending) { 6087 trace_kvm_inj_exception(vcpu->arch.exception.nr, 6088 vcpu->arch.exception.has_error_code, 6089 vcpu->arch.exception.error_code); 6090 6091 if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT) 6092 __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) | 6093 X86_EFLAGS_RF); 6094 6095 if (vcpu->arch.exception.nr == DB_VECTOR && 6096 (vcpu->arch.dr7 & DR7_GD)) { 6097 vcpu->arch.dr7 &= ~DR7_GD; 6098 kvm_update_dr7(vcpu); 6099 } 6100 6101 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr, 6102 vcpu->arch.exception.has_error_code, 6103 vcpu->arch.exception.error_code, 6104 vcpu->arch.exception.reinject); 6105 return 0; 6106 } 6107 6108 if (vcpu->arch.nmi_injected) { 6109 kvm_x86_ops->set_nmi(vcpu); 6110 return 0; 6111 } 6112 6113 if (vcpu->arch.interrupt.pending) { 6114 kvm_x86_ops->set_irq(vcpu); 6115 return 0; 6116 } 6117 6118 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { 6119 r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); 6120 if (r != 0) 6121 return r; 6122 } 6123 6124 /* try to inject new event if pending */ 6125 if (vcpu->arch.nmi_pending) { 6126 if (kvm_x86_ops->nmi_allowed(vcpu)) { 6127 --vcpu->arch.nmi_pending; 6128 vcpu->arch.nmi_injected = true; 6129 kvm_x86_ops->set_nmi(vcpu); 6130 } 6131 } else if (kvm_cpu_has_injectable_intr(vcpu)) { 6132 /* 6133 * Because interrupts can be injected asynchronously, we are 6134 * calling check_nested_events again here to avoid a race condition. 6135 * See https://lkml.org/lkml/2014/7/2/60 for discussion about this 6136 * proposal and current concerns. Perhaps we should be setting 6137 * KVM_REQ_EVENT only on certain events and not unconditionally? 6138 */ 6139 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { 6140 r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); 6141 if (r != 0) 6142 return r; 6143 } 6144 if (kvm_x86_ops->interrupt_allowed(vcpu)) { 6145 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), 6146 false); 6147 kvm_x86_ops->set_irq(vcpu); 6148 } 6149 } 6150 return 0; 6151 } 6152 6153 static void process_nmi(struct kvm_vcpu *vcpu) 6154 { 6155 unsigned limit = 2; 6156 6157 /* 6158 * x86 is limited to one NMI running, and one NMI pending after it. 6159 * If an NMI is already in progress, limit further NMIs to just one. 6160 * Otherwise, allow two (and we'll inject the first one immediately). 
6161 */ 6162 if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) 6163 limit = 1; 6164 6165 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); 6166 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); 6167 kvm_make_request(KVM_REQ_EVENT, vcpu); 6168 } 6169 6170 #define put_smstate(type, buf, offset, val) \ 6171 *(type *)((buf) + (offset) - 0x7e00) = val 6172 6173 static u32 process_smi_get_segment_flags(struct kvm_segment *seg) 6174 { 6175 u32 flags = 0; 6176 flags |= seg->g << 23; 6177 flags |= seg->db << 22; 6178 flags |= seg->l << 21; 6179 flags |= seg->avl << 20; 6180 flags |= seg->present << 15; 6181 flags |= seg->dpl << 13; 6182 flags |= seg->s << 12; 6183 flags |= seg->type << 8; 6184 return flags; 6185 } 6186 6187 static void process_smi_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n) 6188 { 6189 struct kvm_segment seg; 6190 int offset; 6191 6192 kvm_get_segment(vcpu, &seg, n); 6193 put_smstate(u32, buf, 0x7fa8 + n * 4, seg.selector); 6194 6195 if (n < 3) 6196 offset = 0x7f84 + n * 12; 6197 else 6198 offset = 0x7f2c + (n - 3) * 12; 6199 6200 put_smstate(u32, buf, offset + 8, seg.base); 6201 put_smstate(u32, buf, offset + 4, seg.limit); 6202 put_smstate(u32, buf, offset, process_smi_get_segment_flags(&seg)); 6203 } 6204 6205 static void process_smi_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n) 6206 { 6207 struct kvm_segment seg; 6208 int offset; 6209 u16 flags; 6210 6211 kvm_get_segment(vcpu, &seg, n); 6212 offset = 0x7e00 + n * 16; 6213 6214 flags = process_smi_get_segment_flags(&seg) >> 8; 6215 put_smstate(u16, buf, offset, seg.selector); 6216 put_smstate(u16, buf, offset + 2, flags); 6217 put_smstate(u32, buf, offset + 4, seg.limit); 6218 put_smstate(u64, buf, offset + 8, seg.base); 6219 } 6220 6221 static void process_smi_save_state_32(struct kvm_vcpu *vcpu, char *buf) 6222 { 6223 struct desc_ptr dt; 6224 struct kvm_segment seg; 6225 unsigned long val; 6226 int i; 6227 6228 put_smstate(u32, buf, 0x7ffc, kvm_read_cr0(vcpu)); 6229 put_smstate(u32, buf, 0x7ff8, kvm_read_cr3(vcpu)); 6230 put_smstate(u32, buf, 0x7ff4, kvm_get_rflags(vcpu)); 6231 put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu)); 6232 6233 for (i = 0; i < 8; i++) 6234 put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read(vcpu, i)); 6235 6236 kvm_get_dr(vcpu, 6, &val); 6237 put_smstate(u32, buf, 0x7fcc, (u32)val); 6238 kvm_get_dr(vcpu, 7, &val); 6239 put_smstate(u32, buf, 0x7fc8, (u32)val); 6240 6241 kvm_get_segment(vcpu, &seg, VCPU_SREG_TR); 6242 put_smstate(u32, buf, 0x7fc4, seg.selector); 6243 put_smstate(u32, buf, 0x7f64, seg.base); 6244 put_smstate(u32, buf, 0x7f60, seg.limit); 6245 put_smstate(u32, buf, 0x7f5c, process_smi_get_segment_flags(&seg)); 6246 6247 kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR); 6248 put_smstate(u32, buf, 0x7fc0, seg.selector); 6249 put_smstate(u32, buf, 0x7f80, seg.base); 6250 put_smstate(u32, buf, 0x7f7c, seg.limit); 6251 put_smstate(u32, buf, 0x7f78, process_smi_get_segment_flags(&seg)); 6252 6253 kvm_x86_ops->get_gdt(vcpu, &dt); 6254 put_smstate(u32, buf, 0x7f74, dt.address); 6255 put_smstate(u32, buf, 0x7f70, dt.size); 6256 6257 kvm_x86_ops->get_idt(vcpu, &dt); 6258 put_smstate(u32, buf, 0x7f58, dt.address); 6259 put_smstate(u32, buf, 0x7f54, dt.size); 6260 6261 for (i = 0; i < 6; i++) 6262 process_smi_save_seg_32(vcpu, buf, i); 6263 6264 put_smstate(u32, buf, 0x7f14, kvm_read_cr4(vcpu)); 6265 6266 /* revision id */ 6267 put_smstate(u32, buf, 0x7efc, 0x00020000); 6268 put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase); 6269 } 6270 6271 
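/*
 * The put_smstate() offsets used above follow the SMRAM state-save map,
 * whose offsets are defined relative to SMBASE + 0x8000.  Only the top
 * 512 bytes of that area are built in buf and copied to SMBASE + 0xfe00
 * by process_smi() below, hence the -0x7e00 in the macro.  For example,
 * put_smstate(u32, buf, 0x7ffc, ...) stores at
 * buf + (0x7ffc - 0x7e00) = buf + 0x1fc, which ends up at guest physical
 * address SMBASE + 0xfe00 + 0x1fc = SMBASE + 0xfffc, the slot the 32-bit
 * layout above uses for CR0.
 */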
static void process_smi_save_state_64(struct kvm_vcpu *vcpu, char *buf) 6272 { 6273 #ifdef CONFIG_X86_64 6274 struct desc_ptr dt; 6275 struct kvm_segment seg; 6276 unsigned long val; 6277 int i; 6278 6279 for (i = 0; i < 16; i++) 6280 put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read(vcpu, i)); 6281 6282 put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu)); 6283 put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu)); 6284 6285 kvm_get_dr(vcpu, 6, &val); 6286 put_smstate(u64, buf, 0x7f68, val); 6287 kvm_get_dr(vcpu, 7, &val); 6288 put_smstate(u64, buf, 0x7f60, val); 6289 6290 put_smstate(u64, buf, 0x7f58, kvm_read_cr0(vcpu)); 6291 put_smstate(u64, buf, 0x7f50, kvm_read_cr3(vcpu)); 6292 put_smstate(u64, buf, 0x7f48, kvm_read_cr4(vcpu)); 6293 6294 put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase); 6295 6296 /* revision id */ 6297 put_smstate(u32, buf, 0x7efc, 0x00020064); 6298 6299 put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer); 6300 6301 kvm_get_segment(vcpu, &seg, VCPU_SREG_TR); 6302 put_smstate(u16, buf, 0x7e90, seg.selector); 6303 put_smstate(u16, buf, 0x7e92, process_smi_get_segment_flags(&seg) >> 8); 6304 put_smstate(u32, buf, 0x7e94, seg.limit); 6305 put_smstate(u64, buf, 0x7e98, seg.base); 6306 6307 kvm_x86_ops->get_idt(vcpu, &dt); 6308 put_smstate(u32, buf, 0x7e84, dt.size); 6309 put_smstate(u64, buf, 0x7e88, dt.address); 6310 6311 kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR); 6312 put_smstate(u16, buf, 0x7e70, seg.selector); 6313 put_smstate(u16, buf, 0x7e72, process_smi_get_segment_flags(&seg) >> 8); 6314 put_smstate(u32, buf, 0x7e74, seg.limit); 6315 put_smstate(u64, buf, 0x7e78, seg.base); 6316 6317 kvm_x86_ops->get_gdt(vcpu, &dt); 6318 put_smstate(u32, buf, 0x7e64, dt.size); 6319 put_smstate(u64, buf, 0x7e68, dt.address); 6320 6321 for (i = 0; i < 6; i++) 6322 process_smi_save_seg_64(vcpu, buf, i); 6323 #else 6324 WARN_ON_ONCE(1); 6325 #endif 6326 } 6327 6328 static void process_smi(struct kvm_vcpu *vcpu) 6329 { 6330 struct kvm_segment cs, ds; 6331 char buf[512]; 6332 u32 cr0; 6333 6334 if (is_smm(vcpu)) { 6335 vcpu->arch.smi_pending = true; 6336 return; 6337 } 6338 6339 trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true); 6340 vcpu->arch.hflags |= HF_SMM_MASK; 6341 memset(buf, 0, 512); 6342 if (guest_cpuid_has_longmode(vcpu)) 6343 process_smi_save_state_64(vcpu, buf); 6344 else 6345 process_smi_save_state_32(vcpu, buf); 6346 6347 kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf)); 6348 6349 if (kvm_x86_ops->get_nmi_mask(vcpu)) 6350 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; 6351 else 6352 kvm_x86_ops->set_nmi_mask(vcpu, true); 6353 6354 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); 6355 kvm_rip_write(vcpu, 0x8000); 6356 6357 cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG); 6358 kvm_x86_ops->set_cr0(vcpu, cr0); 6359 vcpu->arch.cr0 = cr0; 6360 6361 kvm_x86_ops->set_cr4(vcpu, 0); 6362 6363 __kvm_set_dr(vcpu, 7, DR7_FIXED_1); 6364 6365 cs.selector = (vcpu->arch.smbase >> 4) & 0xffff; 6366 cs.base = vcpu->arch.smbase; 6367 6368 ds.selector = 0; 6369 ds.base = 0; 6370 6371 cs.limit = ds.limit = 0xffffffff; 6372 cs.type = ds.type = 0x3; 6373 cs.dpl = ds.dpl = 0; 6374 cs.db = ds.db = 0; 6375 cs.s = ds.s = 1; 6376 cs.l = ds.l = 0; 6377 cs.g = ds.g = 1; 6378 cs.avl = ds.avl = 0; 6379 cs.present = ds.present = 1; 6380 cs.unusable = ds.unusable = 0; 6381 cs.padding = ds.padding = 0; 6382 6383 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); 6384 kvm_set_segment(vcpu, &ds, VCPU_SREG_DS); 6385 kvm_set_segment(vcpu, &ds, VCPU_SREG_ES); 6386 
kvm_set_segment(vcpu, &ds, VCPU_SREG_FS); 6387 kvm_set_segment(vcpu, &ds, VCPU_SREG_GS); 6388 kvm_set_segment(vcpu, &ds, VCPU_SREG_SS); 6389 6390 if (guest_cpuid_has_longmode(vcpu)) 6391 kvm_x86_ops->set_efer(vcpu, 0); 6392 6393 kvm_update_cpuid(vcpu); 6394 kvm_mmu_reset_context(vcpu); 6395 } 6396 6397 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) 6398 { 6399 u64 eoi_exit_bitmap[4]; 6400 u32 tmr[8]; 6401 6402 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) 6403 return; 6404 6405 memset(eoi_exit_bitmap, 0, 32); 6406 memset(tmr, 0, 32); 6407 6408 kvm_ioapic_scan_entry(vcpu, eoi_exit_bitmap, tmr); 6409 kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap); 6410 kvm_apic_update_tmr(vcpu, tmr); 6411 } 6412 6413 static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu) 6414 { 6415 ++vcpu->stat.tlb_flush; 6416 kvm_x86_ops->tlb_flush(vcpu); 6417 } 6418 6419 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) 6420 { 6421 struct page *page = NULL; 6422 6423 if (!irqchip_in_kernel(vcpu->kvm)) 6424 return; 6425 6426 if (!kvm_x86_ops->set_apic_access_page_addr) 6427 return; 6428 6429 page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); 6430 if (is_error_page(page)) 6431 return; 6432 kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page)); 6433 6434 /* 6435 * Do not pin apic access page in memory, the MMU notifier 6436 * will call us again if it is migrated or swapped out. 6437 */ 6438 put_page(page); 6439 } 6440 EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page); 6441 6442 void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, 6443 unsigned long address) 6444 { 6445 /* 6446 * The physical address of apic access page is stored in the VMCS. 6447 * Update it when it becomes invalid. 6448 */ 6449 if (address == gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT)) 6450 kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD); 6451 } 6452 6453 /* 6454 * Returns 1 to let vcpu_run() continue the guest execution loop without 6455 * exiting to the userspace. Otherwise, the value will be returned to the 6456 * userspace. 6457 */ 6458 static int vcpu_enter_guest(struct kvm_vcpu *vcpu) 6459 { 6460 int r; 6461 bool req_int_win = !irqchip_in_kernel(vcpu->kvm) && 6462 vcpu->run->request_interrupt_window; 6463 bool req_immediate_exit = false; 6464 6465 if (vcpu->requests) { 6466 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) 6467 kvm_mmu_unload(vcpu); 6468 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu)) 6469 __kvm_migrate_timers(vcpu); 6470 if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu)) 6471 kvm_gen_update_masterclock(vcpu->kvm); 6472 if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu)) 6473 kvm_gen_kvmclock_update(vcpu); 6474 if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) { 6475 r = kvm_guest_time_update(vcpu); 6476 if (unlikely(r)) 6477 goto out; 6478 } 6479 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu)) 6480 kvm_mmu_sync_roots(vcpu); 6481 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) 6482 kvm_vcpu_flush_tlb(vcpu); 6483 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { 6484 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; 6485 r = 0; 6486 goto out; 6487 } 6488 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { 6489 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; 6490 r = 0; 6491 goto out; 6492 } 6493 if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) { 6494 vcpu->fpu_active = 0; 6495 kvm_x86_ops->fpu_deactivate(vcpu); 6496 } 6497 if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) { 6498 /* Page is swapped out. 
Do synthetic halt */ 6499 vcpu->arch.apf.halted = true; 6500 r = 1; 6501 goto out; 6502 } 6503 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) 6504 record_steal_time(vcpu); 6505 if (kvm_check_request(KVM_REQ_SMI, vcpu)) 6506 process_smi(vcpu); 6507 if (kvm_check_request(KVM_REQ_NMI, vcpu)) 6508 process_nmi(vcpu); 6509 if (kvm_check_request(KVM_REQ_PMU, vcpu)) 6510 kvm_pmu_handle_event(vcpu); 6511 if (kvm_check_request(KVM_REQ_PMI, vcpu)) 6512 kvm_pmu_deliver_pmi(vcpu); 6513 if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu)) 6514 vcpu_scan_ioapic(vcpu); 6515 if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu)) 6516 kvm_vcpu_reload_apic_access_page(vcpu); 6517 } 6518 6519 if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) { 6520 kvm_apic_accept_events(vcpu); 6521 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { 6522 r = 1; 6523 goto out; 6524 } 6525 6526 if (inject_pending_event(vcpu, req_int_win) != 0) 6527 req_immediate_exit = true; 6528 /* enable NMI/IRQ window open exits if needed */ 6529 else if (vcpu->arch.nmi_pending) 6530 kvm_x86_ops->enable_nmi_window(vcpu); 6531 else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win) 6532 kvm_x86_ops->enable_irq_window(vcpu); 6533 6534 if (kvm_lapic_enabled(vcpu)) { 6535 /* 6536 * Update architecture specific hints for APIC 6537 * virtual interrupt delivery. 6538 */ 6539 if (kvm_x86_ops->hwapic_irr_update) 6540 kvm_x86_ops->hwapic_irr_update(vcpu, 6541 kvm_lapic_find_highest_irr(vcpu)); 6542 update_cr8_intercept(vcpu); 6543 kvm_lapic_sync_to_vapic(vcpu); 6544 } 6545 } 6546 6547 r = kvm_mmu_reload(vcpu); 6548 if (unlikely(r)) { 6549 goto cancel_injection; 6550 } 6551 6552 preempt_disable(); 6553 6554 kvm_x86_ops->prepare_guest_switch(vcpu); 6555 if (vcpu->fpu_active) 6556 kvm_load_guest_fpu(vcpu); 6557 kvm_load_guest_xcr0(vcpu); 6558 6559 vcpu->mode = IN_GUEST_MODE; 6560 6561 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 6562 6563 /* We should set ->mode before check ->requests, 6564 * see the comment in make_all_cpus_request. 6565 */ 6566 smp_mb__after_srcu_read_unlock(); 6567 6568 local_irq_disable(); 6569 6570 if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests 6571 || need_resched() || signal_pending(current)) { 6572 vcpu->mode = OUTSIDE_GUEST_MODE; 6573 smp_wmb(); 6574 local_irq_enable(); 6575 preempt_enable(); 6576 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 6577 r = 1; 6578 goto cancel_injection; 6579 } 6580 6581 if (req_immediate_exit) 6582 smp_send_reschedule(vcpu->cpu); 6583 6584 __kvm_guest_enter(); 6585 6586 if (unlikely(vcpu->arch.switch_db_regs)) { 6587 set_debugreg(0, 7); 6588 set_debugreg(vcpu->arch.eff_db[0], 0); 6589 set_debugreg(vcpu->arch.eff_db[1], 1); 6590 set_debugreg(vcpu->arch.eff_db[2], 2); 6591 set_debugreg(vcpu->arch.eff_db[3], 3); 6592 set_debugreg(vcpu->arch.dr6, 6); 6593 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; 6594 } 6595 6596 trace_kvm_entry(vcpu->vcpu_id); 6597 wait_lapic_expire(vcpu); 6598 kvm_x86_ops->run(vcpu); 6599 6600 /* 6601 * Do this here before restoring debug registers on the host. And 6602 * since we do this before handling the vmexit, a DR access vmexit 6603 * can (a) read the correct value of the debug registers, (b) set 6604 * KVM_DEBUGREG_WONT_EXIT again. 
6605 */ 6606 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { 6607 int i; 6608 6609 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); 6610 kvm_x86_ops->sync_dirty_debug_regs(vcpu); 6611 for (i = 0; i < KVM_NR_DB_REGS; i++) 6612 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; 6613 } 6614 6615 /* 6616 * If the guest has used debug registers, at least dr7 6617 * will be disabled while returning to the host. 6618 * If we don't have active breakpoints in the host, we don't 6619 * care about the messed up debug address registers. But if 6620 * we have some of them active, restore the old state. 6621 */ 6622 if (hw_breakpoint_active()) 6623 hw_breakpoint_restore(); 6624 6625 vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, 6626 native_read_tsc()); 6627 6628 vcpu->mode = OUTSIDE_GUEST_MODE; 6629 smp_wmb(); 6630 6631 /* Interrupt is enabled by handle_external_intr() */ 6632 kvm_x86_ops->handle_external_intr(vcpu); 6633 6634 ++vcpu->stat.exits; 6635 6636 /* 6637 * We must have an instruction between local_irq_enable() and 6638 * kvm_guest_exit(), so the timer interrupt isn't delayed by 6639 * the interrupt shadow. The stat.exits increment will do nicely. 6640 * But we need to prevent reordering, hence this barrier(): 6641 */ 6642 barrier(); 6643 6644 kvm_guest_exit(); 6645 6646 preempt_enable(); 6647 6648 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 6649 6650 /* 6651 * Profile KVM exit RIPs: 6652 */ 6653 if (unlikely(prof_on == KVM_PROFILING)) { 6654 unsigned long rip = kvm_rip_read(vcpu); 6655 profile_hit(KVM_PROFILING, (void *)rip); 6656 } 6657 6658 if (unlikely(vcpu->arch.tsc_always_catchup)) 6659 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 6660 6661 if (vcpu->arch.apic_attention) 6662 kvm_lapic_sync_from_vapic(vcpu); 6663 6664 r = kvm_x86_ops->handle_exit(vcpu); 6665 return r; 6666 6667 cancel_injection: 6668 kvm_x86_ops->cancel_injection(vcpu); 6669 if (unlikely(vcpu->arch.apic_attention)) 6670 kvm_lapic_sync_from_vapic(vcpu); 6671 out: 6672 return r; 6673 } 6674 6675 static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) 6676 { 6677 if (!kvm_arch_vcpu_runnable(vcpu)) { 6678 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); 6679 kvm_vcpu_block(vcpu); 6680 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); 6681 if (!kvm_check_request(KVM_REQ_UNHALT, vcpu)) 6682 return 1; 6683 } 6684 6685 kvm_apic_accept_events(vcpu); 6686 switch(vcpu->arch.mp_state) { 6687 case KVM_MP_STATE_HALTED: 6688 vcpu->arch.pv.pv_unhalted = false; 6689 vcpu->arch.mp_state = 6690 KVM_MP_STATE_RUNNABLE; 6691 case KVM_MP_STATE_RUNNABLE: 6692 vcpu->arch.apf.halted = false; 6693 break; 6694 case KVM_MP_STATE_INIT_RECEIVED: 6695 break; 6696 default: 6697 return -EINTR; 6698 break; 6699 } 6700 return 1; 6701 } 6702 6703 static int vcpu_run(struct kvm_vcpu *vcpu) 6704 { 6705 int r; 6706 struct kvm *kvm = vcpu->kvm; 6707 6708 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); 6709 6710 for (;;) { 6711 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && 6712 !vcpu->arch.apf.halted) 6713 r = vcpu_enter_guest(vcpu); 6714 else 6715 r = vcpu_block(kvm, vcpu); 6716 if (r <= 0) 6717 break; 6718 6719 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests); 6720 if (kvm_cpu_has_pending_timer(vcpu)) 6721 kvm_inject_pending_timer_irqs(vcpu); 6722 6723 if (dm_request_for_irq_injection(vcpu)) { 6724 r = -EINTR; 6725 vcpu->run->exit_reason = KVM_EXIT_INTR; 6726 ++vcpu->stat.request_irq_exits; 6727 break; 6728 } 6729 6730 kvm_check_async_pf_completion(vcpu); 6731 6732 if (signal_pending(current)) { 6733 r = -EINTR; 6734 
vcpu->run->exit_reason = KVM_EXIT_INTR; 6735 ++vcpu->stat.signal_exits; 6736 break; 6737 } 6738 if (need_resched()) { 6739 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); 6740 cond_resched(); 6741 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); 6742 } 6743 } 6744 6745 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); 6746 6747 return r; 6748 } 6749 6750 static inline int complete_emulated_io(struct kvm_vcpu *vcpu) 6751 { 6752 int r; 6753 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 6754 r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE); 6755 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 6756 if (r != EMULATE_DONE) 6757 return 0; 6758 return 1; 6759 } 6760 6761 static int complete_emulated_pio(struct kvm_vcpu *vcpu) 6762 { 6763 BUG_ON(!vcpu->arch.pio.count); 6764 6765 return complete_emulated_io(vcpu); 6766 } 6767 6768 /* 6769 * Implements the following, as a state machine: 6770 * 6771 * read: 6772 * for each fragment 6773 * for each mmio piece in the fragment 6774 * write gpa, len 6775 * exit 6776 * copy data 6777 * execute insn 6778 * 6779 * write: 6780 * for each fragment 6781 * for each mmio piece in the fragment 6782 * write gpa, len 6783 * copy data 6784 * exit 6785 */ 6786 static int complete_emulated_mmio(struct kvm_vcpu *vcpu) 6787 { 6788 struct kvm_run *run = vcpu->run; 6789 struct kvm_mmio_fragment *frag; 6790 unsigned len; 6791 6792 BUG_ON(!vcpu->mmio_needed); 6793 6794 /* Complete previous fragment */ 6795 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; 6796 len = min(8u, frag->len); 6797 if (!vcpu->mmio_is_write) 6798 memcpy(frag->data, run->mmio.data, len); 6799 6800 if (frag->len <= 8) { 6801 /* Switch to the next fragment. */ 6802 frag++; 6803 vcpu->mmio_cur_fragment++; 6804 } else { 6805 /* Go forward to the next mmio piece. */ 6806 frag->data += len; 6807 frag->gpa += len; 6808 frag->len -= len; 6809 } 6810 6811 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { 6812 vcpu->mmio_needed = 0; 6813 6814 /* FIXME: return into emulator if single-stepping. 
*/ 6815 if (vcpu->mmio_is_write) 6816 return 1; 6817 vcpu->mmio_read_completed = 1; 6818 return complete_emulated_io(vcpu); 6819 } 6820 6821 run->exit_reason = KVM_EXIT_MMIO; 6822 run->mmio.phys_addr = frag->gpa; 6823 if (vcpu->mmio_is_write) 6824 memcpy(run->mmio.data, frag->data, min(8u, frag->len)); 6825 run->mmio.len = min(8u, frag->len); 6826 run->mmio.is_write = vcpu->mmio_is_write; 6827 vcpu->arch.complete_userspace_io = complete_emulated_mmio; 6828 return 0; 6829 } 6830 6831 6832 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 6833 { 6834 struct fpu *fpu = &current->thread.fpu; 6835 int r; 6836 sigset_t sigsaved; 6837 6838 fpu__activate_curr(fpu); 6839 6840 if (vcpu->sigset_active) 6841 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); 6842 6843 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { 6844 kvm_vcpu_block(vcpu); 6845 kvm_apic_accept_events(vcpu); 6846 clear_bit(KVM_REQ_UNHALT, &vcpu->requests); 6847 r = -EAGAIN; 6848 goto out; 6849 } 6850 6851 /* re-sync apic's tpr */ 6852 if (!irqchip_in_kernel(vcpu->kvm)) { 6853 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { 6854 r = -EINVAL; 6855 goto out; 6856 } 6857 } 6858 6859 if (unlikely(vcpu->arch.complete_userspace_io)) { 6860 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; 6861 vcpu->arch.complete_userspace_io = NULL; 6862 r = cui(vcpu); 6863 if (r <= 0) 6864 goto out; 6865 } else 6866 WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed); 6867 6868 r = vcpu_run(vcpu); 6869 6870 out: 6871 post_kvm_run_save(vcpu); 6872 if (vcpu->sigset_active) 6873 sigprocmask(SIG_SETMASK, &sigsaved, NULL); 6874 6875 return r; 6876 } 6877 6878 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 6879 { 6880 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { 6881 /* 6882 * We are here if userspace calls get_regs() in the middle of 6883 * instruction emulation. Register state needs to be copied 6884 * back from emulation context to vcpu.
Userspace shouldn't do 6885 * that usually, but some bad designed PV devices (vmware 6886 * backdoor interface) need this to work 6887 */ 6888 emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt); 6889 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 6890 } 6891 regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX); 6892 regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX); 6893 regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX); 6894 regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX); 6895 regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI); 6896 regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI); 6897 regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); 6898 regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP); 6899 #ifdef CONFIG_X86_64 6900 regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8); 6901 regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9); 6902 regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10); 6903 regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11); 6904 regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12); 6905 regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13); 6906 regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14); 6907 regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15); 6908 #endif 6909 6910 regs->rip = kvm_rip_read(vcpu); 6911 regs->rflags = kvm_get_rflags(vcpu); 6912 6913 return 0; 6914 } 6915 6916 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 6917 { 6918 vcpu->arch.emulate_regs_need_sync_from_vcpu = true; 6919 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 6920 6921 kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax); 6922 kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx); 6923 kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx); 6924 kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx); 6925 kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi); 6926 kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi); 6927 kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp); 6928 kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp); 6929 #ifdef CONFIG_X86_64 6930 kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8); 6931 kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9); 6932 kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10); 6933 kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11); 6934 kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12); 6935 kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13); 6936 kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14); 6937 kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15); 6938 #endif 6939 6940 kvm_rip_write(vcpu, regs->rip); 6941 kvm_set_rflags(vcpu, regs->rflags); 6942 6943 vcpu->arch.exception.pending = false; 6944 6945 kvm_make_request(KVM_REQ_EVENT, vcpu); 6946 6947 return 0; 6948 } 6949 6950 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) 6951 { 6952 struct kvm_segment cs; 6953 6954 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); 6955 *db = cs.db; 6956 *l = cs.l; 6957 } 6958 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits); 6959 6960 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, 6961 struct kvm_sregs *sregs) 6962 { 6963 struct desc_ptr dt; 6964 6965 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); 6966 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); 6967 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); 6968 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); 6969 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); 6970 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); 6971 6972 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); 6973 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); 6974 6975 kvm_x86_ops->get_idt(vcpu, 
&dt); 6976 sregs->idt.limit = dt.size; 6977 sregs->idt.base = dt.address; 6978 kvm_x86_ops->get_gdt(vcpu, &dt); 6979 sregs->gdt.limit = dt.size; 6980 sregs->gdt.base = dt.address; 6981 6982 sregs->cr0 = kvm_read_cr0(vcpu); 6983 sregs->cr2 = vcpu->arch.cr2; 6984 sregs->cr3 = kvm_read_cr3(vcpu); 6985 sregs->cr4 = kvm_read_cr4(vcpu); 6986 sregs->cr8 = kvm_get_cr8(vcpu); 6987 sregs->efer = vcpu->arch.efer; 6988 sregs->apic_base = kvm_get_apic_base(vcpu); 6989 6990 memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap); 6991 6992 if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft) 6993 set_bit(vcpu->arch.interrupt.nr, 6994 (unsigned long *)sregs->interrupt_bitmap); 6995 6996 return 0; 6997 } 6998 6999 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 7000 struct kvm_mp_state *mp_state) 7001 { 7002 kvm_apic_accept_events(vcpu); 7003 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED && 7004 vcpu->arch.pv.pv_unhalted) 7005 mp_state->mp_state = KVM_MP_STATE_RUNNABLE; 7006 else 7007 mp_state->mp_state = vcpu->arch.mp_state; 7008 7009 return 0; 7010 } 7011 7012 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, 7013 struct kvm_mp_state *mp_state) 7014 { 7015 if (!kvm_vcpu_has_lapic(vcpu) && 7016 mp_state->mp_state != KVM_MP_STATE_RUNNABLE) 7017 return -EINVAL; 7018 7019 if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) { 7020 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; 7021 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); 7022 } else 7023 vcpu->arch.mp_state = mp_state->mp_state; 7024 kvm_make_request(KVM_REQ_EVENT, vcpu); 7025 return 0; 7026 } 7027 7028 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, 7029 int reason, bool has_error_code, u32 error_code) 7030 { 7031 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; 7032 int ret; 7033 7034 init_emulate_ctxt(vcpu); 7035 7036 ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason, 7037 has_error_code, error_code); 7038 7039 if (ret) 7040 return EMULATE_FAIL; 7041 7042 kvm_rip_write(vcpu, ctxt->eip); 7043 kvm_set_rflags(vcpu, ctxt->eflags); 7044 kvm_make_request(KVM_REQ_EVENT, vcpu); 7045 return EMULATE_DONE; 7046 } 7047 EXPORT_SYMBOL_GPL(kvm_task_switch); 7048 7049 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 7050 struct kvm_sregs *sregs) 7051 { 7052 struct msr_data apic_base_msr; 7053 int mmu_reset_needed = 0; 7054 int pending_vec, max_bits, idx; 7055 struct desc_ptr dt; 7056 7057 if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE)) 7058 return -EINVAL; 7059 7060 dt.size = sregs->idt.limit; 7061 dt.address = sregs->idt.base; 7062 kvm_x86_ops->set_idt(vcpu, &dt); 7063 dt.size = sregs->gdt.limit; 7064 dt.address = sregs->gdt.base; 7065 kvm_x86_ops->set_gdt(vcpu, &dt); 7066 7067 vcpu->arch.cr2 = sregs->cr2; 7068 mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; 7069 vcpu->arch.cr3 = sregs->cr3; 7070 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); 7071 7072 kvm_set_cr8(vcpu, sregs->cr8); 7073 7074 mmu_reset_needed |= vcpu->arch.efer != sregs->efer; 7075 kvm_x86_ops->set_efer(vcpu, sregs->efer); 7076 apic_base_msr.data = sregs->apic_base; 7077 apic_base_msr.host_initiated = true; 7078 kvm_set_apic_base(vcpu, &apic_base_msr); 7079 7080 mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; 7081 kvm_x86_ops->set_cr0(vcpu, sregs->cr0); 7082 vcpu->arch.cr0 = sregs->cr0; 7083 7084 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; 7085 kvm_x86_ops->set_cr4(vcpu, sregs->cr4); 7086 if (sregs->cr4 & X86_CR4_OSXSAVE) 
7087 kvm_update_cpuid(vcpu); 7088 7089 idx = srcu_read_lock(&vcpu->kvm->srcu); 7090 if (!is_long_mode(vcpu) && is_pae(vcpu)) { 7091 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); 7092 mmu_reset_needed = 1; 7093 } 7094 srcu_read_unlock(&vcpu->kvm->srcu, idx); 7095 7096 if (mmu_reset_needed) 7097 kvm_mmu_reset_context(vcpu); 7098 7099 max_bits = KVM_NR_INTERRUPTS; 7100 pending_vec = find_first_bit( 7101 (const unsigned long *)sregs->interrupt_bitmap, max_bits); 7102 if (pending_vec < max_bits) { 7103 kvm_queue_interrupt(vcpu, pending_vec, false); 7104 pr_debug("Set back pending irq %d\n", pending_vec); 7105 } 7106 7107 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); 7108 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); 7109 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); 7110 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); 7111 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); 7112 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); 7113 7114 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); 7115 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); 7116 7117 update_cr8_intercept(vcpu); 7118 7119 /* Older userspace won't unhalt the vcpu on reset. */ 7120 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 && 7121 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && 7122 !is_protmode(vcpu)) 7123 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 7124 7125 kvm_make_request(KVM_REQ_EVENT, vcpu); 7126 7127 return 0; 7128 } 7129 7130 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 7131 struct kvm_guest_debug *dbg) 7132 { 7133 unsigned long rflags; 7134 int i, r; 7135 7136 if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) { 7137 r = -EBUSY; 7138 if (vcpu->arch.exception.pending) 7139 goto out; 7140 if (dbg->control & KVM_GUESTDBG_INJECT_DB) 7141 kvm_queue_exception(vcpu, DB_VECTOR); 7142 else 7143 kvm_queue_exception(vcpu, BP_VECTOR); 7144 } 7145 7146 /* 7147 * Read rflags as long as potentially injected trace flags are still 7148 * filtered out. 7149 */ 7150 rflags = kvm_get_rflags(vcpu); 7151 7152 vcpu->guest_debug = dbg->control; 7153 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) 7154 vcpu->guest_debug = 0; 7155 7156 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { 7157 for (i = 0; i < KVM_NR_DB_REGS; ++i) 7158 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; 7159 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; 7160 } else { 7161 for (i = 0; i < KVM_NR_DB_REGS; i++) 7162 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; 7163 } 7164 kvm_update_dr7(vcpu); 7165 7166 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 7167 vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) + 7168 get_segment_base(vcpu, VCPU_SREG_CS); 7169 7170 /* 7171 * Trigger an rflags update that will inject or remove the trace 7172 * flags. 7173 */ 7174 kvm_set_rflags(vcpu, rflags); 7175 7176 kvm_x86_ops->update_db_bp_intercept(vcpu); 7177 7178 r = 0; 7179 7180 out: 7181 7182 return r; 7183 } 7184 7185 /* 7186 * Translate a guest virtual address to a guest physical address. 
7187 */ 7188 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, 7189 struct kvm_translation *tr) 7190 { 7191 unsigned long vaddr = tr->linear_address; 7192 gpa_t gpa; 7193 int idx; 7194 7195 idx = srcu_read_lock(&vcpu->kvm->srcu); 7196 gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL); 7197 srcu_read_unlock(&vcpu->kvm->srcu, idx); 7198 tr->physical_address = gpa; 7199 tr->valid = gpa != UNMAPPED_GVA; 7200 tr->writeable = 1; 7201 tr->usermode = 0; 7202 7203 return 0; 7204 } 7205 7206 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 7207 { 7208 struct fxregs_state *fxsave = 7209 &vcpu->arch.guest_fpu.state.fxsave; 7210 7211 memcpy(fpu->fpr, fxsave->st_space, 128); 7212 fpu->fcw = fxsave->cwd; 7213 fpu->fsw = fxsave->swd; 7214 fpu->ftwx = fxsave->twd; 7215 fpu->last_opcode = fxsave->fop; 7216 fpu->last_ip = fxsave->rip; 7217 fpu->last_dp = fxsave->rdp; 7218 memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space); 7219 7220 return 0; 7221 } 7222 7223 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 7224 { 7225 struct fxregs_state *fxsave = 7226 &vcpu->arch.guest_fpu.state.fxsave; 7227 7228 memcpy(fxsave->st_space, fpu->fpr, 128); 7229 fxsave->cwd = fpu->fcw; 7230 fxsave->swd = fpu->fsw; 7231 fxsave->twd = fpu->ftwx; 7232 fxsave->fop = fpu->last_opcode; 7233 fxsave->rip = fpu->last_ip; 7234 fxsave->rdp = fpu->last_dp; 7235 memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space); 7236 7237 return 0; 7238 } 7239 7240 static void fx_init(struct kvm_vcpu *vcpu) 7241 { 7242 fpstate_init(&vcpu->arch.guest_fpu.state); 7243 if (cpu_has_xsaves) 7244 vcpu->arch.guest_fpu.state.xsave.header.xcomp_bv = 7245 host_xcr0 | XSTATE_COMPACTION_ENABLED; 7246 7247 /* 7248 * Ensure guest xcr0 is valid for loading 7249 */ 7250 vcpu->arch.xcr0 = XSTATE_FP; 7251 7252 vcpu->arch.cr0 |= X86_CR0_ET; 7253 } 7254 7255 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) 7256 { 7257 if (vcpu->guest_fpu_loaded) 7258 return; 7259 7260 /* 7261 * Restore all possible states in the guest, 7262 * and assume host would use all available bits. 7263 * Guest xcr0 would be loaded later. 7264 */ 7265 kvm_put_guest_xcr0(vcpu); 7266 vcpu->guest_fpu_loaded = 1; 7267 __kernel_fpu_begin(); 7268 __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state); 7269 trace_kvm_fpu(1); 7270 } 7271 7272 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) 7273 { 7274 kvm_put_guest_xcr0(vcpu); 7275 7276 if (!vcpu->guest_fpu_loaded) { 7277 vcpu->fpu_counter = 0; 7278 return; 7279 } 7280 7281 vcpu->guest_fpu_loaded = 0; 7282 copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu); 7283 __kernel_fpu_end(); 7284 ++vcpu->stat.fpu_reload; 7285 /* 7286 * If using eager FPU mode, or if the guest is a frequent user 7287 * of the FPU, just leave the FPU active for next time. 7288 * Every 255 times fpu_counter rolls over to 0; a guest that uses 7289 * the FPU in bursts will revert to loading it on demand. 
7290 */ 7291 if (!vcpu->arch.eager_fpu) { 7292 if (++vcpu->fpu_counter < 5) 7293 kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu); 7294 } 7295 trace_kvm_fpu(0); 7296 } 7297 7298 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) 7299 { 7300 kvmclock_reset(vcpu); 7301 7302 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); 7303 kvm_x86_ops->vcpu_free(vcpu); 7304 } 7305 7306 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, 7307 unsigned int id) 7308 { 7309 struct kvm_vcpu *vcpu; 7310 7311 if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0) 7312 printk_once(KERN_WARNING 7313 "kvm: SMP vm created on host with unstable TSC; " 7314 "guest TSC will not be reliable\n"); 7315 7316 vcpu = kvm_x86_ops->vcpu_create(kvm, id); 7317 7318 /* 7319 * Activate fpu unconditionally in case the guest needs eager FPU. It will be 7320 * deactivated soon if it doesn't. 7321 */ 7322 kvm_x86_ops->fpu_activate(vcpu); 7323 return vcpu; 7324 } 7325 7326 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) 7327 { 7328 int r; 7329 7330 kvm_vcpu_mtrr_init(vcpu); 7331 r = vcpu_load(vcpu); 7332 if (r) 7333 return r; 7334 kvm_vcpu_reset(vcpu, false); 7335 kvm_mmu_setup(vcpu); 7336 vcpu_put(vcpu); 7337 return r; 7338 } 7339 7340 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) 7341 { 7342 struct msr_data msr; 7343 struct kvm *kvm = vcpu->kvm; 7344 7345 if (vcpu_load(vcpu)) 7346 return; 7347 msr.data = 0x0; 7348 msr.index = MSR_IA32_TSC; 7349 msr.host_initiated = true; 7350 kvm_write_tsc(vcpu, &msr); 7351 vcpu_put(vcpu); 7352 7353 if (!kvmclock_periodic_sync) 7354 return; 7355 7356 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, 7357 KVMCLOCK_SYNC_PERIOD); 7358 } 7359 7360 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) 7361 { 7362 int r; 7363 vcpu->arch.apf.msr_val = 0; 7364 7365 r = vcpu_load(vcpu); 7366 BUG_ON(r); 7367 kvm_mmu_unload(vcpu); 7368 vcpu_put(vcpu); 7369 7370 kvm_x86_ops->vcpu_free(vcpu); 7371 } 7372 7373 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) 7374 { 7375 vcpu->arch.hflags = 0; 7376 7377 atomic_set(&vcpu->arch.nmi_queued, 0); 7378 vcpu->arch.nmi_pending = 0; 7379 vcpu->arch.nmi_injected = false; 7380 kvm_clear_interrupt_queue(vcpu); 7381 kvm_clear_exception_queue(vcpu); 7382 7383 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); 7384 kvm_update_dr0123(vcpu); 7385 vcpu->arch.dr6 = DR6_INIT; 7386 kvm_update_dr6(vcpu); 7387 vcpu->arch.dr7 = DR7_FIXED_1; 7388 kvm_update_dr7(vcpu); 7389 7390 vcpu->arch.cr2 = 0; 7391 7392 kvm_make_request(KVM_REQ_EVENT, vcpu); 7393 vcpu->arch.apf.msr_val = 0; 7394 vcpu->arch.st.msr_val = 0; 7395 7396 kvmclock_reset(vcpu); 7397 7398 kvm_clear_async_pf_completion_queue(vcpu); 7399 kvm_async_pf_hash_reset(vcpu); 7400 vcpu->arch.apf.halted = false; 7401 7402 if (!init_event) { 7403 kvm_pmu_reset(vcpu); 7404 vcpu->arch.smbase = 0x30000; 7405 } 7406 7407 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); 7408 vcpu->arch.regs_avail = ~0; 7409 vcpu->arch.regs_dirty = ~0; 7410 7411 kvm_x86_ops->vcpu_reset(vcpu, init_event); 7412 } 7413 7414 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) 7415 { 7416 struct kvm_segment cs; 7417 7418 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); 7419 cs.selector = vector << 8; 7420 cs.base = vector << 12; 7421 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); 7422 kvm_rip_write(vcpu, 0); 7423 } 7424 7425 int kvm_arch_hardware_enable(void) 7426 { 7427 struct kvm *kvm; 7428 struct kvm_vcpu *vcpu; 7429 int i; 7430 int ret; 7431 u64 local_tsc; 7432 u64 max_tsc = 0; 7433 bool stable, backwards_tsc = false; 7434 7435 
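/*
 * Take a fresh snapshot of the host values of the shared user-return
 * MSRs for this CPU, then let the vendor module (VMX or SVM) switch
 * virtualization on before any of the TSC bookkeeping below.
 */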
kvm_shared_msr_cpu_online(); 7436 ret = kvm_x86_ops->hardware_enable(); 7437 if (ret != 0) 7438 return ret; 7439 7440 local_tsc = native_read_tsc(); 7441 stable = !check_tsc_unstable(); 7442 list_for_each_entry(kvm, &vm_list, vm_list) { 7443 kvm_for_each_vcpu(i, vcpu, kvm) { 7444 if (!stable && vcpu->cpu == smp_processor_id()) 7445 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 7446 if (stable && vcpu->arch.last_host_tsc > local_tsc) { 7447 backwards_tsc = true; 7448 if (vcpu->arch.last_host_tsc > max_tsc) 7449 max_tsc = vcpu->arch.last_host_tsc; 7450 } 7451 } 7452 } 7453 7454 /* 7455 * Sometimes, even reliable TSCs go backwards. This happens on 7456 * platforms that reset TSC during suspend or hibernate actions, but 7457 * maintain synchronization. We must compensate. Fortunately, we can 7458 * detect that condition here, which happens early in CPU bringup, 7459 * before any KVM threads can be running. Unfortunately, we can't 7460 * bring the TSCs fully up to date with real time, as we aren't yet far 7461 * enough into CPU bringup that we know how much real time has actually 7462 * elapsed; our helper function, get_kernel_ns() will be using boot 7463 * variables that haven't been updated yet. 7464 * 7465 * So we simply find the maximum observed TSC above, then record the 7466 * adjustment to TSC in each VCPU. When the VCPU later gets loaded, 7467 * the adjustment will be applied. Note that we accumulate 7468 * adjustments, in case multiple suspend cycles happen before some VCPU 7469 * gets a chance to run again. In the event that no KVM threads get a 7470 * chance to run, we will miss the entire elapsed period, as we'll have 7471 * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may 7472 * lose cycle time. This isn't too big a deal, since the loss will be 7473 * uniform across all VCPUs (not to mention the scenario is extremely 7474 * unlikely). It is possible that a second hibernate recovery happens 7475 * much faster than a first, causing the observed TSC here to be 7476 * smaller; this would require additional padding adjustment, which is 7477 * why we set last_host_tsc to the local tsc observed here. 7478 * 7479 * N.B. - this code below runs only on platforms with reliable TSC, 7480 * as that is the only way backwards_tsc is set above. Also note 7481 * that this runs for ALL vcpus, which is not a bug; all VCPUs should 7482 * have the same delta_cyc adjustment applied if backwards_tsc 7483 * is detected. Note further, this adjustment is only done once, 7484 * as we reset last_host_tsc on all VCPUs to stop this from being 7485 * called multiple times (one for each physical CPU bringup). 7486 * 7487 * Platforms with unreliable TSCs don't have to deal with this, they 7488 * will be compensated by the logic in vcpu_load, which sets the TSC to 7489 * catchup mode. This will catch up all VCPUs to real time, but cannot 7490 * guarantee that they stay in perfect synchronization. 7491 */ 7492 if (backwards_tsc) { 7493 u64 delta_cyc = max_tsc - local_tsc; 7494 backwards_tsc_observed = true; 7495 list_for_each_entry(kvm, &vm_list, vm_list) { 7496 kvm_for_each_vcpu(i, vcpu, kvm) { 7497 vcpu->arch.tsc_offset_adjustment += delta_cyc; 7498 vcpu->arch.last_host_tsc = local_tsc; 7499 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); 7500 } 7501 7502 /* 7503 * We have to disable TSC offset matching: if you were 7504 * booting a VM while issuing an S4 host suspend, 7505 * you may hit problems. Solving this issue is 7506 * left as an exercise to the reader.
7507 */ 7508 kvm->arch.last_tsc_nsec = 0; 7509 kvm->arch.last_tsc_write = 0; 7510 } 7511 7512 } 7513 return 0; 7514 } 7515 7516 void kvm_arch_hardware_disable(void) 7517 { 7518 kvm_x86_ops->hardware_disable(); 7519 drop_user_return_notifiers(); 7520 } 7521 7522 int kvm_arch_hardware_setup(void) 7523 { 7524 int r; 7525 7526 r = kvm_x86_ops->hardware_setup(); 7527 if (r != 0) 7528 return r; 7529 7530 kvm_init_msr_list(); 7531 return 0; 7532 } 7533 7534 void kvm_arch_hardware_unsetup(void) 7535 { 7536 kvm_x86_ops->hardware_unsetup(); 7537 } 7538 7539 void kvm_arch_check_processor_compat(void *rtn) 7540 { 7541 kvm_x86_ops->check_processor_compatibility(rtn); 7542 } 7543 7544 bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) 7545 { 7546 return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL); 7547 } 7548 7549 struct static_key kvm_no_apic_vcpu __read_mostly; 7550 7551 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) 7552 { 7553 struct page *page; 7554 struct kvm *kvm; 7555 int r; 7556 7557 BUG_ON(vcpu->kvm == NULL); 7558 kvm = vcpu->kvm; 7559 7560 vcpu->arch.pv.pv_unhalted = false; 7561 vcpu->arch.emulate_ctxt.ops = &emulate_ops; 7562 if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_reset_bsp(vcpu)) 7563 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 7564 else 7565 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; 7566 7567 page = alloc_page(GFP_KERNEL | __GFP_ZERO); 7568 if (!page) { 7569 r = -ENOMEM; 7570 goto fail; 7571 } 7572 vcpu->arch.pio_data = page_address(page); 7573 7574 kvm_set_tsc_khz(vcpu, max_tsc_khz); 7575 7576 r = kvm_mmu_create(vcpu); 7577 if (r < 0) 7578 goto fail_free_pio_data; 7579 7580 if (irqchip_in_kernel(kvm)) { 7581 r = kvm_create_lapic(vcpu); 7582 if (r < 0) 7583 goto fail_mmu_destroy; 7584 } else 7585 static_key_slow_inc(&kvm_no_apic_vcpu); 7586 7587 vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4, 7588 GFP_KERNEL); 7589 if (!vcpu->arch.mce_banks) { 7590 r = -ENOMEM; 7591 goto fail_free_lapic; 7592 } 7593 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; 7594 7595 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) { 7596 r = -ENOMEM; 7597 goto fail_free_mce_banks; 7598 } 7599 7600 fx_init(vcpu); 7601 7602 vcpu->arch.ia32_tsc_adjust_msr = 0x0; 7603 vcpu->arch.pv_time_enabled = false; 7604 7605 vcpu->arch.guest_supported_xcr0 = 0; 7606 vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; 7607 7608 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); 7609 7610 vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; 7611 7612 kvm_async_pf_hash_reset(vcpu); 7613 kvm_pmu_init(vcpu); 7614 7615 return 0; 7616 7617 fail_free_mce_banks: 7618 kfree(vcpu->arch.mce_banks); 7619 fail_free_lapic: 7620 kvm_free_lapic(vcpu); 7621 fail_mmu_destroy: 7622 kvm_mmu_destroy(vcpu); 7623 fail_free_pio_data: 7624 free_page((unsigned long)vcpu->arch.pio_data); 7625 fail: 7626 return r; 7627 } 7628 7629 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) 7630 { 7631 int idx; 7632 7633 kvm_pmu_destroy(vcpu); 7634 kfree(vcpu->arch.mce_banks); 7635 kvm_free_lapic(vcpu); 7636 idx = srcu_read_lock(&vcpu->kvm->srcu); 7637 kvm_mmu_destroy(vcpu); 7638 srcu_read_unlock(&vcpu->kvm->srcu, idx); 7639 free_page((unsigned long)vcpu->arch.pio_data); 7640 if (!irqchip_in_kernel(vcpu->kvm)) 7641 static_key_slow_dec(&kvm_no_apic_vcpu); 7642 } 7643 7644 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) 7645 { 7646 kvm_x86_ops->sched_in(vcpu, cpu); 7647 } 7648 7649 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) 7650 { 7651 if (type) 7652 return -EINVAL; 7653 7654 
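/* x86 defines no special VM types, so anything but type 0 was rejected above. */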
INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list); 7655 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); 7656 INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages); 7657 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); 7658 atomic_set(&kvm->arch.noncoherent_dma_count, 0); 7659 7660 /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ 7661 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); 7662 /* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */ 7663 set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, 7664 &kvm->arch.irq_sources_bitmap); 7665 7666 raw_spin_lock_init(&kvm->arch.tsc_write_lock); 7667 mutex_init(&kvm->arch.apic_map_lock); 7668 spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock); 7669 7670 pvclock_update_vm_gtod_copy(kvm); 7671 7672 INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); 7673 INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); 7674 7675 return 0; 7676 } 7677 7678 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) 7679 { 7680 int r; 7681 r = vcpu_load(vcpu); 7682 BUG_ON(r); 7683 kvm_mmu_unload(vcpu); 7684 vcpu_put(vcpu); 7685 } 7686 7687 static void kvm_free_vcpus(struct kvm *kvm) 7688 { 7689 unsigned int i; 7690 struct kvm_vcpu *vcpu; 7691 7692 /* 7693 * Unpin any mmu pages first. 7694 */ 7695 kvm_for_each_vcpu(i, vcpu, kvm) { 7696 kvm_clear_async_pf_completion_queue(vcpu); 7697 kvm_unload_vcpu_mmu(vcpu); 7698 } 7699 kvm_for_each_vcpu(i, vcpu, kvm) 7700 kvm_arch_vcpu_free(vcpu); 7701 7702 mutex_lock(&kvm->lock); 7703 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) 7704 kvm->vcpus[i] = NULL; 7705 7706 atomic_set(&kvm->online_vcpus, 0); 7707 mutex_unlock(&kvm->lock); 7708 } 7709 7710 void kvm_arch_sync_events(struct kvm *kvm) 7711 { 7712 cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work); 7713 cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work); 7714 kvm_free_all_assigned_devices(kvm); 7715 kvm_free_pit(kvm); 7716 } 7717 7718 int __x86_set_memory_region(struct kvm *kvm, 7719 const struct kvm_userspace_memory_region *mem) 7720 { 7721 int i, r; 7722 7723 /* Called with kvm->slots_lock held. */ 7724 BUG_ON(mem->slot >= KVM_MEM_SLOTS_NUM); 7725 7726 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { 7727 struct kvm_userspace_memory_region m = *mem; 7728 7729 m.slot |= i << 16; 7730 r = __kvm_set_memory_region(kvm, &m); 7731 if (r < 0) 7732 return r; 7733 } 7734 7735 return 0; 7736 } 7737 EXPORT_SYMBOL_GPL(__x86_set_memory_region); 7738 7739 int x86_set_memory_region(struct kvm *kvm, 7740 const struct kvm_userspace_memory_region *mem) 7741 { 7742 int r; 7743 7744 mutex_lock(&kvm->slots_lock); 7745 r = __x86_set_memory_region(kvm, mem); 7746 mutex_unlock(&kvm->slots_lock); 7747 7748 return r; 7749 } 7750 EXPORT_SYMBOL_GPL(x86_set_memory_region); 7751 7752 void kvm_arch_destroy_vm(struct kvm *kvm) 7753 { 7754 if (current->mm == kvm->mm) { 7755 /* 7756 * Free memory regions allocated on behalf of userspace, 7757 * unless the the memory map has changed due to process exit 7758 * or fd copying. 
7759 */ 7760 struct kvm_userspace_memory_region mem; 7761 memset(&mem, 0, sizeof(mem)); 7762 mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT; 7763 x86_set_memory_region(kvm, &mem); 7764 7765 mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT; 7766 x86_set_memory_region(kvm, &mem); 7767 7768 mem.slot = TSS_PRIVATE_MEMSLOT; 7769 x86_set_memory_region(kvm, &mem); 7770 } 7771 kvm_iommu_unmap_guest(kvm); 7772 kfree(kvm->arch.vpic); 7773 kfree(kvm->arch.vioapic); 7774 kvm_free_vcpus(kvm); 7775 kfree(rcu_dereference_check(kvm->arch.apic_map, 1)); 7776 } 7777 7778 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, 7779 struct kvm_memory_slot *dont) 7780 { 7781 int i; 7782 7783 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { 7784 if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) { 7785 kvfree(free->arch.rmap[i]); 7786 free->arch.rmap[i] = NULL; 7787 } 7788 if (i == 0) 7789 continue; 7790 7791 if (!dont || free->arch.lpage_info[i - 1] != 7792 dont->arch.lpage_info[i - 1]) { 7793 kvfree(free->arch.lpage_info[i - 1]); 7794 free->arch.lpage_info[i - 1] = NULL; 7795 } 7796 } 7797 } 7798 7799 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, 7800 unsigned long npages) 7801 { 7802 int i; 7803 7804 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { 7805 unsigned long ugfn; 7806 int lpages; 7807 int level = i + 1; 7808 7809 lpages = gfn_to_index(slot->base_gfn + npages - 1, 7810 slot->base_gfn, level) + 1; 7811 7812 slot->arch.rmap[i] = 7813 kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i])); 7814 if (!slot->arch.rmap[i]) 7815 goto out_free; 7816 if (i == 0) 7817 continue; 7818 7819 slot->arch.lpage_info[i - 1] = kvm_kvzalloc(lpages * 7820 sizeof(*slot->arch.lpage_info[i - 1])); 7821 if (!slot->arch.lpage_info[i - 1]) 7822 goto out_free; 7823 7824 if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1)) 7825 slot->arch.lpage_info[i - 1][0].write_count = 1; 7826 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) 7827 slot->arch.lpage_info[i - 1][lpages - 1].write_count = 1; 7828 ugfn = slot->userspace_addr >> PAGE_SHIFT; 7829 /* 7830 * If the gfn and userspace address are not aligned wrt each 7831 * other, or if explicitly asked to, disable large page 7832 * support for this slot 7833 */ 7834 if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) || 7835 !kvm_largepages_enabled()) { 7836 unsigned long j; 7837 7838 for (j = 0; j < lpages; ++j) 7839 slot->arch.lpage_info[i - 1][j].write_count = 1; 7840 } 7841 } 7842 7843 return 0; 7844 7845 out_free: 7846 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { 7847 kvfree(slot->arch.rmap[i]); 7848 slot->arch.rmap[i] = NULL; 7849 if (i == 0) 7850 continue; 7851 7852 kvfree(slot->arch.lpage_info[i - 1]); 7853 slot->arch.lpage_info[i - 1] = NULL; 7854 } 7855 return -ENOMEM; 7856 } 7857 7858 void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) 7859 { 7860 /* 7861 * memslots->generation has been incremented. 7862 * mmio generation may have reached its maximum value. 7863 */ 7864 kvm_mmu_invalidate_mmio_sptes(kvm, slots); 7865 } 7866 7867 int kvm_arch_prepare_memory_region(struct kvm *kvm, 7868 struct kvm_memory_slot *memslot, 7869 const struct kvm_userspace_memory_region *mem, 7870 enum kvm_mr_change change) 7871 { 7872 /* 7873 * Only private memory slots need to be mapped here since 7874 * KVM_SET_MEMORY_REGION ioctl is no longer supported. 
7875 */ 7876 if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) { 7877 unsigned long userspace_addr; 7878 7879 /* 7880 * MAP_SHARED to prevent internal slot pages from being moved 7881 * by fork()/COW. 7882 */ 7883 userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE, 7884 PROT_READ | PROT_WRITE, 7885 MAP_SHARED | MAP_ANONYMOUS, 0); 7886 7887 if (IS_ERR((void *)userspace_addr)) 7888 return PTR_ERR((void *)userspace_addr); 7889 7890 memslot->userspace_addr = userspace_addr; 7891 } 7892 7893 return 0; 7894 } 7895 7896 static void kvm_mmu_slot_apply_flags(struct kvm *kvm, 7897 struct kvm_memory_slot *new) 7898 { 7899 /* Still write protect RO slot */ 7900 if (new->flags & KVM_MEM_READONLY) { 7901 kvm_mmu_slot_remove_write_access(kvm, new); 7902 return; 7903 } 7904 7905 /* 7906 * Call kvm_x86_ops dirty logging hooks when they are valid. 7907 * 7908 * kvm_x86_ops->slot_disable_log_dirty is called when: 7909 * 7910 * - KVM_MR_CREATE with dirty logging is disabled 7911 * - KVM_MR_FLAGS_ONLY with dirty logging is disabled in new flag 7912 * 7913 * The reason is, in case of PML, we need to set D-bit for any slots 7914 * with dirty logging disabled in order to eliminate unnecessary GPA 7915 * logging in PML buffer (and potential PML buffer full VMEXIT). This 7916 * guarantees leaving PML enabled during guest's lifetime won't have 7917 * any additional overhead from PML when guest is running with dirty 7918 * logging disabled for memory slots. 7919 * 7920 * kvm_x86_ops->slot_enable_log_dirty is called when switching new slot 7921 * to dirty logging mode. 7922 * 7923 * If kvm_x86_ops dirty logging hooks are invalid, use write protect. 7924 * 7925 * In case of write protect: 7926 * 7927 * Write protect all pages for dirty logging. 7928 * 7929 * All the sptes including the large sptes which point to this 7930 * slot are set to readonly. We cannot create any new large 7931 * spte on this slot until the end of the logging. 7932 * 7933 * See the comments in fast_page_fault(). 7934 */ 7935 if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) { 7936 if (kvm_x86_ops->slot_enable_log_dirty) 7937 kvm_x86_ops->slot_enable_log_dirty(kvm, new); 7938 else 7939 kvm_mmu_slot_remove_write_access(kvm, new); 7940 } else { 7941 if (kvm_x86_ops->slot_disable_log_dirty) 7942 kvm_x86_ops->slot_disable_log_dirty(kvm, new); 7943 } 7944 } 7945 7946 void kvm_arch_commit_memory_region(struct kvm *kvm, 7947 const struct kvm_userspace_memory_region *mem, 7948 const struct kvm_memory_slot *old, 7949 const struct kvm_memory_slot *new, 7950 enum kvm_mr_change change) 7951 { 7952 int nr_mmu_pages = 0; 7953 7954 if (change == KVM_MR_DELETE && old->id >= KVM_USER_MEM_SLOTS) { 7955 int ret; 7956 7957 ret = vm_munmap(old->userspace_addr, 7958 old->npages * PAGE_SIZE); 7959 if (ret < 0) 7960 printk(KERN_WARNING 7961 "kvm_vm_ioctl_set_memory_region: " 7962 "failed to munmap memory\n"); 7963 } 7964 7965 if (!kvm->arch.n_requested_mmu_pages) 7966 nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); 7967 7968 if (nr_mmu_pages) 7969 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages); 7970 7971 /* 7972 * Dirty logging tracks sptes in 4k granularity, meaning that large 7973 * sptes have to be split. If live migration is successful, the guest 7974 * in the source machine will be destroyed and large sptes will be 7975 * created in the destination. However, if the guest continues to run 7976 * in the source machine (for example if live migration fails), small 7977 * sptes will remain around and cause bad performance.
void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	int nr_mmu_pages = 0;

	if (change == KVM_MR_DELETE && old->id >= KVM_USER_MEM_SLOTS) {
		int ret;

		ret = vm_munmap(old->userspace_addr,
				old->npages * PAGE_SIZE);
		if (ret < 0)
			printk(KERN_WARNING
			       "kvm_vm_ioctl_set_memory_region: "
			       "failed to munmap memory\n");
	}

	if (!kvm->arch.n_requested_mmu_pages)
		nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);

	if (nr_mmu_pages)
		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);

	/*
	 * Dirty logging tracks sptes at 4K granularity, meaning that large
	 * sptes have to be split.  If live migration succeeds, the guest in
	 * the source machine will be destroyed and large sptes will be
	 * created in the destination.  However, if the guest continues to
	 * run in the source machine (for example if live migration fails),
	 * small sptes will remain around and cause bad performance.
	 *
	 * Scan the sptes if dirty logging has been stopped, dropping those
	 * which can be collapsed into a single large-page spte.  Later
	 * page faults will create the large-page sptes.
	 */
	if ((change != KVM_MR_DELETE) &&
		(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
		!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
		kvm_mmu_zap_collapsible_sptes(kvm, new);

	/*
	 * Set up write protection and/or dirty logging for the new slot.
	 *
	 * For KVM_MR_DELETE and KVM_MR_MOVE, the shadow pages of the old
	 * slot have already been zapped, so no dirty logging work is needed
	 * for it.  For KVM_MR_FLAGS_ONLY, the old slot is essentially the
	 * same one as the new slot, so it is covered when the new slot is
	 * handled.
	 *
	 * FIXME: const-ify all uses of struct kvm_memory_slot.
	 */
	if (change != KVM_MR_DELETE)
		kvm_mmu_slot_apply_flags(kvm, (struct kvm_memory_slot *) new);
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	kvm_mmu_invalidate_zap_all_pages(kvm);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvm_mmu_invalidate_zap_all_pages(kvm);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
		kvm_x86_ops->check_nested_events(vcpu, false);

	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
		!vcpu->arch.apf.halted)
		|| !list_empty_careful(&vcpu->async_pf.done)
		|| kvm_apic_has_events(vcpu)
		|| vcpu->arch.pv.pv_unhalted
		|| atomic_read(&vcpu->arch.nmi_queued)
		|| (kvm_arch_interrupt_allowed(vcpu) &&
		    kvm_cpu_has_interrupt(vcpu));
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops->interrupt_allowed(vcpu);
}

unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
{
	if (is_64_bit_mode(vcpu))
		return kvm_rip_read(vcpu);
	return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) +
		     kvm_rip_read(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_get_linear_rip);

bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
{
	return kvm_get_linear_rip(vcpu) == linear_rip;
}
EXPORT_SYMBOL_GPL(kvm_is_linear_rip);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
{
	unsigned long rflags;

	rflags = kvm_x86_ops->get_rflags(vcpu);
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		rflags &= ~X86_EFLAGS_TF;
	return rflags;
}
EXPORT_SYMBOL_GPL(kvm_get_rflags);

static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
	    kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
		rflags |= X86_EFLAGS_TF;
	kvm_x86_ops->set_rflags(vcpu, rflags);
}

void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	__kvm_set_rflags(vcpu, rflags);
	kvm_make_request(KVM_REQ_EVENT, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_rflags);
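
/*
 * Note on the single-step handling above: when userspace enables
 * KVM_GUESTDBG_SINGLESTEP, KVM sets EFLAGS.TF on the vCPU and records the
 * linear RIP at which it did so in vcpu->arch.singlestep_rip.
 * kvm_get_rflags() masks the injected TF out of the value it returns, so
 * reads that go through it (the emulator, KVM_GET_REGS) do not observe
 * the flag, and __kvm_set_rflags() re-injects TF for as long as the vCPU
 * is still sitting at the recorded RIP, so that an RFLAGS write cannot
 * drop the pending single step.
 */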
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
{
	int r;

	if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
	      work->wakeup_all)
		return;

	r = kvm_mmu_reload(vcpu);
	if (unlikely(r))
		return;

	if (!vcpu->arch.mmu.direct_map &&
	      work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
		return;

	vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
}

static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
{
	return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
}

static inline u32 kvm_async_pf_next_probe(u32 key)
{
	return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
}

static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u32 key = kvm_async_pf_hash_fn(gfn);

	while (vcpu->arch.apf.gfns[key] != ~0)
		key = kvm_async_pf_next_probe(key);

	vcpu->arch.apf.gfns[key] = gfn;
}

static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	int i;
	u32 key = kvm_async_pf_hash_fn(gfn);

	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
		     (vcpu->arch.apf.gfns[key] != gfn &&
		      vcpu->arch.apf.gfns[key] != ~0); i++)
		key = kvm_async_pf_next_probe(key);

	return key;
}

bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
}

static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u32 i, j, k;

	i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
	while (true) {
		vcpu->arch.apf.gfns[i] = ~0;
		do {
			j = kvm_async_pf_next_probe(j);
			if (vcpu->arch.apf.gfns[j] == ~0)
				return;
			k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
			/*
			 * k lies cyclically in ]i,j]
			 * |    i.k.j |
			 * |....j i.k.| or  |.k..j i...|
			 */
		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
		vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
		i = j;
	}
}
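
/*
 * The apf.gfns[] table used above is a small, power-of-two sized,
 * open-addressed hash table with linear probing; ~0 marks an empty slot.
 * kvm_del_async_pf_gfn() uses backward-shift deletion instead of simply
 * emptying the slot, because emptying it would break lookups of entries
 * that hashed to an earlier bucket and were displaced past the deleted
 * slot.
 *
 * Illustrative example (hypothetical 8-entry table, with
 * hash(A) = hash(B) = 2 and hash(C) = 3):
 *
 *	insert A, B, C:	slot 2 = A, slot 3 = B, slot 4 = C
 *	delete A:	slot 2 is emptied; B's home bucket 2 is not in the
 *			cyclic range ]2,3], so B is shifted back into slot 2;
 *			likewise C moves into slot 3; the scan stops at the
 *			first empty slot.
 *
 * A subsequent lookup of B probes from bucket 2 and finds it immediately,
 * whereas naively emptying slot 2 would have made the probe for B stop at
 * an empty bucket and wrongly report it as absent.
 */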
static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
{
	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
				      sizeof(val));
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	struct x86_exception fault;

	trace_kvm_async_pf_not_present(work->arch.token, work->gva);
	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);

	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
	    (vcpu->arch.apf.send_user_only &&
	     kvm_x86_ops->get_cpl(vcpu) == 0))
		kvm_make_request(KVM_REQ_APF_HALT, vcpu);
	else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
		fault.vector = PF_VECTOR;
		fault.error_code_valid = true;
		fault.error_code = 0;
		fault.nested_page_fault = false;
		fault.address = work->arch.token;
		kvm_inject_page_fault(vcpu, &fault);
	}
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	struct x86_exception fault;

	trace_kvm_async_pf_ready(work->arch.token, work->gva);
	if (work->wakeup_all)
		work->arch.token = ~0; /* broadcast wakeup */
	else
		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);

	if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
		fault.vector = PF_VECTOR;
		fault.error_code_valid = true;
		fault.error_code = 0;
		fault.nested_page_fault = false;
		fault.address = work->arch.token;
		kvm_inject_page_fault(vcpu, &fault);
	}
	vcpu->arch.apf.halted = false;
	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
		return true;
	else
		return !kvm_event_needs_reinjection(vcpu) &&
			kvm_x86_ops->interrupt_allowed(vcpu);
}

void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
	atomic_inc(&kvm->arch.noncoherent_dma_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma);

void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
	atomic_dec(&kvm->arch.noncoherent_dma_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma);

bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return atomic_read(&kvm->arch.noncoherent_dma_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);

/*
 * The tracepoints below are defined in this translation unit and exported
 * so that the vendor modules (kvm_intel/kvm_amd) can emit them as well.
 */
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);